Dataset columns (one record per mined Python function):

  body                    string   26 to 98.2k chars     function source, docstring included
  body_hash               int64    approx. -9.22e18 to 9.22e18   content hash of the body
  docstring               string   1 to 16.8k chars      the docstring, flattened to one line
  path                    string   5 to 230 chars        file path inside the repository
  name                    string   1 to 96 chars         function name
  repository_name         string   7 to 89 chars         owner/repo the function was mined from
  lang                    string   1 distinct value      always "python" in this slice
  body_without_docstring  string   20 to 98.2k chars     same source with the docstring stripped

Each record below shows the (possibly truncated) body first, followed by its labeled docstring and provenance fields.
def scan_status_command(client: Client, args: Dict[(str, Any)]) -> CommandResults: "helloworld-scan-status command: Returns status for HelloWorld scans\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments,...
docstring: helloworld-scan-status command: Returns status for HelloWorld scans :type client: ``Client`` :param Client: HelloWorld client to use :type args: ``Dict[str, Any]`` :param args: all command arguments, usually passed from ``demisto.args()``. ``args['scan_id']`` list of scan IDs or single scan ID :return: A...
name: scan_status_command · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: 1,625,731,828,764,689,200
def scan_results_command(client: Client, args: Dict[(str, Any)]) -> Union[(Dict[(str, Any)], CommandResults, List[CommandResults])]: "helloworld-scan-results command: Returns results for a HelloWorld scan\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any...
docstring: helloworld-scan-results command: Returns results for a HelloWorld scan :type client: ``Client`` :param Client: HelloWorld client to use :type args: ``Dict[str, Any]`` :param args: all command arguments, usually passed from ``demisto.args()``. ``args['scan_id']`` scan ID to retrieve results ``args['format'...
name: scan_results_command · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: -1,730,858,595,813,137,200
def main() -> None: 'main function, parses params and runs command functions\n\n :return:\n :rtype:\n ' api_key = demisto.params().get('apikey') base_url = urljoin(demisto.params()['url'], '/api/v1') verify_certificate = (not demisto.params().get('insecure', False)) first_fetch_time = arg_t...
docstring: main function, parses params and runs command functions :return: :rtype:
name: main · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: -8,174,009,125,033,763,000
def get_ip_reputation(self, ip: str) -> Dict[(str, Any)]: "Gets the IP reputation using the '/ip' API endpoint\n\n :type ip: ``str``\n :param ip: IP address to get the reputation for\n\n :return: dict containing the IP reputation as returned from the API\n :rtype: ``Dict[str, Any]``\n ...
docstring: Gets the IP reputation using the '/ip' API endpoint :type ip: ``str`` :param ip: IP address to get the reputation for :return: dict containing the IP reputation as returned from the API :rtype: ``Dict[str, Any]``
name: get_ip_reputation · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: -5,505,118,003,103,052,000
def get_domain_reputation(self, domain: str) -> Dict[(str, Any)]: "Gets the Domain reputation using the '/domain' API endpoint\n\n :type domain: ``str``\n :param domain: domain name to get the reputation for\n\n :return: dict containing the domain reputation as returned from the API\n :r...
docstring: Gets the Domain reputation using the '/domain' API endpoint :type domain: ``str`` :param domain: domain name to get the reputation for :return: dict containing the domain reputation as returned from the API :rtype: ``Dict[str, Any]``
name: get_domain_reputation · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: 4,621,716,766,601,556,000
def search_alerts(self, alert_status: Optional[str], severity: Optional[str], alert_type: Optional[str], max_results: Optional[int], start_time: Optional[int]) -> List[Dict[(str, Any)]]: 'Searches for HelloWorld alerts using the \'/get_alerts\' API endpoint\n\n All the parameters are passed directly to the A...
docstring: Searches for HelloWorld alerts using the '/get_alerts' API endpoint All the parameters are passed directly to the API as HTTP POST parameters in the request :type alert_status: ``Optional[str]`` :param alert_status: status of the alert to search for. Options are: 'ACTIVE' or 'CLOSED' :type severity: ``Optional[str]`...
name: search_alerts · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: 2,007,290,296,748,268,500
def get_alert(self, alert_id: str) -> Dict[(str, Any)]: 'Gets a specific HelloWorld alert by id\n\n :type alert_id: ``str``\n :param alert_id: id of the alert to return\n\n :return: dict containing the alert as returned from the API\n :rtype: ``Dict[str, Any]``\n ' return self...
docstring: Gets a specific HelloWorld alert by id :type alert_id: ``str`` :param alert_id: id of the alert to return :return: dict containing the alert as returned from the API :rtype: ``Dict[str, Any]``
name: get_alert · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: -3,893,194,839,806,734,300
def update_alert_status(self, alert_id: str, alert_status: str) -> Dict[(str, Any)]: "Changes the status of a specific HelloWorld alert\n\n :type alert_id: ``str``\n :param alert_id: id of the alert to return\n\n :type alert_status: ``str``\n :param alert_status: new alert status. Option...
docstring: Changes the status of a specific HelloWorld alert :type alert_id: ``str`` :param alert_id: id of the alert to return :type alert_status: ``str`` :param alert_status: new alert status. Options are: 'ACTIVE' or 'CLOSED' :return: dict containing the alert as returned from the API :rtype: ``Dict[str, Any]``
name: update_alert_status · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: 4,261,590,240,170,449,000
def scan_start(self, hostname: str) -> Dict[(str, Any)]: 'Starts a HelloWorld scan on a specific hostname\n\n :type hostname: ``str``\n :param hostname: hostname of the machine to scan\n\n :return: dict containing the scan status as returned from the API\n :rtype: ``Dict[str, Any]``\n ...
docstring: Starts a HelloWorld scan on a specific hostname :type hostname: ``str`` :param hostname: hostname of the machine to scan :return: dict containing the scan status as returned from the API :rtype: ``Dict[str, Any]``
name: scan_start · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: -6,631,833,082,852,968,000
def scan_status(self, scan_id: str) -> Dict[(str, Any)]: 'Gets the status of a HelloWorld scan\n\n :type scan_id: ``str``\n :param scan_id: ID of the scan to retrieve status for\n\n :return: dict containing the scan status as returned from the API\n :rtype: ``Dict[str, Any]``\n ' ...
docstring: Gets the status of a HelloWorld scan :type scan_id: ``str`` :param scan_id: ID of the scan to retrieve status for :return: dict containing the scan status as returned from the API :rtype: ``Dict[str, Any]``
name: scan_status · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: 4,874,480,294,823,485,000
def scan_results(self, scan_id: str) -> Dict[(str, Any)]: 'Gets the results of a HelloWorld scan\n\n :type scan_id: ``str``\n :param scan_id: ID of the scan to retrieve results for\n\n :return: dict containing the scan results as returned from the API\n :rtype: ``Dict[str, Any]``\n ...
docstring: Gets the results of a HelloWorld scan :type scan_id: ``str`` :param scan_id: ID of the scan to retrieve results for :return: dict containing the scan results as returned from the API :rtype: ``Dict[str, Any]``
name: scan_results · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: -3,338,946,734,264,717,300
def say_hello(self, name: str) -> str: "Returns 'Hello {name}'\n\n :type name: ``str``\n :param name: name to append to the 'Hello' string\n\n :return: string containing 'Hello {name}'\n :rtype: ``str``\n " return f'Hello {name}'
docstring: Returns 'Hello {name}' :type name: ``str`` :param name: name to append to the 'Hello' string :return: string containing 'Hello {name}' :rtype: ``str``
name: say_hello · path: Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py · repository_name: DeanArbel/content · lang: python · body_hash: -5,721,078,814,974,353,000
def calculate_psnr(img1, img2): '\n data range [0, 1]\n ' img1 = img1.clamp(0, 1) img2 = img2.clamp(0, 1) mse = torch.mean(((img1 - img2) ** 2), [1, 2, 3]) PIXEL_MAX = 1 return (20 * torch.mean(torch.log10((PIXEL_MAX / torch.sqrt(mse)))))
docstring: data range [0, 1]
name: calculate_psnr · path: utils/metrics.py · repository_name: Wang-jiahao/SimDeblur · lang: python · body_hash: 847,792,582,663,107,200
body_without_docstring: def calculate_psnr(img1, img2): '\n \n ' img1 = img1.clamp(0, 1) img2 = img2.clamp(0, 1) mse = torch.mean(((img1 - img2) ** 2), [1, 2, 3]) PIXEL_MAX = 1 return (20 * torch.mean(torch.log10((PIXEL_MAX / torch.sqrt(mse)))))
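The calculate_psnr row is short enough to survive truncation intact: clamp both images to [0, 1], take the per-image MSE over the channel/height/width axes, convert to decibels, and average. Restated as a self-contained snippet with a small usage check (NCHW tensor shapes are assumed, as the reduction over dims [1, 2, 3] implies):

```python
import torch

def calculate_psnr(img1: torch.Tensor, img2: torch.Tensor) -> torch.Tensor:
    """PSNR in dB for batched NCHW images with data range [0, 1]."""
    img1 = img1.clamp(0, 1)
    img2 = img2.clamp(0, 1)
    mse = torch.mean((img1 - img2) ** 2, dim=[1, 2, 3])  # one MSE per image
    peak = 1.0  # maximum pixel value for the [0, 1] data range
    return 20 * torch.mean(torch.log10(peak / torch.sqrt(mse)))

x = torch.rand(2, 3, 8, 8)
print(calculate_psnr(x, x + 0.05 * torch.randn_like(x)))  # roughly 26 dB
```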
def printParaNum(model): '\n function: print the number of total parameters and trainable parameters\n ' total_params = sum((p.numel() for p in model.parameters())) total_trainable_params = sum((p.numel() for p in model.parameters() if p.requires_grad)) print(('Total parameters: %d' % total_params...
docstring: function: print the number of total parameters and trainable parameters
name: printParaNum · path: src/train_amp.py · repository_name: suiyizhao/Pytorch-speedup · lang: python · body_hash: 2,902,576,970,362,084,000
body_without_docstring: def printParaNum(model): '\n \n ' total_params = sum((p.numel() for p in model.parameters())) total_trainable_params = sum((p.numel() for p in model.parameters() if p.requires_grad)) print(('Total parameters: %d' % total_params)) print(('Trainable parameters: %d' % total_trainable_params))
def set_random_seed(seed, deterministic=False): '\n function: Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.b...
docstring: function: Set random seed. Args: seed (int): Seed to be used. deterministic (bool): Whether to set the deterministic option for CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` to True and `torch.backends.cudnn.benchmark` to False. Default: False.
name: set_random_seed · path: src/train_amp.py · repository_name: suiyizhao/Pytorch-speedup · lang: python · body_hash: -1,521,102,580,318,788,400
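set_random_seed's body is truncated, but the docstring pins down the contract. A conventional implementation matching it, offered as a sketch rather than the repository's exact code:

```python
import random

import numpy as np
import torch

def set_random_seed(seed: int, deterministic: bool = False) -> None:
    """Seed Python, NumPy and PyTorch RNGs; optionally force deterministic cuDNN."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        # reproducibility at the cost of speed, as the docstring describes
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
```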
@ops.RegisterGradient('Batch') def _BatchGrad(op, *out_grads): 'Gradient for batch op.' gradients = [] for i in range(len(op.inputs)): gradients.append(gen_batch_ops.unbatch(out_grads[i], op.outputs[(- 2)], op.outputs[(- 1)], timeout_micros=op.get_attr('grad_timeout_micros'), shared_name='batch_grad...
docstring: Gradient for batch op.
name: _BatchGrad · path: tensorflow/contrib/batching/python/ops/batch_ops.py · repository_name: ekyuho/tensorflow · lang: python · body_hash: 1,933,694,469,260,478,700
body_without_docstring: @ops.RegisterGradient('Batch') def _BatchGrad(op, *out_grads): gradients = [] for i in range(len(op.inputs)): gradients.append(gen_batch_ops.unbatch(out_grads[i], op.outputs[(- 2)], op.outputs[(- 1)], timeout_micros=op.get_attr('grad_timeout_micros'), shared_name='batch_gradient_{}_{}'.format(op.na...
def batch_function(num_batch_threads, max_batch_size, batch_timeout_micros, allowed_batch_sizes=None, grad_timeout_micros=((60 * 1000) * 1000), unbatch_timeout_micros=((60 * 1000) * 1000)): 'Batches the computation done by the decorated function.\n\n So, for example, in the following code\n\n ```python\n @batch_...
docstring: Batches the computation done by the decorated function. So, for example, in the following code ```python @batch_function(1, 2, 3) def layer(a): return tf.matmul(a, a) b = layer(w) ``` if more than one session.run call is simultaneously trying to compute `b` the values of `w` will be gathered, non-deterministicall...
name: batch_function · path: tensorflow/contrib/batching/python/ops/batch_ops.py · repository_name: ekyuho/tensorflow · lang: python · body_hash: -9,184,000,206,858,053,000
async def execute_wkhtmltopdf(uri: str) -> bytes: 'Run wkhtmltopdf on the command-line and return the output.' cmd = ['wkhtmltopdf', '--log-level', 'none', uri, '-'] return check_output(cmd)
docstring: Run wkhtmltopdf on the command-line and return the output.
name: execute_wkhtmltopdf · path: src/app.py · repository_name: mtik00/wkhtmltopdf-service · lang: python · body_hash: -8,748,739,963,590,872,000
body_without_docstring: async def execute_wkhtmltopdf(uri: str) -> bytes: cmd = ['wkhtmltopdf', '--log-level', 'none', uri, '-'] return check_output(cmd)
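One detail stands out in execute_wkhtmltopdf: it is declared async yet calls a blocking check_output (presumably subprocess.check_output), which would stall the event loop for the duration of the render. A non-blocking variant using asyncio's subprocess API, as a sketch rather than the service's actual code:

```python
import asyncio

async def execute_wkhtmltopdf(uri: str) -> bytes:
    """Render `uri` to PDF with wkhtmltopdf without blocking the event loop."""
    proc = await asyncio.create_subprocess_exec(
        "wkhtmltopdf", "--log-level", "none", uri, "-",
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError(f"wkhtmltopdf failed: {stderr.decode(errors='replace')}")
    return stdout
```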
async def convert_body(request: Request): "\n It's just _way_ easier to deal with files rather than STDIN.\n\n Take the body of the request, write it to a temporary file, then use\n wkhtmltopdf to convert it.\n " data = (await request.body()) if (not data): return Response('ERROR: No bod...
docstring: It's just _way_ easier to deal with files rather than STDIN. Take the body of the request, write it to a temporary file, then use wkhtmltopdf to convert it.
name: convert_body · path: src/app.py · repository_name: mtik00/wkhtmltopdf-service · lang: python · body_hash: 7,125,513,509,955,753,000
def get_mbreplacer_dir(): '\n Get the mbreplacer dir\n :return str: mbreplacer root dir\n ' return os.getcwd()
docstring: Get the mbreplacer dir :return str: mbreplacer root dir
name: get_mbreplacer_dir · path: mbreplacer.py · repository_name: ackhoury/mbreplacer · lang: python · body_hash: 1,429,617,218,071,645,200
@app.route('/') def home(): 'Step 1: User Authorization.\n\n Redirect the user/resource owner to the OAuth provider (i.e. Github)\n using an URL with a few key OAuth parameters.\n ' return render_template('index.html')
docstring: Step 1: User Authorization. Redirect the user/resource owner to the OAuth provider (i.e. Github) using an URL with a few key OAuth parameters.
name: home · path: td/oauth.py · repository_name: Aspire1Inspire2/td-ameritrade-python-api · lang: python · body_hash: -7,705,316,012,026,364,000
@app.route('/login') def demo(): 'Step 1: User Authorization.\n\n Redirect the user/resource owner to the OAuth provider (i.e. Github)\n using an URL with a few key OAuth parameters.\n ' auth_tuple = app.config['auth_client'].authorization_url() session['oauth_state'] = auth_tuple[1] return red...
docstring: Step 1: User Authorization. Redirect the user/resource owner to the OAuth provider (i.e. Github) using an URL with a few key OAuth parameters.
name: demo · path: td/oauth.py · repository_name: Aspire1Inspire2/td-ameritrade-python-api · lang: python · body_hash: -5,519,529,732,331,011,000
@app.route('/login/callback', methods=['GET']) def callback(): ' Step 3: Retrieving an access token.\n\n The user has been redirected back from the provider to your registered\n callback URL. With this redirection comes an authorization code included\n in the redirect URL. We will use that to obtain an acc...
docstring: Step 3: Retrieving an access token. The user has been redirected back from the provider to your registered callback URL. With this redirection comes an authorization code included in the redirect URL. We will use that to obtain an access token.
name: callback · path: td/oauth.py · repository_name: Aspire1Inspire2/td-ameritrade-python-api · lang: python · body_hash: -1,592,033,070,512,526,000
def _test_repr_or_str(self, fn, expect_id): "Test Queue's repr or str.\n\n fn is repr or str. expect_id is True if we expect the Queue's id to\n appear in fn(Queue()).\n " def gen(): when = (yield) self.assertAlmostEqual(0.1, when) when = (yield 0.1) self.as...
docstring: Test Queue's repr or str. fn is repr or str. expect_id is True if we expect the Queue's id to appear in fn(Queue()).
name: _test_repr_or_str · path: tests/python/test_queues.py · repository_name: ProvoK/trio-asyncio · lang: python · body_hash: -2,233,485,092,732,088,300
def _submit_filtering_jobs(self, uuid): '\n Here we create the task and put it on the job queue.\n ' two_weeks_ago = (datetime.date.today() - datetime.timedelta(14)) params = {'from': int(two_weeks_ago.strftime('%s')), 'to': int(time.time()), 'unit': 'seconds'} location_api_resp = requests...
docstring: Here we create the task and put it on the job queue.
name: _submit_filtering_jobs · path: users-api/routes.py · repository_name: pwegrzyn/pandemic-monitor · lang: python · body_hash: -3,618,480,378,836,912,000
body_without_docstring: def _submit_filtering_jobs(self, uuid): '\n \n ' two_weeks_ago = (datetime.date.today() - datetime.timedelta(14)) params = {'from': int(two_weeks_ago.strftime('%s')), 'to': int(time.time()), 'unit': 'seconds'} location_api_resp = requests.get(f'http://location-api:5000/geohashRegionsForUse...
def __init__(self, path: str): "\n Create an instance of GitRepoVersionInfo\n :param path: The path to search for git information. It searches for '.git' in this folder or any parent\n folder.\n " self._is_repo = False try: self._repo = git.Repo(path, search_parent_direct...
docstring: Create an instance of GitRepoVersionInfo :param path: The path to search for git information. It searches for '.git' in this folder or any parent folder.
name: __init__ · path: step_exec_lib/utils/git.py · repository_name: giantswarm/step-exec-lib · lang: python · body_hash: -4,167,830,116,496,115,700
@property def is_git_repo(self) -> bool: '\n Checks if the path given in constructor is a sub-path of a valid git repo.\n :return: Boolean true, if repo was found.\n ' return self._is_repo
docstring: Checks if the path given in constructor is a sub-path of a valid git repo. :return: Boolean true, if repo was found.
name: is_git_repo · path: step_exec_lib/utils/git.py · repository_name: giantswarm/step-exec-lib · lang: python · body_hash: 5,407,171,041,855,514,000
def get_git_version(self, strip_v_in_version: bool=True) -> str: '\n Gets application version in the format [last-tag]-[last-commit-sha].\n :param strip_v_in_version: If the version tag starts with \'v\' (like \'v1.2.3),\n this chooses if the \'v\' should be stripped, so the resulting tag is \'...
docstring: Gets application version in the format [last-tag]-[last-commit-sha]. :param strip_v_in_version: If the version tag starts with 'v' (like 'v1.2.3), this chooses if the 'v' should be stripped, so the resulting tag is '1.2.3'. If there's a "-", "." or "_" separator after "v", it is removed as well. :return: The version st...
name: get_git_version · path: step_exec_lib/utils/git.py · repository_name: giantswarm/step-exec-lib · lang: python · body_hash: -2,077,656,113,087,697,700
def contract_by_lifespan(agent_stats, lifespans): 'Pull agents close to their mean according to how short-lived they were. For punishing abundance of premature death\n when rewarding diversity.' weights = sigmoid_lifespan(lifespans) n_agents = lifespans.shape[0] mean_agent = agent_stats.mean(axis=0) ...
docstring: Pull agents close to their mean according to how short-lived they were. For punishing abundance of premature death when rewarding diversity.
name: contract_by_lifespan · path: evolution/diversity.py · repository_name: narendasan/neural-mmo · lang: python · body_hash: 8,577,192,660,219,871,000
def expand_by_lifespan(agent_stats, lifespans): 'Push agents further from their mean according to how short-lived they were. For punishing abundance of premature\n death when rewarding homogeneity.' weights = sigmoid_lifespan(lifespans) n_agents = lifespans.shape[0] mean_agent = agent_stats.mean(axis=...
docstring: Push agents further from their mean according to how short-lived they were. For punishing abundance of premature death when rewarding homogeneity.
name: expand_by_lifespan · path: evolution/diversity.py · repository_name: narendasan/neural-mmo · lang: python · body_hash: 556,244,921,716,009,500
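contract_by_lifespan and expand_by_lifespan read as mirror images: both weight agents by sigmoid_lifespan(lifespans), then interpolate toward, or extrapolate away from, the population mean. The bodies are truncated and sigmoid_lifespan is not shown, so the following is only a plausible reading under an assumed logistic weighting:

```python
import numpy as np

def sigmoid_lifespan(lifespans, midpoint=50.0, scale=10.0):
    # assumed helper: weights near 0 for short-lived agents, near 1 otherwise
    return 1.0 / (1.0 + np.exp(-(np.asarray(lifespans) - midpoint) / scale))

def contract_by_lifespan(agent_stats, lifespans):
    """Pull short-lived agents toward the population mean (diversity reward)."""
    weights = sigmoid_lifespan(lifespans)[:, None]
    mean_agent = agent_stats.mean(axis=0)
    return mean_agent + weights * (agent_stats - mean_agent)

def expand_by_lifespan(agent_stats, lifespans):
    """Push short-lived agents away from the mean (homogeneity reward)."""
    weights = sigmoid_lifespan(lifespans)[:, None]
    mean_agent = agent_stats.mean(axis=0)
    return mean_agent + (agent_stats - mean_agent) / np.maximum(weights, 1e-6)
```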
def sum_experience(agent_stats, skill_headers=None, verbose=False, pop=None): 'Simply take the sum of XP over skills and agents.' agent_skills = get_pop_stats(agent_stats['skills'], pop) lifespans = get_pop_stats(agent_stats['lifespans'], pop) a_skills = np.vstack(agent_skills) a_lifespans = np.hsta...
docstring: Simply take the sum of XP over skills and agents.
name: sum_experience · path: evolution/diversity.py · repository_name: narendasan/neural-mmo · lang: python · body_hash: 1,694,325,565,475,479,300
body_without_docstring: def sum_experience(agent_stats, skill_headers=None, verbose=False, pop=None): agent_skills = get_pop_stats(agent_stats['skills'], pop) lifespans = get_pop_stats(agent_stats['lifespans'], pop) a_skills = np.vstack(agent_skills) a_lifespans = np.hstack(lifespans) (n_agents, n_skills) = a_skills.s...
def calc_convex_hull(agent_stats, skill_headers=None, verbose=False, infos={}, pop=None, punish_youth=True): 'Calculate the diversity of a population of agents in skill-space by computing the volume inside the convex hull of\n the agents when treated as points in this space.' agent_skills = get_pop_stats(agen...
docstring: Calculate the diversity of a population of agents in skill-space by computing the volume inside the convex hull of the agents when treated as points in this space.
name: calc_convex_hull · path: evolution/diversity.py · repository_name: narendasan/neural-mmo · lang: python · body_hash: 1,588,951,508,444,711,700
def calc_homogeneity_l2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=True): 'Use L2 distance to punish agents for having high mean pairwise distance. Optimal state is all agents at the same\n point in skill-space, with maximal lifespans.' if ('skills' not in agent_stats): rai...
docstring: Use L2 distance to punish agents for having high mean pairwise distance. Optimal state is all agents at the same point in skill-space, with maximal lifespans.
name: calc_homogeneity_l2 · path: evolution/diversity.py · repository_name: narendasan/neural-mmo · lang: python · body_hash: 366,864,616,967,479,600
def test(env, actor_model, is_discrete): '\n\t\tTests the model.\n\t\tParameters:\n\t\t\tenv - the environment to test the policy on\n\t\t\tactor_model - the actor model to load in\n\t\tReturn:\n\t\t\tNone\n\t' print(f'Testing {actor_model}', flush=True) if (actor_model == ''): print(f"Didn't specif...
docstring: Tests the model. Parameters: env - the environment to test the policy on actor_model - the actor model to load in Return: None
name: test · path: ppoPolicyTraining.py · repository_name: britig/S2RL-Policies · lang: python · body_hash: 4,032,199,025,681,221,600
def __init__(self, env, **hyperparameters): '\n\t\t\tInitializes the PPO model, including hyperparameters.\n\n\t\t\tParameters:\n\t\t\t\tpolicy_class - the policy class to use for our actor/critic networks.\n\t\t\t\tenv - the environment to train on.\n\t\t\t\thyperparameters - all extra arguments passed into PPO th...
docstring: Initializes the PPO model, including hyperparameters. Parameters: policy_class - the policy class to use for our actor/critic networks. env - the environment to train on. hyperparameters - all extra arguments passed into PPO that should be hyperparameters. Returns: None
name: __init__ · path: ppoPolicyTraining.py · repository_name: britig/S2RL-Policies · lang: python · body_hash: 1,361,639,296,199,345,000
def learn(self, env_name, failure_observations, subpolicy): '\n\t\t\tTrain the actor and critic networks. Here is where the main PPO algorithm resides.\n\n\t\t\tParameters:\n\t\t\t\ttotal_timesteps - the total number of timesteps to train for\n\n\t\t\tReturn:\n\t\t\t\tNone\n\t\t' print(f'Learning... Running {se...
docstring: Train the actor and critic networks. Here is where the main PPO algorithm resides. Parameters: total_timesteps - the total number of timesteps to train for Return: None
name: learn · path: ppoPolicyTraining.py · repository_name: britig/S2RL-Policies · lang: python · body_hash: 270,654,134,278,599,580
def rollout(self, subpolicy, failure_observations): "\n\t\t\tThis is where we collect the batch of data\n\t\t\tfrom simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch\n\t\t\tof data each time we iterate the actor/critic networks.\n\n\t\t\tParameters:\n\t\t\t\tNone\n\n\t\t\tReturn:...
docstring: This is where we collect the batch of data from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch of data each time we iterate the actor/critic networks. Parameters: None Return: batch_obs - the observations collected this batch. Shape: (number of timesteps, dimensi...
name: rollout · path: ppoPolicyTraining.py · repository_name: britig/S2RL-Policies · lang: python · body_hash: 1,873,087,376,621,526,000
def compute_rtgs(self, batch_rews): '\n\t\t\tCompute the Reward-To-Go of each timestep in a batch given the rewards.\n\n\t\t\tParameters:\n\t\t\t\tbatch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)\n\n\t\t\tReturn:\n\t\t\t\tbatch_rtgs - the rewards to go, Shape: (numbe...
docstring: Compute the Reward-To-Go of each timestep in a batch given the rewards. Parameters: batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode) Return: batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
name: compute_rtgs · path: ppoPolicyTraining.py · repository_name: britig/S2RL-Policies · lang: python · body_hash: 4,242,929,496,582,007,000
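compute_rtgs is fully specified by its docstring: per-episode reward lists go in, a flat tensor of discounted rewards-to-go comes out. A standard implementation consistent with that contract (the 0.95 discount is an illustrative assumption; the real value lives in the class's hyperparameters):

```python
import torch

def compute_rtgs(batch_rews, gamma=0.95):
    """Turn per-episode rewards into a flat tensor of rewards-to-go."""
    batch_rtgs = []
    for ep_rews in reversed(batch_rews):
        discounted = 0.0
        # walk each episode backwards so each entry accumulates future rewards
        for rew in reversed(ep_rews):
            discounted = rew + gamma * discounted
            batch_rtgs.insert(0, discounted)
    return torch.tensor(batch_rtgs, dtype=torch.float)

print(compute_rtgs([[1.0, 1.0, 1.0]]))  # tensor([2.8525, 1.9500, 1.0000])
```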
def get_action(self, obs): '\n\t\t\tQueries an action from the actor network, should be called from rollout.\n\n\t\t\tParameters:\n\t\t\t\tobs - the observation at the current timestep\n\n\t\t\tReturn:\n\t\t\t\taction - the action to take, as a numpy array\n\t\t\t\tlog_prob - the log probability of the selected act...
docstring: Queries an action from the actor network, should be called from rollout. Parameters: obs - the observation at the current timestep Return: action - the action to take, as a numpy array log_prob - the log probability of the selected action in the distribution
name: get_action · path: ppoPolicyTraining.py · repository_name: britig/S2RL-Policies · lang: python · body_hash: 7,726,324,014,643,275,000
def evaluate(self, batch_obs, batch_acts): '\n\t\t\tEstimate the values of each observation, and the log probs of\n\t\t\teach action in the most recent batch with the most recent\n\t\t\titeration of the actor network. Should be called from learn.\n\n\t\t\tParameters:\n\t\t\t\tbatch_obs - the observations from the m...
docstring: Estimate the values of each observation, and the log probs of each action in the most recent batch with the most recent iteration of the actor network. Should be called from learn. Parameters: batch_obs - the observations from the most recently collected batch as a tensor. Shape...
name: evaluate · path: ppoPolicyTraining.py · repository_name: britig/S2RL-Policies · lang: python · body_hash: -3,305,831,494,162,423,000
def _init_hyperparameters(self, hyperparameters): '\n\t\t\tInitialize default and custom values for hyperparameters\n\n\t\t\tParameters:\n\t\t\t\thyperparameters - the extra arguments included when creating the PPO model, should only include\n\t\t\t\t\t\t\t\t\thyperparameters defined below with custom values.\n\n\t...
docstring: Initialize default and custom values for hyperparameters Parameters: hyperparameters - the extra arguments included when creating the PPO model, should only include hyperparameters defined below with custom values. Return: None
name: _init_hyperparameters · path: ppoPolicyTraining.py · repository_name: britig/S2RL-Policies · lang: python · body_hash: 319,362,538,887,235,700
def _log_summary(self): "\n\t\t\tPrint to stdout what we've logged so far in the most recent batch.\n\n\t\t\tParameters:\n\t\t\t\tNone\n\n\t\t\tReturn:\n\t\t\t\tNone\n\t\t" t_so_far = self.logger['t_so_far'] i_so_far = self.logger['i_so_far'] avg_ep_lens = np.mean(self.logger['batch_lens']) avg_ep_r...
docstring: Print to stdout what we've logged so far in the most recent batch. Parameters: None Return: None
name: _log_summary · path: ppoPolicyTraining.py · repository_name: britig/S2RL-Policies · lang: python · body_hash: 5,219,838,179,941,541,000
@_create_dataset_directory(dataset_name=DATASET_NAME) @_wrap_split_argument(('train', 'test')) def AmazonReviewPolarity(root: str, split: Union[(Tuple[str], str)]): "AmazonReviewPolarity Dataset\n\n For additional details refer to https://arxiv.org/abs/1509.01626\n\n Number of lines per split:\n - trai...
docstring: AmazonReviewPolarity Dataset For additional details refer to https://arxiv.org/abs/1509.01626 Number of lines per split: - train: 3600000 - test: 400000 Args: root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache') split: split or splits to be returned. Can be ...
name: AmazonReviewPolarity · path: torchtext/datasets/amazonreviewpolarity.py · repository_name: abhinavarora/text · lang: python · body_hash: 1,040,654,380,379,947,600
def get_relative_errors(test_data_id): '\n Compute and save the relative errors of every point found on every network in a testing set.\n Relative error is defined in (Katz and Reggia 2017).\n test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).\n ' (network_siz...
docstring: Compute and save the relative errors of every point found on every network in a testing set. Relative error is defined in (Katz and Reggia 2017). test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
name: get_relative_errors · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -6,441,740,488,261,575,000
def show_traverse_re_fig(test_data_ids, Ns, samp_range): '\n Plot relative errors from points found by fiber traversal.\n test_data_ids and Ns should be length-2 lists.\n Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].\n Similarly the second column draws from ...
docstring: Plot relative errors from points found by fiber traversal. test_data_ids and Ns should be length-2 lists. Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0]. Similarly the second column draws from Ns[1], test_data_ids[1]. Each network sample within samp_range is shown on a separa...
name: show_traverse_re_fig · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -1,005,911,344,185,639,000
def baseline_re_single_analysis(test_data_id, N, samp, cap=10): '\n Analyze edge cases of relative errors on a single network\n Uses the samp^{th} sample network of size N in test data test_data_id.\n Relative errors in the range (0, 2^{cap}) are considered edge cases.\n Returns the number of edge cases...
docstring: Analyze edge cases of relative errors on a single network Uses the samp^{th} sample network of size N in test data test_data_id. Relative errors in the range (0, 2^{cap}) are considered edge cases. Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent. T and B are as defined in (Katz and...
name: baseline_re_single_analysis · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -598,339,714,970,083,200
def baseline_re_batch_analysis(test_data_id, Ns, cap=10): '\n Runs baseline_re_single_analysis on all networks in test_data_id of size N.\n cap is as in baseline_re_single_analysis.\n returns numpy.array percents, where\n percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.\...
docstring: Runs baseline_re_single_analysis on all networks in test_data_id of size N. cap is as in baseline_re_single_analysis. returns numpy.array percents, where percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
name: baseline_re_batch_analysis · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -2,609,222,182,299,046,000
def show_baseline_re_fig(test_data_ids, Ns, samp_range): '\n Plot relative errors from points found by the baseline solver.\n test_data_ids and Ns should be length-2 lists.\n Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].\n Similarly the second column draws f...
docstring: Plot relative errors from points found by the baseline solver. test_data_ids and Ns should be length-2 lists. Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0]. Similarly the second column draws from Ns[1], test_data_ids[1]. Each network sample within samp_range is shown on a se...
name: show_baseline_re_fig · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: 4,852,017,577,796,613,000
def get_baseline_rd(test_data_id, N, samp, cap, logfilename=os.devnull): '\n Compute and save relative distances between pairs of points found by the baseline solver.\n Relative distance is defined in (Katz and Reggia 2017).\n Computes for the samp^{th} sample network of size N in test_data_id.\n test_d...
docstring: Compute and save relative distances between pairs of points found by the baseline solver. Relative distance is defined in (Katz and Reggia 2017). Computes for the samp^{th} sample network of size N in test_data_id. test_data_id should be as in fxpt_experiments.generate_test_data (without file extension). Only pairs wit...
name: get_baseline_rd · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: 2,217,433,724,058,219,500
def pool_get_baseline_rd(args): '\n Wrapper function passed to multiprocessing.Pool\n ' get_baseline_rd(*args)
docstring: Wrapper function passed to multiprocessing.Pool
name: pool_get_baseline_rd · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -1,554,045,137,290,081,300
body_without_docstring: def pool_get_baseline_rd(args): '\n \n ' get_baseline_rd(*args)
def run_baseline_rd(test_data_id, Ns, num_procs): '\n Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.\n Multiprocessing is used to run on multiple networks in parallel.\n num_procs is the number of processors to use.\n ' cpu_count = mp.cpu_count() print(('%d cpu...
docstring: Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns. Multiprocessing is used to run on multiple networks in parallel. num_procs is the number of processors to use.
name: run_baseline_rd · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: 8,696,587,791,661,715,000
def get_traverse_rd(test_data_id, N, samp, cap, logfilename=os.devnull): '\n Compute and save relative distances between pairs of points found by the baseline solver.\n Relative distance is defined in (Katz and Reggia 2017).\n Computes for the samp^{th} sample network of size N in test_data_id.\n test_d...
docstring: Compute and save relative distances between pairs of points found by the baseline solver. Relative distance is defined in (Katz and Reggia 2017). Computes for the samp^{th} sample network of size N in test_data_id. test_data_id should be as in fxpt_experiments.generate_test_data (without file extension). Only pairs wit...
name: get_traverse_rd · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -2,238,623,223,670,777,900
def pool_get_traverse_rd(args): '\n Wrapper function passed to multiprocessing.Pool\n ' get_traverse_rd(*args)
docstring: Wrapper function passed to multiprocessing.Pool
name: pool_get_traverse_rd · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -951,652,383,376,324,400
body_without_docstring: def pool_get_traverse_rd(args): '\n \n ' get_traverse_rd(*args)
def run_traverse_rd(test_data_id, Ns, num_procs): '\n Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.\n Multiprocessing is used to run on multiple networks in parallel.\n num_procs is the number of processors to use.\n ' cpu_count = mp.cpu_count() print(('%d cpu...
docstring: Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns. Multiprocessing is used to run on multiple networks in parallel. num_procs is the number of processors to use.
name: run_traverse_rd · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -4,067,689,719,634,688,500
def get_simple_rd(test_data_id, N, samp, cap, logfilename=os.devnull): '\n Use simple unique test: if max absolute coordinate-wise difference < 2**-32\n Compute and save distances between pairs of points found by both solvers.\n Computes for the samp^{th} sample network of size N in test_data_id.\n test...
docstring: Use simple unique test: if max absolute coordinate-wise difference < 2**-32 Compute and save distances between pairs of points found by both solvers. Computes for the samp^{th} sample network of size N in test_data_id. test_data_id should be as in fxpt_experiments.generate_test_data (without file extension). Only pairs...
name: get_simple_rd · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -3,566,493,107,082,433,000
def pool_get_simple_rd(args): '\n Wrapper function passed to multiprocessing.Pool\n ' get_simple_rd(*args)
docstring: Wrapper function passed to multiprocessing.Pool
name: pool_get_simple_rd · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -4,100,977,010,512,307,000
body_without_docstring: def pool_get_simple_rd(args): '\n \n ' get_simple_rd(*args)
def run_simple_rd(test_data_id, Ns, num_procs): '\n Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.\n Multiprocessing is used to run on multiple networks in parallel.\n num_procs is the number of processors to use.\n ' cpu_count = mp.cpu_count() print(('%d cpus, u...
docstring: Run get_simple_rd on all networks in test_data_id whose size is in the list Ns. Multiprocessing is used to run on multiple networks in parallel. num_procs is the number of processors to use.
name: run_simple_rd · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: 7,719,977,757,564,353,000
def show_traverse_rd_fig(test_data_ids, Ns, samp_range): '\n Plot relative distances from points found by fiber traversal.\n test_ids, Ns, and samp_range should be as in show_traverse_re_fig.\n ' log = True mpl.rcParams['mathtext.default'] = 'regular' sp = 1 for samp in samp_range: ...
docstring: Plot relative distances from points found by fiber traversal. test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
name: show_traverse_rd_fig · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: 783,642,926,598,277,500
def show_baseline_rd_fig(test_data_ids, Ns, samp_range): '\n Plot relative distances from points found by the baseline solver.\n test_ids, Ns, and samp_range should be as in show_baseline_re_fig.\n ' log = True mpl.rcParams['mathtext.default'] = 'regular' sp = 1 for samp in samp_range: ...
docstring: Plot relative distances from points found by the baseline solver. test_ids, Ns, and samp_range should be as in show_baseline_re_fig.
name: show_baseline_rd_fig · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: 8,544,105,541,260,878,000
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range): '\n Plot relative distances from points found by fiber traversal or baseline.\n test_ids, Ns, and samp_range should be as in show_traverse_re_fig.\n ' log = True mpl.rcParams['mathtext.default'] = 'regular' mpl.rcParams['pdf.fonttype'] ...
docstring: Plot relative distances from points found by fiber traversal or baseline. test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
name: show_simple_rd_all_fig · path: roundoff.py · repository_name: garrettkatz/rnn-fxpts · lang: python · body_hash: -3,905,793,942,477,665,300
def take_damage(self, dmg, source): ' after taking damage, if the priestess is not dead, it heals itself' hp_before_attack = self.hp super().take_damage(dmg, source) if (self._is_alive and (hp_before_attack > self.hp) and (source != 'pit')): heal_message = self.heal_itself() self.model.a...
docstring: after taking damage, if the priestess is not dead, it heals itself
name: take_damage · path: priestess.py · repository_name: nvanbaak/dungeon-adventure-2 · lang: python · body_hash: 1,302,554,236,194,353,700
body_without_docstring: def take_damage(self, dmg, source): ' ' hp_before_attack = self.hp super().take_damage(dmg, source) if (self._is_alive and (hp_before_attack > self.hp) and (source != 'pit')): heal_message = self.heal_itself() self.model.announce(f'{self.name}: {heal_message}')
def resolve_workout(self, info, **kwargs): 'query resolver for workout property' all_exercises = Exercise.objects.all() if kwargs.get('body_part'): all_exercises = all_exercises.select_related('body_part').filter(body_part__name=kwargs.get('body_part').lower()) if kwargs.get('level'): al...
docstring: query resolver for workout property
name: resolve_workout · path: quarantineworkout/workout/schema.py · repository_name: adeoke/django-quarantine-workout-graphql · lang: python · body_hash: -8,813,676,179,882,051,000
body_without_docstring: def resolve_workout(self, info, **kwargs): all_exercises = Exercise.objects.all() if kwargs.get('body_part'): all_exercises = all_exercises.select_related('body_part').filter(body_part__name=kwargs.get('body_part').lower()) if kwargs.get('level'): all_exercises = all_exercises.select_re...
def __init__(self, zip_code, house_number, house_addition=''): '\n To fetch the garbage calendar, you need to set a zip_code and house_number.\n ' self.zip_code = zip_code.replace(' ', '') self.house_number = house_number.strip() self.house_addition = house_addition.strip()
docstring: To fetch the garbage calendar, you need to set a zip_code and house_number.
name: __init__ · path: rova/rova.py · repository_name: synoniem/rova · lang: python · body_hash: 3,134,568,172,365,344,000
body_without_docstring: def __init__(self, zip_code, house_number, house_addition=''): '\n \n ' self.zip_code = zip_code.replace(' ', '') self.house_number = house_number.strip() self.house_addition = house_addition.strip()
def is_rova_area(self): '\n Check if ROVA collects garbage at this address\n ' url = 'https://www.rova.nl/api/waste-calendar/upcoming' response = requests.get(url, params={'postalcode': self.zip_code, 'houseNumber': self.house_number, 'addition': self.house_addition, 'take': '1'}) response...
docstring: Check if ROVA collects garbage at this address
name: is_rova_area · path: rova/rova.py · repository_name: synoniem/rova · lang: python · body_hash: -2,616,346,550,750,675,500
body_without_docstring: def is_rova_area(self): '\n \n ' url = 'https://www.rova.nl/api/waste-calendar/upcoming' response = requests.get(url, params={'postalcode': self.zip_code, 'houseNumber': self.house_number, 'addition': self.house_addition, 'take': '1'}) response.raise_for_status() rova_response = respon...
def get_calendar_items(self, take=5): '\n Get next pickup date for each garbage types\n ' url = 'https://www.rova.nl/api/waste-calendar/upcoming' response = requests.get(url, params={'postalcode': self.zip_code, 'houseNumber': self.house_number, 'addition': self.house_addition, 'take': take}) ...
docstring: Get next pickup date for each garbage types
name: get_calendar_items · path: rova/rova.py · repository_name: synoniem/rova · lang: python · body_hash: -7,547,873,869,175,699,000
body_without_docstring: def get_calendar_items(self, take=5): '\n \n ' url = 'https://www.rova.nl/api/waste-calendar/upcoming' response = requests.get(url, params={'postalcode': self.zip_code, 'houseNumber': self.house_number, 'addition': self.house_addition, 'take': take}) response.raise_for_status() rova_re...
def __init__(self, state_size, action_size, seed): 'Initialize an Agent object.\n \n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n seed (int): random seed\n ' self.state_size = state_size ...
docstring: Initialize an Agent object. Params ====== state_size (int): dimension of each state action_size (int): dimension of each action seed (int): random seed
name: __init__ · path: dqn/exercise/dqn_agent.py · repository_name: 0xtristan/deep-reinforcement-learning · lang: python · body_hash: 2,056,519,366,746,090,000
def act(self, state, eps=0.0): 'Returns actions for given state as per current policy.\n \n Params\n ======\n state (array_like): current state\n eps (float): epsilon, for epsilon-greedy action selection\n ' state = torch.from_numpy(state).float().unsqueeze(0).t...
docstring: Returns actions for given state as per current policy. Params ====== state (array_like): current state eps (float): epsilon, for epsilon-greedy action selection
name: act · path: dqn/exercise/dqn_agent.py · repository_name: 0xtristan/deep-reinforcement-learning · lang: python · body_hash: 3,284,820,839,670,036,500
def learn(self, experiences, gamma): "Update value parameters using given batch of experience tuples.\n\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples \n gamma (float): discount factor\n " (states, actions, rewards, next_stat...
docstring: Update value parameters using given batch of experience tuples. Params ====== experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples gamma (float): discount factor
name: learn · path: dqn/exercise/dqn_agent.py · repository_name: 0xtristan/deep-reinforcement-learning · lang: python · body_hash: 8,166,505,585,780,385,000
def soft_update(self, local_model, target_model, tau): 'Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n ...
3,655,770,241,422,866,000
Soft update model parameters. θ_target = τ*θ_local + (1 - τ)*θ_target Params ====== local_model (PyTorch model): weights will be copied from target_model (PyTorch model): weights will be copied to tau (float): interpolation parameter
dqn/exercise/dqn_agent.py
soft_update
0xtristan/deep-reinforcement-learning
python
def soft_update(self, local_model, target_model, tau): 'Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n ...
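The soft_update docstring states the rule explicitly: θ_target = τ*θ_local + (1 - τ)*θ_target. A minimal PyTorch sketch of that interpolation (the parameter-pairing loop is an assumption consistent with the truncated body):

import torch

def soft_update(local_model, target_model, tau):
    # Polyak averaging: blend local weights into the target network in place.
    for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
        target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)

local, target = torch.nn.Linear(4, 2), torch.nn.Linear(4, 2)
soft_update(local, target, tau=1e-3)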
def __init__(self, action_size, buffer_size, batch_size, seed): 'Initialize a ReplayBuffer object.\n\n Params\n ======\n action_size (int): dimension of each action\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n ...
-1,162,416,917,650,856,000
Initialize a ReplayBuffer object. Params ====== action_size (int): dimension of each action buffer_size (int): maximum size of buffer batch_size (int): size of each training batch seed (int): random seed
dqn/exercise/dqn_agent.py
__init__
0xtristan/deep-reinforcement-learning
python
def __init__(self, action_size, buffer_size, batch_size, seed): 'Initialize a ReplayBuffer object.\n\n Params\n ======\n action_size (int): dimension of each action\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n ...
def add(self, state, action, reward, next_state, done): 'Add a new experience to memory.' e = self.experience(state, action, reward, next_state, done) self.memory.append(e)
-8,881,662,531,665,694,000
Add a new experience to memory.
dqn/exercise/dqn_agent.py
add
0xtristan/deep-reinforcement-learning
python
def add(self, state, action, reward, next_state, done): e = self.experience(state, action, reward, next_state, done) self.memory.append(e)
def sample(self): 'Randomly sample a batch of experiences from memory.' experiences = random.sample(self.memory, k=self.batch_size) states = torch.from_numpy(np.vstack([e.state for e in experiences if (e is not None)])).float().to(device) actions = torch.from_numpy(np.vstack([e.action for e in experienc...
7,523,822,767,090,451,000
Randomly sample a batch of experiences from memory.
dqn/exercise/dqn_agent.py
sample
0xtristan/deep-reinforcement-learning
python
def sample(self): experiences = random.sample(self.memory, k=self.batch_size) states = torch.from_numpy(np.vstack([e.state for e in experiences if (e is not None)])).float().to(device) actions = torch.from_numpy(np.vstack([e.action for e in experiences if (e is not None)])).long().to(device) reward...
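The sample body batches each field of the sampled experiences via np.vstack before converting to tensors. A sketch of that pattern for the states field alone, using toy experiences:

import random
from collections import namedtuple

import numpy as np
import torch

Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])
memory = [Experience(np.random.rand(8), 0, 1.0, np.random.rand(8), False) for _ in range(10)]

batch = random.sample(memory, k=4)
# Stack per-experience state vectors into one [batch_size, state_size] float tensor.
states = torch.from_numpy(np.vstack([e.state for e in batch if e is not None])).float()
print(states.shape)  # torch.Size([4, 8])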
def __len__(self): 'Return the current size of internal memory.' return len(self.memory)
-960,517,394,760,847,000
Return the current size of internal memory.
dqn/exercise/dqn_agent.py
__len__
0xtristan/deep-reinforcement-learning
python
def __len__(self): return len(self.memory)
@classmethod def _connect(cls): 'Connects to vertica.\n \n :return: a connection to vertica.\n ' return connect(**cls._conn_info)
783,110,972,030,852,000
Connects to vertica. :return: a connection to vertica.
vertica_python/tests/base.py
_connect
etsy/vertica-python
python
@classmethod def _connect(cls): 'Connects to vertica.\n \n :return: a connection to vertica.\n ' return connect(**cls._conn_info)
def _query_and_fetchall(self, query): 'Creates a new connection, executes a query and fetches all the results.\n \n :param query: query to execute\n :return: all fetched results as returned by cursor.fetchall()\n ' with self._connect() as conn: cur = conn.cursor() cur...
6,632,299,217,410,403,000
Creates a new connection, executes a query and fetches all the results. :param query: query to execute :return: all fetched results as returned by cursor.fetchall()
vertica_python/tests/base.py
_query_and_fetchall
etsy/vertica-python
python
def _query_and_fetchall(self, query): 'Creates a new connection, executes a query and fetches all the results.\n \n :param query: query to execute\n :return: all fetched results as returned by cursor.fetchall()\n ' with self._connect() as conn: cur = conn.cursor() cur...
def _query_and_fetchone(self, query): 'Creates a new connection, executes a query and fetches one result.\n \n :param query: query to execute\n :return: the first result fetched by cursor.fetchone()\n ' with self._connect() as conn: cur = conn.cursor() cur.execute(que...
-3,428,955,883,212,195,000
Creates a new connection, executes a query and fetches one result. :param query: query to execute :return: the first result fetched by cursor.fetchone()
vertica_python/tests/base.py
_query_and_fetchone
etsy/vertica-python
python
def _query_and_fetchone(self, query): 'Creates a new connection, executes a query and fetches one result.\n \n :param query: query to execute\n :return: the first result fetched by cursor.fetchone()\n ' with self._connect() as conn: cur = conn.cursor() cur.execute(que...
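The three vertica test helpers above share one connect → cursor → execute → fetch shape. Since the vertica_python connection parameters aren't shown, here is the same shape demonstrated with the standard-library sqlite3 module instead:

import sqlite3

# Same pattern as _query_and_fetchall / _query_and_fetchone, different driver.
with sqlite3.connect(':memory:') as conn:
    cur = conn.cursor()
    cur.execute('SELECT 1')
    print(cur.fetchone())  # (1,)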
def get_interaction_table(self, user_id, item_id, y): 'Get interaction_table that is used for fetching user-item interaction label in LS regularization.\n\n Args:\n user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]\n item_id(torch.Tensor): the item...
-8,299,757,703,930,673,000
Get interaction_table that is used for fetching user-item interaction label in LS regularization. Args: user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1] item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1] y(torch.Tensor): the labe...
recbole/model/knowledge_aware_recommender/kgnnls.py
get_interaction_table
xingkongxiaxia/RecBole
python
def get_interaction_table(self, user_id, item_id, y): 'Get interaction_table that is used for fetching user-item interaction label in LS regularization.\n\n Args:\n user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]\n item_id(torch.Tensor): the item...
def sample_neg_interaction(self, pos_interaction_table, offset): 'Sample neg_interaction to construct train data.\n\n Args:\n pos_interaction_table(dict): the interaction_table that only contains pos_interaction.\n offset(int): The offset that is used for calculating the key(index) in i...
3,427,626,011,596,649,500
Sample neg_interaction to construct train data. Args: pos_interaction_table(dict): the interaction_table that only contains pos_interaction. offset(int): The offset that is used for calculating the key(index) in interaction_table Returns: interaction_table(dict): key: user_id * 10^offset + item_id; value:...
recbole/model/knowledge_aware_recommender/kgnnls.py
sample_neg_interaction
xingkongxiaxia/RecBole
python
def sample_neg_interaction(self, pos_interaction_table, offset): 'Sample neg_interaction to construct train data.\n\n Args:\n pos_interaction_table(dict): the interaction_table that only contains pos_interaction.\n offset(int): The offset that is used for calculating the key(index) in i...
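Per the sample_neg_interaction docstring, the interaction table is keyed as user_id * 10^offset + item_id. A small sketch of encoding and decoding that composite key, assuming offset is at least the number of digits in any item id:

def encode_key(user_id, item_id, offset):
    # Pack the (user, item) pair into a single integer key.
    return user_id * 10 ** offset + item_id

def decode_key(key, offset):
    # Recover (user_id, item_id); requires item_id < 10 ** offset.
    return divmod(key, 10 ** offset)

key = encode_key(42, 137, offset=4)
print(key, decode_key(key, offset=4))  # 420137 (42, 137)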
def construct_adj(self, kg_graph): 'Get neighbors and corresponding relations for each entity in the KG.\n\n Args:\n kg_graph(scipy.sparse.coo_matrix): an undirected graph\n\n Returns:\n tuple:\n - adj_entity (torch.LongTensor): each line stores the sampled neighbo...
2,217,805,210,382,188,500
Get neighbors and corresponding relations for each entity in the KG. Args: kg_graph(scipy.sparse.coo_matrix): an undirected graph Returns: tuple: - adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity, shape: [n_entities, neighbor_sample_size] ...
recbole/model/knowledge_aware_recommender/kgnnls.py
construct_adj
xingkongxiaxia/RecBole
python
def construct_adj(self, kg_graph): 'Get neighbors and corresponding relations for each entity in the KG.\n\n Args:\n kg_graph(scipy.sparse.coo_matrix): an undirected graph\n\n Returns:\n tuple:\n - adj_entity (torch.LongTensor): each line stores the sampled neighbo...
def get_neighbors(self, items): "Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.\n\n Args:\n items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]\n\n Returns:\n tuple:\n - enti...
6,309,155,545,897,962,000
Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation. Args: items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ] Returns: tuple: - entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the b...
recbole/model/knowledge_aware_recommender/kgnnls.py
get_neighbors
xingkongxiaxia/RecBole
python
def get_neighbors(self, items): "Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.\n\n Args:\n items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]\n\n Returns:\n tuple:\n - enti...
def aggregate(self, user_embeddings, entities, relations): 'For each item, aggregate the entity representation and its neighborhood representation into a single vector.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]\n entitie...
-6,236,746,642,292,884,000
For each item, aggregate the entity representation and its neighborhood representation into a single vector. Args: user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size] entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of ite...
recbole/model/knowledge_aware_recommender/kgnnls.py
aggregate
xingkongxiaxia/RecBole
python
def aggregate(self, user_embeddings, entities, relations): 'For each item, aggregate the entity representation and its neighborhood representation into a single vector.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]\n entitie...
def label_smoothness_predict(self, user_embeddings, user, entities, relations): 'Predict the label of items by label smoothness.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],\n user(torch.FloatTensor): the index of users,...
1,820,534,822,281,268,500
Predict the label of items by label smoothness. Args: user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size], user(torch.FloatTensor): the index of users, shape: [batch_size*2] entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the...
recbole/model/knowledge_aware_recommender/kgnnls.py
label_smoothness_predict
xingkongxiaxia/RecBole
python
def label_smoothness_predict(self, user_embeddings, user, entities, relations): 'Predict the label of items by label smoothness.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],\n user(torch.FloatTensor): the index of users,...
def calculate_ls_loss(self, user, item, target): 'Calculate label smoothness loss.\n\n Args:\n user(torch.FloatTensor): the index of users, shape: [batch_size*2],\n item(torch.FloatTensor): the index of items, shape: [batch_size*2],\n target(torch.FloatTensor): the label of u...
-1,483,023,215,681,424,100
Calculate label smoothness loss. Args: user(torch.FloatTensor): the index of users, shape: [batch_size*2], item(torch.FloatTensor): the index of items, shape: [batch_size*2], target(torch.FloatTensor): the label of user-item, shape: [batch_size*2], Returns: ls_loss: label smoothness loss
recbole/model/knowledge_aware_recommender/kgnnls.py
calculate_ls_loss
xingkongxiaxia/RecBole
python
def calculate_ls_loss(self, user, item, target): 'Calculate label smoothness loss.\n\n Args:\n user(torch.FloatTensor): the index of users, shape: [batch_size*2],\n item(torch.FloatTensor): the index of items, shape: [batch_size*2],\n target(torch.FloatTensor): the label of u...
def to_python(self, value): 'Convert our string value to JSON after we load it from the DB' if ((value is None) or (value == '')): return {} elif isinstance(value, basestring): res = loads(value) if isinstance(res, dict): return JSONDict(**res) else: r...
-834,000,970,839,273,900
Convert our string value to JSON after we load it from the DB
vendor-local/src/django-extensions/build/lib/django_extensions/db/fields/json.py
to_python
Mozilla-GitHub-Standards/b6a5bb5c98b18d87c72c770f29c4270008fc6fc6b787d531a2afcd382dc4cbad
python
def to_python(self, value): if ((value is None) or (value == '')): return {} elif isinstance(value, basestring): res = loads(value) if isinstance(res, dict): return JSONDict(**res) else: return JSONList(res) else: return value
def get_db_prep_save(self, value, connection): 'Convert our JSON object to a string before we save' if (not isinstance(value, (list, dict))): return super(JSONField, self).get_db_prep_save('', connection=connection) else: return super(JSONField, self).get_db_prep_save(dumps(value), connectio...
-3,618,754,902,002,978,000
Convert our JSON object to a string before we save
vendor-local/src/django-extensions/build/lib/django_extensions/db/fields/json.py
get_db_prep_save
Mozilla-GitHub-Standards/b6a5bb5c98b18d87c72c770f29c4270008fc6fc6b787d531a2afcd382dc4cbad
python
def get_db_prep_save(self, value, connection): if (not isinstance(value, (list, dict))): return super(JSONField, self).get_db_prep_save('', connection=connection) else: return super(JSONField, self).get_db_prep_save(dumps(value), connection=connection)
def south_field_triple(self): 'Returns a suitable description of this field for South.' from south.modelsinspector import introspector field_class = 'django.db.models.fields.TextField' (args, kwargs) = introspector(self) return (field_class, args, kwargs)
-532,884,842,270,397,060
Returns a suitable description of this field for South.
vendor-local/src/django-extensions/build/lib/django_extensions/db/fields/json.py
south_field_triple
Mozilla-GitHub-Standards/b6a5bb5c98b18d87c72c770f29c4270008fc6fc6b787d531a2afcd382dc4cbad
python
def south_field_triple(self): from south.modelsinspector import introspector field_class = 'django.db.models.fields.TextField' (args, kwargs) = introspector(self) return (field_class, args, kwargs)
@cached_property def additional_properties_type(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n ' lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type)
1,702,168,743,392,494,600
This must be a method because a model may have properties that are of type self, this must run after the class is loaded
code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py
additional_properties_type
factset/enterprise-sdk
python
@cached_property def additional_properties_type(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n ' lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type)
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ...
7,408,037,427,849,946,000
This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.
code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py
openapi_types
factset/enterprise-sdk
python
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ...
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): 'InlineResponse20013 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError...
9,188,032,339,138,415,000
InlineResponse20013 - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_it...
code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py
_from_openapi_data
factset/enterprise-sdk
python
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): 'InlineResponse20013 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError...
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'InlineResponse20013 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n ...
-3,725,525,108,762,265,600
InlineResponse20013 - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_it...
code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py
__init__
factset/enterprise-sdk
python
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'InlineResponse20013 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n ...
def load_data(traindir, valdir, **kwargs): 'Generate the train and val dataloaders; you can change this for your specific task.\n\n Args:\n traindir (str): train dataset dir\n valdir (str): validation dataset dir\n\n Returns:\n tuple: the train dataset and validation dataset\n ' trai...
5,464,277,367,755,207,000
Generate the train and val dataloaders; you can change this for your specific task. Args: traindir (str): train dataset dir valdir (str): validation dataset dir Returns: tuple: the train dataset and validation dataset
torchsat/scripts/train_cd.py
load_data
alina2204/contrastive_SSL_ship_detection
python
def load_data(traindir, valdir, **kwargs): 'Generate the train and val dataloaders; you can change this for your specific task.\n\n Args:\n traindir (str): train dataset dir\n valdir (str): validation dataset dir\n\n Returns:\n tuple: the train dataset and validation dataset\n ' trai...
def aggregate_gradients_using_nccl(replica_grads): 'Aggregate gradients using nccl allreduce.' agg_all_g_and_v = [] for single_g_and_v in zip(*replica_grads): single_grads = [g for (g, _) in single_g_and_v] agg_grads = nccl_ops.all_sum(single_grads) agg_all_g_and_v.append([(g, v) for...
-460,152,936,942,818,200
Aggregate gradients using nccl allreduce.
tensorflow/python/distribute/cross_device_utils.py
aggregate_gradients_using_nccl
DeuroIO/Deuro-tensorflow
python
def aggregate_gradients_using_nccl(replica_grads): agg_all_g_and_v = [] for single_g_and_v in zip(*replica_grads): single_grads = [g for (g, _) in single_g_and_v] agg_grads = nccl_ops.all_sum(single_grads) agg_all_g_and_v.append([(g, v) for (g, (_, v)) in zip(agg_grads, single_g_and...
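The aggregate_gradients_using_nccl body is short enough to read in full: zip(*replica_grads) regroups the replica-major structure per variable, each group is all-reduced, and the result is transposed back. A pure-Python sketch of that reshaping, with an element-wise sum standing in for nccl_ops.all_sum:

def aggregate(replica_grads, all_sum=lambda grads: [sum(grads)] * len(grads)):
    # replica_grads: outer list over replicas, inner list of (grad, var) pairs.
    agg = []
    for single_g_and_v in zip(*replica_grads):  # regrouped per variable
        summed = all_sum([g for g, _ in single_g_and_v])
        agg.append([(g, v) for g, (_, v) in zip(summed, single_g_and_v)])
    # Transpose back to replica-major layout.
    return [list(x) for x in zip(*agg)]

two_replicas = [[(1.0, 'w'), (2.0, 'b')], [(3.0, 'w'), (4.0, 'b')]]
print(aggregate(two_replicas))
# [[(4.0, 'w'), (6.0, 'b')], [(4.0, 'w'), (6.0, 'b')]]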
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads): 'Aggregate gradients using hierarchical copies.\n\n Args:\n avail_devices: available GPU devices.\n replica_grads: List of lists of (gradient, variable) tuples. The outer list\n is over replicas. The inner list is over indivi...
-5,807,300,737,462,545,000
Aggregate gradients using hierarchical copies. Args: avail_devices: available GPU devices. replica_grads: List of lists of (gradient, variable) tuples. The outer list is over replicas. The inner list is over individual gradients. Returns: The list of (aggregated_gradient, variable), where the gradient has b...
tensorflow/python/distribute/cross_device_utils.py
aggregate_gradients_using_hierarchical_copy
DeuroIO/Deuro-tensorflow
python
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads): 'Aggregate gradients using hierarchical copies.\n\n Args:\n avail_devices: available GPU devices.\n replica_grads: List of lists of (gradient, variable) tuples. The outer list\n is over replicas. The inner list is over indivi...
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean, check_inf_nan): 'Calculate the average gradient for a shared variable across all replicas.\n\n Note that this function provides a synchronization point across all replicas.\n\n Args:\n grad_and_vars: A list or tuple of (gradient, variable) tuple...
-739,028,824,022,532,700
Calculate the average gradient for a shared variable across all replicas. Note that this function provides a synchronization point across all replicas. Args: grad_and_vars: A list or tuple of (gradient, variable) tuples. Each (gradient, variable) pair within the outer list represents the gradient of the var...
tensorflow/python/distribute/cross_device_utils.py
aggregate_single_gradient_using_copy
DeuroIO/Deuro-tensorflow
python
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean, check_inf_nan): 'Calculate the average gradient for a shared variable across all replicas.\n\n Note that this function provides a synchronization point across all replicas.\n\n Args:\n grad_and_vars: A list or tuple of (gradient, variable) tuple...
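The aggregate_single_gradient_using_copy docstring describes summing one variable's gradients across replicas, optionally dividing by the replica count, and optionally flagging inf/NaN. A NumPy sketch of that contract (the return shape is inferred from the docstring, not the truncated body):

import numpy as np

def aggregate_single_gradient(grad_and_vars, use_mean=True, check_inf_nan=False):
    # grad_and_vars: one (gradient, variable) pair per replica for one variable.
    grads = [g for g, _ in grad_and_vars]
    grad = np.sum(grads, axis=0)
    if use_mean and len(grads) > 1:
        grad = grad / len(grads)
    has_nan_or_inf = (not bool(np.all(np.isfinite(grad)))) if check_inf_nan else None
    return (grad, grad_and_vars[0][1]), has_nan_or_inf

pairs = [(np.array([2.0, 4.0]), 'w'), (np.array([4.0, 8.0]), 'w')]
print(aggregate_single_gradient(pairs))  # ((array([3., 6.]), 'w'), None)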
def group_device_names(devices, group_size): 'Group device names into groups of group_size.\n\n Args:\n devices: a list of canonical device strings.\n group_size: integer which is equal to or greater than 1.\n\n Returns:\n list of lists of devices, where each inner list is group_size long,\n and eac...
-402,665,421,571,026,240
Group device names into groups of group_size. Args: devices: a list of canonical device strings. group_size: integer which is equal to or greater than 1. Returns: list of lists of devices, where each inner list is group_size long, and each device appears at least once in an inner list. If len(devices) ...
tensorflow/python/distribute/cross_device_utils.py
group_device_names
DeuroIO/Deuro-tensorflow
python
def group_device_names(devices, group_size): 'Group device names into groups of group_size.\n\n Args:\n devices: a list of canonical device strings.\n group_size: integer which is equal to or greater than 1.\n\n Returns:\n list of lists of devices, where each inner list is group_size long,\n and eac...
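The group_device_names docstring says each inner list is group_size long and every device appears at least once, wrapping around when the device count isn't a multiple of group_size. A sketch of that grouping rule (the cycling order is an assumption; the real helper may differ in detail):

def group_device_names(devices, group_size):
    if group_size < 1:
        raise ValueError('group_size must be >= 1')
    num_groups = -(-len(devices) // group_size)  # ceiling division
    # Cycle through the device list so a short tail wraps around to the front.
    return [[devices[(i * group_size + j) % len(devices)] for j in range(group_size)]
            for i in range(num_groups)]

print(group_device_names(['gpu:0', 'gpu:1', 'gpu:2'], 2))
# [['gpu:0', 'gpu:1'], ['gpu:2', 'gpu:0']]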
def split_grads_by_size(threshold_size, device_grads): 'Break gradients into two sets according to tensor size.\n\n Args:\n threshold_size: int size cutoff for small vs large tensor.\n device_grads: List of lists of (gradient, variable) tuples. The outer\n list is over devices. The inner list is over...
7,115,999,087,250,416,000
Break gradients into two sets according to tensor size. Args: threshold_size: int size cutoff for small vs large tensor. device_grads: List of lists of (gradient, variable) tuples. The outer list is over devices. The inner list is over individual gradients. Returns: small_grads: Subset of device_grads wh...
tensorflow/python/distribute/cross_device_utils.py
split_grads_by_size
DeuroIO/Deuro-tensorflow
python
def split_grads_by_size(threshold_size, device_grads): 'Break gradients into two sets according to tensor size.\n\n Args:\n threshold_size: int size cutoff for small vs large tensor.\n device_grads: List of lists of (gradient, variable) tuples. The outer\n list is over devices. The inner list is over...
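split_grads_by_size partitions each device's gradient list by element count against threshold_size. A sketch using NumPy arrays as stand-ins for tensors (the strict-versus-inclusive threshold comparison is an assumption):

import numpy as np

def split_grads_by_size(threshold_size, device_grads):
    # Partition each device's (grad, var) list into small and large tensors.
    small, large = [], []
    for grads_and_vars in device_grads:
        small.append([(g, v) for g, v in grads_and_vars if g.size < threshold_size])
        large.append([(g, v) for g, v in grads_and_vars if g.size >= threshold_size])
    return small, large

device_grads = [[(np.zeros(3), 'bias'), (np.zeros(1024), 'weight')]]
small, large = split_grads_by_size(32, device_grads)
print(len(small[0]), len(large[0]))  # 1 1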
def build_collective_reduce(input_tensors, num_workers, collective_keys, reduction_op='Add', unary_op='Id'): 'Build a subgraph that does one full all-reduce, using the collective Op.\n\n Args:\n input_tensors: tensors within a single worker graph that are to be reduced\n together; must be one per device.\n...
-1,641,276,473,819,680,500
Build a subgraph that does one full all-reduce, using the collective Op. Args: input_tensors: tensors within a single worker graph that are to be reduced together; must be one per device. num_workers: total number of workers with identical independent graphs that will be doing this same reduction. The red...
tensorflow/python/distribute/cross_device_utils.py
build_collective_reduce
DeuroIO/Deuro-tensorflow
python
def build_collective_reduce(input_tensors, num_workers, collective_keys, reduction_op='Add', unary_op='Id'): 'Build a subgraph that does one full all-reduce, using the collective Op.\n\n Args:\n input_tensors: tensors within a single worker graph that are to be reduced\n together; must be one per device.\n...
def sum_grad_and_var_all_reduce(grad_and_vars, num_workers, alg, gpu_indices, aux_devices=None, num_shards=1): 'Apply all-reduce algorithm over specified gradient tensors.' with ops.name_scope('allreduce'): scaled_grads = [g for (g, _) in grad_and_vars] if (alg == 'nccl'): summed_gra...
-2,988,830,372,582,981,600
Apply all-reduce algorithm over specified gradient tensors.
tensorflow/python/distribute/cross_device_utils.py
sum_grad_and_var_all_reduce
DeuroIO/Deuro-tensorflow
python
def sum_grad_and_var_all_reduce(grad_and_vars, num_workers, alg, gpu_indices, aux_devices=None, num_shards=1): with ops.name_scope('allreduce'): scaled_grads = [g for (g, _) in grad_and_vars] if (alg == 'nccl'): summed_grads = nccl_ops.all_sum(scaled_grads) elif (alg == 'xri...
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg, num_shards, gpu_indices): 'Apply all-reduce algorithm over specified gradient tensors.\n\n Args:\n dev_prefixes: list of prefix strings to use to generate PS device names.\n replica_grads: the gradients to reduce.\n num_workers: n...
4,814,734,914,225,489,000
Apply all-reduce algorithm over specified gradient tensors. Args: dev_prefixes: list of prefix strings to use to generate PS device names. replica_grads: the gradients to reduce. num_workers: number of worker processes across entire job. alg: the all-reduce algorithm to apply. num_shards: alg-specific shardi...
tensorflow/python/distribute/cross_device_utils.py
sum_gradients_all_reduce
DeuroIO/Deuro-tensorflow
python
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg, num_shards, gpu_indices): 'Apply all-reduce algorithm over specified gradient tensors.\n\n Args:\n dev_prefixes: list of prefix strings to use to generate PS device names.\n replica_grads: the gradients to reduce.\n num_workers: n...
def extract_ranges(index_list, range_size_limit=32): 'Extract consecutive ranges and singles from index_list.\n\n Args:\n index_list: List of monotone increasing non-negative integers.\n range_size_limit: Largest size range to return. If a larger\n consecutive range exists, it will be returned as multi...
7,368,154,166,958,740,000
Extract consecutive ranges and singles from index_list. Args: index_list: List of monotone increasing non-negative integers. range_size_limit: Largest size range to return. If a larger consecutive range exists, it will be returned as multiple ranges. Returns: (ranges, singles) where ranges is a list of...
tensorflow/python/distribute/cross_device_utils.py
extract_ranges
DeuroIO/Deuro-tensorflow
python
def extract_ranges(index_list, range_size_limit=32): 'Extract consecutive ranges and singles from index_list.\n\n Args:\n index_list: List of monotone increasing non-negative integers.\n range_size_limit: Largest size range to return. If a larger\n consecutive range exists, it will be returned as multi...
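extract_ranges turns a monotone index list into maximal consecutive ranges (capped at range_size_limit) plus leftover singles. A sketch of that scan (the exact output encoding is assumed, since the docstring is truncated before it specifies one):

def extract_ranges(index_list, range_size_limit=32):
    ranges, singles = [], []

    def flush(start, end):
        if end > start:
            ranges.append((start, end))
        else:
            singles.append(start)

    start = end = None
    for i in index_list:
        # Extend the current run while it stays consecutive and under the cap.
        if end is not None and i == end + 1 and (end - start + 1) < range_size_limit:
            end = i
        else:
            if start is not None:
                flush(start, end)
            start = end = i
    if start is not None:
        flush(start, end)
    return ranges, singles

print(extract_ranges([0, 1, 2, 5, 7, 8]))  # ([(0, 2), (7, 8)], [5])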