Dataset schema (one row per source file; column name: type, observed range; ⌀ marks columns that can be null):

- hexsha: string, length 40
- size: int64, 4 to 1.02M
- ext: string, 8 distinct values
- lang: string, 1 distinct value
- max_stars_repo_path: string, length 4 to 209
- max_stars_repo_name: string, length 5 to 121
- max_stars_repo_head_hexsha: string, length 40
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 4 to 209
- max_issues_repo_name: string, length 5 to 121
- max_issues_repo_head_hexsha: string, length 40
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 4 to 209
- max_forks_repo_name: string, length 5 to 121
- max_forks_repo_head_hexsha: string, length 40
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1

Each record below lists the row's metadata, followed by the file content and the derived statistics.
Record 1
hexsha: b11ef5c02b13123afd23074dcce1f68e408bfa87 | size: 544 | ext: py | lang: Python
path: Python Exercicios/Curso_Em_Video/Modulo 1/Aula 5 Manipulando Texto/Crie um programa que leia o nome completo de uma pessoa e mostre.py
repo: TiagoSo/PYTHON | head: a36032786eb506d9c60853460e6d4b37373bafdb | licenses: ["MIT"]
stars: 1 (2021-12-23T00:41:22.000Z to 2021-12-23T00:41:22.000Z) | issues: null | forks: null
content:
#Create a program that reads a person's full name and shows:
#- The name in all uppercase and in all lowercase letters.
#- How many letters it has in total (not counting spaces).
#- How many letters the first name has.
print("Digite o seu nome completo por favor")
nome= str(input()).strip()
print("Analisando o seu nome...")
print("O seu nome em maiúsculas é",nome.upper())
#print("O seu nome em minusculas é",nome.lower())
#print("O seu nome tem ao todo",len(nome)-nome.count(" "))
#print("O seu primeiro nome",nome.find(" "))
avg_line_length: 32 | max_line_length: 66 | alphanum_fraction: 0.6875

Record 2
hexsha: c83fe29636947f4599a516ac7790b9ddf9f323ee | size: 7,083 | ext: py | lang: Python
path: src/aks-preview/azext_aks_preview/_params.py
repo: mboersma/azure-cli-extensions | head: 9fde4011748f006ba2943e2e3ba69c16605e715f | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long,too-many-statements
import os.path
import platform
from argcomplete.completers import FilesCompleter
from azure.cli.core.commands.parameters import (
file_type, get_resource_name_completion_list, name_type, tags_type, zones_type)
from ._completers import (
get_vm_size_completion_list, get_k8s_versions_completion_list, get_k8s_upgrades_completion_list)
from ._validators import (
validate_create_parameters, validate_k8s_version, validate_linux_host_name,
validate_ssh_key, validate_max_pods, validate_nodes_count, validate_ip_ranges,
validate_nodepool_name)
def load_arguments(self, _):
# AKS command argument configuration
with self.argument_context('aks') as c:
c.argument('resource_name', name_type, help='Name of the managed cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters'))
c.argument('name', name_type, help='Name of the managed cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters'))
c.argument('kubernetes_version', options_list=['--kubernetes-version', '-k'], validator=validate_k8s_version)
c.argument('node_count', options_list=['--node-count', '-c'], type=int)
c.argument('tags', tags_type)
with self.argument_context('aks create') as c:
c.argument('name', validator=validate_linux_host_name)
c.argument('kubernetes_version', completer=get_k8s_versions_completion_list)
c.argument('admin_username', options_list=['--admin-username', '-u'], default='azureuser')
c.argument('windows_admin_username', options_list=['--windows-admin-username'])
c.argument('windows_admin_password', options_list=['--windows-admin-password'])
c.argument('dns_name_prefix', options_list=['--dns-name-prefix', '-p'])
c.argument('generate_ssh_keys', action='store_true', validator=validate_create_parameters)
c.argument('node_vm_size', options_list=['--node-vm-size', '-s'], completer=get_vm_size_completion_list)
c.argument('nodepool_name', type=str, default='nodepool1',
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name)
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('aad_client_app_id')
c.argument('aad_server_app_id')
c.argument('aad_server_app_secret')
c.argument('aad_tenant_id')
c.argument('dns_service_ip')
c.argument('docker_bridge_address')
c.argument('enable_addons', options_list=['--enable-addons', '-a'])
c.argument('disable_rbac', action='store_true')
c.argument('enable_rbac', action='store_true', options_list=['--enable-rbac', '-r'],
deprecate_info=c.deprecate(redirect="--disable-rbac", hide="2.0.45"))
c.argument('max_pods', type=int, options_list=['--max-pods', '-m'], validator=validate_max_pods)
c.argument('network_plugin')
c.argument('network_policy')
c.argument('no_ssh_key', options_list=['--no-ssh-key', '-x'])
c.argument('pod_cidr')
c.argument('service_cidr')
c.argument('vnet_subnet_id')
c.argument('workspace_resource_id')
c.argument('skip_subnet_role_assignment', action='store_true')
c.argument('enable_cluster_autoscaler', action='store_true')
c.argument('min_count', type=int, validator=validate_nodes_count)
c.argument('max_count', type=int, validator=validate_nodes_count)
c.argument('enable_vmss', action='store_true')
c.argument('node_zones', zones_type, options_list='--node-zones', help='(PREVIEW) Space-separated list of availability zones where agent nodes will be placed.')
c.argument('enable_pod_security_policy', action='store_true')
with self.argument_context('aks update') as c:
c.argument('enable_cluster_autoscaler', options_list=["--enable-cluster-autoscaler", "-e"], action='store_true')
c.argument('disable_cluster_autoscaler', options_list=["--disable-cluster-autoscaler", "-d"], action='store_true')
c.argument('update_cluster_autoscaler', options_list=["--update-cluster-autoscaler", "-u"], action='store_true')
c.argument('min_count', type=int, validator=validate_nodes_count)
c.argument('max_count', type=int, validator=validate_nodes_count)
c.argument('api_server_authorized_ip_ranges', type=str, validator=validate_ip_ranges)
c.argument('enable_pod_security_policy', action='store_true')
c.argument('disable_pod_security_policy', action='store_true')
with self.argument_context('aks scale') as c:
c.argument('nodepool_name', type=str,
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name)
with self.argument_context('aks upgrade') as c:
c.argument('kubernetes_version', completer=get_k8s_upgrades_completion_list)
with self.argument_context('aks nodepool') as c:
c.argument('cluster_name', type=str, help='The cluster name.')
for scope in ['aks nodepool add']:
with self.argument_context(scope) as c:
c.argument('nodepool_name', type=str, options_list=['--name', '-n'], validator=validate_nodepool_name, help='The node pool name.')
c.argument('node_zones', zones_type, options_list='--node-zones', help='(PREVIEW) Space-separated list of availability zones where agent nodes will be placed.')
c.argument('node_vm_size', options_list=['--node-vm-size', '-s'], completer=get_vm_size_completion_list)
c.argument('max_pods', type=int, options_list=['--max-pods', '-m'], validator=validate_max_pods)
c.argument('os_type', type=str)
for scope in ['aks nodepool show', 'aks nodepool delete', 'aks nodepool scale', 'aks nodepool upgrade']:
with self.argument_context(scope) as c:
c.argument('nodepool_name', type=str, options_list=['--name', '-n'], validator=validate_nodepool_name, help='The node pool name.')
def _get_default_install_location(exe_name):
system = platform.system()
if system == 'Windows':
home_dir = os.environ.get('USERPROFILE')
if not home_dir:
return None
install_location = os.path.join(home_dir, r'.azure-{0}\{0}.exe'.format(exe_name))
elif system == 'Linux' or system == 'Darwin':
install_location = '/usr/local/bin/{}'.format(exe_name)
else:
install_location = None
return install_location
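For reference, a sketch of what _get_default_install_location evaluates to (the executable name and home directory below are illustrative, not taken from this file):
# On Linux or macOS:  _get_default_install_location('kubectl') -> '/usr/local/bin/kubectl'
# On Windows with USERPROFILE=C:\Users\me:
#     _get_default_install_location('kubectl') -> 'C:\\Users\\me\\.azure-kubectl\\kubectl.exe'
# On other platforms, or on Windows without USERPROFILE, it returns None.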
avg_line_length: 59.521008 | max_line_length: 172 | alphanum_fraction: 0.682056

Record 3
hexsha: f569cab933ae9dcf8225f11f518da1180582aca1 | size: 377 | ext: py | lang: Python
path: string_manipulation/count_vowels.py
repo: magnusrodseth/data-structures-and-algorithms | head: 45dfdc0859683d5c76b82b87f415e2c0cdbc15e8 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
def count_vowels(string: str) -> int:
"""
Find the number of vowels in a string. The vowels counted here are A, E, I, O, U and Y.
:param string: the string to scan (checked case-insensitively).
:return: the number of vowels found.
"""
vowels = "aeiouy"
counter = 0
for letter in string.lower():
if letter in vowels:
counter += 1
return counter
if __name__ == '__main__':
print(count_vowels("hello"))
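A few illustrative cases beyond the single call above (not part of the original file):
# count_vowels("hello")  -> 2
# count_vowels("HELLO")  -> 2  (the input is lower-cased before checking)
# count_vowels("rhythm") -> 1  ('y' counts as a vowel here)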
avg_line_length: 19.842105 | max_line_length: 81 | alphanum_fraction: 0.575597

Record 4
hexsha: 156302cefb6642ee02a21a38f75b3fc3d4bde9cf | size: 3,124 | ext: py | lang: Python
path: tests/test_completion/test_completion_show.py
repo: gmelodie/typer | head: c4e3e460db89073a9b06c1ec90733ad732956e8e | licenses: ["MIT"]
stars: 1 (2020-12-01T07:02:53.000Z to 2020-12-01T07:02:53.000Z) | issues: null | forks: null
content:
import os
import subprocess
from docs_src.first_steps import tutorial001 as mod
def test_completion_show_no_shell():
result = subprocess.run(
["coverage", "run", mod.__file__, "--show-completion"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
env={
**os.environ,
"_TYPER_COMPLETE_TESTING": "True",
"_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION": "True",
},
)
assert "Error: --show-completion option requires an argument" in result.stderr
def test_completion_show_bash():
result = subprocess.run(
["coverage", "run", mod.__file__, "--show-completion", "bash"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
env={
**os.environ,
"_TYPER_COMPLETE_TESTING": "True",
"_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION": "True",
},
)
assert (
"complete -o default -F _tutorial001py_completion tutorial001.py"
in result.stdout
)
def test_completion_source_zsh():
result = subprocess.run(
["coverage", "run", mod.__file__, "--show-completion", "zsh"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
env={
**os.environ,
"_TYPER_COMPLETE_TESTING": "True",
"_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION": "True",
},
)
assert "compdef _tutorial001py_completion tutorial001.py" in result.stdout
def test_completion_source_fish():
result = subprocess.run(
["coverage", "run", mod.__file__, "--show-completion", "fish"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
env={
**os.environ,
"_TYPER_COMPLETE_TESTING": "True",
"_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION": "True",
},
)
assert "complete --command tutorial001.py --no-files" in result.stdout
def test_completion_source_powershell():
result = subprocess.run(
["coverage", "run", mod.__file__, "--show-completion", "powershell"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
env={
**os.environ,
"_TYPER_COMPLETE_TESTING": "True",
"_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION": "True",
},
)
assert (
"Register-ArgumentCompleter -Native -CommandName tutorial001.py -ScriptBlock $scriptblock"
in result.stdout
)
def test_completion_source_pwsh():
result = subprocess.run(
["coverage", "run", mod.__file__, "--show-completion", "pwsh"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
env={
**os.environ,
"_TYPER_COMPLETE_TESTING": "True",
"_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION": "True",
},
)
assert (
"Register-ArgumentCompleter -Native -CommandName tutorial001.py -ScriptBlock $scriptblock"
in result.stdout
)
avg_line_length: 30.038462 | max_line_length: 98 | alphanum_fraction: 0.606594

Record 5
hexsha: 38bef9b7cda25694d517146827d6d1c4b1035147 | size: 11,967 | ext: py | lang: Python
path: ubermagtable/util/util.py
repo: ubermag/ubermagtable | head: a95ac9529f06e77ed88697c9ffb94b70a3f15c7b | licenses: ["BSD-3-Clause"]
stars: 4 (2019-10-21T01:12:37.000Z to 2021-05-27T05:01:07.000Z) | issues: 8 (2019-08-06T18:45:43.000Z to 2022-02-21T22:13:32.000Z) | forks: 1 (2021-07-04T14:32:38.000Z to 2021-07-04T14:32:38.000Z)
content:
import re
# The OOMMF columns are renamed according to this dictionary.
oommf_dict = {'RungeKuttaEvolve:evolver:Total energy': 'E',
'RungeKuttaEvolve:evolver:Energy calc count': 'E_calc_count',
'RungeKuttaEvolve:evolver:Max dm/dt': 'max_dm/dt',
'RungeKuttaEvolve:evolver:dE/dt': 'dE/dt',
'RungeKuttaEvolve:evolver:Delta E': 'delta_E',
'RungeKuttaEvolve::Total energy': 'E',
'RungeKuttaEvolve::Energy calc count': 'E_calc_count',
'RungeKuttaEvolve::Max dm/dt': 'max_dm/dt',
'RungeKuttaEvolve::dE/dt': 'dE/dt',
'RungeKuttaEvolve::Delta E': 'delta_E',
'EulerEvolve:evolver:Total energy': 'E',
'EulerEvolve:evolver:Energy calc count': 'E_calc_count',
'EulerEvolve:evolver:Max dm/dt': 'max_dmdt',
'EulerEvolve:evolver:dE/dt': 'dE/dt',
'EulerEvolve:evolver:Delta E': 'delta_E',
'TimeDriver::Iteration': 'iteration',
'TimeDriver::Stage iteration': 'stage_iteration',
'TimeDriver::Stage': 'stage',
'TimeDriver::mx': 'mx',
'TimeDriver::my': 'my',
'TimeDriver::mz': 'mz',
'TimeDriver::Last time step': 'last_time_step',
'TimeDriver::Simulation time': 't',
'CGEvolve:evolver:Max mxHxm': 'max_mxHxm',
'CGEvolve:evolver:Total energy': 'E',
'CGEvolve:evolver:Delta E': 'delta_E',
'CGEvolve:evolver:Bracket count': 'bracket_count',
'CGEvolve:evolver:Line min count': 'line_min_count',
'CGEvolve:evolver:Conjugate cycle count':
'conjugate_cycle_count',
'CGEvolve:evolver:Cycle count': 'cycle_count',
'CGEvolve:evolver:Cycle sub count': 'cycle_sub_count',
'CGEvolve:evolver:Energy calc count': 'energy_calc_count',
'CGEvolve::Max mxHxm': 'max_mxHxm',
'CGEvolve::Total energy': 'E',
'CGEvolve::Delta E': 'delta_E',
'CGEvolve::Bracket count': 'bracket_count',
'CGEvolve::Line min count': 'line_min_count',
'CGEvolve::Conjugate cycle count': 'conjugate_cycle_count',
'CGEvolve::Cycle count': 'cycle_count',
'CGEvolve::Cycle sub count': 'cycle_sub_count',
'CGEvolve::Energy calc count': 'energy_calc_count',
'FixedMEL::Energy': 'MEL_E',
'FixedMEL:magnetoelastic:Energy': 'MEL_E',
'SpinTEvolve:evolver:Total energy': 'E',
'SpinTEvolve:evolver:Energy calc count': 'E_calc_count',
'SpinTEvolve:evolver:Max dm/dt': 'max_dmdt',
'SpinTEvolve:evolver:dE/dt': 'dE/dt',
'SpinTEvolve:evolver:Delta E': 'delta_E',
'SpinTEvolve:evolver:average u': 'average_u',
'SpinXferEvolve:evolver:Total energy': 'E',
'SpinXferEvolve:evolver:Energy calc count': 'E_calc_count',
'SpinXferEvolve:evolver:Max dm/dt': 'max_dmdt',
'SpinXferEvolve:evolver:dE/dt': 'dE/dt',
'SpinXferEvolve:evolver:Delta E': 'delta_E',
'SpinXferEvolve:evolver:average u': 'average_u',
'SpinXferEvolve:evolver:average J': 'average_J',
'ThetaEvolve:evolver:Total energy': 'E',
'ThetaEvolve:evolver:Energy calc count': 'E_calc_count',
'ThetaEvolve:evolver:Max dm/dt': 'max_dmdt',
'ThetaEvolve:evolver:dE/dt': 'dE/dt',
'ThetaEvolve:evolver:Delta E': 'delta_E',
'ThetaEvolve:evolver:Temperature': 'T',
'ThermHeunEvolve:evolver:Total energy': 'E',
'ThermHeunEvolve:evolver:Energy calc count': 'E_calc_count',
'ThermHeunEvolve:evolver:Max dm/dt': 'max_dmdt',
'ThermHeunEvolve:evolver:dE/dt': 'dE/dt',
'ThermHeunEvolve:evolver:Delta E': 'delta_E',
'ThermHeunEvolve:evolver:Temperature': 'T',
'ThermSpinXferEvolve:evolver:Total energy': 'E',
'ThermSpinXferEvolve:evolver:Energy calc count': 'E_calc_count',
'ThermSpinXferEvolve:evolver:Max dm/dt': 'max_dmdt',
'ThermSpinXferEvolve:evolver:dE/dt': 'dE/dt',
'ThermSpinXferEvolve:evolver:Delta E': 'delta_E',
'ThermSpinXferEvolve:evolver:Temperature': 'T',
'MinDriver::Iteration': 'iteration',
'MinDriver::Stage iteration': 'stage_iteration',
'MinDriver::Stage': 'stage',
'MinDriver::mx': 'mx',
'MinDriver::my': 'my',
'MinDriver::mz': 'mz',
'UniformExchange::Max Spin Ang': 'max_spin_ang',
'UniformExchange::Stage Max Spin Ang': 'stage_max_spin_ang',
'UniformExchange::Run Max Spin Ang': 'run_max_spin_ang',
'UniformExchange::Energy': 'E_exchange',
'DMExchange6Ngbr::Energy': 'E',
'DMI_Cnv::Energy': 'E',
'DMI_T::Energy': 'E',
'DMI_D2d::Energy': 'E',
'Demag::Energy': 'E',
'FixedZeeman::Energy': 'E_zeeman',
'UZeeman::Energy': 'E_zeeman',
'UZeeman::B': 'B',
'UZeeman::Bx': 'Bx',
'UZeeman::By': 'By',
'UZeeman::Bz': 'Bz',
'ScriptUZeeman::Energy': 'E',
'ScriptUZeeman::B': 'B',
'ScriptUZeeman::Bx': 'Bx',
'ScriptUZeeman::By': 'By',
'ScriptUZeeman::Bz': 'Bz',
'TransformZeeman::Energy': 'E',
'CubicAnisotropy::Energy': 'E',
'UniaxialAnisotropy::Energy': 'E',
'UniaxialAnisotropy4::Energy': 'E',
'Southampton_UniaxialAnisotropy4::Energy': 'E',
'Exchange6Ngbr::Energy': 'E',
'Exchange6Ngbr::Max Spin Ang': 'max_spin_ang',
'Exchange6Ngbr::Stage Max Spin Ang': 'stage_max_spin_ang',
'Exchange6Ngbr::Run Max Spin Ang': 'run_max_spin_ang',
'ExchangePtwise::Energy': 'E',
'ExchangePtwise::Max Spin Ang': 'max_spin_ang',
'ExchangePtwise::Stage Max Spin Ang': 'stage_max_spin_ang',
'ExchangePtwise::Run Max Spin Ang': 'run_max_spin_ang',
'TwoSurfaceExchange::Energy': 'E'}
# The mumax3 columns are renamed according to this dictionary.
mumax3_dict = {'t': 't',
'mx': 'mx',
'my': 'my',
'mz': 'mz',
'E_total': 'E',
'E_exch': 'E_totalexchange',
'E_demag': 'E_demag',
'E_Zeeman': 'E_zeeman',
'E_anis': 'E_totalanisotropy',
'dt': 'dt',
'maxTorque': 'maxtorque'}
def rename_column(name, cols_dict):
if name in cols_dict.keys():
return cols_dict[name]
for key in cols_dict.keys():
if len(key.split('::')) == 2:
start, end = key.split('::')
name_split = name.split(':')
if name_split[0] == start and name_split[-1] == end:
term_name = name.split(':')[1]
type_name = cols_dict[key]
# required for E_exchange in old and new OOMMF odt files
if not type_name.endswith(term_name):
type_name = f'{type_name}_{term_name}'
return type_name
else:
return name # name cannot be found in dictionary
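# Illustrative behaviour of rename_column (the column names below are hypothetical, not taken
# from the ubermagtable test files):
#   rename_column('TimeDriver::mx', oommf_dict)          -> 'mx'                 (exact key match)
#   rename_column('UZeeman:extfield:Energy', oommf_dict) -> 'E_zeeman_extfield'  (first and last
#       parts match 'UZeeman::Energy'; the middle term name is appended to keep columns distinct)
#   rename_column('Unknown::Column', oommf_dict)         -> 'Unknown::Column'    (left unchanged)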
def columns(filename, rename=True):
"""Extracts column names from a table file.
Parameters
----------
filename : str
OOMMF ``.odt`` or mumax3 ``.txt`` file.
rename : bool
If ``rename=True``, the column names are renamed with their shorter
versions. Defaults to ``True``.
Returns
-------
list
List of column names.
Examples
--------
1. Extracting the column names from an OOMMF `.odt` file.
>>> import os
>>> import ubermagtable.util as uu
...
>>> odtfile = os.path.join(os.path.dirname(__file__), '..',
... 'tests', 'test_sample', 'oommf-new-file1.odt')
>>> uu.columns(odtfile)
[...]
2. Extracting the names of columns from a mumax3 `.txt` file.
>>> odtfile = os.path.join(os.path.dirname(__file__), '..',
... 'tests', 'test_sample', 'mumax3-file1.txt')
>>> uu.columns(odtfile)
[...]
"""
with open(filename) as f:
lines = f.readlines()
if lines[0].startswith('# ODT'): # OOMMF odt file
cline = list(filter(lambda l: l.startswith('# Columns:'), lines))[0]
cline = re.split(r'Oxs_|Anv_|Southampton_|My_|YY_|UHH_|Xf_', cline)[1:]
cline = list(map(lambda col: re.sub(r'[{}]', '', col), cline))
cols = list(map(lambda s: s.strip(), cline))
cols_dict = oommf_dict
else: # mumax3 txt file
cline = lines[0][2:].rstrip().split('\t')
cols = list(map(lambda s: s.split(' ')[0], cline))
cols_dict = mumax3_dict
if rename:
return [rename_column(col, cols_dict) for col in cols]
else:
return cols
def units(filename, rename=True):
"""Extracts units for individual columns from a table file.
This method extracts both column names and units and returns a dictionary,
where keys are column names and values are the units.
Parameters
----------
filename : str
OOMMF ``.odt`` or mumax3 ``.txt`` file.
rename : bool
If ``rename=True``, the column names are renamed with their shorter
versions. Defaults to ``True``.
Returns
-------
dict
Dictionary of column names and units.
Examples
--------
1. Extracting units for individual columns from an OOMMF ``.odt`` file.
>>> import os
>>> import ubermagtable.util as uu
...
>>> odtfile = os.path.join(os.path.dirname(__file__), '..',
... 'tests', 'test_sample', 'oommf-new-file2.odt')
>>> uu.units(odtfile)
{...}
2. Extracting units for individual columns from a mumax3 ``.txt`` file.
>>> odtfile = os.path.join(os.path.dirname(__file__), '..',
... 'tests', 'test_sample', 'mumax3-file1.txt')
>>> uu.units(odtfile)
{...}
"""
with open(filename) as f:
lines = f.readlines()
if lines[0].startswith('# ODT'): # OOMMF odt file
uline = list(filter(lambda l: l.startswith('# Units:'), lines))[0]
units = uline.split()[2:]
units = list(map(lambda s: re.sub(r'[{}]', '', s), units))
else: # mumax3 txt file
uline = lines[0][2:].rstrip().split('\t')
units = list(map(lambda s: s.split()[1], uline))
units = list(map(lambda s: re.sub(r'[()]', '', s), units))
return dict(zip(columns(filename, rename=rename), units))
def data(filename):
"""Extracts numerical data from a table file.
Parameters
----------
filename : str
OOMMF ``.odt`` or mumax3 ``.txt`` file.
Returns
-------
list
List of numerical data.
Examples
--------
1. Reading data from an OOMMF ``.odt`` file.
>>> import os
>>> import ubermagtable.util as uu
...
>>> odtfile = os.path.join(os.path.dirname(__file__), '..',
... 'tests', 'test_sample', 'oommf-new-file3.odt')
>>> uu.data(odtfile)
[...]
2. Reading data from a mumax3 ``.txt`` file.
>>> odtfile = os.path.join(os.path.dirname(__file__), '..',
... 'tests', 'test_sample', 'mumax3-file1.txt')
>>> uu.data(odtfile)
[...]
"""
with open(filename) as f:
lines = f.readlines()
values = []
for line in lines:
if not line.startswith('#'):
values.append(list(map(float, line.split())))
return values
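A minimal sketch combining the three helpers into a table, assuming pandas is installed and that table.odt is an existing OOMMF output file (both are assumptions, not part of this module):
import pandas as pd
import ubermagtable.util as uu

odtfile = 'table.odt'  # hypothetical path to an OOMMF .odt file
df = pd.DataFrame(uu.data(odtfile), columns=uu.columns(odtfile))
print(uu.units(odtfile))  # dictionary mapping each (renamed) column to its unit string
print(df.head())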
avg_line_length: 37.990476 | max_line_length: 79 | alphanum_fraction: 0.543495

Record 6
hexsha: 93585223c0c20531f9c8f07a06241f37fde48754 | size: 10,877 | ext: py | lang: Python
path: assistant-sdk-python/google-assistant-sdk/googlesamples/assistant/grpc/textinput.py
repo: Fishezzz/Google-Pi | head: 6b26729e8be64d3c9545093e3a232c92b89282f8 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample that implements a text client for the Google Assistant Service."""
import RPi.GPIO as GPIO
LED1 = 26
LED2 = 19
LED3 = 13
LED4 = 6
LED5 = 5
LED6 = 0
#def f(x):
# switcher = {
# 'ON': 1,
# 'OFF': 0
# }
# return switcher.get(x, 0)
import os
import time  # needed by the blink() handler below, which calls time.sleep()
import logging
import json
import click
import google.auth.transport.grpc
import google.auth.transport.requests
import google.oauth2.credentials
from google.assistant.embedded.v1alpha2 import (
embedded_assistant_pb2,
embedded_assistant_pb2_grpc
)
try:
from . import (
assistant_helpers,
browser_helpers,
device_helpers
)
except (SystemError, ImportError):
import assistant_helpers
import browser_helpers
import device_helpers
ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
DEFAULT_GRPC_DEADLINE = 60 * 3 + 5
PLAYING = embedded_assistant_pb2.ScreenOutConfig.PLAYING
class SampleTextAssistant(object):
"""Sample Assistant that supports text based conversations.
Args:
language_code: language for the conversation.
device_model_id: identifier of the device model.
device_id: identifier of the registered device instance.
display: enable visual display of assistant response.
channel: authorized gRPC channel for connection to the
Google Assistant API.
deadline_sec: gRPC deadline in seconds for Google Assistant API call.
"""
def __init__(self, language_code, device_model_id, device_id,
display, channel, deadline_sec):
self.language_code = language_code
self.device_model_id = device_model_id
self.device_id = device_id
self.conversation_state = None
# Force reset of first conversation.
self.is_new_conversation = True
self.display = display
self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
channel
)
self.deadline = deadline_sec
def __enter__(self):
return self
def __exit__(self, etype, e, traceback):
if e:
return False
def assist(self, text_query):
"""Send a text request to the Assistant and playback the response.
"""
def iter_assist_requests():
config = embedded_assistant_pb2.AssistConfig(
audio_out_config=embedded_assistant_pb2.AudioOutConfig(
encoding='LINEAR16',
sample_rate_hertz=16000,
volume_percentage=0,
),
dialog_state_in=embedded_assistant_pb2.DialogStateIn(
language_code=self.language_code,
conversation_state=self.conversation_state,
is_new_conversation=self.is_new_conversation,
),
device_config=embedded_assistant_pb2.DeviceConfig(
device_id=self.device_id,
device_model_id=self.device_model_id,
),
text_query=text_query,
)
# Continue current conversation with later requests.
self.is_new_conversation = False
if self.display:
config.screen_out_config.screen_mode = PLAYING
req = embedded_assistant_pb2.AssistRequest(config=config)
assistant_helpers.log_assist_request_without_audio(req)
yield req
text_response = None
html_response = None
for resp in self.assistant.Assist(iter_assist_requests(),
self.deadline):
assistant_helpers.log_assist_response_without_audio(resp)
if resp.screen_out.data:
html_response = resp.screen_out.data
if resp.dialog_state_out.conversation_state:
conversation_state = resp.dialog_state_out.conversation_state
self.conversation_state = conversation_state
if resp.dialog_state_out.supplemental_display_text:
text_response = resp.dialog_state_out.supplemental_display_text
return text_response, html_response
@click.command()
@click.option('--api-endpoint', default=ASSISTANT_API_ENDPOINT,
metavar='<api endpoint>', show_default=True,
help='Address of Google Assistant API service.')
@click.option('--credentials',
metavar='<credentials>', show_default=True,
default=os.path.join(click.get_app_dir('google-oauthlib-tool'),
'credentials.json'),
help='Path to read OAuth2 credentials.')
@click.option('--device-model-id',
metavar='<device model id>',
required=True,
help=(('Unique device model identifier, '
'if not specified, it is read from --device-config')))
@click.option('--device-id',
metavar='<device id>',
required=True,
help=(('Unique registered device instance identifier, '
'if not specified, it is read from --device-config, '
'if no device_config found: a new device is registered '
'using a unique id and a new device config is saved')))
@click.option('--lang', show_default=True,
metavar='<language code>',
default='en-US',
help='Language code of the Assistant')
@click.option('--display', is_flag=True, default=False,
help='Enable visual display of Assistant responses in HTML.')
@click.option('--verbose', '-v', is_flag=True, default=False,
help='Verbose logging.')
@click.option('--grpc-deadline', default=DEFAULT_GRPC_DEADLINE,
metavar='<grpc deadline>', show_default=True,
help='gRPC deadline in seconds')
def main(api_endpoint, credentials,
device_model_id, device_id, lang, display, verbose,
grpc_deadline, *args, **kwargs):
# Setup logging.
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
# Load OAuth 2.0 credentials.
try:
with open(credentials, 'r') as f:
credentials = google.oauth2.credentials.Credentials(token=None,
**json.load(f))
http_request = google.auth.transport.requests.Request()
credentials.refresh(http_request)
except Exception as e:
logging.error('Error loading credentials: %s', e)
logging.error('Run google-oauthlib-tool to initialize '
'new OAuth 2.0 credentials.')
return
# Create an authorized gRPC channel.
grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
credentials, http_request, api_endpoint)
logging.info('Connecting to %s', api_endpoint)
device_handler = device_helpers.DeviceRequestHandler(device_id)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(LED1, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(LED2, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(LED3, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(LED4, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(LED5, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(LED6, GPIO.OUT, initial=GPIO.LOW)
@device_handler.command('action.devices.commands.OnOff')
def onoff(on):
if on:
logging.info('Turning device on')
GPIO.output(LED1, 1)
else:
logging.info('Turning device off')
GPIO.output(LED1, 0)
@device_handler.command('com.example.commands.MAIN')
def MAIN(someValue):
logging.info('someValue is: %s' % someValue)
@device_handler.command('com.example.commands.MyDevices')
def myDevices(status, device):
if status == "ON":
status = 1
else:
status = 0
if device == "LED 1":
logging.info('Turning %s %s' % (device, status))
GPIO.output(LED1, status)
elif device == "LED 2":
logging.info('Turning %s %s' % (device, status))
GPIO.output(LED2, status)
elif device == "LED 3":
logging.info('Turning %s %s' % (device, status))
GPIO.output(LED3, status)
elif device == "LED 4":
logging.info('Turning %s %s' % (device, status))
GPIO.output(LED4, status)
elif device == "LED 5":
logging.info('Turning %s %s' % (device, status))
GPIO.output(LED5, status)
elif device == "LED 6":
logging.info('Turning %s %s' % (device, status))
GPIO.output(LED6, status)
elif device == "ALL_LEDS":
logging.info('Turning all leds %s' % status)
GPIO.output(LED1, status)
GPIO.output(LED2, status)
GPIO.output(LED3, status)
GPIO.output(LED4, status)
GPIO.output(LED5, status)
GPIO.output(LED6, status)
else:
logging.info('Something went wrong')
@device_handler.command('com.example.commands.BlinkLight')
def blink(speed, number):
logging.info('Blinking device %s times.' % number)
delay = 1
if speed == "SLOWLY":
delay = 2
elif speed == "QUICKLY":
delay = 0.5
for i in range(int(number)):
logging.info('Device is blinking.')
GPIO.output(LED2, 1)
time.sleep(delay)
GPIO.output(LED2, 0)
time.sleep(delay)
@device_handler.command('com.example.commands.LEDColor')
def LEDColor(device, color):
logging.info('Making %s %s' % (device, color.get('name')))
with SampleTextAssistant(lang, device_model_id, device_id, display,
grpc_channel, grpc_deadline) as assistant:
while True:
query = click.prompt('')
click.echo('<you> %s' % query)
response_text, response_html = assistant.assist(text_query=query)
if display and response_html:
system_browser = browser_helpers.system_browser
system_browser.display(response_html)
if response_text:
click.echo('<@assistant> %s' % response_text)
if __name__ == '__main__':
main()
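A sketch of how this sample is typically started from a shell (the model and device IDs are placeholders; both options are required by the click command above):
# python textinput.py --device-model-id <your-model-id> --device-id <your-device-id>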
avg_line_length: 37.636678 | max_line_length: 79 | alphanum_fraction: 0.617174

Record 7
hexsha: 48fe14c33e54dfb399a8d5177f747765f5983d90 | size: 602 | ext: py | lang: Python
path: rabbitgetapi/__init__.py
repo: Sidon/get-rabbitmq-messages | head: 8feff8c9b9edee863d875966f5e5f3a5eb6ab06a | licenses: ["MIT"]
stars: 11 (2022-01-10T13:49:39.000Z to 2022-01-11T05:57:45.000Z) | issues: null | forks: null
content:
# Copyleft 2021 Sidon Duarte and contributors
__all__ = (
"__title__",
"__summary__",
"__uri__",
"__version__",
"__author__",
"__email__",
"__license__",
"__copyright__",
)
__copyright__ = "Copyright 2021 Sidon Duarte and individual contributors"
import importlib_metadata
metadata = importlib_metadata.metadata("rabbitgetapi-sdn")
__title__ = metadata["name"]
__summary__ = metadata["summary"]
__uri__ = metadata["home-page"]
__version__ = metadata["version"]
__author__ = metadata["author"]
__email__ = metadata["author-email"]
__license__ = metadata["license"]
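Once the distribution is installed, this metadata is exposed as module attributes; a small illustration (assumed interactive session):
# >>> import rabbitgetapi
# >>> rabbitgetapi.__title__, rabbitgetapi.__version__   # read from the installed 'rabbitgetapi-sdn' metadata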
avg_line_length: 21.5 | max_line_length: 73 | alphanum_fraction: 0.722591

Record 8
hexsha: 7952766da6086698fbd6c44b7d836aab65ab99c2 | size: 1,767 | ext: py | lang: Python
path: Coloring/learning/ppo/utils/arguments.py
repo: zarahz/MARL-and-Markets | head: 3591a160e098e7251b9e7c7b59c6d0ab08ba0779 | licenses: ["MIT"]
stars: 1 (2022-03-12T09:17:32.000Z to 2022-03-12T09:17:32.000Z) | issues: null | forks: null
content:
import argparse
def get_train_args(parser):
'''
Add PPO relevant training arguments to the parser.
'''
# epochs, typical range (3, 30): how many times the collected experience is reused for learning
parser.add_argument("--epochs", type=int, default=4,
help="[PPO] Number of epochs for PPO optimization. (default: 4)")
# GAE = generalized advantage estimation, computed together with the advantage estimator:
# A_hat_t^{GAE(gamma, lambda)} = sum over l of (gamma * lambda)^l * delta_{t+l}^V
# typical range (0.9, 1)
parser.add_argument("--gae-lambda", type=float, default=0.95,
help="[PPO] Lambda coefficient in GAE formula, used for calculation of the advantage values. (default: 0.95, 1 means no gae)")
# entropy coefficient -> c2 * S[pi_theta](state_t),
# where S is the entropy bonus
# typical range (0, 0.01)
parser.add_argument("--entropy-coef", type=float, default=0.01,
help="[PPO] Entropy term coefficient. (default: 0.01)")
# value function coefficient -> c1 * value-function loss at time t,
# with L_t^VF = (V_theta(state_t) - V_t^targ)^2, i.e. a squared-error loss
# typical range (0.5, 1)
# needed when parameters are shared between the policy and the value function
parser.add_argument("--value-loss-coef", type=float, default=0.5,
help="[PPO] Value loss term coefficient. (default: 0.5)")
parser.add_argument("--max-grad-norm", type=float, default=0.5,
help="[PPO] Maximum norm of gradient. (default: 0.5)")
# clipping epsilon, typical range (0.1, 0.3)
parser.add_argument("--clip-eps", type=float, default=0.2,
help="[PPO] Clipping epsilon for PPO. (default: 0.2)")
return parser
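A minimal usage sketch (the top-level parser is created locally here for illustration; the surrounding project presumably supplies its own):
import argparse

parser = argparse.ArgumentParser()
parser = get_train_args(parser)
args = parser.parse_args([])  # fall back to the defaults
print(args.epochs, args.gae_lambda, args.clip_eps)  # -> 4 0.95 0.2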
avg_line_length: 49.083333 | max_line_length: 150 | alphanum_fraction: 0.625354

Record 9
hexsha: 1b7d6afe3285f1d3ba712656dfea009cec12fdd5 | size: 20,105 | ext: py | lang: Python
path: Editor/EditorCode/UnitDialogs.py
repo: Shahrose/lex-talionis | head: ef7e48124b36269f4212eb0e3a7747caf53bfadd | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import sys
from PyQt5.QtWidgets import QGridLayout, QDialog, QFormLayout, QLabel, QComboBox, \
QCheckBox, QLineEdit, QDialogButtonBox, QSpinBox, QPushButton
from PyQt5.QtCore import Qt, QSize
sys.path.append('../')
import Code.configuration as cf
import Code.Engine as Engine
# So that the code basically starts looking in the parent directory
Engine.engine_constants['home'] = '../'
import Code.GlobalConstants as GC
from . import DataImport
from EditorCode.DataImport import Data
from . import EditorUtilities
from EditorCode.CustomGUI import GenderBox
from EditorCode.multi_select_combo_box import MultiSelectComboBox
class HasModes(object):
def create_mode_combobox(self):
self.mode_box = MultiSelectComboBox()
mode_names = list(mode['name'] for mode in GC.DIFFICULTYDATA.values())
for mode_name in mode_names:
self.mode_box.addItem(mode_name)
self.mode_box.setCurrentTexts(mode_names)
self.form.addRow('Modes:', self.mode_box)
def populate_mode(self, unit):
self.mode_box.ResetSelection()
self.mode_box.setCurrentTexts(unit.mode)
def get_modes(self):
return self.mode_box.currentText()
class LoadUnitDialog(QDialog, HasModes):
def __init__(self, instruction, parent):
super(LoadUnitDialog, self).__init__(parent)
self.form = QFormLayout(self)
self.form.addRow(QLabel(instruction))
self.create_menus()
def create_menus(self):
# Team
self.team_box = QComboBox()
self.team_box.uniformItemSizes = True
for team in DataImport.teams:
self.team_box.addItem(team)
self.team_box.currentTextChanged.connect(self.team_changed)
self.form.addRow('Team:', self.team_box)
# Unit Select
self.unit_box = QComboBox()
self.unit_box.uniformItemSizes = True
self.unit_box.setIconSize(QSize(32, 32))
self.unit_data = list(Data.unit_data.values())
for idx, unit in enumerate(self.unit_data):
if unit.image:
self.unit_box.addItem(EditorUtilities.create_icon(unit.image), unit.id)
else:
self.unit_box.addItem(unit.id)
self.form.addRow(self.unit_box)
# Saved
self.saved_checkbox = QCheckBox('Load from last level?')
self.form.addRow(self.saved_checkbox)
# AI
self.ai_select = QComboBox()
self.ai_select.uniformItemSizes = True
for ai_name in GC.AIDATA:
self.ai_select.addItem(ai_name)
self.form.addRow('Select AI:', self.ai_select)
# AI Group
self.ai_group = QLineEdit()
self.form.addRow('AI Group:', self.ai_group)
self.create_mode_combobox()
self.ai_select.setEnabled(str(self.team_box.currentText()) != 'player')
self.ai_group.setEnabled(str(self.team_box.currentText()) != 'player')
self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, self)
self.form.addRow(self.buttonbox)
self.buttonbox.accepted.connect(self.accept)
self.buttonbox.rejected.connect(self.reject)
def load(self, unit):
EditorUtilities.setComboBox(self.unit_box, unit.id)
EditorUtilities.setComboBox(self.team_box, unit.team)
self.saved_checkbox.setChecked(unit.saved)
EditorUtilities.setComboBox(self.ai_select, unit.ai)
if unit.ai_group:
self.ai_group.setText(str(unit.ai_group))
self.populate_mode(unit)
def team_changed(self, item):
self.saved_checkbox.setEnabled(str(item) == 'player')
self.ai_select.setEnabled(str(item) != 'player')
self.ai_group.setEnabled(str(item) != 'player')
def get_ai(self):
return str(self.ai_select.currentText()) if self.ai_select.isEnabled() else 'None'
def get_team(self):
return str(self.team_box.currentText())
@staticmethod
def getUnit(parent, title, instruction, current_unit=None):
dialog = LoadUnitDialog(instruction, parent)
if current_unit:
dialog.load(current_unit)
dialog.setWindowTitle(title)
result = dialog.exec_()
if result == QDialog.Accepted:
unit = list(Data.unit_data.values())[dialog.unit_box.currentIndex()]
unit.team = dialog.get_team()
unit.ai = dialog.get_ai()
unit.saved = bool(dialog.saved_checkbox.isChecked())
unit.ai_group = dialog.ai_group.text()
unit.mode = dialog.get_modes()
return unit, True
else:
return None, False
class ReinLoadUnitDialog(LoadUnitDialog):
def __init__(self, instruction, parent):
super(LoadUnitDialog, self).__init__(parent)
self.form = QFormLayout(self)
self.form.addRow(QLabel(instruction))
# Pack
self.pack = QLineEdit(parent.current_pack())
self.form.addRow('Group:', self.pack)
self.create_menus()
def load(self, unit):
EditorUtilities.setComboBox(self.unit_box, unit.id)
EditorUtilities.setComboBox(self.team_box, unit.team)
self.saved_checkbox.setChecked(unit.saved)
print(unit.ai)
EditorUtilities.setComboBox(self.ai_select, unit.ai)
if unit.ai_group:
self.ai_group.setText(str(unit.ai_group))
if unit.pack:
self.pack.setText(unit.pack)
self.populate_mode(unit)
@staticmethod
def getUnit(parent, title, instruction, current_unit=None):
dialog = ReinLoadUnitDialog(instruction, parent)
if current_unit:
dialog.load(current_unit)
dialog.setWindowTitle(title)
result = dialog.exec_()
if result == QDialog.Accepted:
unit = list(Data.unit_data.values())[dialog.unit_box.currentIndex()]
unit.team = dialog.get_team()
unit.ai = dialog.get_ai()
unit.saved = bool(dialog.saved_checkbox.isChecked())
unit.ai_group = str(dialog.ai_group.text())
unit.mode = dialog.get_modes()
unit.pack = str(dialog.pack.text())
same_pack = [rein for rein in parent.unit_data.reinforcements if rein.pack == unit.pack and
any(mode in unit.mode for mode in rein.mode)]
unit.event_id = EditorUtilities.next_available_event_id(same_pack, unit)
return unit, True
else:
return None, False
class CreateUnitDialog(QDialog, HasModes):
def __init__(self, instruction, unit_data, parent):
super(CreateUnitDialog, self).__init__(parent)
self.form = QFormLayout(self)
self.form.addRow(QLabel(instruction))
self.unit_data = unit_data
self.create_menus()
def create_menus(self):
# Team
self.team_box = QComboBox()
self.team_box.uniformItemSizes = True
for team in DataImport.teams:
self.team_box.addItem(team)
self.team_box.activated.connect(self.team_changed)
self.form.addRow('Team:', self.team_box)
# Class
self.class_box = QComboBox()
self.class_box.uniformItemSizes = True
self.class_box.setIconSize(QSize(48, 32))
for klass in Data.class_data.values():
self.class_box.addItem(EditorUtilities.create_icon(klass.get_image('player', 0)), klass.name)
self.form.addRow('Class:', self.class_box)
# Level
self.level = QSpinBox()
self.level.setMinimum(1)
self.form.addRow('Level:', self.level)
# Gender
self.gender = GenderBox(self)
self.form.addRow('Gender:', self.gender)
# Items
item_grid = self.set_up_items()
self.form.addRow('Items: ', item_grid)
# AI
self.ai_select = QComboBox()
self.ai_select.uniformItemSizes = True
for ai_name in GC.AIDATA:
self.ai_select.addItem(ai_name)
self.form.addRow('AI:', self.ai_select)
# AI Group
self.ai_group = QLineEdit()
self.form.addRow('AI Group:', self.ai_group)
self.ai_select.setEnabled(str(self.team_box.currentText()) != 'player')
self.ai_group.setEnabled(str(self.team_box.currentText()) != 'player')
# Faction
self.faction_select = QComboBox()
self.faction_select.uniformItemSizes = True
self.faction_select.setIconSize(QSize(32, 32))
for faction_name, faction in self.unit_data.factions.items():
image = GC.UNITDICT.get(faction.faction_icon + 'Emblem')
if image:
self.faction_select.addItem(EditorUtilities.create_icon(image.convert_alpha()), faction.faction_id)
else:
self.faction_select.addItem(faction.faction_id)
self.form.addRow('Faction:', self.faction_select)
self.create_mode_combobox()
self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, self)
self.form.addRow(self.buttonbox)
self.buttonbox.accepted.connect(self.accept)
self.buttonbox.rejected.connect(self.reject)
def load(self, unit):
EditorUtilities.setComboBox(self.faction_select, unit.faction)
self.level.setValue(unit.level)
self.gender.setValue(unit.gender)
EditorUtilities.setComboBox(self.team_box, unit.team)
EditorUtilities.setComboBox(self.class_box, unit.klass)
EditorUtilities.setComboBox(self.ai_select, unit.ai)
if unit.ai_group:
self.ai_group.setText(str(unit.ai_group))
# === Items ===
self.clear_item_box()
for index, item in enumerate(unit.items):
self.add_item_box()
item_box, drop_box, event_box = self.item_boxes[index]
drop_box.setChecked(item.droppable)
event_box.setChecked(item.event_combat)
item_box.setCurrentIndex(list(Data.item_data.keys()).index(item.id))
# === Mode ===
self.populate_mode(unit)
self.team_changed(0)
self.gender_changed(unit.gender)
def set_up_items(self):
item_grid = QGridLayout()
self.add_item_button = QPushButton('Add Item')
self.add_item_button.clicked.connect(self.add_item_box)
self.remove_item_button = QPushButton('Remove Item')
self.remove_item_button.clicked.connect(self.remove_item_box)
self.item_boxes = []
for num in range(cf.CONSTANTS['max_items']):
self.item_boxes.append((self.create_item_combo_box(), QCheckBox(), QCheckBox()))
for index, item in enumerate(self.item_boxes):
item_box, drop, event = item
item_grid.addWidget(item_box, index + 1, 0, 1, 2, Qt.AlignTop)
item_grid.addWidget(drop, index + 1, 2, Qt.AlignTop)
item_grid.addWidget(event, index + 1, 3, Qt.AlignTop)
item_grid.addWidget(QLabel('Name:'), 0, 0, 1, 2, Qt.AlignTop)
item_grid.addWidget(QLabel('Drop?'), 0, 2, Qt.AlignTop)
item_grid.addWidget(QLabel('Event?'), 0, 3, Qt.AlignTop)
item_grid.addWidget(self.add_item_button, cf.CONSTANTS['max_items'] + 2, 0, 1, 2, Qt.AlignBottom)
item_grid.addWidget(self.remove_item_button, cf.CONSTANTS['max_items'] + 2, 2, 1, 2, Qt.AlignBottom)
self.clear_item_box()
return item_grid
# Item functions
def clear_item_box(self):
for index, (item_box, drop, event) in enumerate(self.item_boxes):
item_box.hide(); drop.hide(); event.hide()
self.num_items = 0
self.remove_item_button.setEnabled(False)
def add_item_box(self):
self.num_items += 1
self.remove_item_button.setEnabled(True)
item_box, drop, event = self.item_boxes[self.num_items - 1]
item_box.show(); drop.show(); event.show()
if self.num_items >= cf.CONSTANTS['max_items']:
self.add_item_button.setEnabled(False)
def remove_item_box(self):
self.num_items -= 1
self.add_item_button.setEnabled(True)
item_box, drop, event = self.item_boxes[self.num_items]
item_box.hide(); drop.hide(); event.hide()
if self.num_items <= 0:
self.remove_item_button.setEnabled(False)
def create_item_combo_box(self):
item_box = QComboBox()
item_box.uniformItemSizes = True
item_box.setIconSize(QSize(16, 16))
for item in Data.item_data.values():
if item.image:
item_box.addItem(EditorUtilities.create_icon(item.image), item.name)
else:
item_box.addItem(item.name)
return item_box
def getItems(self):
items = []
for index, (item_box, drop_box, event_box) in enumerate(self.item_boxes[:self.num_items]):
item = list(Data.item_data.values())[item_box.currentIndex()]
item.droppable = drop_box.isChecked()
item.event_combat = event_box.isChecked()
items.append(item)
return items
def team_changed(self, idx):
# Change class box to use sprites of that team
# And also turn off AI
team = str(self.team_box.currentText())
print("Team changed to %s" % team)
self.ai_select.setEnabled(team != 'player')
self.ai_group.setEnabled(team != 'player')
for idx, klass in enumerate(Data.class_data.values()):
gender = int(self.gender.value())
self.class_box.setItemIcon(idx, EditorUtilities.create_icon(klass.get_image(team, gender)))
def gender_changed(self, item):
gender = int(item)
print("Gender changed to %s" % gender)
for idx, klass in enumerate(Data.class_data.values()):
team = str(self.team_box.currentText())
self.class_box.setItemIcon(idx, EditorUtilities.create_icon(klass.get_image(team, gender)))
def get_ai(self):
return str(self.ai_select.currentText()) if self.ai_select.isEnabled() else 'None'
def create_unit(self, current_unit=None):
info = {}
info['faction'] = str(self.faction_select.currentText())
faction = self.unit_data.factions[info['faction']]
if faction:
info['name'] = faction.unit_name
info['faction_icon'] = faction.faction_icon
info['desc'] = faction.desc
info['level'] = int(self.level.value())
info['gender'] = int(self.gender.value())
info['klass'] = str(self.class_box.currentText())
info['items'] = self.getItems()
info['ai'] = self.get_ai()
info['ai_group'] = str(self.ai_group.text())
info['team'] = str(self.team_box.currentText())
info['generic'] = True
info['mode'] = self.get_modes()
created_unit = DataImport.Unit(info)
return created_unit
def modify_current_unit(self, unit):
unit.faction = str(self.faction_select.currentText())
faction = self.unit_data.factions[unit.faction]
if faction:
unit.name = faction.unit_name
unit.faction_icon = faction.faction_icon
unit.desc = faction.desc
unit.level = int(self.level.value())
unit.gender = int(self.gender.value())
unit.klass = str(self.class_box.currentText())
unit.items = self.getItems()
unit.ai = self.get_ai()
unit.ai_group = str(self.ai_group.text())
unit.team = str(self.team_box.currentText())
unit.generic = True
unit.mode = self.get_modes()
return unit
@classmethod
def getUnit(cls, parent, title, instruction, current_unit=None):
dialog = cls(instruction, parent.unit_data, parent)
if current_unit:
dialog.load(current_unit)
dialog.setWindowTitle(title)
result = dialog.exec_()
if result == QDialog.Accepted:
unit = dialog.create_unit(current_unit)
return unit, True
else:
return None, False
class ReinCreateUnitDialog(CreateUnitDialog):
def __init__(self, instruction, unit_data, parent):
super(CreateUnitDialog, self).__init__(parent)
self.form = QFormLayout(self)
self.form.addRow(QLabel(instruction))
self.unit_data = unit_data
# Pack
self.pack = QLineEdit(parent.current_pack())
self.form.addRow('Pack:', self.pack)
self.create_menus()
def load(self, unit):
EditorUtilities.setComboBox(self.faction_select, unit.faction)
self.level.setValue(unit.level)
self.gender.setValue(unit.gender)
EditorUtilities.setComboBox(self.team_box, unit.team)
EditorUtilities.setComboBox(self.class_box, unit.klass)
EditorUtilities.setComboBox(self.ai_select, unit.ai)
if unit.ai_group:
self.ai_group.setText(str(unit.ai_group))
if unit.pack:
self.pack.setText(unit.pack)
# === Items ===
self.clear_item_box()
for index, item in enumerate(unit.items):
self.add_item_box()
item_box, drop_box, event_box = self.item_boxes[index]
drop_box.setChecked(item.droppable)
event_box.setChecked(item.event_combat)
item_box.setCurrentIndex(list(Data.item_data.keys()).index(item.id))
self.populate_mode(unit)
self.team_changed(0)
self.gender_changed(unit.gender)
def create_unit(self, current_unit=None):
info = {}
info['faction'] = str(self.faction_select.currentText())
faction = self.unit_data.factions[info['faction']]
if faction:
info['name'] = faction.unit_name
info['faction_icon'] = faction.faction_icon
info['desc'] = faction.desc
info['level'] = int(self.level.value())
info['gender'] = int(self.gender.value())
info['klass'] = str(self.class_box.currentText())
info['items'] = self.getItems()
info['ai'] = self.get_ai()
info['ai_group'] = str(self.ai_group.text())
info['mode'] = self.get_modes()
info['pack'] = str(self.pack.text())
pack_mates = [rein for rein in self.unit_data.reinforcements if rein.pack == info['pack'] and
any(mode in info['mode'] for mode in rein.mode)]
info['event_id'] = EditorUtilities.next_available_event_id(pack_mates, current_unit)
info['team'] = str(self.team_box.currentText())
info['generic'] = True
created_unit = DataImport.Unit(info)
return created_unit
def modify_current_unit(self, unit):
unit.faction = str(self.faction_select.currentText())
faction = self.unit_data.factions[unit.faction]
if faction:
unit.name = faction.unit_name
unit.faction_icon = faction.faction_icon
unit.desc = faction.desc
unit.level = int(self.level.value())
unit.gender = int(self.gender.value())
unit.klass = str(self.class_box.currentText())
unit.items = self.getItems()
unit.ai = self.get_ai()
unit.ai_group = str(self.ai_group.text())
unit.mode = self.get_modes()
unit.pack = str(self.pack.text())
pack_mates = [rein for rein in self.unit_data.reinforcements if rein.pack == unit.pack and
any(mode in unit.mode for mode in rein.mode)]
unit.event_id = EditorUtilities.next_available_event_id(pack_mates, unit)
unit.team = str(self.team_box.currentText())
unit.generic = True
return unit
avg_line_length: 40.780933 | max_line_length: 116 | alphanum_fraction: 0.622134

Record 10
hexsha: 98286ecf42abd11446bc5c8f1c94f7b049243681 | size: 3,012 | ext: py | lang: Python
path: generate_data.py
repo: EliteAi-grad/SVM-SMO | head: 9974d4678e2db2c3b13c359e265fc61ed2c786d9 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
import numpy as np
# Copyright 2021 Google Inc. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#==============================================================================
#
class Data(object):
# Dataset 1: generating a linearly separable dataset
def generate_linearlydataset_linear(self,seed=1):
np.random.seed(seed)
mean1 = np.array([0,3])
mean2 = np.array([3,0])
return mean1, mean2
def generate_helperdataset(self,mean1, cov, mean2):
X1 = np.random.multivariate_normal(mean1, cov, 100)
y1 = np.ones(len(X1))
X2 = np.random.multivariate_normal(mean2, cov, 100)
y2 = np.ones(len(X2)) * -1
return X1, y1, X2, y2
def generate_linearly_separable_data(self,seed=1):
mean1, mean2 = self.generate_linearlydataset_linear()
cov = np.array([[0.4, 0.7], [0.7, 0.4]])
return self.generate_helperdataset(mean1,cov,mean2)
def gen_non_lin_separable_data(self,seed=1):
np.random.seed(seed)
mean1 = [-5, 7]
mean2 = [7, -5]
mean3 = [11, -9]
mean4 = [-9, 11]
cov = [[2.1, 0.9], [0.9, 2.1]]
X1 = np.random.multivariate_normal(mean1, cov, 50)
X1 = np.vstack((X1, np.random.multivariate_normal(mean3, cov, 50)))
y1 = np.ones(len(X1))
X2 = np.random.multivariate_normal(mean2, cov, 50)
X2 = np.vstack((X2, np.random.multivariate_normal(mean4, cov, 50)))
y2 = np.ones(len(X2)) * -1
return X1, y1, X2, y2
def gen_lin_separable_overlap_data(self,seed=1):
np.random.seed(seed)
mean1 = np.array([-3, 7])
mean2 = np.array([7, -3])
cov = np.array([[3.5, 2.7], [2.7, 3.5]])
return self.generate_helperdataset(mean1,cov,mean2)
def split_data(X1, y1, X2, y2,percent):
dataset_size = len(X1)
threshold = int(dataset_size * percent)
# Training data: binary classifier X1, X2
X1_train = X1[:threshold]
y1_train = y1[:threshold]
X2_train = X2[:threshold]
y2_train = y2[:threshold]
#stack datasets
X_train = np.vstack((X1_train, X2_train))
y_train = np.hstack((y1_train, y2_train))
# Test data:
X1_test = X1[threshold:]
y1_test = y1[threshold:]
X2_test = X2[threshold:]
y2_test = y2[threshold:]
X_test = np.vstack((X1_test, X2_test))
y_test = np.hstack((y1_test, y2_test))
return X_train, y_train, X_test, y_test
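A minimal usage sketch under the layout above (note that split_data is written without a self parameter, so how it is called depends on whether it sits at module or class level in the original file):
data = Data()
X1, y1, X2, y2 = data.gen_non_lin_separable_data(seed=1)
# Depending on where split_data sits in the original file (it takes no `self`):
#   X_train, y_train, X_test, y_test = split_data(X1, y1, X2, y2, 0.8)        # module-level helper
#   X_train, y_train, X_test, y_test = Data.split_data(X1, y1, X2, y2, 0.8)   # defined inside the class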
avg_line_length: 35.857143 | max_line_length: 79 | alphanum_fraction: 0.608566

Record 11
hexsha: e2fdde71ebd00c8776525d585ae7adb57473fd1d | size: 1,334 | ext: py | lang: Python
path: Intermediate/regex.py
repo: Fernal73/LearnPython3 | head: 5288017c0dbf95633b84f1e6324f00dec6982d36 | licenses: ["MIT"]
stars: 1 (2021-12-17T11:03:13.000Z to 2021-12-17T11:03:13.000Z) | issues: 1 (2020-02-05T00:14:43.000Z to 2020-02-06T09:22:49.000Z) | forks: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
valid_countries = ["US", "IT", "FR"]
def extract_price(description, country):
if country not in valid_countries:
return
if country == "US":
pattern = re.compile(r'\$(\d+\.\d+)')
match = pattern.search(description)
if not match:
return
try:
return float(match.groups()[0])
except ValueError:
return
if country == "IT":
pattern = re.compile(r'€(\d+[,]\d+)')
match = pattern.search(description)
if not match:
return
try:
return float(match.groups()[0].replace(",","."))
except ValueError:
return
if country == "FR":
pattern = re.compile(r'€(\d+[,]\d+)|(\d+€\d+)')
match = pattern.search(description)
if not match:
return
try:
val = [gr for gr in match.groups() if gr is not None]
val = val[0].replace(",",".")
val = val.replace("€",".")
return float(val)
except ValueError:
return
usdprice = "$24.99"
itprice = "€24,99"
frprice = "24€99"
print(extract_price(usdprice,"US"))
print(extract_price(itprice,"IT"))
print(extract_price(itprice,"FR"))
print(extract_price(frprice,"FR"))
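For reference, the four calls above should print (values derived from the regular expressions, not captured output):
# 24.99   <- extract_price("$24.99", "US")
# 24.99   <- extract_price("€24,99", "IT")
# 24.99   <- extract_price("€24,99", "FR")
# 24.99   <- extract_price("24€99", "FR")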
| 22.610169
| 61
| 0.51949
|
76f29ccb0ef9a81ea291d9a21fd9bca99f856caf
| 28,306
|
py
|
Python
|
exchangelib/autodiscover/discovery.py
|
RossK1/exchangelib
|
5550c2fbcc064943e3b4e150f74a724e0bd0a9f3
|
[
"BSD-2-Clause"
] | 1,006
|
2016-07-18T16:42:55.000Z
|
2022-03-31T10:43:50.000Z
|
exchangelib/autodiscover/discovery.py
|
RossK1/exchangelib
|
5550c2fbcc064943e3b4e150f74a724e0bd0a9f3
|
[
"BSD-2-Clause"
] | 966
|
2016-05-13T18:55:43.000Z
|
2022-03-31T15:24:56.000Z
|
exchangelib/autodiscover/discovery.py
|
RossK1/exchangelib
|
5550c2fbcc064943e3b4e150f74a724e0bd0a9f3
|
[
"BSD-2-Clause"
] | 272
|
2016-04-05T02:17:10.000Z
|
2022-03-24T08:15:57.000Z
|
import logging
import time
from urllib.parse import urlparse
import dns.resolver
from cached_property import threaded_cached_property
from .cache import autodiscover_cache
from .properties import Autodiscover
from .protocol import AutodiscoverProtocol
from ..configuration import Configuration
from ..credentials import OAuth2Credentials
from ..errors import AutoDiscoverFailed, AutoDiscoverCircularRedirect, TransportError, RedirectError, UnauthorizedError
from ..protocol import Protocol, FailFast
from ..transport import get_auth_method_from_response, DEFAULT_HEADERS, NOAUTH, OAUTH2, GSSAPI, AUTH_TYPE_MAP, \
CREDENTIALS_REQUIRED
from ..util import post_ratelimited, get_domain, get_redirect_url, _back_off_if_needed, \
DummyResponse, CONNECTION_ERRORS, TLS_ERRORS
from ..version import Version
log = logging.getLogger(__name__)
def discover(email, credentials=None, auth_type=None, retry_policy=None):
return Autodiscovery(
email=email, credentials=credentials, auth_type=auth_type, retry_policy=retry_policy
).discover()
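# Usage sketch (comment only, not part of the upstream module): discover() returns a
# tuple of (autodiscover response, Protocol). The Credentials import below is an
# assumption about how username/password credentials are constructed; adapt as needed.
#
#   from exchangelib.credentials import Credentials
#   ad_response, protocol = discover('user@example.com',
#                                    credentials=Credentials('MYDOMAIN\\user', 'secret'))
#   print(ad_response.ews_url)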
class SrvRecord:
"""A container for autodiscover-related SRV records in DNS."""
def __init__(self, priority, weight, port, srv):
self.priority = priority
self.weight = weight
self.port = port
self.srv = srv
def __eq__(self, other):
for k in self.__dict__:
if getattr(self, k) != getattr(other, k):
return False
return True
class Autodiscovery:
"""Autodiscover is a Microsoft protocol for automatically getting the endpoint of the Exchange server and other
connection-related settings holding the email address using only the email address, and username and password of the
user.
For a description of the protocol implemented, see "Autodiscover for Exchange ActiveSync developers":
https://docs.microsoft.com/en-us/previous-versions/office/developer/exchange-server-interoperability-guidance/hh352638%28v%3dexchg.140%29
Descriptions of the steps from the article are provided in their respective methods in this class.
For a description of how to handle autodiscover error messages, see:
https://docs.microsoft.com/en-us/exchange/client-developer/exchange-web-services/handling-autodiscover-error-messages
A tip from the article:
The client can perform steps 1 through 4 in any order or in parallel to expedite the process, but it must wait for
responses to finish at each step before proceeding. Given that many organizations prefer to use the URL in step 2 to
set up the Autodiscover service, the client might try this step first.
Another possibly newer resource which has not yet been attempted is "Outlook 2016 Implementation of Autodiscover":
https://support.microsoft.com/en-us/help/3211279/outlook-2016-implementation-of-autodiscover
WARNING: The autodiscover protocol is very complicated. If you have problems autodiscovering using this
implementation, start by doing an official test at https://testconnectivity.microsoft.com
"""
# When connecting to servers that may not be serving the correct endpoint, we should use a retry policy that does
# not leave us hanging for a long time on each step in the protocol.
INITIAL_RETRY_POLICY = FailFast()
RETRY_WAIT = 10 # Seconds to wait before retry on connection errors
MAX_REDIRECTS = 10 # Maximum number of URL redirects before we give up
DNS_RESOLVER_KWARGS = {}
DNS_RESOLVER_ATTRS = {
'timeout': AutodiscoverProtocol.TIMEOUT,
}
def __init__(self, email, credentials=None, auth_type=None, retry_policy=None):
"""
:param email: The email address to autodiscover
:param credentials: Credentials with authorization to make autodiscover lookups for this Account
(Default value = None)
:param auth_type: (Default value = None)
:param retry_policy: (Default value = None)
"""
self.email = email
self.credentials = credentials
self.auth_type = auth_type # The auth type that the resulting protocol instance should have
self.retry_policy = retry_policy # The retry policy that the resulting protocol instance should have
self._urls_visited = [] # Collects HTTP and Autodiscover redirects
self._redirect_count = 0
self._emails_visited = [] # Collects Autodiscover email redirects
def discover(self):
self._emails_visited.append(self.email.lower())
# Check the autodiscover cache to see if we already know the autodiscover service endpoint for this email
# domain. Use a lock to guard against multiple threads competing to cache information.
log.debug('Waiting for autodiscover_cache lock')
with autodiscover_cache:
log.debug('autodiscover_cache lock acquired')
cache_key = self._cache_key
domain = get_domain(self.email)
if cache_key in autodiscover_cache:
ad_protocol = autodiscover_cache[cache_key]
log.debug('Cache hit for key %s: %s', cache_key, ad_protocol.service_endpoint)
try:
ad_response = self._quick(protocol=ad_protocol)
except AutoDiscoverFailed:
# Autodiscover no longer works with this domain. Clear cache and try again after releasing the lock
log.debug('AD request failure. Removing cache for key %s', cache_key)
del autodiscover_cache[cache_key]
ad_response = self._step_1(hostname=domain)
else:
# This will cache the result
ad_response = self._step_1(hostname=domain)
log.debug('Released autodiscover_cache_lock')
if ad_response.redirect_address:
log.debug('Got a redirect address: %s', ad_response.redirect_address)
if ad_response.redirect_address.lower() in self._emails_visited:
raise AutoDiscoverCircularRedirect('We were redirected to an email address we have already seen')
# Start over, but with the new email address
self.email = ad_response.redirect_address
return self.discover()
# We successfully received a response. Clear the cache of seen emails etc.
self.clear()
return self._build_response(ad_response=ad_response)
def clear(self):
# This resets cached variables
self._urls_visited = []
self._redirect_count = 0
self._emails_visited = []
@property
def _cache_key(self):
# We may be using multiple different credentials and changing our minds on TLS verification. This key
# combination should be safe for caching.
domain = get_domain(self.email)
return domain, self.credentials
@threaded_cached_property
def resolver(self):
resolver = dns.resolver.Resolver(**self.DNS_RESOLVER_KWARGS)
for k, v in self.DNS_RESOLVER_ATTRS.items():
setattr(resolver, k, v)
return resolver
def _build_response(self, ad_response):
ews_url = ad_response.ews_url
if not ews_url:
raise AutoDiscoverFailed("Response is missing an 'ews_url' value")
if not ad_response.autodiscover_smtp_address:
# Autodiscover does not always return an email address. In that case, the requesting email should be used
ad_response.user.autodiscover_smtp_address = self.email
# Get the server version. Not all protocol entries have a server version so we cheat a bit and also look at the
# other ones that point to the same endpoint.
for protocol in ad_response.account.protocols:
if not protocol.ews_url or not protocol.server_version:
continue
if protocol.ews_url.lower() == ews_url.lower():
version = Version(build=protocol.server_version)
break
else:
version = None
# We may not want to use the auth_package hints in the AD response. It could be incorrect and we can just guess.
protocol = Protocol(
config=Configuration(
service_endpoint=ews_url,
credentials=self.credentials,
version=version,
auth_type=self.auth_type,
retry_policy=self.retry_policy,
)
)
return ad_response, protocol
def _quick(self, protocol):
# Reset auth type and retry policy if we requested non-default values
if self.auth_type:
protocol.config.auth_type = self.auth_type
if self.retry_policy:
protocol.config.retry_policy = self.retry_policy
try:
r = self._get_authenticated_response(protocol=protocol)
except TransportError as e:
raise AutoDiscoverFailed('Response error: %s' % e)
if r.status_code == 200:
try:
ad = Autodiscover.from_bytes(bytes_content=r.content)
return self._step_5(ad=ad)
except ValueError as e:
raise AutoDiscoverFailed('Invalid response: %s' % e)
raise AutoDiscoverFailed('Invalid response code: %s' % r.status_code)
def _redirect_url_is_valid(self, url):
"""Three separate responses can be “Redirect responses”:
* An HTTP status code (301, 302) with a new URL
* An HTTP status code of 200, but with a payload XML containing a redirect to a different URL
* An HTTP status code of 200, but with a payload XML containing a different SMTP address as the target address
We only handle the HTTP 302 redirects here. We validate the URL received in the redirect response to ensure that
it does not redirect to non-SSL endpoints or SSL endpoints with invalid certificates, and that the redirect is
not circular. Finally, we should fail after 10 redirects.
:param url:
:return:
"""
if url.lower() in self._urls_visited:
log.warning('We have already tried this URL: %s', url)
return False
if self._redirect_count >= self.MAX_REDIRECTS:
log.warning('We reached max redirects at URL: %s', url)
return False
# We require TLS endpoints
if not url.startswith('https://'):
log.debug('Invalid scheme for URL: %s', url)
return False
# Quick test that the endpoint responds and that TLS handshake is OK
try:
self._get_unauthenticated_response(url, method='head')
except TransportError as e:
log.debug('Response error on redirect URL %s: %s', url, e)
return False
self._redirect_count += 1
return True
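    # Example (comment only, made-up URLs): with an empty self._urls_visited and
    # self._redirect_count == 0, a redirect to
    # 'http://autodiscover.example.com/Autodiscover/Autodiscover.xml' is rejected
    # because the scheme is not https, while an https URL is only accepted after the
    # HEAD probe in _get_unauthenticated_response() succeeds. Each accepted redirect
    # increments _redirect_count, up to MAX_REDIRECTS.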
def _get_unauthenticated_response(self, url, method='post'):
"""Get auth type by tasting headers from the server. Do POST requests be default. HEAD is too error prone, and
some servers are set up to redirect to OWA on all requests except POST to the autodiscover endpoint.
:param url:
:param method: (Default value = 'post')
:return:
"""
# We are connecting to untrusted servers here, so take necessary precautions.
hostname = urlparse(url).netloc
if not self._is_valid_hostname(hostname):
# 'requests' is really bad at reporting that a hostname cannot be resolved. Let's check this separately.
# Don't retry on DNS errors. They will most likely be persistent.
raise TransportError('%r has no DNS entry' % hostname)
kwargs = dict(
url=url, headers=DEFAULT_HEADERS.copy(), allow_redirects=False, timeout=AutodiscoverProtocol.TIMEOUT
)
if method == 'post':
kwargs['data'] = Autodiscover.payload(email=self.email)
retry = 0
t_start = time.monotonic()
while True:
_back_off_if_needed(self.INITIAL_RETRY_POLICY.back_off_until)
log.debug('Trying to get response from %s', url)
with AutodiscoverProtocol.raw_session(url) as s:
try:
r = getattr(s, method)(**kwargs)
r.close() # Release memory
break
except TLS_ERRORS as e:
# Don't retry on TLS errors. They will most likely be persistent.
raise TransportError(str(e))
except CONNECTION_ERRORS as e:
r = DummyResponse(url=url, headers={}, request_headers=kwargs['headers'])
total_wait = time.monotonic() - t_start
if self.INITIAL_RETRY_POLICY.may_retry_on_error(response=r, wait=total_wait):
log.debug("Connection error on URL %s (retry %s, error: %s). Cool down", url, retry, e)
# Don't respect the 'Retry-After' header. We don't know if this is a useful endpoint, and we
# want autodiscover to be reasonably fast.
self.INITIAL_RETRY_POLICY.back_off(self.RETRY_WAIT)
retry += 1
continue
log.debug("Connection error on URL %s: %s", url, e)
raise TransportError(str(e))
try:
auth_type = get_auth_method_from_response(response=r)
except UnauthorizedError:
# Failed to guess the auth type
auth_type = NOAUTH
if r.status_code in (301, 302) and 'location' in r.headers:
# Make the redirect URL absolute
try:
r.headers['location'] = get_redirect_url(r)
except TransportError:
del r.headers['location']
return auth_type, r
def _get_authenticated_response(self, protocol):
"""Get a response by using the credentials provided. We guess the auth type along the way.
:param protocol:
:return:
"""
# Redo the request with the correct auth
data = Autodiscover.payload(email=self.email)
headers = DEFAULT_HEADERS.copy()
session = protocol.get_session()
if GSSAPI in AUTH_TYPE_MAP and isinstance(session.auth, AUTH_TYPE_MAP[GSSAPI]):
# https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/pox-autodiscover-request-for-exchange
headers['X-ClientCanHandle'] = 'Negotiate'
try:
r, session = post_ratelimited(protocol=protocol, session=session, url=protocol.service_endpoint,
headers=headers, data=data, allow_redirects=False, stream=False)
protocol.release_session(session)
except UnauthorizedError as e:
# It's entirely possible for the endpoint to ask for login. We should continue if login fails because this
# isn't necessarily the right endpoint to use.
raise TransportError(str(e))
except RedirectError as e:
r = DummyResponse(url=protocol.service_endpoint, headers={'location': e.url}, request_headers=None,
status_code=302)
return r
def _attempt_response(self, url):
"""Return an (is_valid_response, response) tuple.
:param url:
:return:
"""
self._urls_visited.append(url.lower())
log.debug('Attempting to get a valid response from %s', url)
try:
auth_type, r = self._get_unauthenticated_response(url=url)
if isinstance(self.credentials, OAuth2Credentials):
# This type of credentials *must* use the OAuth auth type
auth_type = OAUTH2
elif self.credentials is None and auth_type in CREDENTIALS_REQUIRED:
raise ValueError('Auth type %r was detected but no credentials were provided' % auth_type)
ad_protocol = AutodiscoverProtocol(
config=Configuration(
service_endpoint=url,
credentials=self.credentials,
auth_type=auth_type,
retry_policy=self.INITIAL_RETRY_POLICY,
)
)
if auth_type != NOAUTH:
r = self._get_authenticated_response(protocol=ad_protocol)
except TransportError as e:
log.debug('Failed to get a response: %s', e)
return False, None
if r.status_code in (301, 302) and 'location' in r.headers:
redirect_url = get_redirect_url(r)
if self._redirect_url_is_valid(url=redirect_url):
# The protocol does not specify this explicitly, but by looking at how testconnectivity.microsoft.com
# works, it seems that we should follow this URL now and try to get a valid response.
return self._attempt_response(url=redirect_url)
if r.status_code == 200:
try:
ad = Autodiscover.from_bytes(bytes_content=r.content)
# We got a valid response. Unless this is a URL redirect response, we cache the result
if ad.response is None or not ad.response.redirect_url:
cache_key = self._cache_key
log.debug('Adding cache entry for key %s: %s', cache_key, ad_protocol.service_endpoint)
autodiscover_cache[cache_key] = ad_protocol
return True, ad
except ValueError as e:
log.debug('Invalid response: %s', e)
return False, None
def _is_valid_hostname(self, hostname):
log.debug('Checking if %s can be looked up in DNS', hostname)
try:
self.resolver.resolve(hostname)
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
return False
return True
def _get_srv_records(self, hostname):
"""Send a DNS query for SRV entries for the hostname.
An SRV entry that has been formatted for autodiscovery will have the following format:
canonical name = mail.example.com.
service = 8 100 443 webmail.example.com.
The first three numbers in the service line are: priority, weight, port
:param hostname:
:return:
"""
log.debug('Attempting to get SRV records for %s', hostname)
records = []
try:
answers = self.resolver.resolve('%s.' % hostname, 'SRV')
except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.resolver.NXDOMAIN) as e:
log.debug('DNS lookup failure: %s', e)
return records
for rdata in answers:
try:
vals = rdata.to_text().strip().rstrip('.').split(' ')
                # Raise ValueError if the first three are not ints, and IndexError if there are fewer than 4 values
priority, weight, port, srv = int(vals[0]), int(vals[1]), int(vals[2]), vals[3]
record = SrvRecord(priority=priority, weight=weight, port=port, srv=srv)
log.debug('Found SRV record %s ', record)
records.append(record)
except (ValueError, IndexError):
log.debug('Incompatible SRV record for %s (%s)', hostname, rdata.to_text())
return records
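    # Example (comment only): an answer whose to_text() is '8 100 443 webmail.example.com.'
    # is parsed above into SrvRecord(priority=8, weight=100, port=443, srv='webmail.example.com').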
def _step_1(self, hostname):
"""Perform step 1, where the client sends an Autodiscover request to
https://example.com/autodiscover/autodiscover.xml and then does one of the following:
* If the Autodiscover attempt succeeds, the client proceeds to step 5.
* If the Autodiscover attempt fails, the client proceeds to step 2.
:param hostname:
:return:
"""
url = 'https://%s/Autodiscover/Autodiscover.xml' % hostname
log.info('Step 1: Trying autodiscover on %r with email %r', url, self.email)
is_valid_response, ad = self._attempt_response(url=url)
if is_valid_response:
return self._step_5(ad=ad)
return self._step_2(hostname=hostname)
def _step_2(self, hostname):
"""Perform step 2, where the client sends an Autodiscover request to
https://autodiscover.example.com/autodiscover/autodiscover.xml and then does one of the following:
* If the Autodiscover attempt succeeds, the client proceeds to step 5.
* If the Autodiscover attempt fails, the client proceeds to step 3.
:param hostname:
:return:
"""
url = 'https://autodiscover.%s/Autodiscover/Autodiscover.xml' % hostname
log.info('Step 2: Trying autodiscover on %r with email %r', url, self.email)
is_valid_response, ad = self._attempt_response(url=url)
if is_valid_response:
return self._step_5(ad=ad)
return self._step_3(hostname=hostname)
def _step_3(self, hostname):
"""Perform step 3, where the client sends an unauth'ed GET method request to
http://autodiscover.example.com/autodiscover/autodiscover.xml (Note that this is a non-HTTPS endpoint). The
client then does one of the following:
* If the GET request returns a 302 redirect response, it gets the redirection URL from the 'Location' HTTP
header and validates it as described in the "Redirect responses" section. The client then does one of the
following:
* If the redirection URL is valid, the client tries the URL and then does one of the following:
* If the attempt succeeds, the client proceeds to step 5.
* If the attempt fails, the client proceeds to step 4.
* If the redirection URL is not valid, the client proceeds to step 4.
* If the GET request does not return a 302 redirect response, the client proceeds to step 4.
:param hostname:
:return:
"""
url = 'http://autodiscover.%s/Autodiscover/Autodiscover.xml' % hostname
log.info('Step 3: Trying autodiscover on %r with email %r', url, self.email)
try:
_, r = self._get_unauthenticated_response(url=url, method='get')
except TransportError:
r = DummyResponse(url=url, headers={}, request_headers={})
if r.status_code in (301, 302) and 'location' in r.headers:
redirect_url = get_redirect_url(r)
if self._redirect_url_is_valid(url=redirect_url):
is_valid_response, ad = self._attempt_response(url=redirect_url)
if is_valid_response:
return self._step_5(ad=ad)
return self._step_4(hostname=hostname)
return self._step_4(hostname=hostname)
return self._step_4(hostname=hostname)
def _step_4(self, hostname):
"""Perform step 4, where the client performs a Domain Name System (DNS) query for an SRV record for
_autodiscover._tcp.example.com. The query might return multiple records. The client selects only records that
point to an SSL endpoint and that have the highest priority and weight. One of the following actions then
occurs:
* If no such records are returned, the client proceeds to step 6.
* If records are returned, the application randomly chooses a record in the list and validates the endpoint
that it points to by following the process described in the "Redirect Response" section. The client then
does one of the following:
* If the redirection URL is valid, the client tries the URL and then does one of the following:
* If the attempt succeeds, the client proceeds to step 5.
* If the attempt fails, the client proceeds to step 6.
* If the redirection URL is not valid, the client proceeds to step 6.
:param hostname:
:return:
"""
dns_hostname = '_autodiscover._tcp.%s' % hostname
log.info('Step 4: Trying autodiscover on %r with email %r', dns_hostname, self.email)
srv_records = self._get_srv_records(dns_hostname)
try:
srv_host = _select_srv_host(srv_records)
except ValueError:
srv_host = None
if not srv_host:
return self._step_6()
redirect_url = 'https://%s/Autodiscover/Autodiscover.xml' % srv_host
if self._redirect_url_is_valid(url=redirect_url):
is_valid_response, ad = self._attempt_response(url=redirect_url)
if is_valid_response:
return self._step_5(ad=ad)
return self._step_6()
return self._step_6()
def _step_5(self, ad):
"""Perform step 5. When a valid Autodiscover request succeeds, the following sequence occurs:
* If the server responds with an HTTP 302 redirect, the client validates the redirection URL according to
the process defined in the "Redirect responses" and then does one of the following:
* If the redirection URL is valid, the client tries the URL and then does one of the following:
* If the attempt succeeds, the client repeats step 5 from the beginning.
* If the attempt fails, the client proceeds to step 6.
* If the redirection URL is not valid, the client proceeds to step 6.
* If the server responds with a valid Autodiscover response, the client does one of the following:
* If the value of the Action element is "Redirect", the client gets the redirection email address from
the Redirect element and then returns to step 1, using this new email address.
* If the value of the Action element is "Settings", the client has successfully received the requested
configuration settings for the specified user. The client does not need to proceed to step 6.
:param ad:
:return:
"""
log.info('Step 5: Checking response')
if ad.response is None:
# This is not explicit in the protocol, but let's raise errors here
ad.raise_errors()
ad_response = ad.response
if ad_response.redirect_url:
log.debug('Got a redirect URL: %s', ad_response.redirect_url)
# We are diverging a bit from the protocol here. We will never get an HTTP 302 since earlier steps already
        # followed the redirects where possible. Instead, we handle redirect responses here.
if self._redirect_url_is_valid(url=ad_response.redirect_url):
is_valid_response, ad = self._attempt_response(url=ad_response.redirect_url)
if is_valid_response:
return self._step_5(ad=ad)
return self._step_6()
log.debug('Invalid redirect URL: %s', ad_response.redirect_url)
return self._step_6()
# This could be an email redirect. Let outer layer handle this
return ad_response
def _step_6(self):
"""Perform step 6. If the client cannot contact the Autodiscover service, the client should ask the user for
the Exchange server name and use it to construct an Exchange EWS URL. The client should try to use this URL for
future requests.
"""
raise AutoDiscoverFailed(
'All steps in the autodiscover protocol failed for email %r. If you think this is an error, consider doing '
'an official test at https://testconnectivity.microsoft.com' % self.email)
def _select_srv_host(srv_records):
"""Select the record with the highest priority, that also supports TLS.
:param srv_records:
:return:
"""
best_record = None
for srv_record in srv_records:
if srv_record.port != 443:
log.debug('Skipping SRV record %r (no TLS)', srv_record)
continue
# Assume port 443 will serve TLS. If not, autodiscover will probably also be broken for others.
if best_record is None or best_record.priority < srv_record.priority:
best_record = srv_record
if not best_record:
raise ValueError('No suitable records')
return best_record.srv
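# Illustrative sketch (comment only, made-up values): how _select_srv_host treats a
# mixed answer set. Only port-443 records are considered, and the numerically highest
# priority wins, matching the docstring above.
#
#   records = [
#       SrvRecord(priority=8, weight=100, port=443, srv='webmail.example.com'),
#       SrvRecord(priority=3, weight=50, port=443, srv='mail.example.com'),
#       SrvRecord(priority=1, weight=100, port=25, srv='smtp.example.com'),  # not port 443
#   ]
#   _select_srv_host(records)  # -> 'webmail.example.com'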
| 49.227826
| 141
| 0.645411
|
34fd42b2dfbb939e69a7dc10285ef585868c8ddf
| 765
|
py
|
Python
|
effective_python/one.py
|
aleeper/python_sandbox
|
2c320e043735f99fac68308fe2692c819cf5a636
|
[
"MIT"
] | null | null | null |
effective_python/one.py
|
aleeper/python_sandbox
|
2c320e043735f99fac68308fe2692c819cf5a636
|
[
"MIT"
] | null | null | null |
effective_python/one.py
|
aleeper/python_sandbox
|
2c320e043735f99fac68308fe2692c819cf5a636
|
[
"MIT"
] | null | null | null |
key = 'my_var'
value = 0.234
formatted = f'{key!r:<10} = {value:+.2f}'
print(formatted)
item = ('Peanut butter', 'Jelly', 'Jam')
first, second, third = item
print(first, 'and', second)
print(*item)
snack_calories = {
'chips': 140,
'popcorn': 80,
'nuts': 190
}
for rank, (name, calories) in enumerate(snack_calories.items()):
print(rank, name, calories)
names = ['Cecilia', 'Lise', 'Marie']
counts = [len(n) for n in names]
longest_name = None
max_count = 0
for (name, count) in zip(names, counts):
if count > max_count:
longest_name = name
max_count = count
print(longest_name, max_count)
names.append('Rosalind')
import itertools
for (name, count) in itertools.zip_longest(names, counts, fillvalue=0):
print(f'{name}: {count}')
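# Sketch (requires Python 3.10+, not from the excerpt above): zip() can be asked to
# fail loudly instead of silently truncating when the inputs differ in length.
try:
    for pair in zip(names, counts, strict=True):
        print(pair)
except ValueError as e:
    print('length mismatch:', e)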
| 20.675676
| 71
| 0.662745
|
2a69bf5899f2e75077a94076c96a1bc614d17f17
| 5,041
|
py
|
Python
|
tests/sentry/integrations/jira/test_configure.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | 1
|
2019-08-28T11:03:13.000Z
|
2019-08-28T11:03:13.000Z
|
tests/sentry/integrations/jira/test_configure.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | 1
|
2019-03-13T06:05:24.000Z
|
2019-03-13T06:05:24.000Z
|
tests/sentry/integrations/jira/test_configure.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from mock import patch
from jwt import ExpiredSignatureError
from django.core.urlresolvers import reverse
from sentry.integrations.atlassian_connect import AtlassianConnectValidationError
from sentry.models import Integration
from sentry.testutils import APITestCase
from sentry.utils.http import absolute_uri
PERMISSIONS_WARNING = 'You must have Owner or Manager permissions in Sentry to complete setup.'
REFRESH_REQUIRED = 'This page has expired, please refresh to configure your Sentry integration'
LOGIN_REQUIRED = 'Please login to your Sentry account to access the Sentry Add-on configuration'
ORGANIZATIONS_FORM = 'Enabled Sentry Organizations'
COMPLETED = 'Saved!'
class JiraConfigureViewTestCase(APITestCase):
def setUp(self):
super(JiraConfigureViewTestCase, self).setUp()
self.path = absolute_uri('extensions/jira/configure/') + '?xdm_e=base_url'
org = self.organization
self.user.name = 'Sentry Admin'
self.user.save()
integration = Integration.objects.create(
provider='jira',
name='Example Jira',
)
integration.add_organization(org, self.user)
self.installation = integration.get_installation(org.id)
class JiraConfigureViewErrorsTest(JiraConfigureViewTestCase):
@patch('sentry.integrations.jira.configure.get_integration_from_request',
side_effect=AtlassianConnectValidationError())
def test_atlassian_connect_validation_error_get(self, mock_get_integration_from_request):
response = self.client.get(self.path)
assert response.status_code == 200
assert PERMISSIONS_WARNING in response.content
@patch('sentry.integrations.jira.configure.get_integration_from_request',
side_effect=ExpiredSignatureError())
def test_expired_signature_error_get(self, mock_get_integration_from_request):
response = self.client.get(self.path)
assert response.status_code == 200
assert REFRESH_REQUIRED in response.content
@patch('sentry.integrations.jira.configure.get_integration_from_request')
def test_user_not_logged_in_get(self, mock_get_integration_from_request):
mock_get_integration_from_request.return_value = self.installation.model
response = self.client.get(self.path)
assert response.status_code == 200
assert LOGIN_REQUIRED in response.content
assert absolute_uri(reverse('sentry-login')) in response.content
@patch('sentry.integrations.jira.configure.get_integration_from_request',
side_effect=AtlassianConnectValidationError())
def test_atlassian_connect_validation_error_post(self, mock_get_integration_from_request):
response = self.client.post(self.path)
assert response.status_code == 200
assert PERMISSIONS_WARNING in response.content
@patch('sentry.integrations.jira.configure.get_integration_from_request',
side_effect=ExpiredSignatureError())
def test_expired_signature_error_post(self, mock_get_integration_from_request):
response = self.client.post(self.path)
assert response.status_code == 200
assert REFRESH_REQUIRED in response.content
@patch('sentry.integrations.jira.configure.get_integration_from_request')
def test_user_not_logged_in_post(self, mock_get_integration_from_request):
mock_get_integration_from_request.return_value = self.installation.model
response = self.client.post(self.path)
assert response.status_code == 200
assert LOGIN_REQUIRED in response.content
assert absolute_uri(reverse('sentry-login')) in response.content
class JiraConfigureViewTest(JiraConfigureViewTestCase):
def setUp(self):
super(JiraConfigureViewTest, self).setUp()
self.login_as(self.user)
def assert_no_errors(self, response):
assert PERMISSIONS_WARNING not in response.content
assert REFRESH_REQUIRED not in response.content
assert LOGIN_REQUIRED not in response.content
@patch('sentry.integrations.jira.configure.get_integration_from_request')
def test_simple_get(self, mock_get_integration_from_request):
mock_get_integration_from_request.return_value = self.installation.model
response = self.client.get(self.path)
assert response.status_code == 200
self.assert_no_errors(response)
assert ORGANIZATIONS_FORM in response.content
@patch('sentry.integrations.jira.configure.get_integration_from_request')
def test_simple_post(self, mock_get_integration_from_request):
mock_get_integration_from_request.return_value = self.installation.model
response = self.client.post(
self.path,
data={
'organizations': [self.organization.id]
}
)
assert response.status_code == 200
self.assert_no_errors(response)
assert ORGANIZATIONS_FORM not in response.content
assert COMPLETED in response.content
| 43.08547
| 96
| 0.749454
|
90fba69cb8068a100ad60c7ec9c9a72989350ddb
| 3,436
|
py
|
Python
|
assets/migrations/0002_auto_20190713_1727.py
|
UlovHer/CMDB
|
2288e71441ccddaeeeebf0f81cfeb3e321817738
|
[
"MIT"
] | null | null | null |
assets/migrations/0002_auto_20190713_1727.py
|
UlovHer/CMDB
|
2288e71441ccddaeeeebf0f81cfeb3e321817738
|
[
"MIT"
] | null | null | null |
assets/migrations/0002_auto_20190713_1727.py
|
UlovHer/CMDB
|
2288e71441ccddaeeeebf0f81cfeb3e321817738
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.3 on 2019-07-13 17:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('assets', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='eventlog',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='事件执行人'),
),
migrations.AddField(
model_name='disk',
name='asset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset'),
),
migrations.AddField(
model_name='cpu',
name='asset',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset'),
),
migrations.AddField(
model_name='businessunit',
name='parent_unit',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='parent_level', to='assets.BusinessUnit'),
),
migrations.AddField(
model_name='asset',
name='admin',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='admin', to=settings.AUTH_USER_MODEL, verbose_name='资产管理员'),
),
migrations.AddField(
model_name='asset',
name='approved_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='approved_by', to=settings.AUTH_USER_MODEL, verbose_name='批准人'),
),
migrations.AddField(
model_name='asset',
name='business_unit',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.BusinessUnit', verbose_name='所属业务线'),
),
migrations.AddField(
model_name='asset',
name='contract',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.Contract', verbose_name='合同'),
),
migrations.AddField(
model_name='asset',
name='idc',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.IDC', verbose_name='所在机房'),
),
migrations.AddField(
model_name='asset',
name='manufacturer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.Manufacturer', verbose_name='制造商'),
),
migrations.AddField(
model_name='asset',
name='tags',
field=models.ManyToManyField(blank=True, to='assets.Tag', verbose_name='标签'),
),
migrations.AlterUniqueTogether(
name='ram',
unique_together={('asset', 'slot')},
),
migrations.AlterUniqueTogether(
name='nic',
unique_together={('asset', 'model', 'mac')},
),
migrations.AlterUniqueTogether(
name='disk',
unique_together={('asset', 'sn')},
),
]
| 39.953488
| 182
| 0.610885
|
e9d4f2a6d268149f0f20265a98ad5982d7233c03
| 61,920
|
py
|
Python
|
lib/rucio/tests/test_replica.py
|
davidpob99/rucio
|
7c8f6ae1adfa0d41e533da572997bdfed6c555e1
|
[
"Apache-2.0"
] | 1
|
2020-03-19T11:48:44.000Z
|
2020-03-19T11:48:44.000Z
|
lib/rucio/tests/test_replica.py
|
davidpob99/rucio
|
7c8f6ae1adfa0d41e533da572997bdfed6c555e1
|
[
"Apache-2.0"
] | 3
|
2020-12-16T11:18:12.000Z
|
2021-04-12T11:38:51.000Z
|
lib/rucio/tests/test_replica.py
|
davidpob99/rucio
|
7c8f6ae1adfa0d41e533da572997bdfed6c555e1
|
[
"Apache-2.0"
] | 1
|
2021-01-18T21:28:29.000Z
|
2021-01-18T21:28:29.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2013-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2013-2018
# - Ralph Vigne <ralph.vigne@cern.ch>, 2013-2014
# - Mario Lassnig <mario.lassnig@cern.ch>, 2013-2020
# - Cedric Serfon <cedric.serfon@cern.ch>, 2014-2018
# - Thomas Beermann <thomas.beermann@cern.ch>, 2014
# - Martin Barisits <martin.barisits@cern.ch>, 2015-2021
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2018
# - Dimitrios Christidis <dimitrios.christidis@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2019
# - Tobias Wegner <twegner@cern.ch>, 2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Luc Goossens <luc.goossens@cern.ch>, 2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Ilija Vukotic <ivukotic@uchicago.edu>, 2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021
from __future__ import print_function
import hashlib
import time
from datetime import datetime, timedelta
from json import dumps, loads
from unittest import mock
from xml.etree import ElementTree
import pytest
import xmltodict
from werkzeug.datastructures import MultiDict
from rucio.client.ruleclient import RuleClient
from rucio.common.exception import (DataIdentifierNotFound, AccessDenied, UnsupportedOperation,
RucioException, ReplicaIsLocked, ReplicaNotFound, ScopeNotFound,
DatabaseException)
from rucio.common.utils import generate_uuid, clean_surls, parse_response
from rucio.core.config import set as cconfig_set
from rucio.core.did import add_did, attach_dids, get_did, set_status, list_files, get_did_atime
from rucio.core.replica import (add_replica, add_replicas, delete_replicas, get_replicas_state,
get_replica, list_replicas, declare_bad_file_replicas, list_bad_replicas,
update_replica_state, get_RSEcoverage_of_dataset, get_replica_atime,
touch_replica, get_bad_pfns, set_tombstone)
from rucio.core.rse import add_protocol, add_rse_attribute, del_rse_attribute
from rucio.daemons.badreplicas.minos import run as minos_run
from rucio.daemons.badreplicas.minos_temporary_expiration import run as minos_temp_run
from rucio.daemons.badreplicas.necromancer import run as necromancer_run
from rucio.db.sqla import models
from rucio.db.sqla.constants import DIDType, ReplicaState, BadPFNStatus, OBSOLETE
from rucio.db.sqla.session import transactional_session
from rucio.rse import rsemanager as rsemgr
from rucio.tests.common import execute, headers, auth, Mime, accept
def mocked_VP_requests_get(*args, **kwargs):
"""This method will be used by the mock to replace requests.get to VP server."""
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
self.ok = True
def json(self):
return self.json_data
if args[0] == 'https://vps-mock.cern.ch/serverRanges':
return MockResponse({
"AGLT2": {
"servers": [
["192.41.231.239:1094", "100"],
["192.41.230.42:1094", "100"],
["192.41.230.43:1094", "100"]
],
"ranges": [
[1, 0.3333],
[2, 0.6666],
[0, 1]
]
}}, 200)
if args[0] == 'https://vps-mock.cern.ch/ds/4/scope:name':
return MockResponse(["AGLT2_VP_DISK", "MWT2_VP_DISK", "NET2_VP_DISK"], 200)
return MockResponse(None, 404)
class TestReplicaCore:
@mock.patch('rucio.core.replica.requests.get', side_effect=mocked_VP_requests_get)
def test_cache_replicas(self, mock_get, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): Test listing replicas with cached root protocol """
rse, rse_id = rse_factory.make_rse()
add_protocol(rse_id, {'scheme': 'root',
'hostname': 'root.aperture.com',
'port': 1409,
'prefix': '//test/chamber/',
'impl': 'rucio.rse.protocols.xrootd.Default',
'domains': {
'lan': {'read': 1, 'write': 1, 'delete': 1},
'wan': {'read': 1, 'write': 1, 'delete': 1}}})
add_protocol(rse_id, {'scheme': 'http',
'hostname': 'root.aperture.com',
'port': 1409,
'prefix': '//test/chamber/',
'impl': 'rucio.rse.protocols.xrootd.Default',
'domains': {
'lan': {'read': 1, 'write': 1, 'delete': 1},
'wan': {'read': 1, 'write': 1, 'delete': 1}}})
files = []
name = 'file_%s' % generate_uuid()
hstr = hashlib.md5(('%s:%s' % (mock_scope, name)).encode('utf-8')).hexdigest()
pfn = 'root://root.aperture.com:1409//test/chamber/mock/%s/%s/%s' % (hstr[0:2], hstr[2:4], name)
files.append({'scope': mock_scope, 'name': name, 'bytes': 1234, 'adler32': 'deadbeef', 'pfn': pfn})
name = 'element_%s' % generate_uuid()
hstr = hashlib.md5(('%s:%s' % (mock_scope, name)).encode('utf-8')).hexdigest()
pfn = 'http://root.aperture.com:1409//test/chamber/mock/%s/%s/%s' % (hstr[0:2], hstr[2:4], name)
files.append({'scope': mock_scope, 'name': name, 'bytes': 1234, 'adler32': 'deadbeef', 'pfn': pfn})
add_replicas(rse_id=rse_id, files=files, account=root_account)
cconfig_set('clientcachemap', 'BLACKMESA', 'AGLT2')
cconfig_set('virtual_placement', 'vp_endpoint', 'https://vps-mock.cern.ch')
for rep in list_replicas(
dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files],
schemes=['root'],
domain='wan',
client_location={'site': 'BLACKMESA'}):
assert list(rep['pfns'].keys())[0].count('root://') == 2
for rep in list_replicas(
dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files],
schemes=['root'],
domain='wan',
client_location={'site': rse}):
assert list(rep['pfns'].keys())[0].count('root://') == 1
@pytest.mark.noparallel(reason='calls list_bad_replicas() which acts on all bad replicas without any filtering')
def test_add_list_bad_replicas(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): Add bad replicas and list them"""
nbfiles = 5
# Adding replicas to deterministic RSE
_, rse1_id = rse_factory.make_srm_rse(deterministic=True)
files = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
add_replicas(rse_id=rse1_id, files=files, account=root_account, ignore_availability=True)
# Listing replicas on deterministic RSE
replicas = []
list_rep = []
for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm']):
replicas.extend(replica['rses'][rse1_id])
list_rep.append(replica)
r = declare_bad_file_replicas(replicas, 'This is a good reason', root_account)
assert r == {}
bad_replicas = list_bad_replicas()
nbbadrep = 0
for rep in list_rep:
for badrep in bad_replicas:
if badrep['rse_id'] == rse1_id:
if badrep['scope'] == rep['scope'] and badrep['name'] == rep['name']:
nbbadrep += 1
assert len(replicas) == nbbadrep
# Adding replicas to non-deterministic RSE
_, rse2_id = rse_factory.make_srm_rse(deterministic=False)
files = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb',
'pfn': 'srm://%s.cern.ch/srm/managerv2?SFN=/test/%s/%s' % (rse2_id, mock_scope, generate_uuid()), 'meta': {'events': 10}} for _ in range(nbfiles)]
add_replicas(rse_id=rse2_id, files=files, account=root_account, ignore_availability=True)
# Listing replicas on non-deterministic RSE
replicas = []
list_rep = []
for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm']):
replicas.extend(replica['rses'][rse2_id])
list_rep.append(replica)
r = declare_bad_file_replicas(replicas, 'This is a good reason', root_account)
assert r == {}
bad_replicas = list_bad_replicas()
nbbadrep = 0
for rep in list_rep:
for badrep in bad_replicas:
if badrep['rse_id'] == rse2_id:
if badrep['scope'] == rep['scope'] and badrep['name'] == rep['name']:
nbbadrep += 1
assert len(replicas) == nbbadrep
# Now adding non-existing bad replicas
files = ['srm://%s.cern.ch/test/%s/%s' % (rse2_id, mock_scope, generate_uuid()), ]
r = declare_bad_file_replicas(files, 'This is a good reason', root_account)
output = ['%s Unknown replica' % rep for rep in files]
assert r == {rse2_id: output}
def test_add_list_replicas(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): Add and list file replicas """
_, rse1_id = rse_factory.make_mock_rse()
_, rse2_id = rse_factory.make_mock_rse()
nbfiles = 13
files = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
for rse_id in [rse1_id, rse2_id]:
add_replicas(rse_id=rse_id, files=files, account=root_account, ignore_availability=True)
replica_cpt = 0
for _ in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm']):
replica_cpt += 1
assert nbfiles == replica_cpt
def test_delete_replicas(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): Delete replicas """
_, rse1_id = rse_factory.make_mock_rse()
_, rse2_id = rse_factory.make_mock_rse()
nbfiles = 5
files1 = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
add_replicas(rse_id=rse1_id, files=files1, account=root_account, ignore_availability=True)
files2 = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
add_replicas(rse_id=rse1_id, files=files2, account=root_account, ignore_availability=True)
add_replicas(rse_id=rse2_id, files=files2, account=root_account, ignore_availability=True)
delete_replicas(rse_id=rse1_id, files=files1 + files2)
for file in files1:
with pytest.raises(DataIdentifierNotFound):
print(get_did(scope=file['scope'], name=file['name']))
for file in files2:
get_did(scope=file['scope'], name=file['name'])
def test_delete_replicas_from_datasets(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): Delete replicas from dataset """
_, rse_id = rse_factory.make_mock_rse()
tmp_dsn1 = 'dsn_%s' % generate_uuid()
tmp_dsn2 = 'dsn_%s' % generate_uuid()
nbfiles = 5
files1 = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
add_did(scope=mock_scope, name=tmp_dsn1, did_type=DIDType.DATASET, account=root_account)
add_did(scope=mock_scope, name=tmp_dsn2, did_type=DIDType.DATASET, account=root_account)
attach_dids(scope=mock_scope, name=tmp_dsn1, rse_id=rse_id, dids=files1, account=root_account)
attach_dids(scope=mock_scope, name=tmp_dsn2, dids=files1, account=root_account)
set_status(scope=mock_scope, name=tmp_dsn1, open=False)
delete_replicas(rse_id=rse_id, files=files1)
with pytest.raises(DataIdentifierNotFound):
get_did(scope=mock_scope, name=tmp_dsn1)
get_did(scope=mock_scope, name=tmp_dsn2)
assert [f for f in list_files(scope=mock_scope, name=tmp_dsn2)] == []
def test_touch_replicas(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): Touch replicas accessed_at timestamp"""
_, rse_id = rse_factory.make_mock_rse()
nbfiles = 5
files1 = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
files2 = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
files2.append(files1[0])
add_replicas(rse_id=rse_id, files=files1, account=root_account, ignore_availability=True)
add_replicas(rse_id=rse_id, files=files2, account=root_account, ignore_availability=True)
now = datetime.utcnow()
now -= timedelta(microseconds=now.microsecond)
assert get_replica_atime({'scope': files1[0]['scope'], 'name': files1[0]['name'], 'rse_id': rse_id}) is None
assert get_did_atime(scope=mock_scope, name=files1[0]['name']) is None
for r in [{'scope': files1[0]['scope'], 'name': files1[0]['name'], 'rse_id': rse_id, 'accessed_at': now}]:
touch_replica(r)
assert now == get_replica_atime({'scope': files1[0]['scope'], 'name': files1[0]['name'], 'rse_id': rse_id})
assert now == get_did_atime(scope=mock_scope, name=files1[0]['name'])
for i in range(1, nbfiles):
assert get_replica_atime({'scope': files1[i]['scope'], 'name': files1[i]['name'], 'rse_id': rse_id}) is None
for i in range(0, nbfiles - 1):
assert get_replica_atime({'scope': files2[i]['scope'], 'name': files2[i]['name'], 'rse_id': rse_id}) is None
def test_list_replicas_all_states(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): list file replicas with all_states"""
_, rse1_id = rse_factory.make_mock_rse()
_, rse2_id = rse_factory.make_mock_rse()
nbfiles = 13
files = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
rses = [rse1_id, rse2_id]
for rse_id in rses:
add_replicas(rse_id=rse_id, files=files, account=root_account, ignore_availability=True)
for file in files:
update_replica_state(rses[0], mock_scope, file['name'], ReplicaState.COPYING)
replica_cpt = 0
for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm'], all_states=True):
assert 'states' in replica
assert replica['states'][rses[0]] == str(ReplicaState.COPYING.name)
assert replica['states'][rses[1]] == str(ReplicaState.AVAILABLE.name)
replica_cpt += 1
assert nbfiles == replica_cpt
def test_list_replica_with_domain(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): Add and list file replicas forcing domain"""
rse, rse_id = rse_factory.make_rse()
protocols = [{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 17,
'prefix': '/i/prefer/the/lan',
'impl': 'rucio.rse.protocols.mock.Default',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1},
'wan': {'read': 2,
'write': 2,
'delete': 2}}},
{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 18,
'prefix': '/i/prefer/the/wan',
'impl': 'rucio.rse.protocols.mock.Default',
'domains': {
'lan': {'read': 2,
'write': 2,
'delete': 2},
'wan': {'read': 1,
'write': 1,
'delete': 1}}}, ]
for p in protocols:
add_protocol(rse_id, p)
nbfiles = 3
files = [{'scope': mock_scope,
'name': 'file_%s' % generate_uuid(),
'bytes': 1234,
'adler32': '01234567',
'meta': {'events': 1234}} for _ in range(nbfiles)]
add_replicas(rse_id=rse_id, files=files, account=root_account, ignore_availability=True)
for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files],
schemes=['MOCK'],
domain='wan'):
assert '/i/prefer/the/wan' in list(replica['pfns'].keys())[0]
for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files],
schemes=['MOCK'],
domain='lan'):
assert '/i/prefer/the/lan' in list(replica['pfns'].keys())[0]
# test old client behaviour - get all WAN answers
for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files],
schemes=['MOCK']):
cmd = 'rucio list-file-replicas --pfns %s:%s' % (replica['scope'], replica['name'])
_, stdout, _ = execute(cmd)
assert '/i/prefer/the/wan' in stdout
# # force all LAN
for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files],
schemes=['MOCK'], domain='lan'):
cmd = 'rucio list-file-replicas --pfns --domain=lan %s:%s' % (replica['scope'], replica['name'])
errno, stdout, stderr = execute(cmd)
assert '/i/prefer/the/lan' in stdout
# # force all WAN
for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files],
schemes=['MOCK'], domain='wan'):
cmd = 'rucio list-file-replicas --pfns --domain=wan %s:%s' % (replica['scope'], replica['name'])
errno, stdout, stderr = execute(cmd)
assert '/i/prefer/the/wan' in stdout
# # force both WAN and LAN
for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files],
schemes=['MOCK'], domain='all'):
cmd = 'rucio list-file-replicas --pfns --domain=all %s:%s' % (replica['scope'], replica['name'])
errno, stdout, stderr = execute(cmd)
assert '/i/prefer/the/wan' in stdout
assert '/i/prefer/the/lan' in stdout
def test_list_replica_with_schemes(self, rse_factory, mock_scope, root_account, replica_client):
""" REPLICA (CORE): Add and list file replicas forcing schemes"""
rse, rse_id = rse_factory.make_rse()
add_protocol(rse_id, {'scheme': 'http',
'hostname': 'http.aperture.com',
'port': 80,
'prefix': '//test/chamber/',
'impl': 'rucio.rse.protocols.gfalv2.Default',
'domains': {
'lan': {'read': 1, 'write': 1, 'delete': 1},
'wan': {'read': 1, 'write': 1, 'delete': 1}}})
name = 'element_%s' % generate_uuid()
file_item = {'scope': mock_scope, 'name': name, 'bytes': 1234, 'adler32': 'deadbeef'}
add_replicas(rse_id=rse_id, files=[file_item], account=root_account)
replicas = list(replica_client.list_replicas([{'scope': mock_scope.external, 'name': name}]))
assert 'http://' in list(replicas[0]['pfns'].keys())[0]
def test_replica_no_site(self, rse_factory, mock_scope, root_account, replica_client):
""" REPLICA (CORE): Test listing replicas without site attribute """
rse, rse_id = rse_factory.make_rse()
add_protocol(rse_id, {'scheme': 'root',
'hostname': 'root.aperture.com',
'port': 1409,
'prefix': '//test/chamber/',
'impl': 'rucio.rse.protocols.xrootd.Default',
'domains': {
'lan': {'read': 1, 'write': 1, 'delete': 1},
'wan': {'read': 1, 'write': 1, 'delete': 1}}})
add_rse_attribute(rse_id=rse_id, key='site', value='APERTURE')
files = [{'scope': mock_scope, 'name': 'element_%s' % generate_uuid(),
'bytes': 1234, 'adler32': 'deadbeef'}]
add_replicas(rse_id=rse_id, files=files, account=root_account)
replicas = [r for r in replica_client.list_replicas(dids=[{'scope': 'mock', 'name': f['name']} for f in files])]
assert 'root://' in list(replicas[0]['pfns'].keys())[0]
replicas = [r for r in replica_client.list_replicas(dids=[{'scope': 'mock', 'name': f['name']} for f in files],
client_location={'site': 'SOMEWHERE'})]
assert 'root://' in list(replicas[0]['pfns'].keys())[0]
del_rse_attribute(rse_id=rse_id, key='site')
replicas = [r for r in replica_client.list_replicas(dids=[{'scope': 'mock', 'name': f['name']} for f in files])]
assert 'root://' in list(replicas[0]['pfns'].keys())[0]
replicas = [r for r in replica_client.list_replicas(dids=[{'scope': 'mock', 'name': f['name']} for f in files],
client_location={'site': 'SOMEWHERE'})]
assert 'root://' in list(replicas[0]['pfns'].keys())[0]
def test_replica_mixed_protocols(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): Test adding replicas with mixed protocol """
rse, rse_id = rse_factory.make_rse()
add_protocol(rse_id, {'scheme': 'root',
'hostname': 'root.aperture.com',
'port': 1409,
'prefix': '//test/chamber/',
'impl': 'rucio.rse.protocols.xrootd.Default',
'domains': {
'lan': {'read': 1, 'write': 1, 'delete': 1},
'wan': {'read': 1, 'write': 1, 'delete': 1}}})
add_protocol(rse_id, {'scheme': 'http',
'hostname': 'root.aperture.com',
'port': 1409,
'prefix': '//test/chamber/',
'impl': 'rucio.rse.protocols.xrootd.Default',
'domains': {
'lan': {'read': 1, 'write': 1, 'delete': 1},
'wan': {'read': 1, 'write': 1, 'delete': 1}}})
files = []
name = 'element_%s' % generate_uuid()
hstr = hashlib.md5(('%s:%s' % (mock_scope, name)).encode('utf-8')).hexdigest()
pfn = 'root://root.aperture.com:1409//test/chamber/mock/%s/%s/%s' % (hstr[0:2], hstr[2:4], name)
files.append({'scope': mock_scope, 'name': name, 'bytes': 1234, 'adler32': 'deadbeef', 'pfn': pfn})
name = 'element_%s' % generate_uuid()
hstr = hashlib.md5(('%s:%s' % (mock_scope, name)).encode('utf-8')).hexdigest()
pfn = 'http://root.aperture.com:1409//test/chamber/mock/%s/%s/%s' % (hstr[0:2], hstr[2:4], name)
files.append({'scope': mock_scope, 'name': name, 'bytes': 1234, 'adler32': 'deadbeef', 'pfn': pfn})
add_replicas(rse_id=rse_id, files=files, account=root_account)
def test_set_tombstone(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): set tombstone on replica """
# Set tombstone on one replica
rse, rse_id = rse_factory.make_mock_rse()
name = generate_uuid()
add_replica(rse_id, mock_scope, name, 4, root_account)
assert get_replica(rse_id, mock_scope, name)['tombstone'] is None
set_tombstone(rse_id, mock_scope, name)
assert get_replica(rse_id, mock_scope, name)['tombstone'] == OBSOLETE
# Set tombstone on locked replica
name = generate_uuid()
add_replica(rse_id, mock_scope, name, 4, root_account)
RuleClient().add_replication_rule([{'name': name, 'scope': mock_scope.external}], 1, rse, locked=True)
with pytest.raises(ReplicaIsLocked):
set_tombstone(rse_id, mock_scope, name)
# Set tombstone on not found replica
name = generate_uuid()
with pytest.raises(ReplicaNotFound):
set_tombstone(rse_id, mock_scope, name)
def test_core_default_tombstone_correctly_set(self, rse_factory, did_factory, root_account):
""" REPLICA (CORE): Per-RSE default tombstone is correctly taken into consideration"""
# One RSE has an attribute set, the other uses the default value of "None" for tombstone
rse1, rse1_id = rse_factory.make_mock_rse()
rse2, rse2_id = rse_factory.make_mock_rse()
tombstone_delay = 3600
add_rse_attribute(rse_id=rse2_id, key='tombstone_delay', value=tombstone_delay)
# Will use the default tombstone delay
did1 = did_factory.random_did()
add_replica(rse1_id, bytes_=4, account=root_account, **did1)
assert get_replica(rse1_id, **did1)['tombstone'] is None
# Will use the configured value on the RSE
did2 = did_factory.random_did()
add_replica(rse2_id, bytes_=4, account=root_account, **did2)
tombstone = get_replica(rse2_id, **did2)['tombstone']
expected_tombstone = datetime.utcnow() + timedelta(seconds=tombstone_delay)
assert expected_tombstone - timedelta(minutes=5) < tombstone < expected_tombstone + timedelta(minutes=5)
# Adding rule removes the tombstone
RuleClient().add_replication_rule([{'name': did1['name'], 'scope': did1['scope'].external}], 1, rse1, locked=True)
assert get_replica(rse1_id, **did1)['tombstone'] is None
RuleClient().add_replication_rule([{'name': did2['name'], 'scope': did2['scope'].external}], 1, rse2, locked=True)
assert get_replica(rse2_id, **did2)['tombstone'] is None
def test_list_replicas_with_updated_after(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): Add and list file replicas with updated_after filter """
_, rse_id = rse_factory.make_mock_rse()
dsn = 'ds_ua_test_%s' % generate_uuid()
add_did(scope=mock_scope, name=dsn, did_type='DATASET', account=root_account)
#
t0 = datetime.utcnow()
time.sleep(2)
lfn = '%s._%s.data' % (dsn, '0001')
add_replica(rse_id=rse_id, scope=mock_scope, name=lfn, bytes_=12345, account=root_account)
attach_dids(scope=mock_scope, name=dsn, dids=[{'scope': mock_scope, 'name': lfn}], account=root_account)
time.sleep(2)
t1 = datetime.utcnow()
time.sleep(2)
lfn = '%s._%s.data' % (dsn, '0002')
add_replica(rse_id=rse_id, scope=mock_scope, name=lfn, bytes_=12345, account=root_account)
attach_dids(scope=mock_scope, name=dsn, dids=[{'scope': mock_scope, 'name': lfn}], account=root_account)
time.sleep(2)
t2 = datetime.utcnow()
time.sleep(2)
lfn = '%s._%s.data' % (dsn, '0003')
add_replica(rse_id=rse_id, scope=mock_scope, name=lfn, bytes_=12345, account=root_account)
attach_dids(scope=mock_scope, name=dsn, dids=[{'scope': mock_scope, 'name': lfn}], account=root_account)
time.sleep(2)
t3 = datetime.utcnow()
#
assert len(list(list_replicas([{'scope': mock_scope, 'name': dsn}], updated_after=None))) == 3
assert len(list(list_replicas([{'scope': mock_scope, 'name': dsn}], updated_after=t0))) == 3
assert len(list(list_replicas([{'scope': mock_scope, 'name': dsn}], updated_after=t1))) == 2
assert len(list(list_replicas([{'scope': mock_scope, 'name': dsn}], updated_after=t2))) == 1
assert len(list(list_replicas([{'scope': mock_scope, 'name': dsn}], updated_after=t3))) == 0
def test_get_RSE_coverage_of_dataset(self, rse_factory, mock_scope, root_account):
""" REPLICA (CORE): test RSE coverage retrieval """
_, rse1_id = rse_factory.make_mock_rse()
_, rse2_id = rse_factory.make_mock_rse()
_, rse3_id = rse_factory.make_mock_rse()
dsn = 'ds_cov_test_%s' % generate_uuid()
add_did(scope=mock_scope, name=dsn, did_type='DATASET', account=root_account)
# test empty dataset
cov = get_RSEcoverage_of_dataset(scope=mock_scope, name=dsn)
print(cov)
assert cov == {}
# add files/replicas
for i in range(1, 8):
add_replica(rse_id=rse1_id, scope=mock_scope, name=dsn + '_%06d.data' % i, bytes_=100, account=root_account)
for i in range(8, 11):
add_replica(rse_id=rse2_id, scope=mock_scope, name=dsn + '_%06d.data' % i, bytes_=100, account=root_account)
for i in range(11, 16):
add_replica(rse_id=rse3_id, scope=mock_scope, name=dsn + '_%06d.data' % i, bytes_=100, account=root_account)
attach_dids(scope=mock_scope, name=dsn, dids=[{'scope': mock_scope, 'name': dsn + '_%06d.data' % i} for i in range(1, 16)], account=root_account)
cov = get_RSEcoverage_of_dataset(scope=mock_scope, name=dsn)
print(cov)
assert cov[rse1_id] == 700
assert cov[rse2_id] == 300
assert cov[rse3_id] == 500
@pytest.mark.noparallel(reason='calls list_bad_replicas() and runs necromancer. Both act on all bad replicas without any filtering')
def test_client_add_list_bad_replicas(rse_factory, replica_client, did_client):
""" REPLICA (CLIENT): Add bad replicas"""
tmp_scope = 'mock'
nbfiles = 5
# Adding replicas to deterministic RSE
files = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
rse1, rse1_id = rse_factory.make_srm_rse(deterministic=True)
replica_client.add_replicas(rse=rse1, files=files)
# Listing replicas on deterministic RSE
replicas, list_rep = [], []
for replica in replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in files], schemes=['srm'], all_states=True):
replicas.extend(replica['rses'][rse1])
list_rep.append(replica)
r = replica_client.declare_bad_file_replicas(replicas, 'This is a good reason')
assert r == {}
bad_replicas = list_bad_replicas()
nbbadrep = 0
for rep in list_rep:
for badrep in bad_replicas:
if badrep['rse_id'] == rse1_id:
if badrep['scope'].external == rep['scope'] and badrep['name'] == rep['name']:
nbbadrep += 1
assert len(replicas) == nbbadrep
# Run necromancer once
necromancer_run(threads=1, bulk=10000, once=True)
# Try to attach a lost file
tmp_dsn = 'dataset_%s' % generate_uuid()
did_client.add_dataset(scope=tmp_scope, name=tmp_dsn)
with pytest.raises(UnsupportedOperation):
did_client.add_files_to_dataset(tmp_scope, name=tmp_dsn, files=files, rse=rse1)
# Adding replicas to non-deterministic RSE
rse2, rse2_id = rse_factory.make_srm_rse(deterministic=False)
files = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb',
'pfn': 'srm://%s.cern.ch/srm/managerv2?SFN=/test/%s/%s' % (rse2_id, tmp_scope, generate_uuid()), 'meta': {'events': 10}} for _ in range(nbfiles)]
replica_client.add_replicas(rse=rse2, files=files)
# Listing replicas on non-deterministic RSE
replicas, list_rep = [], []
for replica in replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in files], schemes=['srm'], all_states=True):
replicas.extend(replica['rses'][rse2])
list_rep.append(replica)
print(replicas, list_rep)
r = replica_client.declare_bad_file_replicas(replicas, 'This is a good reason')
print(r)
assert r == {}
bad_replicas = list_bad_replicas()
nbbadrep = 0
for rep in list_rep:
for badrep in bad_replicas:
if badrep['rse_id'] == rse2_id:
if badrep['scope'].external == rep['scope'] and badrep['name'] == rep['name']:
nbbadrep += 1
assert len(replicas) == nbbadrep
# Now adding non-existing bad replicas
files = ['srm://%s.cern.ch/test/%s/%s' % (rse2_id, tmp_scope, generate_uuid()), ]
r = replica_client.declare_bad_file_replicas(files, 'This is a good reason')
output = ['%s Unknown replica' % rep for rep in files]
assert r == {rse2: output}
def test_client_add_suspicious_replicas(rse_factory, replica_client):
""" REPLICA (CLIENT): Add suspicious replicas"""
tmp_scope = 'mock'
nbfiles = 5
# Adding replicas to deterministic RSE
rse1, _ = rse_factory.make_srm_rse(deterministic=True)
files = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
replica_client.add_replicas(rse=rse1, files=files)
# Listing replicas on deterministic RSE
replicas = []
list_rep = []
for replica in replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in files], schemes=['srm'], all_states=True):
replicas.extend(replica['rses'][rse1])
list_rep.append(replica)
r = replica_client.declare_suspicious_file_replicas(replicas, 'This is a good reason')
assert r == {}
# Adding replicas to non-deterministic RSE
rse2, rse2_id = rse_factory.make_srm_rse(deterministic=False)
files = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb',
'pfn': 'srm://%s.cern.ch/srm/managerv2?SFN=/test/%s/%s' % (rse2_id, tmp_scope, generate_uuid()), 'meta': {'events': 10}} for _ in range(nbfiles)]
replica_client.add_replicas(rse=rse2, files=files)
# Listing replicas on non-deterministic RSE
replicas = []
list_rep = []
for replica in replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in files], schemes=['srm'], all_states=True):
replicas.extend(replica['rses'][rse2])
list_rep.append(replica)
r = replica_client.declare_suspicious_file_replicas(replicas, 'This is a good reason')
assert r == {}
# Now adding non-existing bad replicas
files = ['srm://%s.cern.ch/test/%s/%s' % (rse2_id, tmp_scope, generate_uuid()), ]
r = replica_client.declare_suspicious_file_replicas(files, 'This is a good reason')
output = ['%s Unknown replica' % rep for rep in files]
assert r == {rse2: output}
@pytest.mark.noparallel(reason='Lists bad replicas multiple times. If the list changes between calls, test fails.')
def test_rest_bad_replica_methods_for_ui(rest_client, auth_token):
__test_rest_bad_replica_methods_for_ui(rest_client, auth_token, list_pfns=False)
__test_rest_bad_replica_methods_for_ui(rest_client, auth_token, list_pfns=True)
def __test_rest_bad_replica_methods_for_ui(rest_client, auth_token, list_pfns):
""" REPLICA (REST): Test the listing of bad and suspicious replicas """
if list_pfns:
common_data = {'list_pfns': 'True'}
else:
common_data = {}
data = {**common_data}
response = rest_client.get('/replicas/bad/states', headers=headers(auth(auth_token)), query_string=data)
assert response.status_code == 200
tot_files = []
for line in response.get_data(as_text=True).split('\n'):
if line != '':
tot_files.append(dumps(line))
nb_tot_files = len(tot_files)
data = {'state': 'B', **common_data}
response = rest_client.get('/replicas/bad/states', headers=headers(auth(auth_token)), query_string=data)
assert response.status_code == 200
tot_bad_files = []
for line in response.get_data(as_text=True).split('\n'):
if line != '':
tot_bad_files.append(dumps(line))
nb_tot_bad_files1 = len(tot_bad_files)
data = {'state': 'S', **common_data}
response = rest_client.get('/replicas/bad/states', headers=headers(auth(auth_token)), query_string=data)
assert response.status_code == 200
tot_suspicious_files = []
for line in response.get_data(as_text=True).split('\n'):
if line != '':
tot_suspicious_files.append(dumps(line))
nb_tot_suspicious_files = len(tot_suspicious_files)
data = {'state': 'T', **common_data}
response = rest_client.get('/replicas/bad/states', headers=headers(auth(auth_token)), query_string=data)
assert response.status_code == 200
tot_temporary_unavailable_files = []
for line in response.get_data(as_text=True).split('\n'):
if line != '':
tot_temporary_unavailable_files.append(dumps(line))
nb_tot_temporary_unavailable_files = len(tot_temporary_unavailable_files)
assert nb_tot_files == nb_tot_bad_files1 + nb_tot_suspicious_files + nb_tot_temporary_unavailable_files
tomorrow = datetime.utcnow() + timedelta(days=1)
data = {'state': 'B', 'younger_than': tomorrow.isoformat(), **common_data}
response = rest_client.get('/replicas/bad/states', headers=headers(auth(auth_token)), query_string=data)
assert response.status_code == 200
tot_bad_files = []
for line in response.get_data(as_text=True).split('\n'):
if line != '':
tot_bad_files.append(dumps(line))
nb_tot_bad_files = len(tot_bad_files)
assert nb_tot_bad_files == 0
if not list_pfns:
response = rest_client.get('/replicas/bad/summary', headers=headers(auth(auth_token)))
assert response.status_code == 200
nb_tot_bad_files2 = 0
for line in response.get_data(as_text=True).split('\n'):
if line != '':
line = loads(line)
nb_tot_bad_files2 += int(line.get('BAD', 0))
assert nb_tot_bad_files1 == nb_tot_bad_files2
def test_rest_list_replicas_content_type(rse_factory, mock_scope, replica_client, rest_client, auth_token):
""" REPLICA (REST): send a GET to list replicas with specific ACCEPT header."""
rse, _ = rse_factory.make_mock_rse()
scope = mock_scope.external
name = 'file_%s' % generate_uuid()
files1 = [{'scope': scope, 'name': name, 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}}]
replica_client.add_replicas(rse=rse, files=files1)
# unsupported requested content type
response = rest_client.get('/replicas/%s/%s' % (scope, name), headers=headers(auth(auth_token), accept('application/unsupported')))
assert response.status_code == 406
# content type json stream
response = rest_client.get('/replicas/%s/%s' % (scope, name), headers=headers(auth(auth_token), accept(Mime.JSON_STREAM)))
assert [header[1] for header in response.headers if header[0] == 'Content-Type'][0] == Mime.JSON_STREAM
# content type metalink4
response = rest_client.get('/replicas/%s/%s' % (scope, name), headers=headers(auth(auth_token), accept(Mime.METALINK)))
assert [header[1] for header in response.headers if header[0] == 'Content-Type'][0] == Mime.METALINK
# no requested content type
response = rest_client.get('/replicas/%s/%s' % (scope, name), headers=headers(auth(auth_token)))
assert [header[1] for header in response.headers if header[0] == 'Content-Type'][0] == Mime.JSON_STREAM
# all content types
response = rest_client.get('/replicas/%s/%s' % (scope, name), headers=headers(auth(auth_token), accept('*/*')))
assert [header[1] for header in response.headers if header[0] == 'Content-Type'][0] == Mime.JSON_STREAM
# multiple content types
response = rest_client.get('/replicas/%s/%s' % (scope, name), headers=headers(auth(auth_token), accept('application/unsupported, application/x-json-stream')))
assert [header[1] for header in response.headers if header[0] == 'Content-Type'][0] == Mime.JSON_STREAM
response = rest_client.get('/replicas/%s/%s' % (scope, name), headers=headers(auth(auth_token), accept('application/unsupported, */*;q=0.8')))
assert [header[1] for header in response.headers if header[0] == 'Content-Type'][0] == Mime.JSON_STREAM
def test_client_add_list_replicas(rse_factory, replica_client, mock_scope):
""" REPLICA (CLIENT): Add, change state and list file replicas """
rse1, _ = rse_factory.make_posix_rse()
rse2, _ = rse_factory.make_posix_rse()
nbfiles = 5
files1 = [{'scope': mock_scope.external, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
replica_client.add_replicas(rse=rse1, files=files1)
files2 = [{'scope': mock_scope.external, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
replica_client.add_replicas(rse=rse2, files=files2)
replicas = [r for r in replica_client.list_replicas(dids=[{'scope': i['scope'], 'name': i['name']} for i in files1])]
assert len(replicas) == len(files1)
replicas = [r for r in replica_client.list_replicas(dids=[{'scope': i['scope'], 'name': i['name']} for i in files2], schemes=['file'])]
assert len(replicas) == 5
replicas = [r for r in replica_client.list_replicas(dids=[{'scope': i['scope'], 'name': i['name']} for i in files2], schemes=['srm'])]
assert len(replicas) == 5
files3 = [{'scope': mock_scope.external, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'state': 'U', 'meta': {'events': 10}} for _ in range(nbfiles)]
replica_client.add_replicas(rse=rse2, files=files3)
replicas = [r for r in replica_client.list_replicas(dids=[{'scope': i['scope'], 'name': i['name']} for i in files3], schemes=['file'])]
for i in range(nbfiles):
assert replicas[i]['rses'] == {}
files4 = []
for file in files3:
file['state'] = 'A'
files4.append(file)
replica_client.update_replicas_states(rse2, files=files4)
replicas = [r for r in replica_client.list_replicas(dids=[{'scope': i['scope'], 'name': i['name']} for i in files3], schemes=['file'], all_states=True)]
assert len(replicas) == 5
for i in range(nbfiles):
assert rse2 in replicas[i]['rses']
def test_client_add_replica_scope_not_found(replica_client):
""" REPLICA (CLIENT): Add replica with missing scope """
files = [{'scope': 'nonexistingscope', 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb'}]
with pytest.raises(ScopeNotFound):
replica_client.add_replicas(rse='MOCK', files=files)
def test_client_access_denied_on_delete_replicas(rse_factory, mock_scope, replica_client):
""" REPLICA (CLIENT): Access denied on delete file replicas """
rse, _ = rse_factory.make_mock_rse()
nbfiles = 5
files = [{'scope': mock_scope.external, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
replica_client.add_replicas(rse=rse, files=files)
with pytest.raises(AccessDenied):
replica_client.delete_replicas(rse=rse, files=files)
for f in files:
replicas = list(replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']}]))
assert len(replicas) == 1
def test_client_list_replicas_on_did_without_replicas(rse_factory, did_factory, replica_client, did_client, root_account):
""" REPLICA (CLIENT): dids of type FILE, but without replicas, must be listed with empty pfns and rses"""
rse, _ = rse_factory.make_posix_rse()
file = did_factory.random_did()
dataset = did_factory.make_dataset()
container = did_factory.make_container()
@transactional_session
def __add_file_did_without_replica(session=None):
models.DataIdentifier(scope=file['scope'], name=file['name'], did_type=DIDType.FILE, bytes=1, adler32='0cc737eb', account=root_account).save(session=session, flush=False)
__add_file_did_without_replica()
# make all scopes external
file, dataset, container = ({'scope': did['scope'].external, 'name': did['name']} for did in (file, dataset, container))
did_client.add_files_to_dataset(files=[file], **dataset)
did_client.add_datasets_to_container(dsns=[dataset], **container)
replicas = list(replica_client.list_replicas(dids=[file]))
assert len(replicas) == 1
assert not replicas[0]['rses']
assert not replicas[0]['pfns']
# TODO: fix listing dids without replicas from datasets and containers and uncomment the following 2 asserts
# assert list(replica_client.list_replicas(dids=[dataset]))
# assert list(replica_client.list_replicas(dids=[container]))
def test_client_list_blocklisted_replicas(rse_factory, did_factory, replica_client, did_client):
""" REPLICA (CLIENT): Blocklisted replicas are filtered in list replicas"""
rse, _ = rse_factory.make_posix_rse()
file = did_factory.upload_test_file(rse)
dataset = did_factory.make_dataset()
container = did_factory.make_container()
# make all scopes external
file, dataset, container = ({'scope': did['scope'].external, 'name': did['name']} for did in (file, dataset, container))
did_client.add_files_to_dataset(files=[file], **dataset)
did_client.add_datasets_to_container(dsns=[dataset], **container)
# availability_write will not have any impact on listing replicas
did_factory.client.update_rse(rse, {'availability_write': False})
for did in (file, dataset, container):
replicas = list(replica_client.list_replicas(dids=[did]))
assert len(replicas) == 1
assert len(replicas[0]['rses']) == 1
# if availability_read is set to false, the replicas from the given rse will not be listed
did_factory.client.update_rse(rse, {'availability_read': False})
replicas = list(replica_client.list_replicas(dids=[file], ignore_availability=False))
assert len(replicas) == 1
assert not replicas[0]['rses'] and not replicas[0]['pfns']
for did in (dataset, container):
replicas = list(replica_client.list_replicas(dids=[did], ignore_availability=False))
assert len(replicas) == 0
# By default unavailable replicas will be returned
for did in (file, dataset, container):
replicas = list(replica_client.list_replicas(dids=[did]))
assert len(replicas) == 1
assert len(replicas[0]['rses']) == 1
@pytest.mark.dirty
@pytest.mark.noparallel(reason='runs minos, which acts on all bad pfns')
def test_client_add_temporary_unavailable_pfns(rse_factory, mock_scope, replica_client):
""" REPLICA (CLIENT): Add temporary unavailable PFNs"""
rse, rse_id = rse_factory.make_posix_rse()
nbfiles = 5
# Adding replicas to deterministic RSE
files = [{'scope': mock_scope.external, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
replica_client.add_replicas(rse=rse, files=files)
# Listing replicas on deterministic RSE
list_rep = []
for replica in replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in files], schemes=['file'], all_states=True):
pfn = list(replica['pfns'].keys())[0]
list_rep.append(pfn)
# Submit bad PFNs
now = datetime.utcnow()
reason_str = generate_uuid()
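    # expires_at is set to the current time, so by the time the minos expiration
    # run below executes the temporary unavailability has already lapsed and the
    # replicas are expected to revert to AVAILABLE.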
replica_client.add_bad_pfns(pfns=list_rep, reason=str(reason_str), state='TEMPORARY_UNAVAILABLE', expires_at=now.isoformat())
result = get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None)
bad_pfns = {}
for res in result:
bad_pfns[res['pfn']] = (res['state'], res['reason'], res['expires_at'])
for pfn in list_rep:
pfn = str(clean_surls([pfn])[0])
assert pfn in bad_pfns
assert bad_pfns[pfn][0] == BadPFNStatus.TEMPORARY_UNAVAILABLE
assert bad_pfns[pfn][1] == reason_str
# Submit with wrong state
with pytest.raises(RucioException):
replica_client.add_bad_pfns(pfns=list_rep, reason=str(reason_str), state='BADSTATE', expires_at=now.isoformat())
# Run minos once
minos_run(threads=1, bulk=10000, once=True)
result = get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None)
pfns = [res['pfn'] for res in result if res['pfn'] in bad_pfns]
res_pfns = []
for replica in list_rep:
if replica in pfns:
res_pfns.append(replica)
assert res_pfns == []
# Check the state in the replica table
for did in files:
rep = get_replicas_state(scope=mock_scope, name=did['name'])
assert list(rep.keys())[0] == ReplicaState.TEMPORARY_UNAVAILABLE
rep = []
for did in files:
did['state'] = ReplicaState.TEMPORARY_UNAVAILABLE
rep.append(did)
# Run the minos expiration
minos_temp_run(threads=1, once=True)
# Check the state in the replica table
for did in files:
rep = get_replicas_state(scope=mock_scope, name=did['name'])
assert list(rep.keys())[0] == ReplicaState.AVAILABLE
def test_client_set_tombstone(rse_factory, mock_scope, root_account, replica_client):
""" REPLICA (CLIENT): set tombstone on replica """
# Set tombstone on one replica
rse, rse_id = rse_factory.make_mock_rse()
name = generate_uuid()
add_replica(rse_id, mock_scope, name, 4, root_account)
assert get_replica(rse_id, mock_scope, name)['tombstone'] is None
replica_client.set_tombstone([{'rse': rse, 'scope': mock_scope.external, 'name': name}])
assert get_replica(rse_id, mock_scope, name)['tombstone'] == OBSOLETE
# Set tombstone on locked replica
name = generate_uuid()
add_replica(rse_id, mock_scope, name, 4, root_account)
RuleClient().add_replication_rule([{'name': name, 'scope': mock_scope.external}], 1, rse, locked=True)
with pytest.raises(ReplicaIsLocked):
replica_client.set_tombstone([{'rse': rse, 'scope': mock_scope.external, 'name': name}])
# Set tombstone on not found replica
name = generate_uuid()
with pytest.raises(ReplicaNotFound):
replica_client.set_tombstone([{'rse': rse, 'scope': mock_scope.external, 'name': name}])
def test_client_get_nrandom(rse_factory, did_factory, did_client, replica_client):
""" REPLICA (CLIENT): get N random replicas from a dataset"""
rse, _ = rse_factory.make_posix_rse()
dataset = did_factory.make_dataset()
dataset = {'scope': dataset['scope'].external, 'name': dataset['name']}
files = []
for _ in range(10):
file = did_factory.upload_test_file(rse)
file = {'scope': file['scope'].external, 'name': file['name']}
files.append(file)
did_client.add_files_to_dataset(files=files, **dataset)
replicas = list(replica_client.list_replicas(dids=[dataset], nrandom=5))
assert len(replicas) == 5
    # Requesting more files than actually exist in the dataset will return all files
replicas = list(replica_client.list_replicas(dids=[dataset], nrandom=15))
assert len(replicas) == 10
class TestReplicaMetalink:
@pytest.mark.dirty
@pytest.mark.noparallel(reason='uses pre-defined RSE')
def test_client_list_replicas_metalink_4(self, did_client, replica_client):
""" REPLICA (METALINK): List replicas as metalink version 4 """
fname = generate_uuid()
rses = ['MOCK', 'MOCK3', 'MOCK4']
dsn = generate_uuid()
files = [{'scope': 'mock', 'name': fname, 'bytes': 1, 'adler32': '0cc737eb'}]
did_client.add_dataset(scope='mock', name=dsn)
did_client.add_files_to_dataset('mock', name=dsn, files=files, rse='MOCK')
for r in rses:
replica_client.add_replicas(r, files)
ml = xmltodict.parse(replica_client.list_replicas(files,
metalink=4,
all_states=True,
schemes=['https', 'sftp', 'file']),
xml_attribs=False)
assert 3 == len(ml['metalink']['file']['url'])
def test_client_get_did_from_pfns_nondeterministic(self, vo, rse_factory, mock_scope, root_account, replica_client):
""" REPLICA (CLIENT): Get list of DIDs associated to PFNs for non-deterministic sites"""
rse, rse_id = rse_factory.make_srm_rse(deterministic=False)
nbfiles = 3
pfns = []
input_ = {}
rse_info = rsemgr.get_rse_info(rse=rse, vo=vo)
assert rse_info['deterministic'] is False
files = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb',
'pfn': 'srm://%s.cern.ch/srm/managerv2?SFN=/test/%s/%s' % (rse_id, mock_scope, generate_uuid()), 'meta': {'events': 10}} for _ in range(nbfiles)]
for f in files:
input_[f['pfn']] = {'scope': f['scope'].external, 'name': f['name']}
add_replicas(rse_id=rse_id, files=files, account=root_account, ignore_availability=True)
for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm'], ignore_availability=True):
for r in replica['rses']:
pfns.extend(replica['rses'][r])
for result in replica_client.get_did_from_pfns(pfns, rse):
pfn = list(result.keys())[0]
assert input_[pfn] == list(result.values())[0]
def test_client_get_did_from_pfns_deterministic(self, vo, rse_factory, mock_scope, root_account, replica_client):
""" REPLICA (CLIENT): Get list of DIDs associated to PFNs for deterministic sites"""
rse, rse_id = rse_factory.make_srm_rse()
nbfiles = 3
pfns = []
input_ = {}
rse_info = rsemgr.get_rse_info(rse=rse, vo=vo)
assert rse_info['deterministic'] is True
files = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
p = rsemgr.create_protocol(rse_info, 'read', scheme='srm')
for f in files:
pfn = list(p.lfns2pfns(lfns={'scope': f['scope'].external, 'name': f['name']}).values())[0]
pfns.append(pfn)
input_[pfn] = {'scope': f['scope'].external, 'name': f['name']}
add_replicas(rse_id=rse_id, files=files, account=root_account, ignore_availability=True)
for result in replica_client.get_did_from_pfns(pfns, rse):
pfn = list(result.keys())[0]
assert input_[pfn] == list(result.values())[0]
@pytest.mark.parametrize("content_type", [Mime.METALINK, Mime.JSON_STREAM])
def test_client_list_replicas_streaming_error(content_type, vo, did_client, replica_client):
"""
    REPLICA (CLIENT): List replicas and test the behavior when an error occurs while streaming.

    This test mocks the WSGI framework directly: the WSGI test clients showed
    different behavior than the Apache web server, and running against Apache
    made it impractical to inject an error after the API had already returned
    an element.
"""
# mock data taken from a real response
mock_api_response = {
"adler32": "0cc737eb", "name": "file_a07ae361c1b844ba95f65b0ac385a3be", "rses": {
"MOCK3": ["srm://mock3.com:8443/srm/managerv2?SFN=/rucio/tmpdisk/rucio_tests/mock/bf/a5/file_a07ae361c1b844ba95f65b0ac385a3be"],
"MOCK": ["https://mock.com:2880/pnfs/rucio/disk-only/scratchdisk/mock/bf/a5/file_a07ae361c1b844ba95f65b0ac385a3be"],
"MOCK4": ["file://localhost/tmp/rucio_rse/mock/bf/a5/file_a07ae361c1b844ba95f65b0ac385a3be"]
}, "space_token": "RUCIODISK", "bytes": 1, "states": {"MOCK3": "AVAILABLE", "MOCK": "AVAILABLE", "MOCK4": "AVAILABLE"}, "pfns": {
"srm://mock3.com:8443/srm/managerv2?SFN=/rucio/tmpdisk/rucio_tests/mock/bf/a5/file_a07ae361c1b844ba95f65b0ac385a3be": {
"domain": "wan", "rse": "MOCK3", "priority": 3, "volatile": False, "client_extract": False, "type": "DISK", "rse_id": "4bce8ccadf594c42a627f842ccdb8fc2"
},
"https://mock.com:2880/pnfs/rucio/disk-only/scratchdisk/mock/bf/a5/file_a07ae361c1b844ba95f65b0ac385a3be": {
"domain": "wan", "rse": "MOCK", "priority": 2, "volatile": False, "client_extract": False, "type": "DISK", "rse_id": "908b01ee6fa04dd497c52d4869d778ca"
},
"file://localhost/tmp/rucio_rse/mock/bf/a5/file_a07ae361c1b844ba95f65b0ac385a3be": {
"domain": "wan", "rse": "MOCK4", "priority": 1, "volatile": False, "client_extract": False, "type": "DISK", "rse_id": "fd69ce85288845d9adcb54e2a7017520"
}
}, "scope": "mock", "md5": None
}
def api_returns(*_, **__):
yield mock_api_response
# raise after yielding an element
raise DatabaseException('Database error for testing')
json_data = dumps({'dids': [{'scope': 'mock', 'name': generate_uuid()}]})
def list_replicas_on_api():
from werkzeug.datastructures import Headers
class FakeRequest:
class FakeAcceptMimetypes:
provided = False
best_match = mock.MagicMock(return_value=content_type)
environ = {
'issuer': 'root',
'vo': vo,
'request_id': generate_uuid(),
}
query_string = None
args = MultiDict()
data = json_data
get_data = mock.MagicMock(return_value=json_data)
headers = Headers()
accept_mimetypes = FakeAcceptMimetypes()
remote_addr = '127.0.0.1'
response_mock = mock.Mock(return_value=None)
class FakeFlask:
request = FakeRequest()
abort = mock.MagicMock()
Response = response_mock
with mock.patch('rucio.web.rest.flaskapi.v1.common.flask', new=FakeFlask()), \
mock.patch('rucio.web.rest.flaskapi.v1.replicas.request', new=FakeRequest()), \
mock.patch('rucio.web.rest.flaskapi.v1.replicas.list_replicas', side_effect=api_returns):
from rucio.web.rest.flaskapi.v1.replicas import ListReplicas
list_replicas_restapi = ListReplicas()
list_replicas_restapi.post()
# for debugging when this test fails
print(f'Response({response_mock.call_args})')
print(f' args = {response_mock.call_args[0]}')
print(f'kwargs = {response_mock.call_args[1]}')
assert response_mock.call_args[1]['content_type'] == content_type
response_iter = response_mock.call_args[0][0]
assert response_iter != '', 'unexpected empty response'
# since we're directly accessing the generator for Flask, there is no error handling
with pytest.raises(DatabaseException, match='Database error for testing'):
for element in response_iter:
yield element
if content_type == Mime.METALINK:
# for metalink, this builds the incomplete XML that should be returned by the API on error
metalink = ''
for line in list_replicas_on_api():
metalink += line
assert metalink
print(metalink)
with pytest.raises(ElementTree.ParseError):
ElementTree.fromstring(metalink)
elif content_type == Mime.JSON_STREAM:
# for the json stream mimetype the API method just returns all mocked replicas on error
replicas = []
for json_doc in list_replicas_on_api():
if json_doc:
replicas.append(parse_response(json_doc))
assert replicas
print(replicas)
assert replicas == [mock_api_response]
else:
pytest.fail('unknown content_type parameter on test: ' + content_type)
| 50.629599
| 179
| 0.622771
|
ef53eb70c33118eca4d5e2773040fe8528edc2e9
| 6,913
|
py
|
Python
|
python/afsk_basic.py
|
pavelfpl/gr-gsSDR
|
141f5cd1f53b9691c7c7e084f32343bddc0d2d97
|
[
"MIT"
] | 1
|
2021-06-16T14:35:29.000Z
|
2021-06-16T14:35:29.000Z
|
python/afsk_basic.py
|
pavelfpl/gr-gsSDR
|
141f5cd1f53b9691c7c7e084f32343bddc0d2d97
|
[
"MIT"
] | null | null | null |
python/afsk_basic.py
|
pavelfpl/gr-gsSDR
|
141f5cd1f53b9691c7c7e084f32343bddc0d2d97
|
[
"MIT"
] | 1
|
2021-03-03T14:51:02.000Z
|
2021-03-03T14:51:02.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 Pavel Fiala
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import queue
import time
import numpy as np
from gnuradio import gr
import pmt,array
class afsk_basic(gr.sync_block):
"""
-------------------------------------------------------------------
A continuous phase FSK modulator for PMT custom packets / like AX25
-------------------------------------------------------------------
    When given a packet, this block converts it to an audio stream using the
    configured parameters. Two of them matter in particular:
      - Preamble Len (ms): how long to transmit a clock signal (01010101)
      - If preamble_len_ms == 0, no preamble is added.
    The mark frequency, space frequency and baud rate are configurable to allow
    further experimentation; V.23 modems, for example, use 1300/2100 Hz tones
    to generate 1200 baud signals.
Modified for GR 3.8 from: https://github.com/tkuester/gr-bruninga
Links: https://inst.eecs.berkeley.edu/~ee123/sp15/lab/lab6/Lab6-Part-A-Audio-Frequency-Shift-Keying.html
https://notblackmagic.com/bitsnpieces/afsk/
"""
def __init__(self, samp_rate, preamble_len_ms, mark_freq, space_freq, baud_rate, stream_tag):
gr.sync_block.__init__(self,
name="afsk_basic",
in_sig=None,
out_sig=[np.float32])
self.samp_rate = samp_rate
self.mark_freq = mark_freq
self.space_freq = space_freq
self.baud_rate = baud_rate
self.stream_tag = stream_tag
self.preamble_len_bits = 0
if not (preamble_len_ms == 0):
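            # preamble_len_ms milliseconds at baud_rate bits/s gives
            # (preamble_len_ms / 1000) * baud_rate bits; dividing by 2 counts the
            # "01" clock pairs that [0, 1] * preamble_len_bits later expands to.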
self.preamble_len_bits = int((preamble_len_ms / 1000.0) * baud_rate / 2)
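        # Samples per symbol: each bit is held for samp_rate / baud_rate audio samples.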
self.sps = int(1.0 * self.samp_rate / self.baud_rate)
self.outbox = queue.Queue()
self.output_buffer = None
self.opb_idx = 0
self.message_port_register_in(pmt.intern('in'))
self.set_msg_handler(pmt.intern('in'), self.handle_msg)
def handle_msg(self, msg_pmt):
# msg = pmt.to_python(msg_pmt)
# if not (isinstance(msg, tuple) and len(msg) == 2):
# print('Invalid Message: Expected tuple of len 2')
# print('Dropping msg of type %s' % type(msg))
# return
        data_in = pmt.cdr(msg_pmt)
data = array.array('B', pmt.u8vector_elements(data_in))
self.outbox.put(data)
def ax25_to_fsk(self, msg):
# Generate message ...
if not (self.preamble_len_bits == 0):
msg_bits = [0, 1] * self.preamble_len_bits
msg_bits += msg
else:
msg_bits = msg
# Calculate phase increments ...
mark_pinc = 2 * np.pi * self.mark_freq / self.samp_rate
space_pinc = 2 * np.pi * self.space_freq / self.samp_rate
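        # Each increment is 2*pi*f/samp_rate radians per sample; carrying `phase`
        # across bits keeps the sinusoid continuous at bit boundaries (CPFSK).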
phase = 0
opb = np.empty(len(msg_bits) * self.sps)
for i, bit in enumerate(msg_bits):
            pinc = (mark_pinc if bit == 1 else space_pinc)
phase += pinc
tmp = np.arange(self.sps) * pinc + phase
opb[i*self.sps:(i+1)*self.sps] = np.sin(tmp)
phase = tmp[-1]
return opb
def work(self, input_items, output_items):
out = output_items[0]
idx = 0
# TODO: Transmit cooldown period
if self.output_buffer is None:
if self.outbox.empty():
# TODO: This is a bit of a hack to work around the ALSA Audio
# Sink being unhappy with underflows ...
if(len(self.stream_tag)==0):
out[0:] = 0
return len(out)
else:
return 0
self.output_buffer = self.ax25_to_fsk(self.outbox.get())
self.opb_idx = 0
# print(len(self.output_buffer))
key = pmt.intern(self.stream_tag)
value = pmt.from_long(len(self.output_buffer))
self.add_item_tag(0, # Write to output port 0 ...
self.nitems_written(0), # Index of the tag in absolute terms ...
key, # Key of the tag ...
value # Value of the tag ...
)
# key = pmt.intern("stream_tag_stop")
# value = pmt.intern(str(len(self.output_buffer)))
# self.add_item_tag(0, # Write to output port 0 ...
# self.nitems_written(0)+len(self.output_buffer), # Index of the tag in absolute terms ...
# key, # Key of the tag ...
# value # Value of the tag ...
# )
# How many samples do we have left for each buffer ?
opb_left = len(self.output_buffer) - self.opb_idx
out_left = len(out) - idx
# Take the minimum, and copy them to out
cnt = min(opb_left, out_left)
out[idx:idx+cnt] = self.output_buffer[self.opb_idx:self.opb_idx+cnt]
# Update counters
idx += cnt
self.opb_idx += cnt
# If we run out of samples in the output buffer, we're done ...
if self.opb_idx >= len(self.output_buffer):
self.output_buffer = None
if(len(self.stream_tag)==0):
# Fill the remaining buffer with zeros. Hack to help the ALSA audio sink ...
# be happy.
if idx < len(out):
out[idx:] = 0
return len(out)
else:
return idx
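# ---------------------------------------------------------------------------
# Editor's note (not part of the original block): the sketch below is a
# hypothetical, self-contained illustration of the same continuous-phase AFSK
# math used in ax25_to_fsk() above, without any GNU Radio dependency. The
# parameter values (48 kHz sample rate, 1200/2200 Hz mark/space tones,
# 1200 baud) are assumptions chosen for Bell-202-style AFSK and are not taken
# from the original code.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    samp_rate = 48000
    mark_freq = 1200.0
    space_freq = 2200.0
    baud_rate = 1200
    sps = int(samp_rate / baud_rate)                  # samples per bit
    bits = [0, 1] * 8 + [1, 0, 1, 1, 0, 0, 1, 0]      # short clock preamble + data

    mark_pinc = 2 * np.pi * mark_freq / samp_rate     # phase step per sample (mark)
    space_pinc = 2 * np.pi * space_freq / samp_rate   # phase step per sample (space)

    phase = 0.0
    audio = np.empty(len(bits) * sps)
    for i, bit in enumerate(bits):
        pinc = mark_pinc if bit == 1 else space_pinc
        # Continue from the previous phase so the waveform has no jumps
        # at bit boundaries (continuous-phase FSK).
        tmp = np.arange(1, sps + 1) * pinc + phase
        audio[i * sps:(i + 1) * sps] = np.sin(tmp)
        phase = tmp[-1]

    print('generated %d samples (%.1f ms of audio)'
          % (len(audio), 1000.0 * len(audio) / samp_rate))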
| 37.775956
| 122
| 0.558513
|
12895bb4f0ca81fc9819d8b4969034f47711f172
| 94
|
py
|
Python
|
joplin/pages/event_page/apps.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 15
|
2018-09-27T07:36:30.000Z
|
2021-08-03T16:01:21.000Z
|
joplin/pages/event_page/apps.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 183
|
2017-11-16T23:30:47.000Z
|
2020-12-18T21:43:36.000Z
|
joplin/pages/event_page/apps.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 12
|
2017-12-12T22:48:05.000Z
|
2021-03-01T18:01:24.000Z
|
from django.apps import AppConfig
class EventPageConfig(AppConfig):
name = 'event_page'
| 15.666667
| 33
| 0.765957
|
8b55ea3aca6d5193dad5e6b2ccd8cfcc8a824b56
| 1,895
|
py
|
Python
|
smc/examples/switch_interface.py
|
kobaan/fp-NGFW-SMC-python
|
7be57bdde954e4115a887c0140054c87cc0b53a0
|
[
"Apache-2.0"
] | 17
|
2019-11-19T07:25:09.000Z
|
2022-02-16T16:43:51.000Z
|
smc/examples/switch_interface.py
|
kobaan/fp-NGFW-SMC-python
|
7be57bdde954e4115a887c0140054c87cc0b53a0
|
[
"Apache-2.0"
] | 25
|
2020-05-20T12:27:35.000Z
|
2022-02-21T05:27:10.000Z
|
smc/examples/switch_interface.py
|
kobaan/fp-NGFW-SMC-python
|
7be57bdde954e4115a887c0140054c87cc0b53a0
|
[
"Apache-2.0"
] | 7
|
2020-02-04T12:16:50.000Z
|
2022-02-18T14:01:04.000Z
|
"""
Example script to show how to use Switch interfaces
-create switch interface/port group for an engine
-display switch interface
-delete switch interface
Needs Demo mode
"""
# Python Base Import
from smc import session
from smc.compat import is_api_version_less_than_or_equal
from smc.core.engines import Layer3Firewall
from smc_info import *
if __name__ == "__main__":
session.login(url=SMC_URL, api_key=API_KEY, verify=False, timeout=120, api_version=API_VERSION)
print("session OK")
try:
single_fw = Layer3Firewall("Plano")
single_fw.switch_physical_interface.add_switch_interface(1, "110", "My new switch interface")
# retrieve interface id
switch_interface_id = single_fw.switch_physical_interface.all()[0].interface_id
single_fw.switch_physical_interface \
.add_port_group_interface(switch_interface_id, 1, [1],
interfaces=[{'nodes': [{'address': '12.12.12.12',
'network_value': '12.12.12.0/24',
'nodeid': 1}]}])
single_fw.switch_physical_interface \
.add_port_group_interface(switch_interface_id, 2, [2, 3, 4, 5])
print("{}:{}".format(switch_interface_id,
single_fw.switch_physical_interface.get(switch_interface_id)))
for interface in single_fw.switch_physical_interface:
print("{}: {}".format(interface, interface.port_group_interface))
interface = single_fw.switch_physical_interface.get(switch_interface_id)
for sub_intf in interface.all_interfaces:
intf_id = sub_intf.data.interface_id
print("{}: {}".format(intf_id, sub_intf))
except Exception as e:
print(e)
exit(-1)
finally:
single_fw.switch_physical_interface.get(switch_interface_id).delete()
session.logout()
| 35.754717
| 99
| 0.667018
|
5f9164c1cc7e9494a573895e93fd39680b8520f6
| 1,324
|
py
|
Python
|
ymir/backend/src/ymir_app/app/models/iteration.py
|
Zhang-SJ930104/ymir
|
dd6481be6f229ade4cf8fba64ef44a15357430c4
|
[
"Apache-2.0"
] | null | null | null |
ymir/backend/src/ymir_app/app/models/iteration.py
|
Zhang-SJ930104/ymir
|
dd6481be6f229ade4cf8fba64ef44a15357430c4
|
[
"Apache-2.0"
] | 1
|
2022-01-18T09:28:29.000Z
|
2022-01-18T09:28:29.000Z
|
ymir/backend/src/ymir_app/app/models/iteration.py
|
Aryalfrat/ymir
|
d4617ed00ef67a77ab4e1944763f608bface4be6
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from sqlalchemy import Boolean, Column, DateTime, Integer, SmallInteger, String
from app.config import settings
from app.db.base_class import Base
from app.models.task import Task # noqa
class Iteration(Base):
__tablename__ = "iteration"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
description = Column(String(settings.STRING_LEN_LIMIT))
iteration_round = Column(Integer, index=True, nullable=False)
current_stage = Column(SmallInteger, index=True, default=0, nullable=False)
previous_iteration = Column(Integer, index=True, default=0, nullable=False)
mining_input_dataset_id = Column(Integer)
mining_output_dataset_id = Column(Integer)
label_output_dataset_id = Column(Integer)
training_input_dataset_id = Column(Integer)
training_output_model_id = Column(Integer)
testing_dataset_id = Column(Integer)
user_id = Column(Integer, index=True, nullable=False)
project_id = Column(Integer, index=True, nullable=False)
is_deleted = Column(Boolean, default=False, nullable=False)
create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False)
update_datetime = Column(
DateTime,
default=datetime.utcnow,
onupdate=datetime.utcnow,
nullable=False,
)
| 36.777778
| 79
| 0.749245
|
9c2bc8c5d959e04429538ef1d2dbb29ff5f8426c
| 399
|
py
|
Python
|
calipsoplus/wsgi.py
|
dsanchez-cells/calipsoplus-backend
|
7eaa6904ec59d88052644b31041b92ee20e54354
|
[
"MIT"
] | 4
|
2018-12-04T15:08:27.000Z
|
2019-04-11T09:49:41.000Z
|
calipsoplus/wsgi.py
|
dsanchez-cells/calipsoplus-backend
|
7eaa6904ec59d88052644b31041b92ee20e54354
|
[
"MIT"
] | 63
|
2018-11-22T13:07:56.000Z
|
2021-06-10T20:55:58.000Z
|
calipsoplus/wsgi.py
|
dsanchez-cells/calipsoplus-backend
|
7eaa6904ec59d88052644b31041b92ee20e54354
|
[
"MIT"
] | 10
|
2018-11-23T08:17:28.000Z
|
2022-01-15T23:41:59.000Z
|
"""
WSGI config for calipsoplus project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "calipsoplus.settings")
application = get_wsgi_application()
| 23.470588
| 78
| 0.789474
|
a0722888b4a9e16d2484eca76aa9a74456555147
| 5,715
|
py
|
Python
|
tensorflow/python/kernel_tests/sparse_ops/sparse_reorder_op_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 190,993
|
2015-11-09T13:17:30.000Z
|
2022-03-31T23:05:27.000Z
|
tensorflow/python/kernel_tests/sparse_ops/sparse_reorder_op_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 48,461
|
2015-11-09T14:21:11.000Z
|
2022-03-31T23:17:33.000Z
|
tensorflow/python/kernel_tests/sparse_ops/sparse_reorder_op_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 104,981
|
2015-11-09T13:40:17.000Z
|
2022-03-31T19:51:54.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SparseReorderTest(test.TestCase):
def _SparseTensorPlaceholder(self):
return sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.float64),
array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
def testStaticShapeInfoPreserved(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_5x6(np.arange(6)))
self.assertAllEqual((5, 6), sp_input.get_shape())
sp_output = sparse_ops.sparse_reorder(sp_input)
self.assertAllEqual((5, 6), sp_output.get_shape())
def testAlreadyInOrder(self):
with self.session() as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
sp_output = sparse_ops.sparse_reorder(input_val)
output_val = self.evaluate(sp_output)
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedAlreadyInOrder(self):
with self.session() as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6(np.arange(6))
sp_output = sparse_ops.sparse_reorder(sp_input)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testOutOfOrder(self):
expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
with self.session() as sess:
for _ in range(5): # To test various random permutations
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_output = sparse_ops.sparse_reorder(input_val)
output_val = self.evaluate(sp_output)
self.assertAllEqual(output_val.indices, expected_output_val.indices)
self.assertAllEqual(output_val.values, expected_output_val.values)
self.assertAllEqual(output_val.dense_shape,
expected_output_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedOutOfOrder(self):
expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
with self.session() as sess:
for _ in range(5): # To test various random permutations
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_output = sparse_ops.sparse_reorder(sp_input)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, expected_output_val.indices)
self.assertAllEqual(output_val.values, expected_output_val.values)
self.assertAllEqual(output_val.dense_shape,
expected_output_val.dense_shape)
@test_util.run_deprecated_v1
def testGradients(self):
with self.session():
for _ in range(5): # To test various random permutations
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_input = sparse_tensor.SparseTensor(input_val.indices,
input_val.values,
input_val.dense_shape)
sp_output = sparse_ops.sparse_reorder(sp_input)
err = gradient_checker.compute_gradient_error(
sp_input.values,
input_val.values.shape,
sp_output.values,
input_val.values.shape,
x_init_value=input_val.values)
self.assertLess(err, 1e-11)
def testShapeOverflow(self):
# Test case for GitHub issue 45392
sp_input = sparse_tensor.SparseTensor(
indices=[[0, 0, 0, 0, 0, 0]],
values=[0.0],
dense_shape=[4096, 4096, 4096, 4096, 4096, 4096])
self.assertAllEqual((4096, 4096, 4096, 4096, 4096, 4096),
sp_input.get_shape())
sp_output = sparse_ops.sparse_reorder(sp_input)
self.assertAllEqual((4096, 4096, 4096, 4096, 4096, 4096),
sp_output.get_shape())
if __name__ == "__main__":
test.main()
| 41.413043
| 80
| 0.695188
|
7024ac2376c8652f3b5d17d514c2b573ff16a7e0
| 113,866
|
py
|
Python
|
numpy/core/fromnumeric.py
|
shoyer/numpy
|
4ad33d21b1a30f931e23307e9f9355b70f633bed
|
[
"BSD-3-Clause"
] | 1
|
2020-12-22T17:44:13.000Z
|
2020-12-22T17:44:13.000Z
|
numpy/core/fromnumeric.py
|
shoyer/numpy
|
4ad33d21b1a30f931e23307e9f9355b70f633bed
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/core/fromnumeric.py
|
shoyer/numpy
|
4ad33d21b1a30f931e23307e9f9355b70f633bed
|
[
"BSD-3-Clause"
] | 1
|
2019-06-11T20:34:19.000Z
|
2019-06-11T20:34:19.000Z
|
"""Module containing non-deprecated functions borrowed from Numeric.
"""
from __future__ import division, absolute_import, print_function
import functools
import types
import warnings
import numpy as np
from .. import VisibleDeprecationWarning
from . import multiarray as mu
from . import overrides
from . import umath as um
from . import numerictypes as nt
from ._asarray import asarray, array, asanyarray
from .multiarray import concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
_gentype = types.GeneratorType
# save away Python sum
_sum_ = sum
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
bound = getattr(obj, method, None)
if bound is None:
return _wrapit(obj, method, *args, **kwds)
try:
return bound(*args, **kwds)
except TypeError:
# A TypeError occurs if the object does have such a method in its
# class, but its signature is not identical to that of NumPy's. This
# situation has occurred in the case of a downstream library like
# 'pandas'.
#
# Call _wrapit from within the except clause to ensure a potential
# exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
passkwargs = {k: v for k, v in kwargs.items()
if v is not np._NoValue}
if type(obj) is not mu.ndarray:
try:
reduction = getattr(obj, method)
except AttributeError:
pass
else:
# This branch is needed for reductions like any which don't
# support a dtype.
if dtype is not None:
return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
else:
return reduction(axis=axis, out=out, **passkwargs)
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
return (a, out)
@array_function_dispatch(_take_dispatcher)
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : array_like (Ni..., M, Nk...)
The source array.
indices : array_like (Nj...)
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional (Ni..., Nj..., Nk...)
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if `mode='raise'`; use other modes for better performance.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray (Ni..., Nj..., Nk...)
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
take_along_axis : Take elements by matching the array and the index arrays
Notes
-----
By eliminating the inner loop in the description above, and using `s_` to
build simple slice objects, `take` can be expressed in terms of applying
fancy indexing to each 1-d slice::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
for kk in ndindex(Nj):
out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
For this reason, it is equivalent to (but faster than) the following use
of `apply_along_axis`::
out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
def _reshape_dispatcher(a, newshape, order=None):
return (a,)
# not deprecated --- copy if necessary, view otherwise
@array_function_dispatch(_reshape_dispatcher)
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to read / write the
elements using Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that
the 'C' and 'F' options take no account of the memory layout of
the underlying array, and only refer to the order of indexing.
'A' means to read / write the elements in Fortran-like index
order if `a` is Fortran *contiguous* in memory, C-like order
otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised when the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose makes the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
Traceback (most recent call last):
...
AttributeError: incompatible shape for a non-contiguous array
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
return _wrapfunc(a, 'reshape', newshape, order=order)
def _choose_dispatcher(a, choices, out=None, mode=None):
yield a
for c in choices:
yield c
yield out
@array_function_dispatch(_choose_dispatcher)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of `n` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each `i`. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode=raise`` (the default), then, first of all, each element of
`a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
`i` (in that range) is the value at the `(j0, j1, ..., jm)` position
in `Ba` - then the value at the same position in the new array is the
value in `Bchoices[i]` at that same position;
* if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
integer; negative integers are mapped to 0; values greater than `n-1`
are mapped to `n-1`; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in `[0, n-1]`, where `n` is the number
of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if `mode='raise'`; use other modes for better performance.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside `[0, n-1]` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod `n`
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
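# Editorial sketch (hypothetical helper, not part of the NumPy API): an
# element-by-element restatement of the indexing rule quoted in the choose
# docstring above, checked against np.choose itself.
def _example_choose_indexing():
    import numpy as np
    a = np.array([[1, 0, 1], [0, 1, 0]])
    choices = [np.full(a.shape, -10), np.full(a.shape, 10)]
    # result[I] == choices[a[I]][I] for every index tuple I
    expected = np.array([choices[a[I]][I] for I in np.ndindex(a.shape)])
    assert np.array_equal(np.choose(a, choices), expected.reshape(a.shape))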
def _repeat_dispatcher(a, repeats, axis=None):
return (a,)
@array_function_dispatch(_repeat_dispatcher)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _wrapfunc(a, 'repeat', repeats, axis=axis)
def _put_dispatcher(a, ind, v, mode=None):
return (a, ind, v)
@array_function_dispatch(_put_dispatcher)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers. In 'raise' mode,
if an exception occurs the target array may still be modified.
See Also
--------
putmask, place
put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError:
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(a).__name__))
return put(ind, v, mode=mode)
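# Editorial sketch (hypothetical helper, not part of the NumPy API): checks
# the "roughly equivalent to ``a.flat[ind] = v``" statement from the put
# docstring above on a small array.
def _example_put_flat_equivalence():
    import numpy as np
    a = np.arange(6).reshape(2, 3)
    b = a.copy()
    np.put(a, [0, 4], [-1, -2])
    b.flat[[0, 4]] = [-1, -2]
    assert np.array_equal(a, b)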
def _swapaxes_dispatcher(a, axis1, axis2):
return (a,)
@array_function_dispatch(_swapaxes_dispatcher)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
def _transpose_dispatcher(a, axes=None):
return (a,)
@array_function_dispatch(_transpose_dispatcher)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
moveaxis
argsort
Notes
-----
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
Transposing a 1-D array returns an unchanged view of the original array.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return _wrapfunc(a, 'transpose', axes)
def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_partition_dispatcher)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a
way that the value of the element in k-th position is in the
position it would be in a sorted array. All elements smaller than
the k-th element are moved before this element and all equal or
greater are moved behind it. The ordering of the elements in the two
partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
Element index to partition by. The k-th value of the element
will be in its final sorted position and all smaller elements
will be moved before it and all equal or greater elements behind
it. The order of all elements in the partitions is undefined. If
provided with a sequence of k-th it will partition all elements
indexed by k-th of them into their sorted position at once.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string. Not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average
speed, worst case performance, work space size, and whether they are
stable. A stable sort keeps items with the same key in the same
relative order. The available algorithms have the following
properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
================= ======= ============= ============ =======
'introselect' 1 O(n) 0 no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently,
partitioning along the last axis is faster and uses less space than
partitioning along any other axis.
The sort order for complex numbers is lexicographic. If both the
real and imaginary parts are non-nan then the order is determined by
the real parts except when they are equal, in which case the order
is determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
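# Editorial sketch (hypothetical helper, not part of the NumPy API): verifies
# the partition invariant stated above -- the k-th element lands in its final
# sorted position, with no larger value before it and no smaller value after.
def _example_partition_invariant():
    import numpy as np
    rng = np.random.default_rng(0)
    a = rng.integers(0, 100, size=20)
    k = 7
    p = np.partition(a, k)
    assert p[k] == np.sort(a)[k]
    assert (p[:k] <= p[k]).all() and (p[k:] >= p[k]).all()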
def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argpartition_dispatcher)
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
algorithm specified by the `kind` keyword. It returns an array of
indices of the same shape as `a` that index data along the given
axis in partitioned order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
        before it and all larger elements behind it. The order of all
        elements in the partitions is undefined. If provided with a
sequence of k-th it will partition all of them into their sorted
position at once.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string, and not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
        More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always
        yields the partitioned `a`, irrespective of dimensionality.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
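# Editorial sketch (hypothetical helper, not part of the NumPy API): applies
# np.take_along_axis with the returned indices, as suggested in the Returns
# section above, and cross-checks the k-th column against a full sort.
def _example_argpartition_take_along_axis():
    import numpy as np
    x = np.array([[9, 1, 7, 4], [3, 8, 2, 6]])
    kth = 2
    ind = np.argpartition(x, kth, axis=1)
    taken = np.take_along_axis(x, ind, axis=1)
    # each row's k-th element matches the k-th element of the sorted row
    assert np.array_equal(taken[:, kth], np.sort(x, axis=1)[:, kth])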
def _sort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_sort_dispatcher)
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort or radix sort under the covers and, in general,
the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
        .. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind speed worst case work space stable
=========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
'heapsort' 3 O(n*log(n)) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'timsort' 2 O(n*log(n)) ~n/2 yes
=========== ======= ============= ============ ========
.. note:: The datatype determines which of 'mergesort' or 'timsort'
is actually used, even if 'mergesort' is specified. User selection
at a finer scale is not currently available.
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to an introsort which will switch
heapsort when it does not make enough progress. This makes its
worst case O(n*log(n)).
    'stable' automatically chooses the best stable sorting algorithm
for the data type being sorted. It, along with 'mergesort' is
currently mapped to timsort or radix sort depending on the
data type. API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
.. versionadded:: 1.17.0
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For details of timsort, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.sort(axis=axis, kind=kind, order=order)
return a
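# Editorial sketch (hypothetical helper, not part of the NumPy API): checks
# the extended sort order documented above -- nan values are sorted to the
# end of the array.
def _example_sort_nan_ordering():
    import numpy as np
    a = np.array([3.0, np.nan, 1.0, np.nan, 2.0])
    s = np.sort(a)
    assert np.array_equal(s[:3], [1.0, 2.0, 3.0])
    assert np.isnan(s[3:]).all()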
def _argsort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argsort_dispatcher)
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
        .. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
def _argmax_dispatcher(a, axis=None, out=None):
return (a, out)
@array_function_dispatch(_argmax_dispatcher)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
Indexes of the maximal elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
>>> ind
(1, 2)
>>> a[ind]
15
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def _argmin_dispatcher(a, axis=None, out=None):
return (a, out)
@array_function_dispatch(_argmin_dispatcher)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
Indices of the minimum elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
>>> ind
(0, 0)
>>> a[ind]
10
>>> b = np.arange(6) + 10
>>> b[4] = 10
>>> b
array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def _searchsorted_dispatcher(a, v, side=None, sorter=None):
return (a, v, sorter)
@array_function_dispatch(_searchsorted_dispatcher)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left ``a[i-1] < v <= a[i]``
right ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
    This function is a faster version of the builtin python `bisect.bisect_left`
    (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
    and is also vectorized in the `v` argument.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
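# Editorial sketch (hypothetical helper, not part of the NumPy API): inserting
# values at the indices returned by searchsorted keeps the array sorted, which
# is the "maintain order" property described above.
def _example_searchsorted_keeps_order():
    import numpy as np
    a = np.array([1, 2, 3, 4, 5])
    v = np.array([-10, 10, 2, 3])
    idx = np.searchsorted(a, v)
    combined = np.insert(a, idx, v)    # indices refer to the original array
    assert np.array_equal(combined, np.sort(combined))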
def _resize_dispatcher(a, new_shape):
return (a,)
@array_function_dispatch(_resize_dispatcher)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Notes
-----
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, taken
from `a` as they are laid out in memory, disregarding strides and axes.
(This is in case the new shape is smaller. For larger, see above.)
This functionality is therefore not suitable to resize images,
or data where each axis represents a separate and distinct entity.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(2,3))
array([[0, 1, 2],
[3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
total_size = um.multiply.reduce(new_shape)
if Na == 0 or total_size == 0:
return mu.zeros(new_shape, a.dtype)
n_copies = int(total_size / Na)
extra = total_size % Na
if extra != 0:
n_copies = n_copies + 1
extra = Na - extra
a = concatenate((a,) * n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape)
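# Editorial sketch (hypothetical helper, not part of the NumPy API): contrasts
# np.resize, which repeats the data as described above, with the in-place
# ndarray.resize method, which pads with zeros instead.
def _example_resize_fill_behaviour():
    import numpy as np
    a = np.array([[0, 1], [2, 3]])
    assert np.array_equal(np.resize(a, (2, 4)), [[0, 1, 2, 3], [0, 1, 2, 3]])
    b = a.copy()
    b.resize((2, 4), refcheck=False)   # in place: zero-filled, not repeated
    assert np.array_equal(b, [[0, 1, 2, 3], [0, 0, 0, 0]])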
def _squeeze_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_squeeze_dispatcher)
def squeeze(a, axis=None):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`.
Raises
------
ValueError
If `axis` is not `None`, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze', axis=axis)
if axis is None:
return squeeze()
else:
return squeeze(axis=axis)
def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
return (a,)
@array_function_dispatch(_diagonal_dispatcher)
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
Starting in NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
In some future release, it will return a read/write view and writing to
the returned array will alter your original array. The returned array
will have the same type as the input array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
of just ``np.diagonal(a)``. This will work with both past and future
versions of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D, then a 1-D array containing the diagonal and of the
same type as `a` is returned unless `a` is a `matrix`, in which case
a 1-D array rather than a (2-D) `matrix` is returned in order to
maintain backward compatibility.
If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
are removed, and a new axis inserted at the end corresponding to the
diagonal.
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
The anti-diagonal can be obtained by reversing the order of elements
using either `numpy.flipud` or `numpy.fliplr`.
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.fliplr(a).diagonal() # Horizontal flip
array([2, 4, 6])
>>> np.flipud(a).diagonal() # Vertical flip
array([6, 4, 2])
Note that the order in which the diagonal is retrieved varies depending
on the flip function.
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
else:
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
def _trace_dispatcher(
a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_trace_dispatcher)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
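# Editorial sketch (hypothetical helper, not part of the NumPy API): trace is
# the sum of the diagonal taken over the same pair of axes, so it can be
# cross-checked against diagonal(...).sum(-1).
def _example_trace_vs_diagonal():
    import numpy as np
    a = np.arange(8).reshape(2, 2, 2)
    assert np.array_equal(np.trace(a), np.diagonal(a).sum(axis=-1))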
def _ravel_dispatcher(a, order=None):
return (a,)
@array_function_dispatch(_ravel_dispatcher)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
As of NumPy 1.10, the returned array will have the same type as the input
array. (for example, a masked array will be returned for a masked array
input)
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means
to index the elements in row-major, C-style order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to index the elements
in column-major, Fortran-style order, with the
first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of
the memory layout of the underlying array, and only refer to
the order of axis indexing. 'A' means to read the elements in
Fortran-like index order if `a` is Fortran *contiguous* in
memory, C-like order otherwise. 'K' means to read the
elements in the order they occur in memory, except for
reversing the data when strides are negative. By default, 'C'
index order is used.
Returns
-------
y : array_like
y is an array of the same subtype as `a`, with shape ``(a.size,)``.
Note that matrices are special cased for backward compatibility, if `a`
is a matrix, then y is a 1-D ndarray.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
In row-major, C-style order, in two dimensions, the row index
varies the slowest, and the column index the quickest. This can
be generalized to multiple dimensions, where row-major order
implies that the index along the first axis varies slowest, and
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
may be preferable.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ravel(x)
array([1, 2, 3, 4, 5, 6])
>>> x.reshape(-1)
array([1, 2, 3, 4, 5, 6])
>>> np.ravel(x, order='F')
array([1, 4, 2, 5, 3, 6])
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> np.ravel(x.T)
array([1, 4, 2, 5, 3, 6])
>>> np.ravel(x.T, order='A')
array([1, 2, 3, 4, 5, 6])
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
if isinstance(a, np.matrix):
return asarray(a).ravel(order=order)
else:
return asanyarray(a).ravel(order=order)
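# Editorial sketch (hypothetical helper, not part of the NumPy API): shows the
# "copy only if needed" behaviour and the memory-order reading of order='K'
# described in the ravel docstring above.
def _example_ravel_orders():
    import numpy as np
    x = np.array([[1, 2, 3], [4, 5, 6]])
    assert np.shares_memory(x, np.ravel(x))           # contiguous input: a view
    assert np.array_equal(np.ravel(x.T, order='K'),   # 'K' follows memory order
                          np.ravel(x))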
def _nonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_nonzero_dispatcher)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order. The corresponding non-zero
values can be obtained with::
a[nonzero(a)]
To group the indices by element, rather than dimension, use::
transpose(nonzero(a))
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]])
    A common use for ``nonzero`` is to find the indices of an array where
    a condition is True. Given an array `a`, the condition `a` > 3 is a
    boolean array and since False is interpreted as 0, np.nonzero(a > 3)
    yields the indices of `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9])
>>> a[a > 3] # prefer this spelling
array([4, 5, 6, 7, 8, 9])
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero')
def _shape_dispatcher(a):
return (a,)
@array_function_dispatch(_shape_dispatcher)
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
alen
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def _compress_dispatcher(condition, a, axis=None, out=None):
return (condition, a, out)
@array_function_dispatch(_compress_dispatcher)
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
np.extract: Equivalent method when working on 1-D arrays
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
def _clip_dispatcher(a, a_min, a_max, out=None):
return (a, a_min, a_max)
@array_function_dispatch(_clip_dispatcher)
def clip(a, a_min, a_max, out=None):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : scalar or array_like or `None`
Minimum value. If `None`, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
a_max : scalar or array_like or `None`
Maximum value. If `None`, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
`None`. If `a_min` or `a_max` are array_like, then the three
arrays will be broadcasted to match their shapes.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
return _wrapfunc(a, 'clip', a_min, a_max, out=out)
def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_sum_dispatcher)
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a sum is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
Starting value for the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
The sum of an empty array is the neutral element 0:
>>> np.sum([])
0.0
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
>>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1)
array([1., 5.])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
You can also start the sum with a value other than zero:
>>> np.sum([10], initial=5)
15
"""
if isinstance(a, _gentype):
# 2018-02-25, 1.15.0
warnings.warn(
"Calling np.sum(generator) is deprecated, and in the future will give a different result. "
"Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
DeprecationWarning, stacklevel=2)
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
initial=initial, where=where)
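# Editorial sketch (hypothetical helper, not part of the NumPy API): shows how
# keepdims=True leaves the reduced axis in place so the result broadcasts
# against the input, as described above.
def _example_sum_keepdims():
    import numpy as np
    a = np.arange(6).reshape(2, 3)
    s = np.sum(a, axis=1, keepdims=True)
    assert s.shape == (2, 1)
    fractions = a / s                  # row sums broadcast across each row
    assert np.allclose(fractions.sum(axis=1), 1.0)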
def _any_dispatcher(a, axis=None, out=None, keepdims=None):
return (a, out)
@array_function_dispatch(_any_dispatcher)
def any(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
    Returns a single boolean unless `axis` is not ``None``.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (`axis` = `None`) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See `doc.ufuncs` (Section "Output arguments") for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False])
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array(False)
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array(True), array(True))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
return _wrapreduction(a, np.logical_or, 'any', axis, None, out, keepdims=keepdims)
def _all_dispatcher(a, axis=None, out=None, keepdims=None):
return (a, out)
@array_function_dispatch(_all_dispatcher)
def all(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (`axis` = `None`) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
"Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `all` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False])
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
(28293632, 28293632, array(True)) # may vary
"""
return _wrapreduction(a, np.logical_and, 'all', axis, None, out, keepdims=keepdims)
def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumsum_dispatcher)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
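# Editorial sketch (hypothetical helper, not part of the NumPy API): np.diff,
# listed in See Also above, undoes cumsum for every element after the first.
def _example_cumsum_diff():
    import numpy as np
    a = np.array([1, 2, 3, 4, 5])
    c = np.cumsum(a)
    assert c[0] == a[0]
    assert np.array_equal(np.diff(c), a[1:])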
def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
return (a, out)
@array_function_dispatch(_ptp_dispatcher)
def ptp(a, axis=None, out=None, keepdims=np._NoValue):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
Parameters
----------
a : array_like
Input values.
axis : None or int or tuple of ints, optional
Axis along which to find the peaks. By default, flatten the
array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.15.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `ptp` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.ptp(x, axis=0)
array([2, 2])
>>> np.ptp(x, axis=1)
array([1, 1])
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
ptp = a.ptp
except AttributeError:
pass
else:
return ptp(axis=axis, out=out, **kwargs)
return _methods._ptp(a, axis=axis, out=out, **kwargs)
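# Supplementary sketch (illustrative) of the tuple-axis and `keepdims` behaviour
# described in the docstring above:
#
#     >>> x = np.arange(4).reshape((2, 2))
#     >>> np.ptp(x, axis=(0, 1))     # reduce over both axes at once
#     3
#     >>> np.ptp(x, axis=0, keepdims=True)
#     array([[2, 2]])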
def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amax_dispatcher)
def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amax` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The minimum value of an output element. Must be present to allow
computation on empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the maximum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> np.amax(a, where=[False, True], initial=-1, axis=0)
array([-1, 3])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.amax(b, where=~np.isnan(b), initial=-1)
4.0
>>> np.nanmax(b)
4.0
You can use an initial value to compute the maximum of an empty slice, or
to initialize it to a different value:
>>> np.max([[-50], [10]], axis=-1, initial=0)
array([ 0, 10])
Notice that the initial value is used as one of the elements for which the
maximum is determined, unlike for the default argument of Python's max
function, which is only used for empty iterables.
>>> np.max([5], initial=6)
6
>>> max([5], default=6)
5
"""
return _wrapreduction(a, np.maximum, 'max', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amin_dispatcher)
def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amin` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The maximum value of an output element. Must be present to allow
computation on empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the minimum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> np.amin(a, where=[False, True], initial=10, axis=0)
array([10, 1])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.amin(b, where=~np.isnan(b), initial=10)
0.0
>>> np.nanmin(b)
0.0
>>> np.min([[-50], [10]], axis=-1, initial=0)
array([-50, 0])
Notice that the initial value is used as one of the elements for which the
minimum is determined, unlike for the default argument of Python's min
function, which is only used for empty iterables.
Notice that this isn't the same as Python's ``default`` argument.
>>> np.min([6], initial=5)
5
>>> min([6], default=5)
6
"""
return _wrapreduction(a, np.minimum, 'min', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _alen_dispathcer(a):
return (a,)
@array_function_dispatch(_alen_dispathcer)
def alen(a):
"""
Return the length of the first dimension of the input array.
Parameters
----------
a : array_like
Input array.
Returns
-------
alen : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_prod_dispatcher)
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The starting value for this product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x)
16 # may vary
The product of an empty array is the neutral element 1:
>>> np.prod([])
1.0
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
Or select specific elements to include:
>>> np.prod([1., np.nan, 3.], where=[True, False, True])
3.0
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == int
True
You can also start the product with a value other than one:
>>> np.prod([1, 2], initial=5)
10
"""
return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
keepdims=keepdims, initial=initial, where=where)
def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumprod_dispatcher)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def _ndim_dispatcher(a):
return (a,)
@array_function_dispatch(_ndim_dispatcher)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def _size_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_size_dispatcher)
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def _around_dispatcher(a, decimals=None, out=None):
return (a, out)
@array_function_dispatch(_around_dispatcher)
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
return (a, out)
@array_function_dispatch(_mean_dispatcher)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See `doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
By default, `float16` results are computed using `float32` intermediates
for extra precision.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([2., 3.])
>>> np.mean(a, axis=1)
array([1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.54999924
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806 # may vary
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
mean = a.mean
except AttributeError:
pass
else:
return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, **kwargs)
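# Supplementary sketch (illustrative): `keepdims` keeps the reduced axis, so the
# result broadcasts back against the input, e.g. for per-row centering:
#
#     >>> a = np.array([[1, 2], [3, 4]])
#     >>> np.mean(a, axis=0, keepdims=True)
#     array([[2., 3.]])
#     >>> a - np.mean(a, axis=1, keepdims=True)
#     array([[-0.5,  0.5],
#            [-0.5,  0.5]])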
def _std_dispatcher(
a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
return (a, out)
@array_function_dispatch(_std_dispatcher)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
the divisor ``N - ddof`` is used instead. In standard statistical
practice, ``ddof=1`` provides an unbiased estimator of the variance
of the infinite population. ``ddof=0`` provides a maximum likelihood
estimate of the variance for normally distributed variables. The
standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177 # may vary
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
std = a.std
except AttributeError:
pass
else:
return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
def _var_dispatcher(
a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
return (a, out)
@array_function_dispatch(_var_dispatcher)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std, mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
0.20250003
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759 # may vary
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
var = a.var
except AttributeError:
pass
else:
return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
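# Supplementary sketch (illustrative) of the `ddof` divisor described above:
#
#     >>> a = np.array([1., 2., 3., 4.])
#     >>> np.var(a)           # divisor N = 4
#     1.25
#     >>> np.var(a, ddof=1)   # divisor N - 1 = 3 (unbiased sample variance)
#     1.6666666666666667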
# Aliases of other functions. These have their own definitions only so that
# they can have unique docstrings.
@array_function_dispatch(_around_dispatcher)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return around(a, decimals=decimals, out=out)
@array_function_dispatch(_prod_dispatcher, verify=False)
def product(*args, **kwargs):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
return prod(*args, **kwargs)
@array_function_dispatch(_cumprod_dispatcher, verify=False)
def cumproduct(*args, **kwargs):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
return cumprod(*args, **kwargs)
@array_function_dispatch(_any_dispatcher, verify=False)
def sometrue(*args, **kwargs):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function; see for details.
"""
return any(*args, **kwargs)
@array_function_dispatch(_all_dispatcher, verify=False)
def alltrue(*args, **kwargs):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
return all(*args, **kwargs)
@array_function_dispatch(_ndim_dispatcher)
def rank(a):
"""
Return the number of dimensions of an array.
.. note::
This function is deprecated in NumPy 1.9 to avoid confusion with
`numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
should be used instead.
See Also
--------
ndim : equivalent non-deprecated function
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in NumPy `ndim` is used instead.
"""
# 2014-04-12, 1.9
warnings.warn(
"`rank` is deprecated; use the `ndim` attribute or function instead. "
"To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
VisibleDeprecationWarning, stacklevel=2)
return ndim(a)
| 31.931015
| 103
| 0.608478
|
ef9c2235339e74c1dba98dd013fe3c9298cefcba
| 7,548
|
py
|
Python
|
bridgedata.py
|
drhoet/marantz-hue-adapter
|
48dcdc8f00d671a6edbcc5e8c12b8a0604892b33
|
[
"MIT"
] | null | null | null |
bridgedata.py
|
drhoet/marantz-hue-adapter
|
48dcdc8f00d671a6edbcc5e8c12b8a0604892b33
|
[
"MIT"
] | null | null | null |
bridgedata.py
|
drhoet/marantz-hue-adapter
|
48dcdc8f00d671a6edbcc5e8c12b8a0604892b33
|
[
"MIT"
] | null | null | null |
import json
import logging
# Remark: this class is not thread-safe. It doesn't have to be, since everything is running on 1 thread: the server
# is an async server...
class BridgeData():
def __init__(self, config):
self.lights = {}
self.config = {
"name": config['HueBridge']['name'],
"zigbeechannel": 15,
"bridgeid": config['HueBridge']['bridgeid'],
"mac": config['HueBridge']['mac'],
"dhcp": True,
"ipaddress": config['Server']['host'] + ":" + config['Server']['port'],
"netmask": config['Server']['netmask'],
"gateway": config['Server']['gateway'],
"proxyaddress": "none",
"proxyport": 0,
"UTC": "2016-06-30T18:21:35",
"localtime": "2016-06-30T20:21:35",
"timezone": "Europe/Berlin",
"modelid": "BSB002",
"swversion": "01033370",
"apiversion": "1.13.0",
"swupdate": {
"updatestate": 0,
"checkforupdate": False,
"devicetypes": {
"bridge": False,
"lights": [],
"sensors": []
},
"url": "",
"text": "",
"notify": False
},
"linkbutton": False,
"portalservices": True,
"portalconnection": "connected",
"portalstate": {
"signedon": True,
"incoming": True,
"outgoing": True,
"communication": "disconnected"
},
"factorynew": False,
"replacesbridgeid": None,
"backup": {
"status": "idle",
"errorcode": 0
},
"whitelist": {
config['HueBridge']['user']: {
"last use date": "2016-03-11T20:35:57",
"create date": "2016-01-28T17:17:16",
"name": "MarantzHueAdapter"
}
}
}
self.data = {
"lights": self.lights,
"groups": {},
"config": self.config,
"schedules": {}
}
class Light():
def __init__(self, name, uniqueid, manufacturername='drhoet', swversion='0'):
self.type = ''
self.name = name
self.modelid = ''
self.manufacturername = manufacturername
self.uniqueid = uniqueid
self.swversion = swversion
self.state = {}
self.listeners = []
def set_state_property(self, property_name, property_value):
oldValue = self.state[property_name]
for listener in self.listeners:
listener.on_set_state_property(property_name, oldValue, property_value)
self.internal_set_state_property(property_name, property_value)
def internal_set_state_property(self, property_name, property_value):
self.state[property_name] = property_value
def internal_set_bool(self, property_name, property_value):
if property_value:
self.internal_set_state_property(property_name, True)
else:
self.internal_set_state_property(property_name, False)
def internal_set_int(self, property_name, property_value, minValue, maxValue):
if property_value > maxValue:
self.internal_set_state_property(property_name, maxValue)
elif property_value < minValue:
self.internal_set_state_property(property_name, minValue)
else:
self.internal_set_state_property(property_name, property_value)
def register_listener(self, listener):
self.listeners.append(listener)
def asJsonable(self):
return { 'type': self.type, 'name': self.name, 'modelid': self.modelid, 'manufacturername': self.manufacturername,
'uniqueid': self.uniqueid, 'swversion': self.swversion, 'state': self.state }
class LightStateListener():
def __init__(self):
self.prop_func_map = {}
self.logger = logging.getLogger('LightStateListener')
def on_set_state_property(self, name, oldValue, newValue):
self.logger.debug('Setting %s to: %s', name, newValue)
if name in self.prop_func_map:
self.prop_func_map[name](oldValue, newValue)
else:
self.logger.warning('Property not supported: %s', name)
class DimmableLight(Light):
def __init__(self, name, uniqueid):
super().__init__(name, uniqueid)
self.type = 'Dimmable light'
self.modelid = 'LWB006'
self.state = {
"on": True,
"bri": 255,
"alert": "none",
"reachable": True
}
def set_on(self, value):
self.internal_set_bool('on', value)
def set_brightness(self, value):
self.internal_set_int('bri', value, 0, 255)
def set_reachable(self, value):
self.internal_set_bool('reachable', value)
class DimmableLightStateListener(LightStateListener):
def __init__(self):
super().__init__()
self.prop_func_map['on'] = self.on_set_power
self.prop_func_map['bri'] = self.on_set_brightness
self.prop_func_map['alert'] = self.on_set_alert
self.prop_func_map['reachable'] = self.on_set_reachable
def on_set_power(self, oldValue, newValue):
pass
def on_set_brightness(self, oldValue, newValue):
pass
def on_set_alert(self, oldValue, newValue):
pass
def on_set_reachable(self, oldValue, newValue):
pass
class ExtendedColorLight(DimmableLight):
def __init__(self, name, uniqueid):
super().__init__(name, uniqueid)
self.type = 'Extended color light'
self.modelid = 'LLC020'  # taking a Hue Go: color gamut C is richer than gamut B
self.state = {
"on": True,
"bri": 255,
"hue": 0,
"sat": 0,
"xy": [0.0000, 0.0000],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True
}
def set_xy(self, value):
self.internal_set_state_property('xy', value)
class ExtendedColorLightStateListener(DimmableLightStateListener):
def __init__(self):
super().__init__()
self.prop_func_map['hue'] = self.on_set_hue
self.prop_func_map['sat'] = self.on_set_saturation
self.prop_func_map['xy'] = self.on_set_colorxy
self.prop_func_map['ct'] = self.on_set_colorct
self.prop_func_map['effect'] = self.on_set_effect
self.prop_func_map['colormode'] = self.on_set_colormode
def on_set_hue(self, oldValue, newValue):
pass
def on_set_saturation(self, oldValue, newValue):
pass
def on_set_colorxy(self, oldValue, newValue):
pass
def on_set_colorct(self, oldValue, newValue):
pass
def on_set_effect(self, oldValue, newValue):
pass
def on_set_colormode(self, oldValue, newValue):
pass
class BridgeDataJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Light):
return obj.asJsonable()
# Let the base class default method raise the TypeError
return super().default(obj)
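# Minimal usage sketch (the light name and unique id below are placeholders, not
# values taken from the adapter's configuration):
#
#     light = DimmableLight("Living room", "00:17:88:01:00:00:00:01-0b")
#     light.register_listener(DimmableLightStateListener())
#     light.set_state_property("bri", 128)   # notifies listeners, then updates state
#     json.dumps(light, cls=BridgeDataJSONEncoder)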
| 34.309091
| 123
| 0.556041
|
6d12ed1793bd72b7187185c3b641c4dd3e5e0b48
| 357
|
py
|
Python
|
_apicheck/apicheck/tools/manage/create_tool/cli.py
|
sundayayandele/apicheck
|
ab91f567d67547b92b8e94824a29dcd5993b769e
|
[
"Apache-2.0"
] | 2
|
2019-05-31T09:56:59.000Z
|
2019-05-31T11:28:50.000Z
|
_apicheck/apicheck/tools/manage/create_tool/cli.py
|
sundayayandele/apicheck
|
ab91f567d67547b92b8e94824a29dcd5993b769e
|
[
"Apache-2.0"
] | 3
|
2022-02-07T03:37:37.000Z
|
2022-03-02T03:38:13.000Z
|
_apicheck/apicheck/tools/manage/create_tool/cli.py
|
sundayayandele/apicheck
|
ab91f567d67547b92b8e94824a29dcd5993b769e
|
[
"Apache-2.0"
] | 1
|
2021-07-18T15:01:22.000Z
|
2021-07-18T15:01:22.000Z
|
def cli(subparser):
plugin_args = subparser.add_parser(
'create-tool',
help='Manage stored APIs')
plugin_args.add_argument('name',
help="plugin name")
plugin_args.add_argument('--dest', "-d",
required=True,
help="plugin dir destination")
| 25.5
| 59
| 0.504202
|
946e26ffb9449698bb234c074433ca0fe25165d1
| 3,301
|
py
|
Python
|
pkimplode/__init__.py
|
implode-compression-impls/pkimplode.py
|
938746f261279dbd5739ef52600474f2ff37f69e
|
[
"MIT"
] | null | null | null |
pkimplode/__init__.py
|
implode-compression-impls/pkimplode.py
|
938746f261279dbd5739ef52600474f2ff37f69e
|
[
"MIT"
] | null | null | null |
pkimplode/__init__.py
|
implode-compression-impls/pkimplode.py
|
938746f261279dbd5739ef52600474f2ff37f69e
|
[
"MIT"
] | null | null | null |
import typing
from collections.abc import ByteString
from io import BytesIO, IOBase
from mmap import mmap
from warnings import warn
from zlib import crc32 as crc32_zlib
from pklib_base import PklibError
from pklib_base.enums import CompressionType
from .ctypes import _compressStream
__all__ = ("compressStreamToStream", "compressStreamToBytes", "compressBytesChunkedToStream", "compressBytesChunkedToBytes", "compress")
allowed_dict_sizes = frozenset((1024, 2048, 4096))
DEFAULT_DICTIONARY_SIZE = 4096
def compressStreamToStream(inputStream: IOBase, outputStream: IOBase, compressionType: CompressionType = CompressionType.binary, dictionarySize: int = DEFAULT_DICTIONARY_SIZE) -> None:
"""Used to do streaming compression. The first arg is the stream to read from, the second ard is the stream to write to.
May be a memory map. `chunkSize` is the hint"""
assert dictionarySize in allowed_dict_sizes, "Unallowed dict size, must be from " + repr(allowed_dict_sizes)
errorCode = _compressStream(inputStream, outputStream, compressionType=int(compressionType), dictionarySize=dictionarySize)
if errorCode:
raise Exception(PklibError(errorCode))
def compressBytesChunkedToStream(rawData: ByteString, outputStream: IOBase, compressionType: CompressionType = CompressionType.binary, dictionarySize: int = DEFAULT_DICTIONARY_SIZE) -> None:
"""Compresses `rawData` into `outputStream`."""
with BytesIO(rawData) as inputStream:
return compressStreamToStream(inputStream, outputStream, compressionType, dictionarySize)
def compressBytesChunkedToBytes(rawData: ByteString, compressionType: CompressionType = CompressionType.binary, dictionarySize: int = DEFAULT_DICTIONARY_SIZE) -> bytes:
"""Compresses `rawData` into `bytes`."""
with BytesIO() as outputStream:
compressBytesChunkedToStream(rawData, outputStream, compressionType, dictionarySize)
return outputStream.getvalue()
def compressStreamToBytes(inputStream: IOBase, compressionType: CompressionType = CompressionType.binary, dictionarySize: int = DEFAULT_DICTIONARY_SIZE) -> bytes:
"""Compresses `inputStream` and returns the compressed data as `bytes`. Processes the whole stream."""
with BytesIO() as outputStream:
compressStreamToStream(inputStream, outputStream, compressionType, dictionarySize)
return outputStream.getvalue()
_functionsUseCaseMapping = (
compressStreamToStream,
compressBytesChunkedToStream,
compressStreamToBytes,
compressBytesChunkedToBytes,
)
def compress(rawData: typing.Union[ByteString, IOBase], outputStream: typing.Optional[IOBase] = None) -> typing.Optional[bytes]:
"""A convenience function. It is better to use the more specialized ones since they have less overhead. It compresses `rawData` and dispatches to the matching specialized function.
`rawData` can be either a stream or `bytes`-like data.
If `outputStream` is None, the compressed data is returned as `bytes`. If `outputStream` is a stream, the result is written into it and nothing is returned."""
isOutputBytes = outputStream is None
isInputBytes = isinstance(rawData, (ByteString, mmap))
selector = isOutputBytes << 1 | int(isInputBytes)
func = _functionsUseCaseMapping[selector]
argz = [rawData]
if not isOutputBytes:
argz.append(outputStream)
warn("Use " + func.__name__ + " instead.")
return func(*argz)
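# Minimal usage sketch (assumes the underlying pklib native library is available
# to the `_compressStream` binding):
#
#     payload = b"example payload " * 64
#     packed = compressBytesChunkedToBytes(payload, CompressionType.binary, 2048)
#     isinstance(packed, bytes)   # -> True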
| 43.434211
| 189
| 0.795517
|
ad4b9a76736c7ff153ead1e4c724f6a4902a8fe7
| 3,738
|
py
|
Python
|
pandas_ta/overlap/supertrend.py
|
cloudlakecho/pandas-ta
|
f361621d614cd4ca67800c99be27cc908c0fce96
|
[
"MIT"
] | null | null | null |
pandas_ta/overlap/supertrend.py
|
cloudlakecho/pandas-ta
|
f361621d614cd4ca67800c99be27cc908c0fce96
|
[
"MIT"
] | null | null | null |
pandas_ta/overlap/supertrend.py
|
cloudlakecho/pandas-ta
|
f361621d614cd4ca67800c99be27cc908c0fce96
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from numpy import NaN as npNaN
from pandas import DataFrame
from pandas_ta.overlap import hl2
from pandas_ta.volatility import atr
from pandas_ta.utils import get_offset, verify_series
def supertrend(high, low, close, length=None, multiplier=None, offset=None, **kwargs):
"""Indicator: Supertrend"""
# Validate Arguments
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
length = int(length) if length and length > 0 else 7
multiplier = float(multiplier) if multiplier and multiplier > 0 else 3.
offset = get_offset(offset)
# Calculate Results
m = close.size
dir_, trend = [1] * m, [0] * m
long, short = [npNaN] * m, [npNaN] * m
hl2_ = hl2(high, low)
matr = multiplier * atr(high, low, close, length)
upperband = hl2_ + matr
lowerband = hl2_ - matr
for i in range(1, m):
if close.iloc[i] > upperband.iloc[i - 1]:
dir_[i] = 1
elif close.iloc[i] < lowerband.iloc[i - 1]:
dir_[i] = -1
else:
dir_[i] = dir_[i - 1]
if dir_[i] > 0 and lowerband.iloc[i] < lowerband.iloc[i - 1]:
lowerband.iloc[i] = lowerband.iloc[i - 1]
if dir_[i] < 0 and upperband.iloc[i] > upperband.iloc[i - 1]:
upperband.iloc[i] = upperband.iloc[i - 1]
if dir_[i] > 0:
trend[i] = long[i] = lowerband.iloc[i]
else:
trend[i] = short[i] = upperband.iloc[i]
# Prepare DataFrame to return
_props = f"_{length}_{multiplier}"
df = DataFrame({
f"SUPERT{_props}": trend,
f"SUPERTd{_props}": dir_,
f"SUPERTl{_props}": long,
f"SUPERTs{_props}": short
}, index=close.index)
df.name = f"SUPERT{_props}"
df.category = "overlap"
# Apply offset if needed
if offset != 0:
df = df.shift(offset)
# Handle fills
if 'fillna' in kwargs:
df.fillna(kwargs['fillna'], inplace=True)
if 'fill_method' in kwargs:
df.fillna(method=kwargs['fill_method'], inplace=True)
return df
supertrend.__doc__ = \
"""Supertrend (supertrend)
Supertrend is an overlap indicator. It is used to help identify trend
direction, set stop losses, identify support and resistance, and/or
generate buy & sell signals.
Sources:
http://www.freebsensetips.com/blog/detail/7/What-is-supertrend-indicator-its-calculation
Calculation:
Default Inputs:
length=7, multiplier=3.0
Default Direction:
Set to +1 or bullish trend at start
MID = multiplier * ATR
LOWERBAND = HL2 - MID
UPPERBAND = HL2 + MID
if UPPERBAND[i] < FINAL_UPPERBAND[i-1] and close[i-1] > FINAL_UPPERBAND[i-1]:
FINAL_UPPERBAND[i] = UPPERBAND[i]
else:
FINAL_UPPERBAND[i] = FINAL_UPPERBAND[i-1])
if LOWERBAND[i] > FINAL_LOWERBAND[i-1] and close[i-1] < FINAL_LOWERBAND[i-1]:
FINAL_LOWERBAND[i] = LOWERBAND[i]
else:
FINAL_LOWERBAND[i] = FINAL_LOWERBAND[i-1])
if close[i] <= FINAL_UPPERBAND[i]:
SUPERTREND[i] = FINAL_UPPERBAND[i]
else:
SUPERTREND[i] = FINAL_LOWERBAND[i]
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
length (int) : length for ATR calculation. Default: 7
multiplier (float): Coefficient for upper and lower band distance to midrange. Default: 3.0
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: SUPERT (trend), SUPERTd (direction), SUPERTl (long), SUPERTs (short) columns.
"""
| 30.639344
| 95
| 0.629213
|
579c1a5044a4a2bc341a3def1eeb1d352209d927
| 4,356
|
py
|
Python
|
squid_log_reader.py
|
recursethenreverse/numerouter
|
463ca5e96fb8426c05566ceaff91ab80c0e18bf5
|
[
"MIT"
] | null | null | null |
squid_log_reader.py
|
recursethenreverse/numerouter
|
463ca5e96fb8426c05566ceaff91ab80c0e18bf5
|
[
"MIT"
] | null | null | null |
squid_log_reader.py
|
recursethenreverse/numerouter
|
463ca5e96fb8426c05566ceaff91ab80c0e18bf5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import time
import os
import sys
import getopt
import pika
import json
import signal
class squid_to_rmq_injector:
type_application = "application"
type_application_json = "application/json"
type_img = ["image/gif",
"image/png",
"image/jpeg",
"image/webp",]
type_media = ["video/mp4",
"video/mpeg"]
advert_url_keys = ['daca_images', 'googlesyndication']
def __init__(self, config: dict):
self._file_name = config['file_name']
self._rabbit_mq_host = config['host']
self._queue_name_img = config['queue_name']
self._queue_name_json = config['queue_name_json']
self._tail_log = config['tail_file']
self._read_full_log = config['read_full_log']
self._clear_log_on_finish = config['clear_log_on_finish']
self._config = config
self._rmq_connection = self.init_rabbit_mq_connection()
self._rmq_channels = self.init_channels()
def init_rabbit_mq_connection(self):
credentials = pika.PlainCredentials('rabbit', 'rabbit') # user, password
parameters = pika.ConnectionParameters(self._rabbit_mq_host, 5672, '/', credentials)
return pika.BlockingConnection(parameters)
def init_channels(self):
rmq_channels = {}
rmq_channels[self._queue_name_img] = self.reserve_channel(self._queue_name_img)
rmq_channels[self._queue_name_json] = self.reserve_channel(self._queue_name_json)
return rmq_channels
def reserve_channel(self, target_queue_name):
channel = self._rmq_connection.channel()
channel.queue_declare(queue=target_queue_name, durable=True,
arguments={'x-message-ttl': 86400000, 'x-max-length': 5242880})
return channel
def send_msg(self, msg, target_queue_name):
self._rmq_channels[target_queue_name].basic_publish(exchange='',
routing_key=target_queue_name,
body=msg)
# channel.basic_publish(exchange='',
# routing_key=target_queue_name,
# body=msg)
def start(self):
# Set the filename and open the file
if self._read_full_log:
self.read_whole_log()
if self._clear_log_on_finish:
self.clear_the_log_file()
if self._tail_log:
self.tail_log()
def tail_log(self):
print("Open file: ", self._file_name)
file = open(self._file_name, 'r')
# Find the size of the file and move to the end
st_results = os.stat(self._file_name)
st_size = st_results[6]
file.seek(st_size)
print("Initial File Size: ", str(st_size))
while 1:
where = file.tell()
line = file.readline()
if not line:
time.sleep(5)
file.seek(where)
else:
self.sort_message_to_queues(line)
def sort_message_to_queues(self, line):
line = line if line else None
if line is not None:
if self.is_message_type(line, "image"):
print("Push: ", line)
self.send_msg(line, self._queue_name_img)
elif self.is_message_type(line, "json"):
print("Push json: ", line)
self.send_msg(line, self._queue_name_json)
def is_message_type(self, log_line, message_string):
if log_line is None:
return False
log_line = str(log_line) # only cast once
parts = log_line.split("|||") # only split once
if len(parts) > 4:
if message_string in parts[4]:
return True
return False
def read_whole_log(self):
print("sending log lines to queue")
with open(self._file_name, 'r') as f:
for line in f:
self.sort_message_to_queues(line)
def clear_the_log_file(self):
print("Clearing log file for read")
with open(self._file_name, 'w'):
pass
if __name__ == "__main__":
with open('reader_config.json', 'r') as f:
config = json.load(f)
stri = squid_to_rmq_injector(config)
stri.start()
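# A reader_config.json along these lines is expected (the values shown are
# placeholders; the keys match what __init__ reads above):
#
#     {
#         "file_name": "/var/log/squid/access.log",
#         "host": "localhost",
#         "queue_name": "squid_images",
#         "queue_name_json": "squid_json",
#         "tail_file": true,
#         "read_full_log": false,
#         "clear_log_on_finish": false
#     }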
| 31.565217
| 92
| 0.59068
|
71e66dbf56b23e023d1785e8d8823c82fe2dc01e
| 4,708
|
py
|
Python
|
src/local/butler/package.py
|
nopsledder/clusterfuzz
|
529963438d956e46ddddfb62debc6ed808be0083
|
[
"Apache-2.0"
] | 1
|
2020-05-21T18:47:06.000Z
|
2020-05-21T18:47:06.000Z
|
src/local/butler/package.py
|
nopsledder/clusterfuzz
|
529963438d956e46ddddfb62debc6ed808be0083
|
[
"Apache-2.0"
] | 20
|
2020-07-28T19:01:56.000Z
|
2021-03-23T01:04:42.000Z
|
src/local/butler/package.py
|
nopsledder/clusterfuzz
|
529963438d956e46ddddfb62debc6ed808be0083
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""package.py handles the package command"""
from __future__ import print_function
import os
import re
import sys
import zipfile
from local.butler import appengine
from local.butler import common
from local.butler import constants
MIN_SUPPORTED_NODEJS_VERSION = 4
def _clear_zip(target_zip_path):
"""Remove zip and manifest file."""
if os.path.exists(constants.PACKAGE_TARGET_MANIFEST_PATH):
os.remove(constants.PACKAGE_TARGET_MANIFEST_PATH)
if os.path.exists(target_zip_path):
os.remove(target_zip_path)
def _add_to_zip(output_file, src_file_path, dest_file_path=None):
"""Add the src_file_path to the output_file with the right target path."""
if dest_file_path is None:
dest_file_path = src_file_path
output_file.write(src_file_path, os.path.join('clusterfuzz', dest_file_path))
def _is_nodejs_up_to_date():
"""Check if node is of version MINIMUM_NODEJS_VERSION."""
return_code, output = common.execute('node -v')
if return_code != 0:
return False
m = re.match(br'v([0-9]+)\..+', output.strip())
if not m:
return False
major_version = int(m.group(1))
return major_version >= MIN_SUPPORTED_NODEJS_VERSION
def _get_files(path):
"""Iterate through files in path."""
for root, _, filenames in os.walk(path):
for filename in filenames:
if filename.endswith('.pyc') or (os.sep + '.git') in root:
continue
yield os.path.join(root, filename)
def package(revision,
target_zip_dir=constants.PACKAGE_TARGET_ZIP_DIRECTORY,
target_manifest_path=constants.PACKAGE_TARGET_MANIFEST_PATH,
platform_name=None,
python3=False):
"""Prepare clusterfuzz-source.zip."""
is_ci = os.getenv('TEST_BOT_ENVIRONMENT')
if not is_ci and common.is_git_dirty():
print('Your branch is dirty. Please fix before packaging.')
sys.exit(1)
if not _is_nodejs_up_to_date():
print('You do not have nodejs, or your nodejs is not at least version 4.')
sys.exit(1)
common.install_dependencies(platform_name=platform_name)
# This needs to be done before packaging step to let src/appengine/config be
# archived for bot.
appengine.symlink_dirs()
_, ls_files_output = common.execute('git -C . ls-files', print_output=False)
file_paths = [path.decode('utf-8') for path in ls_files_output.splitlines()]
if not os.path.exists(target_zip_dir):
os.makedirs(target_zip_dir)
target_zip_name = constants.LEGACY_ZIP_NAME
if platform_name:
if python3:
target_zip_name = platform_name + '-3.zip'
else:
target_zip_name = platform_name + '.zip'
target_zip_path = os.path.join(target_zip_dir, target_zip_name)
_clear_zip(target_zip_path)
output_file = zipfile.ZipFile(target_zip_path, 'w', zipfile.ZIP_DEFLATED)
# Add files from git.
for file_path in file_paths:
if (file_path.startswith('config') or file_path.startswith('local') or
file_path.startswith(os.path.join('src', 'appengine')) or
file_path.startswith(os.path.join('src', 'local')) or
file_path.startswith(os.path.join('src', 'python', 'tests'))):
continue
_add_to_zip(output_file, file_path)
# These are project configuration yamls.
for path in _get_files(os.path.join('src', 'appengine', 'config')):
_add_to_zip(output_file, path)
# These are third party dependencies.
for path in _get_files(os.path.join('src', 'third_party')):
_add_to_zip(output_file, path)
output_file.close()
with open(target_manifest_path, 'w') as f:
f.write('%s\n' % revision)
with zipfile.ZipFile(target_zip_path, 'a', zipfile.ZIP_DEFLATED) as f:
_add_to_zip(f, target_manifest_path, constants.PACKAGE_TARGET_MANIFEST_PATH)
print('Revision: %s' % revision)
print()
print('%s is ready.' % target_zip_path)
return target_zip_path
def execute(args):
if args.platform == 'all':
for platform_name in list(constants.PLATFORMS.keys()):
package(
revision=common.compute_staging_revision(),
platform_name=platform_name)
else:
package(
revision=common.compute_staging_revision(), platform_name=args.platform)
| 31.178808
| 80
| 0.724936
|
0770455229ba59a073ce8e93868e77e123c73c59
| 687
|
py
|
Python
|
solutions/Deepest Leaves Sum/solution.py
|
nilax97/leetcode-solutions
|
d3c12f2b289662d199510e0431e177bbf3cda121
|
[
"MIT"
] | 3
|
2021-06-06T22:03:15.000Z
|
2021-06-08T08:49:04.000Z
|
solutions/Deepest Leaves Sum/solution.py
|
nilax97/leetcode-solutions
|
d3c12f2b289662d199510e0431e177bbf3cda121
|
[
"MIT"
] | null | null | null |
solutions/Deepest Leaves Sum/solution.py
|
nilax97/leetcode-solutions
|
d3c12f2b289662d199510e0431e177bbf3cda121
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def dfs(self, node, height):
if node == None:
return
if len(self.mat) < height:
self.mat.append([])
self.mat[height-1].append(node.val)
self.dfs(node.left,height+1)
self.dfs(node.right,height+1)
def deepestLeavesSum(self, root: TreeNode) -> int:
self.mat = list()
self.dfs(root,1)
ans = 0
for x in self.mat[-1]:
ans += x
return ans
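# Minimal usage sketch (TreeNode as in the commented definition above):
#
#     root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
#     Solution().deepestLeavesSum(root)   # -> 4; node 4 is the only deepest leaf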
| 26.423077
| 55
| 0.519651
|
e477a093cc6bd2b00e094676e4d623422285089a
| 3,080
|
py
|
Python
|
finsim-merch/merchant.py
|
sei-inam/finsim
|
0839b3fc15600be59743b84fb4801e9175d54cb4
|
[
"Unlicense",
"MIT"
] | 14
|
2019-08-01T12:00:00.000Z
|
2021-06-27T06:07:03.000Z
|
finsim-merch/merchant.py
|
sei-inam/finsim
|
0839b3fc15600be59743b84fb4801e9175d54cb4
|
[
"Unlicense",
"MIT"
] | null | null | null |
finsim-merch/merchant.py
|
sei-inam/finsim
|
0839b3fc15600be59743b84fb4801e9175d54cb4
|
[
"Unlicense",
"MIT"
] | 9
|
2019-06-18T13:17:21.000Z
|
2022-02-02T20:08:11.000Z
|
#!/usr/bin/python3
# FinSim
# Copyright 2018 Carnegie Mellon University. All Rights Reserved.
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
# Released under a MIT (SEI)-style license, please see license.txt or contact permission@sei.cmu.edu for full terms.
#Acts as a merchant PoS system - generates transactions
#Passes on transactions to merchant's CC processor to get finalized
import requests, random, time
from init_script import gen_all_data, handle_current_component
#gen_all_data( rand_seed, num_banks, num_ccs, num_mercs, num_users, url_list_filepath )
#handle_current_component( gen_all, component_id, num_banks, num_ccs, num_mercs, register_to_db=False, return_all=False )
gen_all = gen_all_data('3', 1, 1, 1, 50, '/home/url_list.txt')
current_component = handle_current_component( gen_all, 'm0', 1, 1, 1, False, False )
while True:
time.sleep(random.randrange(3, 10))
try:
if random.randint(0, 10) < 8:
current_user_acct = current_component[1][random.randrange( 0, len(current_component[1]) )]
if current_user_acct.startswith( str( current_component[0][3][4] ) ):
bnk_loginForm = {"username":current_component[0][1], "password":current_component[0][3][3]}
bnk_token = requests.post( current_component[0][3][1] + "login", json = bnk_loginForm, verify=False ).json()
print( bnk_token )
bnk_header = { "Authorization": "Bearer " + bnk_token['access_token'], "Content-Type": "application/json" }
depositResponse = requests.post( current_component[0][3][1] + "bank/deposit", json = { 'amount': random.randint(1, 250), 'account': current_user_acct, 'counterpart_name': current_component[0][1], 'fraud_flag': 0 }, headers = bnk_header, verify=False )
print( depositResponse.text )
else:
loginForm = {"username":current_component[0][1], "password":current_component[0][2][2]}
token = requests.post( current_component[0][2][1] + "login", json = loginForm, verify=False ).json()
print( token )
header = { "Authorization": "Bearer " + token['access_token'], "Content-Type": "application/json" }
transactionResponse = requests.post( current_component[0][2][1] + "processTransaction", json = { "amount": random.randrange(1, 25), 'card_num': current_component[1][random.randrange(0, len(current_component[1]))] }, headers = header, verify=False )
print( transactionResponse.text )
except Exception as e:
print( e )
print( "Issue with sending request. Trying again." )
| 73.333333
| 514
| 0.701299
|
8a3c35005929542401c2c5abf6030dbb39f373cb
| 10,558
|
py
|
Python
|
saleor/webhook/payloads.py
|
angeles-ricardo-89/saleor
|
5fab7a883d025bff83320fbdd557ed7afa2923a9
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/webhook/payloads.py
|
angeles-ricardo-89/saleor
|
5fab7a883d025bff83320fbdd557ed7afa2923a9
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/webhook/payloads.py
|
angeles-ricardo-89/saleor
|
5fab7a883d025bff83320fbdd557ed7afa2923a9
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from typing import Optional
from django.db.models import QuerySet
from ..account.models import User
from ..checkout.models import Checkout
from ..core.utils.anonymization import (
anonymize_checkout,
anonymize_order,
generate_fake_user,
)
from ..invoice.models import Invoice
from ..order import FulfillmentStatus, OrderStatus
from ..order.models import Fulfillment, FulfillmentLine, Order
from ..order.utils import get_order_country
from ..payment import ChargeStatus
from ..product.models import Product
from ..warehouse.models import Warehouse
from .event_types import WebhookEventType
from .payload_serializers import PayloadSerializer
from .serializers import serialize_checkout_lines
ADDRESS_FIELDS = (
"first_name",
"last_name",
"company_name",
"street_address_1",
"street_address_2",
"city",
"city_area",
"postal_code",
"country",
"country_area",
"phone",
)
ORDER_FIELDS = (
"created",
"status",
"user_email",
"shipping_method_name",
"shipping_price_net_amount",
"shipping_price_gross_amount",
"total_net_amount",
"total_gross_amount",
"shipping_price_net_amount",
"shipping_price_gross_amount",
"discount_amount",
"discount_name",
"translated_discount_name",
"weight",
"private_metadata",
"metadata",
)
def generate_order_payload(order: "Order"):
serializer = PayloadSerializer()
fulfillment_fields = ("status", "tracking_number", "created")
    payment_fields = (
        "gateway",
        "is_active",
        "created",
        "modified",
        "charge_status",
        "total",
        "captured_amount",
        "currency",
        "billing_email",
        "billing_first_name",
        "billing_last_name",
        "billing_company_name",
        "billing_address_1",
        "billing_address_2",
        "billing_city",
        "billing_city_area",
        "billing_postal_code",
        "billing_country_code",
        "billing_country_area",
    )
line_fields = (
"product_name",
"variant_name",
"translated_product_name",
"translated_variant_name",
"product_sku",
"quantity",
"currency",
"unit_price_net_amount",
"unit_price_gross_amount",
"tax_rate",
)
shipping_method_fields = ("name", "type", "currency", "price_amount")
order_data = serializer.serialize(
[order],
fields=ORDER_FIELDS,
additional_fields={
"shipping_method": (lambda o: o.shipping_method, shipping_method_fields),
"lines": (lambda o: o.lines.all(), line_fields),
"payments": (lambda o: o.payments.all(), payment_fields),
"shipping_address": (lambda o: o.shipping_address, ADDRESS_FIELDS),
"billing_address": (lambda o: o.billing_address, ADDRESS_FIELDS),
"fulfillments": (lambda o: o.fulfillments.all(), fulfillment_fields),
},
)
return order_data
def generate_invoice_payload(invoice: "Invoice"):
serializer = PayloadSerializer()
invoice_fields = ("id", "number", "external_url", "created")
return serializer.serialize(
[invoice],
fields=invoice_fields,
additional_fields={"order": (lambda i: i.order, ORDER_FIELDS)},
)
def generate_checkout_payload(checkout: "Checkout"):
serializer = PayloadSerializer()
checkout_fields = (
"created",
"last_change",
"status",
"email",
"quantity",
"currency",
"discount_amount",
"discount_name",
"private_metadata",
"metadata",
)
user_fields = ("email", "first_name", "last_name")
shipping_method_fields = ("name", "type", "currency", "price_amount")
lines_dict_data = serialize_checkout_lines(checkout)
checkout_data = serializer.serialize(
[checkout],
fields=checkout_fields,
obj_id_name="token",
additional_fields={
"user": (lambda c: c.user, user_fields),
"billing_address": (lambda c: c.billing_address, ADDRESS_FIELDS),
"shipping_address": (lambda c: c.shipping_address, ADDRESS_FIELDS),
"shipping_method": (lambda c: c.shipping_method, shipping_method_fields),
},
extra_dict_data={
# Casting to list to make it json-serializable
"lines": list(lines_dict_data)
},
)
return checkout_data
def generate_customer_payload(customer: "User"):
serializer = PayloadSerializer()
data = serializer.serialize(
[customer],
fields=[
"email",
"first_name",
"last_name",
"is_active",
"date_joined",
"private_metadata",
"metadata",
],
additional_fields={
"default_shipping_address": (
lambda c: c.default_billing_address,
ADDRESS_FIELDS,
),
"default_billing_address": (
lambda c: c.default_shipping_address,
ADDRESS_FIELDS,
),
},
)
return data
def generate_product_payload(product: "Product"):
serializer = PayloadSerializer()
product_fields = (
"name",
"description_json",
"currency",
"minimal_variant_price_amount",
"attributes",
"updated_at",
"charge_taxes",
"weight",
"publication_date",
"is_published",
"private_metadata",
"metadata",
)
product_variant_fields = (
"sku",
"name",
"currency",
"price_amount",
"track_inventory",
"quantity",
"quantity_allocated",
"cost_price_amount",
"private_metadata",
"metadata",
)
product_payload = serializer.serialize(
[product],
fields=product_fields,
additional_fields={
"category": (lambda p: p.category, ("name", "slug")),
"collections": (lambda p: p.collections.all(), ("name", "slug")),
"variants": (lambda p: p.variants.all(), product_variant_fields),
},
)
return product_payload
def generate_fulfillment_lines_payload(fulfillment: Fulfillment):
serializer = PayloadSerializer()
lines = FulfillmentLine.objects.prefetch_related(
"order_line__variant__product__product_type"
).filter(fulfillment=fulfillment)
line_fields = ("quantity",)
return serializer.serialize(
lines,
fields=line_fields,
extra_dict_data={
"weight": (lambda fl: fl.order_line.variant.get_weight().g),
"weight_unit": "gram",
"product_type": (
lambda fl: fl.order_line.variant.product.product_type.name
),
"unit_price_gross": lambda fl: fl.order_line.unit_price_gross_amount,
"currency": (lambda fl: fl.order_line.currency),
},
)
def generate_fulfillment_payload(fulfillment: Fulfillment):
serializer = PayloadSerializer()
# fulfillment fields to serialize
fulfillment_fields = ("status", "tracking_code", "order__user_email")
order_country = get_order_country(fulfillment.order)
fulfillment_line = fulfillment.lines.first()
if fulfillment_line and fulfillment_line.stock:
warehouse = fulfillment_line.stock.warehouse
else:
warehouse = Warehouse.objects.for_country(order_country).first()
fulfillment_data = serializer.serialize(
[fulfillment],
fields=fulfillment_fields,
additional_fields={
"warehouse_address": (lambda f: warehouse.address, ADDRESS_FIELDS),
},
extra_dict_data={
"order": json.loads(generate_order_payload(fulfillment.order))[0],
"lines": json.loads(generate_fulfillment_lines_payload(fulfillment)),
},
)
return fulfillment_data
def _get_sample_object(qs: QuerySet):
"""Return random object from query."""
random_object = qs.order_by("?").first()
return random_object
def _generate_sample_order_payload(event_name):
order_qs = Order.objects.prefetch_related(
"payments",
"lines",
"shipping_method",
"shipping_address",
"billing_address",
"fulfillments",
)
order = None
if event_name == WebhookEventType.ORDER_CREATED:
order = _get_sample_object(order_qs.filter(status=OrderStatus.UNFULFILLED))
elif event_name == WebhookEventType.ORDER_FULLY_PAID:
order = _get_sample_object(
order_qs.filter(payments__charge_status=ChargeStatus.FULLY_CHARGED)
)
elif event_name == WebhookEventType.ORDER_FULFILLED:
order = _get_sample_object(
order_qs.filter(fulfillments__status=FulfillmentStatus.FULFILLED)
)
elif event_name in [
WebhookEventType.ORDER_CANCELLED,
WebhookEventType.ORDER_UPDATED,
]:
order = _get_sample_object(order_qs.filter(status=OrderStatus.CANCELED))
if order:
anonymized_order = anonymize_order(order)
return generate_order_payload(anonymized_order)
def generate_sample_payload(event_name: str) -> Optional[dict]:
checkout_events = [
WebhookEventType.CHECKOUT_QUANTITY_CHANGED,
WebhookEventType.CHECKOUT_UPADTED,
WebhookEventType.CHECKOUT_CREATED,
]
if event_name == WebhookEventType.CUSTOMER_CREATED:
user = generate_fake_user()
payload = generate_customer_payload(user)
elif event_name == WebhookEventType.PRODUCT_CREATED:
product = _get_sample_object(
Product.objects.prefetch_related("category", "collections", "variants")
)
payload = generate_product_payload(product) if product else None
elif event_name in checkout_events:
checkout = _get_sample_object(
Checkout.objects.prefetch_related("lines__variant__product")
)
if checkout:
anonymized_checkout = anonymize_checkout(checkout)
payload = generate_checkout_payload(anonymized_checkout)
elif event_name == WebhookEventType.FULFILLMENT_CREATED:
fulfillment = _get_sample_object(
Fulfillment.objects.prefetch_related("lines__order_line__variant")
)
fulfillment.order = anonymize_order(fulfillment.order)
payload = generate_fulfillment_payload(fulfillment)
else:
payload = _generate_sample_order_payload(event_name)
return json.loads(payload) if payload else None
| 31.422619
| 85
| 0.644156
|
9f1cb662ef1192033096fe9743ac1be2f40c6de5
| 27
|
py
|
Python
|
arnold/crud/read/__init__.py
|
Clinical-Genomics/arnold
|
8b0dfe5a97736b60ffc3498b4f54c91f31bfe410
|
[
"MIT"
] | null | null | null |
arnold/crud/read/__init__.py
|
Clinical-Genomics/arnold
|
8b0dfe5a97736b60ffc3498b4f54c91f31bfe410
|
[
"MIT"
] | 2
|
2022-03-23T09:57:42.000Z
|
2022-03-28T08:28:46.000Z
|
arnold/crud/read/__init__.py
|
Clinical-Genomics/arnold
|
8b0dfe5a97736b60ffc3498b4f54c91f31bfe410
|
[
"MIT"
] | null | null | null |
from . import sample, step
| 13.5
| 26
| 0.740741
|
0f0864a88667f9bede4ed2af736d04cea378e0fa
| 19,869
|
py
|
Python
|
my_job_v3.py
|
dkcamargox/my-job
|
c1e5caca99a5aa346a22228bd1538d8ca4feb485
|
[
"MIT"
] | null | null | null |
my_job_v3.py
|
dkcamargox/my-job
|
c1e5caca99a5aa346a22228bd1538d8ca4feb485
|
[
"MIT"
] | null | null | null |
my_job_v3.py
|
dkcamargox/my-job
|
c1e5caca99a5aa346a22228bd1538d8ca4feb485
|
[
"MIT"
] | null | null | null |
import openpyxl as xls
import pprint as pp
import datetime
import time
from dateutil import parser
import os
import win32com.client
from google_sheet_controller import SheetController
def main():
ARCHIVE_NAME = input('Insertá el nombre del archivo\t')
SKIP_FORMATING = input('Skip formating? [Y]es/[N]o\t\t')
# ARCHIVE_NAME = 'CTES-17-12'
# SKIP_FORMATING = 'Y'
sheet = openSheet(ARCHIVE_NAME, SKIP_FORMATING)
sheetData = getData(sheet)
sheetData = organizeData(sheetData)
hours = {}
for sellerName, sellerData in sheetData.items():
comment = createComments(sellerData)
noVisited = getNoVisitedNumber(sellerData)
initialHour = None
finalHour = None
detentions = None
errors = None
duration = None
if (comment == None):
initialHour = getInitialHour(sellerData)
detentionsData = getDetentions(sellerData)
detentions = detentionsData[0]
errors = detentionsData[1]
duration = detentionsData[2]
if (len(errors)>0):
errors = f'{errors}'
else:
errors = ''
sellerData.reverse()
finalHour = getFinalHour(sellerData)
sellerData.reverse()
hours[sellerName] = {
'initialHour': initialHour,
'finalHour': finalHour,
'noVisited': noVisited,
'comment': comment,
'detentions': detentions,
'duration': duration,
'review': errors
}
else:
hours[sellerName] = {
'initialHour': '',
'finalHour': '',
'noVisited': noVisited,
'comment': comment,
'detentions': detentions,
'duration': duration,
'review': ''
}
print(f'{sellerName}: {comment}')
print(f'\tHora inicio: {initialHour}')
print(f'\tHora fin: {finalHour}')
if (errors != None and len(errors) != 0):
print(f'\tRevisar Clientes: {errors}')
if (detentions != None and detentions != ''):
print(f'\tDetenciones: [\n{detentions}\n]')
putDataIntoLocalSheet(sheetData, hours, ARCHIVE_NAME)
# os.system(f'Horarios-{ARCHIVE_NAME}.xlsx')
answ = str(input('WriteData? [Y]es/[N]o\t'))
if 'N' == answ.upper()[0]:
return
putDataIntoSheet(sheetData, hours, ARCHIVE_NAME)
return 0
def sortData(file, sheet):
excel = win32com.client.Dispatch("Excel.Application")
# opening file
wb = excel.Workbooks.Open(f'{os.getcwd()}\\{file}')
# opening sheet
ws = wb.Worksheets(sheet.title)
# getting not empty dimensions
range = sheet.dimensions
# getting the lastLine number
lastLine = range.split(':')[1][1:]
# remove filters
ws.AutoFilterMode = False
# removing borders
ws.Range("A:N").Borders.LineStyle = -4142
# treating range to fit headers
listRange = list(range)
listRange[1] = '2'
range = ''.join(listRange)
# sorting Hora Venta
ws.Range(range).Sort(
Key1=ws.Range('J2'),
Order1=1,
Orientation=1
)
# sorting Hora Motivo
ws.Range(range).Sort(
Key1=ws.Range('K2'),
Order1=1,
Orientation=1
)
# Sorting Sector > Hora visita
ws.Range(range).Sort(
Key1=ws.Range('G2'),
Key2=ws.Range('I2'),
Order1=1,
Order2=1,
Orientation=1
)
# deleting unused columns
ws.Range(f'A1:A{lastLine}').Delete(Shift=-4159)
ws.Range(f'C1:C{lastLine}').Delete(Shift=-4159)
ws.Range(f'C1:C{lastLine}').Delete(Shift=-4159)
ws.Range(f'C1:C{lastLine}').Delete(Shift=-4159)
ws.Range(f'H1:H{lastLine}').Delete(Shift=-4159)
ws.Range(f'H1:H{lastLine}').Delete(Shift=-4159)
ws.Range(f'H1:H{lastLine}').Delete(Shift=-4159)
# updating range with the deletions
listRange = list(range)
listRange[3] = 'G'
range = ''.join(listRange)
# auto fitting
ws.Columns("A").ColumnWidth = 16
ws.Columns("B").ColumnWidth = 7
ws.Columns("D").ColumnWidth = 8
ws.Columns("E").ColumnWidth = 10
ws.Columns("F").ColumnWidth = 10
ws.Columns("G").ColumnWidth = 10
# saving the mods
wb.Save()
excel.Application.Quit()
def duration(beginning, end):
return datetime.datetime.combine(datetime.date.min, end) - datetime.datetime.combine(datetime.date.min, beginning)
def openSheet(ARCHIVE_NAME, SKIP_FORMATING):
# open sheet and return the sheet object
workbook = xls.load_workbook(filename=f'{ARCHIVE_NAME}.xlsx')
if SKIP_FORMATING.upper()[0] == 'N':
sortData(f'{ARCHIVE_NAME}.xlsx', workbook.active)
workbook = xls.load_workbook(filename=f'{ARCHIVE_NAME}.xlsx')
sheet = workbook.active
return sheet
def getData(sheet):
# get all data from sheet and return
return sheet[sheet.dimensions]
def organizeData(sheet):
# organize sheet in a dictionary, by seller
lines = {}
for line in sheet[1:len(sheet)]:
lineData = {}
for i, cell in enumerate(line):
if type(cell.value) is datetime.datetime:
lineData[sheet[0][i].value] = cell.value.time()
else:
lineData[sheet[0][i].value] = cell.value
if lineData['Sector'] in lines:
lines[lineData['Sector']].append(lineData)
else:
lines[lineData['Sector']] = [lineData]
return lines
def printf(blob, identation):
# Print the dict in a readable way
if type(blob) is list:
print('\t' * (identation - 1), end='')
print('[')
for data in blob:
print('\t', end='')
printf(data, identation +1)
print('\t' * identation, end='')
print('],')
elif type(blob) is dict:
print('\t' * (identation - 1), end='')
print('{')
for key, value in blob.items():
print('\t' * identation, end='')
print(f"\t'{key}':\t", end='')
if type(value) is list or type(value) is dict:
printf(value, identation + 1)
else:
print(f"'{value}'", end='')
print(',')
print('\t' * identation, end='')
print('},')
else:
print('\t' * identation, end='')
print(f'\t{blob}')
return
def toTimeType(x):
return parser.parse(str(x)).time()
def fortyfiveminutes():
return datetime.timedelta(
minutes=45
)
def twentyfiveMinutes():
return datetime.timedelta(
minutes=25
)
def getInitialHour(sells):
# get the first hour in the route
for sell in sells:
# Hora visita Hora venta Hora motivo
if (sell['Hora visita'] == None or sell['Hora visita'] == ''):
continue
if (sell['Hora venta'] != None and sell['Hora venta'] != ''):
sell['Hora visita'] = toTimeType(sell['Hora visita'])
sell['Hora venta'] = toTimeType(sell['Hora venta'])
if (sell['Hora visita'] < sell['Hora venta']):
sellDuration = duration(sell['Hora visita'], sell['Hora venta'])
if (sellDuration > fortyfiveminutes()):
continue
return sell['Hora visita']
if (sell['Hora motivo'] != None and sell['Hora motivo'] != ''):
sell['Hora visita'] = toTimeType(sell['Hora visita'])
sell['Hora motivo'] = toTimeType(sell['Hora motivo'])
if (sell['Hora visita'] < sell['Hora motivo']):
sellDuration = duration(sell['Hora visita'], sell['Hora motivo'])
if (sellDuration > fortyfiveminutes()):
continue
return sell['Hora visita']
return
def getFinalHour(sells):
# get the last hour in the route
for sell in sells:
# Hora visita Hora venta Hora motivo
if sell['Visitado'] == 'NO':
continue
if (sell['Hora visita'] == None or sell['Hora visita'] == ''):
continue
if (sell['Hora venta'] != None and sell['Hora venta'] != ''):
sell['Hora visita'] = toTimeType(sell['Hora visita'])
sell['Hora venta'] = toTimeType(sell['Hora venta'])
if (sell['Hora visita'] < sell['Hora venta']):
sellDuration = duration(sell['Hora visita'], sell['Hora venta'])
if (sellDuration > fortyfiveminutes()):
continue
return sell['Hora venta']
if (sell['Hora motivo'] != None and sell['Hora motivo'] != ''):
sell['Hora visita'] = toTimeType(sell['Hora visita'])
sell['Hora motivo'] = toTimeType(sell['Hora motivo'])
if (sell['Hora visita'] < sell['Hora motivo']):
sellDuration = duration(sell['Hora visita'], sell['Hora motivo'])
if (sellDuration > fortyfiveminutes()):
continue
return sell['Hora motivo']
return
def getNoVisitedNumber(sells):
# get the number of the clients that were not visited
noVisitedNumber = 0
for sell in sells:
if sell['Visitado'] == 'NO':
noVisitedNumber += 1
return noVisitedNumber
def checkValidSell(sell):
# return true if valid
# return false if invalid
valid = False
try:
if (sell['Hora venta'] != None and sell['Hora venta'] != ''):
sell['Hora visita'] = toTimeType(sell['Hora visita'])
sell['Hora venta'] = toTimeType(sell['Hora venta'])
if (sell['Hora visita'] < sell['Hora venta']):
sellDuration = duration(sell['Hora visita'], sell['Hora venta'])
valid = sellDuration < twentyfiveMinutes()
if (sell['Hora motivo'] != None and sell['Hora motivo'] != ''):
sell['Hora visita'] = toTimeType(sell['Hora visita'])
sell['Hora motivo'] = toTimeType(sell['Hora motivo'])
if (sell['Hora visita'] < sell['Hora motivo']):
sellDuration = duration(sell['Hora visita'], sell['Hora motivo'])
valid = sellDuration < twentyfiveMinutes()
except:
return False
return valid
def appendValidLabel(sells):
for sell in sells:
# Hora visita Hora venta Hora motivo
if sell['Visitado'] == 'NO':
sell['Valido'] = False
if (sell['Hora visita'] == None or sell['Hora visita'] == ''):
sell['Valido'] = False
valid = checkValidSell(sell)
sell['Valido'] = valid
return
def addTime(t1, t2):
t1Array = t1.split(':')
t2Array = t2.split(':')
minutes = int(t1Array[1]) + int(t2Array[1])
hours = int(t1Array[0]) + int(t2Array[0])
hours = hours + int(minutes / 60)
minutes = (minutes % 60)
return f'{hours}:{minutes:02d}'
def getDetentions(sells):
    # Example output line (kept in Spanish, as generated below):
    # "Detencion de 40min entre clientes LEGUIZAMON JULIANA y GOMEZ ELISA"
    # TODO:
    #   - check whether each sell is valid
    #   - flag a detention when the gap between the final hour of the last valid
    #     sell and the initial hour of the next valid sell is greater than 25 minutes
    #   - build the detention comment and return it
    # Walk the route and collect detentions between valid managements
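    # Worked example (editor's illustration; times invented): if the last valid
    # management ends at 10:00 and the next valid visit starts at 10:40, the
    # 0:40 gap exceeds twentyfiveMinutes(), so a detention line is appended and
    # 0:40 is added to durationACM.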
appendValidLabel(sells)
detention = ''
durationACM = '00:00'
nextValidManagment_index = 1
errors = []
for index, sell in enumerate(sells):
# Hora visita Hora venta Hora motivo
try:
if sell['Visitado'] == 'NO':
continue
if (sell['Hora visita'] == None or sell['Hora visita'] == ''):
continue
if(nextValidManagment_index > index):
continue
if(sell['Valido'] == False):
if (sell['Hora venta'] != None or sell['Hora venta'] != ''):
if(sells[index-1]['Valido'] == True):
lastValidManaggement_vta = sells[index-1]['Hora venta']
if (sell['Hora motivo'] != None or sell['Hora motivo'] != ''):
if(sells[index-1]['Valido'] == True):
lastValidManaggement_mtv = sells[index-1]['Hora motivo']
lastValidManaggement = lastValidManaggement_vta
lastValidManagment_index = index-1
if (lastValidManaggement_vta == ''):
lastValidManaggement = lastValidManaggement_mtv
nextValidManagment = None
for nvm_index, nvm_sell in enumerate(sells[index:]):
if (nvm_sell['Valido'] == True):
if (nvm_sell['Hora visita'] != None or nvm_sell['Hora visita'] != ''):
nextValidManagment = nvm_sell['Hora visita']
nextValidManagment_index = nvm_index + index
break
lastValidManaggement = toTimeType(lastValidManaggement)
if (nextValidManagment == None):
errors.append(index)
continue
nextValidManagment = toTimeType(nextValidManagment)
detentionDuration = duration(lastValidManaggement, nextValidManagment)
if(detentionDuration > twentyfiveMinutes()):
# lvm = last valid managment
# nvm = next valid managment
lvm_client = sells[lastValidManagment_index]['Descripción cliente']
nvm_client = sells[nextValidManagment_index]['Descripción cliente']
lvm_client_hour = lastValidManaggement
nvm_client_hour = nextValidManagment
f_detentionDuration = f'{detentionDuration}'.split(':')[0] + ':' + f'{detentionDuration}'.split(':')[1]
detention = f'\nDetencion de {f_detentionDuration} horas entre clientes {lvm_client} (hora: {lvm_client_hour}) y {nvm_client} (hora: {nvm_client_hour})' + detention
durationACM = addTime(durationACM, f_detentionDuration)
except:
            errors.append(index)
print((detention[1:], errors, durationACM))
return (detention[1:], errors, durationACM)
def createComments(sells):
    # analyse known errors such as 'Gestiones sin visitas' and 'Visitas sin gestiones'
# counting sells
sellCountage = 0
for sell in sells:
if sell['Hora venta'] != '':
if sell['Visitado'] != 'NO':
sellCountage += 1
# counting excuses
excuseCountae = 0
for sell in sells:
if sell['Hora motivo'] != '':
if sell['Visitado'] != 'NO':
excuseCountae += 1
noVisited = getNoVisitedNumber(sells)
routeLength = len(sells)
fivePercentRouteLength = (0.05 * routeLength)
twentyPercentRouteLength = (0.2 * routeLength)
if routeLength == noVisited:
if sellCountage >= twentyPercentRouteLength:
return 'Gestiones sin visitas.'
return 'Ningun dato'
elif ((sellCountage + excuseCountae) < fivePercentRouteLength):
return 'Visitas sin gestiones.'
else:
return None
def putDataIntoLocalSheet(sheetData, hours, ARCHIVE_NAME):
wb = xls.Workbook()
dest_filename = f'Horarios-{ARCHIVE_NAME}.xlsx'
ws1 = wb.active
ws1.title = "horarios"
for row, hour in enumerate(hours.items()):
_ = ws1.cell(column=1, row=row+1, value=hour[0])
_ = ws1.cell(column=2, row=row+1, value=hour[1]['initialHour'])
_ = ws1.cell(column=3, row=row+1, value=hour[1]['finalHour'])
if (hour[1]['initialHour'] != None and hour[1]['initialHour'] != ''):
if (hour[1]['finalHour'] != None and hour[1]['finalHour'] != ''):
_ = ws1.cell(column=4, row=row+1, value=duration(hour[1]['initialHour'], hour[1]['finalHour']))
_ = ws1.cell(column=5, row=row+1, value=hour[1]['noVisited'])
_ = ws1.cell(column=6, row=row+1, value=hour[1]['comment'])
_ = ws1.cell(column=7, row=row+1, value=hour[1]['detentions'])
_ = ws1.cell(column=8, row=row+1, value=hour[1]['review'])
ws2 = wb.create_sheet(title="no visitados")
startPoint = 1
for sellerData, hours in zip(sheetData.values(), hours.items()):
if hours[1]['comment'] == 'Ningun dato':
continue
for sellData in sellerData:
if (sellData['Visitado'] == 'NO'):
_ = ws2.cell(column=1, row=startPoint, value=sellData['Descripción cliente'])
_ = ws2.cell(column=3, row=startPoint, value=sellData['Sector'])
startPoint += 1
startPoint += 2
wb.save(filename = dest_filename)
def putDataIntoSheet(sheetData, hours, ARCHIVE_NAME):
# CREATING CONN
sucursal = ARCHIVE_NAME.split('-')[0]
if sucursal == 'MNES':
sheetController = SheetController('1EZzP9mmCzQV8ED6WEw4oGETNJpdZoWAfCfzFeGX2l8w', 450535926)
elif sucursal == 'RCIA':
sheetController = SheetController('1hrb5DtyjUOMRpJiK2i1EssgWXFI0diyuaHtzVVnEwoA', 1164824847)
elif sucursal == 'CTES':
sheetController = SheetController('1_HroWNPXZQL-tG-3o-4mkU6L_h1jdcVVjN2odxKBKxY', 1288048372)
elif sucursal == 'RFRG':
sheetController = SheetController('1XXhApgyt_1MIOY_k1lhcRFCVhNMQykIbTkj-R7aRwh4', 2013313908)
# v
# 1071020126
# 265083769
# 1956127678
# 1650386418
# 2022
# 450535926
# 1164824847
# 1288048372
# 2013313908
# GETTING STARTPOINT
date = ARCHIVE_NAME.split('-')[1] + '-' + ARCHIVE_NAME.split('-')[2]
rows = sheetController.getRange(f"'{YEAR}'!B:B")
startPoint = rows.index([date]) + 1
table = []
for row, hour in enumerate(hours.items()):
if hour[1]['comment'] is not None:
comment = f"{hour[1]['comment']}"
else:
comment = ''
if hour[1]['detentions'] is not None:
detentions = f"{hour[1]['detentions']}"
if (hour[1]['duration'] != '00:00'):
duration = f"{hour[1]['duration']}"
result = f"=G{row+startPoint}-L{row+startPoint}"
else:
duration = ''
result = ''
else:
detentions = ''
duration = ''
result = ''
table.append([
f"{hour[1]['initialHour']}",
f"{hour[1]['finalHour']}",
f"=G{row+startPoint}-F{row+startPoint}",
f"{hour[1]['noVisited']}",
comment,
detentions,
duration,
result
])
sheetController.updateTable(f"'{YEAR}'!F{startPoint}:M{len(table) + startPoint}", table)
comments = []
for sellerData, hoursTuple in zip(sheetData.values(), enumerate(hours.items())):
row, hours = hoursTuple
if hours[1]['comment'] == 'Ningun dato':
continue
comment = ''
for sellData in sellerData:
if (sellData['Visitado'] == 'NO'):
comment = comment + sellData['Descripción cliente'] + '\n'
comments.append((f'I{row+startPoint}', comment))
sheetController.createCommentS(comments)
return
YEAR = '2022'
if __name__ == '__main__':
while(1):
main()
answ = str(input('Again? [Y]es/[N]o\t'))
os.system('cls')
if 'N' == answ.upper()[0]:
break
| 33.848382
| 184
| 0.556948
|
3fadeb510209f0a0b4fc55bd8871d0d89ff25679
| 1,948
|
py
|
Python
|
src/villages/models.py
|
flokli/bornhack-website
|
9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d
|
[
"BSD-3-Clause"
] | null | null | null |
src/villages/models.py
|
flokli/bornhack-website
|
9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d
|
[
"BSD-3-Clause"
] | null | null | null |
src/villages/models.py
|
flokli/bornhack-website
|
9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d
|
[
"BSD-3-Clause"
] | null | null | null |
from django.urls import reverse_lazy
from django.db import models
from django.utils.text import slugify
from utils.models import UUIDModel, CampRelatedModel
class Village(UUIDModel, CampRelatedModel):
class Meta:
ordering = ["name"]
unique_together = ("slug", "camp")
contact = models.ForeignKey("auth.User", on_delete=models.PROTECT)
camp = models.ForeignKey("camps.Camp", on_delete=models.PROTECT)
name = models.CharField(max_length=255)
slug = models.SlugField(max_length=255, blank=True)
description = models.TextField(
help_text="A descriptive text about your village. Markdown is supported."
)
private = models.BooleanField(
default=False,
help_text="Check if your village is invite only. Leave unchecked to welcome strangers.",
)
deleted = models.BooleanField(default=False)
def __str__(self):
return "%s (%s)" % (self.name, self.camp.title)
def get_absolute_url(self):
return reverse_lazy(
"village_detail", kwargs={"camp_slug": self.camp.slug, "slug": self.slug}
)
def save(self, **kwargs):
if (
not self.pk
or not self.slug
or Village.objects.filter(slug=self.slug).count() > 1
):
slug = slugify(self.name)
if not slug:
slug = "noname"
incrementer = 1
# We have to make sure that the slug won't clash with current slugs
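            # e.g. (editor's illustration): two villages named "Foo" would get
            # slugs "foo" and "foo-1"; a third would get "foo-2".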
while Village.objects.filter(slug=slug).exists():
if incrementer == 1:
slug = "{}-1".format(slug)
else:
slug = "{}-{}".format("-".join(slug.split("-")[:-1]), incrementer)
incrementer += 1
self.slug = slug
super(Village, self).save(**kwargs)
def delete(self, using=None, keep_parents=False):
self.deleted = True
self.save()
| 32.466667
| 96
| 0.596509
|
647ba39e90ea3bd481c20540ca73b992efd09ea9
| 1,531
|
py
|
Python
|
cpo/lib/fyre/utils/network.py
|
IBM/data-gate-cli
|
fc0cb1a560a0156c71eb63a550e198d0cd36e1df
|
[
"Apache-2.0"
] | 9
|
2020-08-21T08:46:34.000Z
|
2021-09-02T15:47:41.000Z
|
cpo/lib/fyre/utils/network.py
|
IBM/data-gate-cli
|
fc0cb1a560a0156c71eb63a550e198d0cd36e1df
|
[
"Apache-2.0"
] | 10
|
2020-11-26T15:31:43.000Z
|
2021-11-08T15:00:01.000Z
|
cpo/lib/fyre/utils/network.py
|
IBM/data-gate-cli
|
fc0cb1a560a0156c71eb63a550e198d0cd36e1df
|
[
"Apache-2.0"
] | 1
|
2022-03-10T07:14:49.000Z
|
2022-03-10T07:14:49.000Z
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
import re as regex
from typing import List, Optional
from cpo.lib.error import DataGateCLIException
def get_private_ip_address_of_infrastructure_node(ipv4_addresses: List[ipaddress.IPv4Address]) -> ipaddress.IPv4Address:
"""Returns the private IP address of the infrastructure node
Parameters
----------
ipv4_addresses
all IPv4 addresses bound to local network interfaces of the
infrastructure node
Returns
-------
ipaddress.IPv4Address
private IP address of the infrastructure node
"""
result: Optional[ipaddress.IPv4Address] = None
for ipv4_address in ipv4_addresses:
search_result = regex.match("(10\\.\\d+\\.\\d+\\.\\d+)", str(ipv4_address))
if search_result is not None:
result = ipv4_address
break
if result is None:
raise DataGateCLIException("Private IP address not found")
return result
| 29.442308
| 120
| 0.7113
|
998cede907e9f40aec064656702b21837618a8e8
| 288
|
py
|
Python
|
dynamodbgeo/__init__.py
|
alpreu/dynamodb-geo.py
|
1910368b020cddfe8275f94d7962cc48ca0ec9bd
|
[
"MIT"
] | 25
|
2020-04-13T20:04:19.000Z
|
2022-03-18T12:34:06.000Z
|
dynamodbgeo/__init__.py
|
alpreu/dynamodb-geo.py
|
1910368b020cddfe8275f94d7962cc48ca0ec9bd
|
[
"MIT"
] | 11
|
2020-04-18T10:45:39.000Z
|
2021-09-09T10:06:14.000Z
|
dynamodbgeo/__init__.py
|
alpreu/dynamodb-geo.py
|
1910368b020cddfe8275f94d7962cc48ca0ec9bd
|
[
"MIT"
] | 14
|
2020-05-25T21:04:48.000Z
|
2021-11-08T17:37:42.000Z
|
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from s2 import *
from model import *
from util import *
from DynamoDBManager import DynamoDBManager
from GeoDataManager import GeoDataManager
from GeoDataManagerConfiguration import GeoDataManagerConfiguration
| 32
| 76
| 0.840278
|
ea8021f2bf7f77f358b999889897250824ec60b9
| 591
|
py
|
Python
|
listings/admin.py
|
verumafalsum/btre_django
|
882eaa1ca33a6a79bb1b1f5918b9d0fb0abfeded
|
[
"MIT"
] | null | null | null |
listings/admin.py
|
verumafalsum/btre_django
|
882eaa1ca33a6a79bb1b1f5918b9d0fb0abfeded
|
[
"MIT"
] | null | null | null |
listings/admin.py
|
verumafalsum/btre_django
|
882eaa1ca33a6a79bb1b1f5918b9d0fb0abfeded
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from listings.models import Listing
class ListingAdmin(admin.ModelAdmin):
list_display = (
'id',
'title',
'price',
'list_date',
'realtor',
'is_published'
)
list_display_links = ('id', 'title')
list_filter = ('realtor', )
list_editable = ('is_published', )
search_fields = (
'title',
'description',
'address',
'city',
'state',
'zipcode',
'price'
)
list_per_page = 25
admin.site.register(Listing, ListingAdmin)
| 19.064516
| 42
| 0.539763
|
8fbefbb1ab0fab464afef209e0fe2189071da1de
| 1,155
|
py
|
Python
|
linPEAS/builder/src/yamlGlobals.py
|
sahil-rawat/privilege-escalation-awesome-scripts-suite
|
c2c7604f89e88ea46687f41f842cfa328dfc4750
|
[
"MIT"
] | null | null | null |
linPEAS/builder/src/yamlGlobals.py
|
sahil-rawat/privilege-escalation-awesome-scripts-suite
|
c2c7604f89e88ea46687f41f842cfa328dfc4750
|
[
"MIT"
] | null | null | null |
linPEAS/builder/src/yamlGlobals.py
|
sahil-rawat/privilege-escalation-awesome-scripts-suite
|
c2c7604f89e88ea46687f41f842cfa328dfc4750
|
[
"MIT"
] | null | null | null |
import os
import yaml
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
LINPEAS_BASE_PATH = CURRENT_DIR + "/../linpeas_base.sh"
FINAL_LINPEAS_PATH = CURRENT_DIR + "/../../" + "linpeas.sh"
YAML_NAME = "sensitive_files.yaml"
FILES_YAML = CURRENT_DIR + "/../../../build_lists/" + YAML_NAME
with open(FILES_YAML, 'r') as file:
YAML_LOADED = yaml.load(file, Loader=yaml.FullLoader)
ROOT_FOLDER = YAML_LOADED["root_folders"]
DEFAULTS = YAML_LOADED["defaults"]
COMMON_FILE_FOLDERS = YAML_LOADED["common_file_folders"]
COMMON_DIR_FOLDERS = YAML_LOADED["common_directory_folders"]
assert all(f in ROOT_FOLDER for f in COMMON_FILE_FOLDERS)
assert all(f in ROOT_FOLDER for f in COMMON_DIR_FOLDERS)
PEAS_FINDS_MARKUP = YAML_LOADED["peas_finds_markup"]
FIND_LINE_MARKUP = YAML_LOADED["find_line_markup"]
FIND_TEMPLATE = YAML_LOADED["find_template"]
PEAS_STORAGES_MARKUP = YAML_LOADED["peas_storages_markup"]
STORAGE_LINE_MARKUP = YAML_LOADED["storage_line_markup"]
STORAGE_LINE_EXTRA_MARKUP = YAML_LOADED["storage_line_extra_markup"]
STORAGE_TEMPLATE = YAML_LOADED["storage_template"]
INT_HIDDEN_FILES_MARKUP = YAML_LOADED["int_hidden_files_markup"]
| 38.5
| 68
| 0.800866
|
be1f79c4aca70068a422c4ed01f7ddab7809c049
| 2,249
|
py
|
Python
|
src/scing/push.py
|
hisplan/scing
|
c677235eba2ca371113b154301fb59916a3c481f
|
[
"MIT"
] | null | null | null |
src/scing/push.py
|
hisplan/scing
|
c677235eba2ca371113b154301fb59916a3c481f
|
[
"MIT"
] | null | null | null |
src/scing/push.py
|
hisplan/scing
|
c677235eba2ca371113b154301fb59916a3c481f
|
[
"MIT"
] | 1
|
2022-02-24T20:42:45.000Z
|
2022-02-24T20:42:45.000Z
|
import os
from docker.helper import Docker
from docker.aws_ecr import AwsEcr
from docker.quay_io import QuayIO
from scing.error import raise_error
def handle_push(image: str):
registry, image_name, image_version = Docker.parse_name(image)
exit_code = Docker.tag(registry, image_name, image_version)
if exit_code != 0:
raise_error("Unable to create a docker image tag!")
# amazon ecr: repo must exist first, so let's create one
if AwsEcr.is_amazon_ecr(registry):
if AwsEcr.exists_repos(image_name) != 0:
exit_code = AwsEcr.create_repos(image_name)
if exit_code != 0:
raise_error("Unable to create a repository in AWS ECR!")
# quay.io: private repo gets created when pushing, so let's manually create a public repo
elif QuayIO.is_quay_io(registry):
token = os.environ.get("QUAY_AUTH_TOKEN")
quay_api = QuayIO(token=token)
namespace = registry.split("/")[1]
status_code = quay_api.get_repo(namespace, image_name)
if status_code == 404:
# not found, let's create one
status_code = quay_api.create_repo(namespace, image_name, public=True)
if status_code == 200 or status_code == 201:
# successful
pass
else:
raise_error("Unable to create a repo in Red Hat quay.io!")
exit_code = Docker.push(registry, image_name, image_version)
if exit_code != 0:
raise_error("Unable to push an image to docker registry!")
# quay.io: in case of image pushed to private repo, so let's change to public repo
if QuayIO.is_quay_io(registry):
token = os.environ.get("QUAY_AUTH_TOKEN")
quay_api = QuayIO(token=token)
namespace = registry.split("/")[1]
status_code = quay_api.change_visibility(
namespace=namespace, repo_name=image_name, public=True
)
if status_code == 200 or status_code == 201:
# successful
pass
elif status_code == 400:
# probably the repository already exists. we can ignore this error
pass
elif status_code == 403:
raise_error("Invalid or missing QUAY_AUTH_TOKEN...")
| 38.775862
| 93
| 0.643842
|
314320be3e28d58d1a53fbfabf4ddc3434c5b4b4
| 2,623
|
py
|
Python
|
src/ms_graph_api.py
|
armanrahman22/teams-light
|
834d72cc60511373059dfa16297ec94a62d1a848
|
[
"MIT"
] | null | null | null |
src/ms_graph_api.py
|
armanrahman22/teams-light
|
834d72cc60511373059dfa16297ec94a62d1a848
|
[
"MIT"
] | null | null | null |
src/ms_graph_api.py
|
armanrahman22/teams-light
|
834d72cc60511373059dfa16297ec94a62d1a848
|
[
"MIT"
] | null | null | null |
"""
The configuration file would look like this (sans those // comments):
{
"authority": "https://login.microsoftonline.com/Enter_the_Tenant_Name_Here",
"client_id": "your_client_id",
"scope": ["https://graph.microsoft.com/.default"],
// For more information about scopes for an app, refer:
// https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-client-creds-grant-flow#second-case-access-token-request-with-a-certificate"
"secret": "The secret generated by AAD during your confidential app registration",
// For information about generating client secret, refer:
// https://github.com/AzureAD/microsoft-authentication-library-for-python/wiki/Client-Credentials#registering-client-secrets-using-the-application-registration-portal
"endpoint": "https://graph.microsoft.com/v1.0/users"
}
You can then run this sample with a JSON configuration file:
python sample.py parameters.json
"""
import sys # For simplicity, we'll read config file from 1st CLI param sys.argv[1]
import json
import logging
import requests
import msal
ENDPOINT = "https://graph.microsoft.com/beta"
# Optional logging
# logging.basicConfig(level=logging.DEBUG)
config = json.load(open(sys.argv[1]))
# Create a preferably long-lived app instance which maintains a token cache.
app = msal.ConfidentialClientApplication(
config["client_id"],
authority=config["authority"],
client_credential=config["secret"],
# token_cache=... # Default cache is in memory only.
# You can learn how to use SerializableTokenCache from
# https://msal-python.rtfd.io/en/latest/#msal.SerializableTokenCache
)
# The pattern to acquire a token looks like this.
result = None
# Firstly, looks up a token from cache
# Since we are looking for token for the current app, NOT for an end user,
# notice we give account parameter as None.
result = app.acquire_token_silent(config["scope"], account=None)
if not result:
logging.info("No suitable token exists in cache. Let's get a new one from AAD.")
result = app.acquire_token_for_client(scopes=config["scope"])
if "access_token" in result:
# Calling graph using the access token
graph_data = requests.get( # Use token to call downstream service
f"{ENDPOINT}/me",
headers={"Authorization": "Bearer " + result["access_token"]},
).json()
print("Graph API call result: ")
print(json.dumps(graph_data, indent=2))
else:
print(result.get("error"))
print(result.get("error_description"))
print(result.get("correlation_id")) # You may need this when reporting a bug
| 36.430556
| 174
| 0.725124
|
bcd150a186fa0e7194bb9e3fb9f95731834ce6c9
| 728
|
py
|
Python
|
django/bitcoin_monitor/middleware.py
|
chanhosuh/bitcoin-monitor
|
acecfcf020cf2debfdf3a2e8c446007d7412d8e1
|
[
"MIT"
] | 1
|
2020-01-01T15:54:45.000Z
|
2020-01-01T15:54:45.000Z
|
django/bitcoin_monitor/middleware.py
|
chanhosuh/bitcoin-monitor
|
acecfcf020cf2debfdf3a2e8c446007d7412d8e1
|
[
"MIT"
] | 13
|
2019-02-28T03:24:54.000Z
|
2021-09-22T17:50:00.000Z
|
django/bitcoin_monitor/middleware.py
|
chanhosuh/bitcoin-monitor
|
acecfcf020cf2debfdf3a2e8c446007d7412d8e1
|
[
"MIT"
] | null | null | null |
""" https://www.fusionbox.com/blog/detail/create-react-app-and-django/624/ """
def dev_cors_middleware(get_response):
"""
Adds CORS headers for local testing only to allow the frontend, which is served on
localhost:3000, to access the API, which is served on localhost:8000.
"""
def middleware(request):
response = get_response(request)
response['Access-Control-Allow-Origin'] = 'http://localhost:3000'
response['Access-Control-Allow-Methods'] = 'GET, POST, PUT, PATCH, OPTIONS, DELETE, HEAD'
response['Access-Control-Allow-Headers'] = 'Content-Type, X-CSRFToken'
response['Access-Control-Allow-Credentials'] = 'true'
return response
return middleware
| 38.315789
| 97
| 0.684066
|
332e161eef959186a60c270cf82bdddc8cea38bc
| 98,053
|
py
|
Python
|
test/unit/container/test_backend.py
|
thiagodasilva/swift
|
0553d9333ed0045c4d209065b315533a33e5d7d7
|
[
"Apache-2.0"
] | null | null | null |
test/unit/container/test_backend.py
|
thiagodasilva/swift
|
0553d9333ed0045c4d209065b315533a33e5d7d7
|
[
"Apache-2.0"
] | null | null | null |
test/unit/container/test_backend.py
|
thiagodasilva/swift
|
0553d9333ed0045c4d209065b315533a33e5d7d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.container.backend """
import os
import hashlib
import unittest
from time import sleep, time
from uuid import uuid4
import itertools
import random
from collections import defaultdict
from contextlib import contextmanager
import sqlite3
import pickle
import json
from swift.container.backend import ContainerBroker
from swift.common.utils import Timestamp
from swift.common.storage_policy import POLICIES
import mock
from test.unit import (patch_policies, with_tempdir, make_timestamp_iter,
EMPTY_ETAG)
from test.unit.common.test_db import TestExampleBroker
class TestContainerBroker(unittest.TestCase):
"""Tests for ContainerBroker"""
def test_creation(self):
# Test ContainerBroker.__init__
broker = ContainerBroker(':memory:', account='a', container='c')
self.assertEqual(broker.db_file, ':memory:')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
curs = conn.cursor()
curs.execute('SELECT 1')
self.assertEqual(curs.fetchall()[0][0], 1)
@patch_policies
def test_storage_policy_property(self):
ts = (Timestamp(t).internal for t in itertools.count(int(time())))
for policy in POLICIES:
broker = ContainerBroker(':memory:', account='a',
container='policy_%s' % policy.name)
broker.initialize(next(ts), policy.idx)
with broker.get() as conn:
try:
conn.execute('''SELECT storage_policy_index
FROM container_stat''')
except Exception:
is_migrated = False
else:
is_migrated = True
if not is_migrated:
# pre spi tests don't set policy on initialize
broker.set_storage_policy_index(policy.idx)
self.assertEqual(policy.idx, broker.storage_policy_index)
# make sure it's cached
with mock.patch.object(broker, 'get'):
self.assertEqual(policy.idx, broker.storage_policy_index)
def test_exception(self):
# Test ContainerBroker throwing a conn away after
# unhandled exception
first_conn = None
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
first_conn = conn
try:
with broker.get() as conn:
self.assertEqual(first_conn, conn)
raise Exception('OMG')
except Exception:
pass
self.assertTrue(broker.conn is None)
def test_empty(self):
# Test ContainerBroker.empty
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
self.assertTrue(broker.empty())
broker.put_object('o', Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
self.assertTrue(not broker.empty())
sleep(.00001)
broker.delete_object('o', Timestamp(time()).internal)
self.assertTrue(broker.empty())
def test_reclaim(self):
broker = ContainerBroker(':memory:', account='test_account',
container='test_container')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('o', Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
broker.reclaim(Timestamp(time() - 999).internal, time())
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.delete_object('o', Timestamp(time()).internal)
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
broker.reclaim(Timestamp(time() - 999).internal, time())
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
sleep(.00001)
broker.reclaim(Timestamp(time()).internal, time())
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
# Test the return values of reclaim()
broker.put_object('w', Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('x', Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('y', Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('z', Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
# Test before deletion
broker.reclaim(Timestamp(time()).internal, time())
broker.delete_db(Timestamp(time()).internal)
def test_get_info_is_deleted(self):
start = int(time())
ts = (Timestamp(t).internal for t in itertools.count(start))
broker = ContainerBroker(':memory:', account='test_account',
container='test_container')
# create it
broker.initialize(next(ts), POLICIES.default.idx)
info, is_deleted = broker.get_info_is_deleted()
self.assertEqual(is_deleted, broker.is_deleted())
self.assertEqual(is_deleted, False) # sanity
self.assertEqual(info, broker.get_info())
self.assertEqual(info['put_timestamp'], Timestamp(start).internal)
self.assertTrue(Timestamp(info['created_at']) >= start)
self.assertEqual(info['delete_timestamp'], '0')
if self.__class__ in (TestContainerBrokerBeforeMetadata,
TestContainerBrokerBeforeXSync,
TestContainerBrokerBeforeSPI):
self.assertEqual(info['status_changed_at'], '0')
else:
self.assertEqual(info['status_changed_at'],
Timestamp(start).internal)
# delete it
delete_timestamp = next(ts)
broker.delete_db(delete_timestamp)
info, is_deleted = broker.get_info_is_deleted()
self.assertEqual(is_deleted, True) # sanity
self.assertEqual(is_deleted, broker.is_deleted())
self.assertEqual(info, broker.get_info())
self.assertEqual(info['put_timestamp'], Timestamp(start).internal)
self.assertTrue(Timestamp(info['created_at']) >= start)
self.assertEqual(info['delete_timestamp'], delete_timestamp)
self.assertEqual(info['status_changed_at'], delete_timestamp)
# bring back to life
broker.put_object('obj', next(ts), 0, 'text/plain', 'etag',
storage_policy_index=broker.storage_policy_index)
info, is_deleted = broker.get_info_is_deleted()
self.assertEqual(is_deleted, False) # sanity
self.assertEqual(is_deleted, broker.is_deleted())
self.assertEqual(info, broker.get_info())
self.assertEqual(info['put_timestamp'], Timestamp(start).internal)
self.assertTrue(Timestamp(info['created_at']) >= start)
self.assertEqual(info['delete_timestamp'], delete_timestamp)
self.assertEqual(info['status_changed_at'], delete_timestamp)
def test_delete_object(self):
# Test ContainerBroker.delete_object
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('o', Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.delete_object('o', Timestamp(time()).internal)
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
def test_put_object(self):
# Test ContainerBroker.put_object
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
# Create initial object
timestamp = Timestamp(time()).internal
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Reput same event
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put new event
sleep(.00001)
timestamp = Timestamp(time()).internal
broker.put_object('"{<object \'&\' name>}"', timestamp, 124,
'application/x-test',
'aa0749bacbc79ec65fe206943d8fe449')
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put old event
otimestamp = Timestamp(float(Timestamp(timestamp)) - 1).internal
broker.put_object('"{<object \'&\' name>}"', otimestamp, 124,
'application/x-test',
'aa0749bacbc79ec65fe206943d8fe449')
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put old delete event
dtimestamp = Timestamp(float(Timestamp(timestamp)) - 1).internal
broker.put_object('"{<object \'&\' name>}"', dtimestamp, 0, '', '',
deleted=1)
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put new delete event
sleep(.00001)
timestamp = Timestamp(time()).internal
broker.put_object('"{<object \'&\' name>}"', timestamp, 0, '', '',
deleted=1)
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 1)
# Put new event
sleep(.00001)
timestamp = Timestamp(time()).internal
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# We'll use this later
sleep(.0001)
in_between_timestamp = Timestamp(time()).internal
# New post event
sleep(.0001)
previous_timestamp = timestamp
timestamp = Timestamp(time()).internal
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0],
previous_timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put event from after last put but before last post
timestamp = in_between_timestamp
broker.put_object('"{<object \'&\' name>}"', timestamp, 456,
'application/x-test3',
'6af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 456)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test3')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'6af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
@patch_policies
def test_put_misplaced_object_does_not_effect_container_stats(self):
policy = random.choice(list(POLICIES))
ts = (Timestamp(t).internal for t in
itertools.count(int(time())))
broker = ContainerBroker(':memory:',
account='a', container='c')
broker.initialize(next(ts), policy.idx)
# migration tests may not honor policy on initialize
if isinstance(self, ContainerBrokerMigrationMixin):
real_storage_policy_index = \
broker.get_info()['storage_policy_index']
            policy = [p for p in POLICIES
                      if p.idx == real_storage_policy_index][0]
broker.put_object('correct_o', next(ts), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=policy.idx)
info = broker.get_info()
self.assertEqual(1, info['object_count'])
self.assertEqual(123, info['bytes_used'])
other_policy = random.choice([p for p in POLICIES
if p is not policy])
broker.put_object('wrong_o', next(ts), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=other_policy.idx)
        info = broker.get_info()
        self.assertEqual(1, info['object_count'])
        self.assertEqual(123, info['bytes_used'])
@patch_policies
def test_has_multiple_policies(self):
policy = random.choice(list(POLICIES))
ts = (Timestamp(t).internal for t in
itertools.count(int(time())))
broker = ContainerBroker(':memory:',
account='a', container='c')
broker.initialize(next(ts), policy.idx)
# migration tests may not honor policy on initialize
if isinstance(self, ContainerBrokerMigrationMixin):
real_storage_policy_index = \
broker.get_info()['storage_policy_index']
            policy = [p for p in POLICIES
                      if p.idx == real_storage_policy_index][0]
broker.put_object('correct_o', next(ts), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=policy.idx)
self.assertFalse(broker.has_multiple_policies())
other_policy = [p for p in POLICIES if p is not policy][0]
broker.put_object('wrong_o', next(ts), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=other_policy.idx)
self.assertTrue(broker.has_multiple_policies())
@patch_policies
def test_get_policy_info(self):
policy = random.choice(list(POLICIES))
ts = (Timestamp(t).internal for t in
itertools.count(int(time())))
broker = ContainerBroker(':memory:',
account='a', container='c')
broker.initialize(next(ts), policy.idx)
# migration tests may not honor policy on initialize
if isinstance(self, ContainerBrokerMigrationMixin):
real_storage_policy_index = \
broker.get_info()['storage_policy_index']
            policy = [p for p in POLICIES
                      if p.idx == real_storage_policy_index][0]
policy_stats = broker.get_policy_stats()
expected = {policy.idx: {'bytes_used': 0, 'object_count': 0}}
self.assertEqual(policy_stats, expected)
# add an object
broker.put_object('correct_o', next(ts), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=policy.idx)
policy_stats = broker.get_policy_stats()
expected = {policy.idx: {'bytes_used': 123, 'object_count': 1}}
self.assertEqual(policy_stats, expected)
# add a misplaced object
other_policy = random.choice([p for p in POLICIES
if p is not policy])
broker.put_object('wrong_o', next(ts), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=other_policy.idx)
policy_stats = broker.get_policy_stats()
expected = {
policy.idx: {'bytes_used': 123, 'object_count': 1},
other_policy.idx: {'bytes_used': 123, 'object_count': 1},
}
self.assertEqual(policy_stats, expected)
def test_policy_stat_tracking(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time())))
broker = ContainerBroker(':memory:',
account='a', container='c')
broker.initialize(next(ts), POLICIES.default.idx)
stats = defaultdict(dict)
iters = 100
for i in range(iters):
policy_index = random.randint(0, iters * 0.1)
name = 'object-%s' % random.randint(0, iters * 0.1)
size = random.randint(0, iters)
broker.put_object(name, next(ts), size, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=policy_index)
# track the size of the latest timestamp put for each object
# in each storage policy
stats[policy_index][name] = size
policy_stats = broker.get_policy_stats()
# if no objects were added for the default policy we still
# expect an entry for the default policy in the returned info
# because the database was initialized with that storage policy
# - but it must be empty.
if POLICIES.default.idx not in stats:
default_stats = policy_stats.pop(POLICIES.default.idx)
expected = {'object_count': 0, 'bytes_used': 0}
self.assertEqual(default_stats, expected)
self.assertEqual(len(policy_stats), len(stats))
for policy_index, stat in policy_stats.items():
self.assertEqual(stat['object_count'], len(stats[policy_index]))
self.assertEqual(stat['bytes_used'],
sum(stats[policy_index].values()))
def test_initialize_container_broker_in_default(self):
broker = ContainerBroker(':memory:', account='test1',
container='test2')
# initialize with no storage_policy_index argument
broker.initialize(Timestamp(1).internal)
info = broker.get_info()
self.assertEqual(info['account'], 'test1')
self.assertEqual(info['container'], 'test2')
self.assertEqual(info['hash'], '00000000000000000000000000000000')
self.assertEqual(info['put_timestamp'], Timestamp(1).internal)
self.assertEqual(info['delete_timestamp'], '0')
info = broker.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
policy_stats = broker.get_policy_stats()
# Act as policy-0
self.assertTrue(0 in policy_stats)
self.assertEqual(policy_stats[0]['bytes_used'], 0)
self.assertEqual(policy_stats[0]['object_count'], 0)
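        # an object PUT without an explicit storage_policy_index should also
        # be credited to policy 0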
broker.put_object('o1', Timestamp(time()).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 123)
policy_stats = broker.get_policy_stats()
self.assertTrue(0 in policy_stats)
self.assertEqual(policy_stats[0]['object_count'], 1)
self.assertEqual(policy_stats[0]['bytes_used'], 123)
def test_get_info(self):
# Test ContainerBroker.get_info
broker = ContainerBroker(':memory:', account='test1',
container='test2')
broker.initialize(Timestamp('1').internal, 0)
info = broker.get_info()
self.assertEqual(info['account'], 'test1')
self.assertEqual(info['container'], 'test2')
self.assertEqual(info['hash'], '00000000000000000000000000000000')
self.assertEqual(info['put_timestamp'], Timestamp(1).internal)
self.assertEqual(info['delete_timestamp'], '0')
if self.__class__ in (TestContainerBrokerBeforeMetadata,
TestContainerBrokerBeforeXSync,
TestContainerBrokerBeforeSPI):
self.assertEqual(info['status_changed_at'], '0')
else:
self.assertEqual(info['status_changed_at'],
Timestamp(1).internal)
info = broker.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
broker.put_object('o1', Timestamp(time()).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 123)
sleep(.00001)
broker.put_object('o2', Timestamp(time()).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 2)
self.assertEqual(info['bytes_used'], 246)
sleep(.00001)
broker.put_object('o2', Timestamp(time()).internal, 1000,
'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 2)
self.assertEqual(info['bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o1', Timestamp(time()).internal)
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 1000)
sleep(.00001)
broker.delete_object('o2', Timestamp(time()).internal)
info = broker.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
info = broker.get_info()
self.assertEqual(info['x_container_sync_point1'], -1)
self.assertEqual(info['x_container_sync_point2'], -1)
def test_set_x_syncs(self):
broker = ContainerBroker(':memory:', account='test1',
container='test2')
broker.initialize(Timestamp('1').internal, 0)
info = broker.get_info()
self.assertEqual(info['x_container_sync_point1'], -1)
self.assertEqual(info['x_container_sync_point2'], -1)
broker.set_x_container_sync_points(1, 2)
info = broker.get_info()
self.assertEqual(info['x_container_sync_point1'], 1)
self.assertEqual(info['x_container_sync_point2'], 2)
def test_get_report_info(self):
broker = ContainerBroker(':memory:', account='test1',
container='test2')
broker.initialize(Timestamp('1').internal, 0)
info = broker.get_info()
self.assertEqual(info['account'], 'test1')
self.assertEqual(info['container'], 'test2')
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
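        # the reported_* counters should only move when broker.reported() is
        # called; the puts and deletes below must leave them untouched until
        # then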
broker.put_object('o1', Timestamp(time()).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 123)
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
sleep(.00001)
broker.put_object('o2', Timestamp(time()).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 2)
self.assertEqual(info['bytes_used'], 246)
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
sleep(.00001)
broker.put_object('o2', Timestamp(time()).internal, 1000,
'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 2)
self.assertEqual(info['bytes_used'], 1123)
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
put_timestamp = Timestamp(time()).internal
sleep(.001)
delete_timestamp = Timestamp(time()).internal
broker.reported(put_timestamp, delete_timestamp, 2, 1123)
info = broker.get_info()
self.assertEqual(info['object_count'], 2)
self.assertEqual(info['bytes_used'], 1123)
self.assertEqual(info['reported_put_timestamp'], put_timestamp)
self.assertEqual(info['reported_delete_timestamp'], delete_timestamp)
self.assertEqual(info['reported_object_count'], 2)
self.assertEqual(info['reported_bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o1', Timestamp(time()).internal)
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 1000)
self.assertEqual(info['reported_object_count'], 2)
self.assertEqual(info['reported_bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o2', Timestamp(time()).internal)
info = broker.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
self.assertEqual(info['reported_object_count'], 2)
self.assertEqual(info['reported_bytes_used'], 1123)
def test_list_objects_iter(self):
# Test ContainerBroker.list_objects_iter
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
for obj1 in range(4):
for obj2 in range(125):
broker.put_object('%d/%04d' % (obj1, obj2),
Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in range(125):
broker.put_object('2/0051/%04d' % obj,
Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in range(125):
broker.put_object('3/%04d/0049' % obj,
Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEqual(len(listing), 100)
self.assertEqual(listing[0][0], '0/0000')
self.assertEqual(listing[-1][0], '0/0099')
listing = broker.list_objects_iter(100, '', '0/0050', None, '')
self.assertEqual(len(listing), 50)
self.assertEqual(listing[0][0], '0/0000')
self.assertEqual(listing[-1][0], '0/0049')
listing = broker.list_objects_iter(100, '0/0099', None, None, '')
self.assertEqual(len(listing), 100)
self.assertEqual(listing[0][0], '0/0100')
self.assertEqual(listing[-1][0], '1/0074')
listing = broker.list_objects_iter(55, '1/0074', None, None, '')
self.assertEqual(len(listing), 55)
self.assertEqual(listing[0][0], '1/0075')
self.assertEqual(listing[-1][0], '2/0004')
listing = broker.list_objects_iter(55, '2/0005', None, None, '',
reverse=True)
self.assertEqual(len(listing), 55)
self.assertEqual(listing[0][0], '2/0004')
self.assertEqual(listing[-1][0], '1/0075')
listing = broker.list_objects_iter(10, '', None, '0/01', '')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0/0100')
self.assertEqual(listing[-1][0], '0/0109')
listing = broker.list_objects_iter(10, '', None, '0/', '/')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0/0000')
self.assertEqual(listing[-1][0], '0/0009')
listing = broker.list_objects_iter(10, '', None, '0/', '/',
reverse=True)
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0/0124')
self.assertEqual(listing[-1][0], '0/0115')
# Same as above, but using the path argument.
listing = broker.list_objects_iter(10, '', None, None, '', '0')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0/0000')
self.assertEqual(listing[-1][0], '0/0009')
listing = broker.list_objects_iter(10, '', None, None, '', '0',
reverse=True)
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0/0124')
self.assertEqual(listing[-1][0], '0/0115')
listing = broker.list_objects_iter(10, '', None, '', '/')
self.assertEqual(len(listing), 4)
self.assertEqual([row[0] for row in listing],
['0/', '1/', '2/', '3/'])
listing = broker.list_objects_iter(10, '', None, '', '/', reverse=True)
self.assertEqual(len(listing), 4)
self.assertEqual([row[0] for row in listing],
['3/', '2/', '1/', '0/'])
listing = broker.list_objects_iter(10, '2', None, None, '/')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['2/', '3/'])
listing = broker.list_objects_iter(10, '2/', None, None, '/')
self.assertEqual(len(listing), 1)
self.assertEqual([row[0] for row in listing], ['3/'])
listing = broker.list_objects_iter(10, '2/', None, None, '/',
reverse=True)
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['1/', '0/'])
listing = broker.list_objects_iter(10, '20', None, None, '/',
reverse=True)
self.assertEqual(len(listing), 3)
self.assertEqual([row[0] for row in listing], ['2/', '1/', '0/'])
listing = broker.list_objects_iter(10, '2/0050', None, '2/', '/')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '2/0051')
self.assertEqual(listing[1][0], '2/0051/')
self.assertEqual(listing[2][0], '2/0052')
self.assertEqual(listing[-1][0], '2/0059')
listing = broker.list_objects_iter(10, '3/0045', None, '3/', '/')
self.assertEqual(len(listing), 10)
self.assertEqual([row[0] for row in listing],
['3/0045/', '3/0046', '3/0046/', '3/0047',
'3/0047/', '3/0048', '3/0048/', '3/0049',
'3/0049/', '3/0050'])
broker.put_object('3/0049/', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(10, '3/0048', None, None, None)
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['3/0048/0049', '3/0049', '3/0049/',
'3/0049/0049', '3/0050', '3/0050/0049', '3/0051', '3/0051/0049',
'3/0052', '3/0052/0049'])
listing = broker.list_objects_iter(10, '3/0048', None, '3/', '/')
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['3/0048/', '3/0049', '3/0049/', '3/0050',
'3/0050/', '3/0051', '3/0051/', '3/0052', '3/0052/', '3/0053'])
listing = broker.list_objects_iter(10, None, None, '3/0049/', '/')
self.assertEqual(len(listing), 2)
self.assertEqual(
[row[0] for row in listing],
['3/0049/', '3/0049/0049'])
listing = broker.list_objects_iter(10, None, None, None, None,
'3/0049')
self.assertEqual(len(listing), 1)
self.assertEqual([row[0] for row in listing], ['3/0049/0049'])
listing = broker.list_objects_iter(2, None, None, '3/', '/')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['3/0000', '3/0000/'])
listing = broker.list_objects_iter(2, None, None, None, None, '3')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['3/0000', '3/0001'])
def test_reverse_prefix_delim(self):
expectations = [
{
'objects': [
'topdir1/subdir1.0/obj1',
'topdir1/subdir1.1/obj1',
'topdir1/subdir1/obj1',
],
'params': {
'prefix': 'topdir1/',
'delimiter': '/',
},
'expected': [
'topdir1/subdir1.0/',
'topdir1/subdir1.1/',
'topdir1/subdir1/',
],
},
{
'objects': [
'topdir1/subdir1.0/obj1',
'topdir1/subdir1.1/obj1',
'topdir1/subdir1/obj1',
'topdir1/subdir10',
'topdir1/subdir10/obj1',
],
'params': {
'prefix': 'topdir1/',
'delimiter': '/',
},
'expected': [
'topdir1/subdir1.0/',
'topdir1/subdir1.1/',
'topdir1/subdir1/',
'topdir1/subdir10',
'topdir1/subdir10/',
],
},
{
'objects': [
'topdir1/subdir1/obj1',
'topdir1/subdir1.0/obj1',
'topdir1/subdir1.1/obj1',
],
'params': {
'prefix': 'topdir1/',
'delimiter': '/',
'reverse': True,
},
'expected': [
'topdir1/subdir1/',
'topdir1/subdir1.1/',
'topdir1/subdir1.0/',
],
},
{
'objects': [
'topdir1/subdir10/obj1',
'topdir1/subdir10',
'topdir1/subdir1/obj1',
'topdir1/subdir1.0/obj1',
'topdir1/subdir1.1/obj1',
],
'params': {
'prefix': 'topdir1/',
'delimiter': '/',
'reverse': True,
},
'expected': [
'topdir1/subdir10/',
'topdir1/subdir10',
'topdir1/subdir1/',
'topdir1/subdir1.1/',
'topdir1/subdir1.0/',
],
},
{
'objects': [
'1',
'2',
'3/1',
'3/2.2',
'3/2/1',
'3/2/2',
'3/3',
'4',
],
'params': {
'path': '3/',
},
'expected': [
'3/1',
'3/2.2',
'3/3',
],
},
{
'objects': [
'1',
'2',
'3/1',
'3/2.2',
'3/2/1',
'3/2/2',
'3/3',
'4',
],
'params': {
'path': '3/',
'reverse': True,
},
'expected': [
'3/3',
'3/2.2',
'3/1',
],
},
]
ts = make_timestamp_iter()
default_listing_params = {
'limit': 10000,
'marker': '',
'end_marker': None,
'prefix': None,
'delimiter': None,
}
obj_create_params = {
'size': 0,
'content_type': 'application/test',
'etag': EMPTY_ETAG,
}
failures = []
for expected in expectations:
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(next(ts).internal, 0)
for name in expected['objects']:
broker.put_object(name, next(ts).internal, **obj_create_params)
params = default_listing_params.copy()
params.update(expected['params'])
listing = list(o[0] for o in broker.list_objects_iter(**params))
if listing != expected['expected']:
expected['listing'] = listing
failures.append(
"With objects %(objects)r, the params %(params)r "
"produced %(listing)r instead of %(expected)r" % expected)
self.assertFalse(failures, "Found the following failures:\n%s" %
'\n'.join(failures))
def test_list_objects_iter_non_slash(self):
# Test ContainerBroker.list_objects_iter using a
# delimiter that is not a slash
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
for obj1 in range(4):
for obj2 in range(125):
broker.put_object('%d:%04d' % (obj1, obj2),
Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in range(125):
broker.put_object('2:0051:%04d' % obj,
Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in range(125):
broker.put_object('3:%04d:0049' % obj,
Timestamp(time()).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEqual(len(listing), 100)
self.assertEqual(listing[0][0], '0:0000')
self.assertEqual(listing[-1][0], '0:0099')
listing = broker.list_objects_iter(100, '', '0:0050', None, '')
self.assertEqual(len(listing), 50)
self.assertEqual(listing[0][0], '0:0000')
self.assertEqual(listing[-1][0], '0:0049')
listing = broker.list_objects_iter(100, '0:0099', None, None, '')
self.assertEqual(len(listing), 100)
self.assertEqual(listing[0][0], '0:0100')
self.assertEqual(listing[-1][0], '1:0074')
listing = broker.list_objects_iter(55, '1:0074', None, None, '')
self.assertEqual(len(listing), 55)
self.assertEqual(listing[0][0], '1:0075')
self.assertEqual(listing[-1][0], '2:0004')
listing = broker.list_objects_iter(10, '', None, '0:01', '')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0:0100')
self.assertEqual(listing[-1][0], '0:0109')
listing = broker.list_objects_iter(10, '', None, '0:', ':')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0:0000')
self.assertEqual(listing[-1][0], '0:0009')
# Same as above, but using the path argument, so nothing should be
# returned since path uses a '/' as a delimiter.
listing = broker.list_objects_iter(10, '', None, None, '', '0')
self.assertEqual(len(listing), 0)
listing = broker.list_objects_iter(10, '', None, '', ':')
self.assertEqual(len(listing), 4)
self.assertEqual([row[0] for row in listing],
['0:', '1:', '2:', '3:'])
listing = broker.list_objects_iter(10, '2', None, None, ':')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['2:', '3:'])
listing = broker.list_objects_iter(10, '2:', None, None, ':')
self.assertEqual(len(listing), 1)
self.assertEqual([row[0] for row in listing], ['3:'])
listing = broker.list_objects_iter(10, '2:0050', None, '2:', ':')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '2:0051')
self.assertEqual(listing[1][0], '2:0051:')
self.assertEqual(listing[2][0], '2:0052')
self.assertEqual(listing[-1][0], '2:0059')
listing = broker.list_objects_iter(10, '3:0045', None, '3:', ':')
self.assertEqual(len(listing), 10)
self.assertEqual([row[0] for row in listing],
['3:0045:', '3:0046', '3:0046:', '3:0047',
'3:0047:', '3:0048', '3:0048:', '3:0049',
'3:0049:', '3:0050'])
broker.put_object('3:0049:', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(10, '3:0048', None, None, None)
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['3:0048:0049', '3:0049', '3:0049:',
'3:0049:0049', '3:0050', '3:0050:0049', '3:0051', '3:0051:0049',
'3:0052', '3:0052:0049'])
listing = broker.list_objects_iter(10, '3:0048', None, '3:', ':')
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['3:0048:', '3:0049', '3:0049:', '3:0050',
'3:0050:', '3:0051', '3:0051:', '3:0052', '3:0052:', '3:0053'])
listing = broker.list_objects_iter(10, None, None, '3:0049:', ':')
self.assertEqual(len(listing), 2)
self.assertEqual(
[row[0] for row in listing],
['3:0049:', '3:0049:0049'])
# Same as above, but using the path argument, so nothing should be
# returned since path uses a '/' as a delimiter.
listing = broker.list_objects_iter(10, None, None, None, None,
'3:0049')
self.assertEqual(len(listing), 0)
listing = broker.list_objects_iter(2, None, None, '3:', ':')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['3:0000', '3:0000:'])
listing = broker.list_objects_iter(2, None, None, None, None, '3')
self.assertEqual(len(listing), 0)
def test_list_objects_iter_prefix_delim(self):
# Test ContainerBroker.list_objects_iter
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object(
'/pets/dogs/1', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'/pets/dogs/2', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'/pets/fish/a', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'/pets/fish/b', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'/pets/fish_info.txt', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'/snakes', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
        # def list_objects_iter(self, limit, marker, end_marker, prefix,
        #                       delimiter, path=None, ...):
listing = broker.list_objects_iter(100, None, None, '/pets/f', '/')
self.assertEqual([row[0] for row in listing],
['/pets/fish/', '/pets/fish_info.txt'])
listing = broker.list_objects_iter(100, None, None, '/pets/fish', '/')
self.assertEqual([row[0] for row in listing],
['/pets/fish/', '/pets/fish_info.txt'])
listing = broker.list_objects_iter(100, None, None, '/pets/fish/', '/')
self.assertEqual([row[0] for row in listing],
['/pets/fish/a', '/pets/fish/b'])
def test_list_objects_iter_order_and_reverse(self):
# Test ContainerBroker.list_objects_iter
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object(
'o1', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'o10', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'O1', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'o2', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'o3', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'O4', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(100, None, None, '', '',
reverse=False)
self.assertEqual([row[0] for row in listing],
['O1', 'O4', 'o1', 'o10', 'o2', 'o3'])
listing = broker.list_objects_iter(100, None, None, '', '',
reverse=True)
self.assertEqual([row[0] for row in listing],
['o3', 'o2', 'o10', 'o1', 'O4', 'O1'])
listing = broker.list_objects_iter(2, None, None, '', '',
reverse=True)
self.assertEqual([row[0] for row in listing],
['o3', 'o2'])
listing = broker.list_objects_iter(100, 'o2', 'O4', '', '',
reverse=True)
self.assertEqual([row[0] for row in listing],
['o10', 'o1'])
def test_double_check_trailing_delimiter(self):
# Test ContainerBroker.list_objects_iter for a
# container that has an odd file with a trailing delimiter
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('a', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a/a', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a/b', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/b', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b/a', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b/b', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('c', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/0', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('00', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/0', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/00', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/1', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/1/', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/1/0', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1/', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1/0', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(25, None, None, None, None)
self.assertEqual(len(listing), 22)
self.assertEqual(
[row[0] for row in listing],
['0', '0/', '0/0', '0/00', '0/1', '0/1/', '0/1/0', '00', '1', '1/',
'1/0', 'a', 'a/', 'a/0', 'a/a', 'a/a/a', 'a/a/b', 'a/b', 'b',
'b/a', 'b/b', 'c'])
listing = broker.list_objects_iter(25, None, None, '', '/')
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['0', '0/', '00', '1', '1/', 'a', 'a/', 'b', 'b/', 'c'])
listing = broker.list_objects_iter(25, None, None, 'a/', '/')
self.assertEqual(len(listing), 5)
self.assertEqual(
[row[0] for row in listing],
['a/', 'a/0', 'a/a', 'a/a/', 'a/b'])
listing = broker.list_objects_iter(25, None, None, '0/', '/')
self.assertEqual(len(listing), 5)
self.assertEqual(
[row[0] for row in listing],
['0/', '0/0', '0/00', '0/1', '0/1/'])
listing = broker.list_objects_iter(25, None, None, '0/1/', '/')
self.assertEqual(len(listing), 2)
self.assertEqual(
[row[0] for row in listing],
['0/1/', '0/1/0'])
listing = broker.list_objects_iter(25, None, None, 'b/', '/')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['b/a', 'b/b'])
def test_double_check_trailing_delimiter_non_slash(self):
# Test ContainerBroker.list_objects_iter for a
# container that has an odd file with a trailing delimiter
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('a', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:a', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:a:a', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:a:b', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:b', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b:a', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b:b', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('c', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:0', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('00', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:0', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:00', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:1', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:1:', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:1:0', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1:', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1:0', Timestamp(time()).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(25, None, None, None, None)
self.assertEqual(len(listing), 22)
self.assertEqual(
[row[0] for row in listing],
['0', '00', '0:', '0:0', '0:00', '0:1', '0:1:', '0:1:0', '1', '1:',
'1:0', 'a', 'a:', 'a:0', 'a:a', 'a:a:a', 'a:a:b', 'a:b', 'b',
'b:a', 'b:b', 'c'])
listing = broker.list_objects_iter(25, None, None, '', ':')
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['0', '00', '0:', '1', '1:', 'a', 'a:', 'b', 'b:', 'c'])
listing = broker.list_objects_iter(25, None, None, 'a:', ':')
self.assertEqual(len(listing), 5)
self.assertEqual(
[row[0] for row in listing],
['a:', 'a:0', 'a:a', 'a:a:', 'a:b'])
listing = broker.list_objects_iter(25, None, None, '0:', ':')
self.assertEqual(len(listing), 5)
self.assertEqual(
[row[0] for row in listing],
['0:', '0:0', '0:00', '0:1', '0:1:'])
listing = broker.list_objects_iter(25, None, None, '0:1:', ':')
self.assertEqual(len(listing), 2)
self.assertEqual(
[row[0] for row in listing],
['0:1:', '0:1:0'])
listing = broker.list_objects_iter(25, None, None, 'b:', ':')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['b:a', 'b:b'])
def test_chexor(self):
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
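        # the container 'hash' is maintained as the byte-wise XOR of
        # md5('%s-%s' % (name, created_at)) over the live rows, hex-encoded;
        # the test recomputes that value by hand below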
broker.put_object('a', Timestamp(1).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b', Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
hasha = hashlib.md5('%s-%s' % ('a', Timestamp(1).internal)).digest()
hashb = hashlib.md5('%s-%s' % ('b', Timestamp(2).internal)).digest()
hashc = ''.join(
('%02x' % (ord(a) ^ ord(b)) for a, b in zip(hasha, hashb)))
self.assertEqual(broker.get_info()['hash'], hashc)
broker.put_object('b', Timestamp(3).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
hashb = hashlib.md5('%s-%s' % ('b', Timestamp(3).internal)).digest()
hashc = ''.join(
('%02x' % (ord(a) ^ ord(b)) for a, b in zip(hasha, hashb)))
self.assertEqual(broker.get_info()['hash'], hashc)
def test_newid(self):
# test DatabaseBroker.newid
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
id = broker.get_info()['id']
broker.newid('someid')
self.assertNotEqual(id, broker.get_info()['id'])
def test_get_items_since(self):
# test DatabaseBroker.get_items_since
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('a', Timestamp(1).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
max_row = broker.get_replication_info()['max_row']
broker.put_object('b', Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
items = broker.get_items_since(max_row, 1000)
self.assertEqual(len(items), 1)
self.assertEqual(items[0]['name'], 'b')
def test_sync_merging(self):
# exercise the DatabaseBroker sync functions a bit
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(Timestamp('1').internal, 0)
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(Timestamp('1').internal, 0)
self.assertEqual(broker2.get_sync('12345'), -1)
broker1.merge_syncs([{'sync_point': 3, 'remote_id': '12345'}])
broker2.merge_syncs(broker1.get_syncs())
self.assertEqual(broker2.get_sync('12345'), 3)
def test_merge_items(self):
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(Timestamp('1').internal, 0)
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(Timestamp('1').internal, 0)
broker1.put_object('a', Timestamp(1).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
id = broker1.get_info()['id']
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(len(items), 2)
self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
broker1.put_object('c', Timestamp(3).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(len(items), 3)
self.assertEqual(['a', 'b', 'c'],
sorted([rec['name'] for rec in items]))
def test_merge_items_overwrite_unicode(self):
# test DatabaseBroker.merge_items
snowman = u'\N{SNOWMAN}'.encode('utf-8')
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(Timestamp('1').internal, 0)
id = broker1.get_info()['id']
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(Timestamp('1').internal, 0)
broker1.put_object(snowman, Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', Timestamp(3).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
broker2.get_sync(id), 1000))), id)
broker1.put_object(snowman, Timestamp(4).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
broker2.get_sync(id), 1000))), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(['b', snowman],
sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == snowman:
self.assertEqual(rec['created_at'], Timestamp(4).internal)
if rec['name'] == 'b':
self.assertEqual(rec['created_at'], Timestamp(3).internal)
def test_merge_items_overwrite(self):
# test DatabaseBroker.merge_items
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(Timestamp('1').internal, 0)
id = broker1.get_info()['id']
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(Timestamp('1').internal, 0)
broker1.put_object('a', Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', Timestamp(3).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
broker1.put_object('a', Timestamp(4).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEqual(rec['created_at'], Timestamp(4).internal)
if rec['name'] == 'b':
self.assertEqual(rec['created_at'], Timestamp(3).internal)
def test_merge_items_post_overwrite_out_of_order(self):
# test DatabaseBroker.merge_items
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(Timestamp('1').internal, 0)
id = broker1.get_info()['id']
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(Timestamp('1').internal, 0)
broker1.put_object('a', Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', Timestamp(3).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
broker1.put_object('a', Timestamp(4).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEqual(rec['created_at'], Timestamp(4).internal)
if rec['name'] == 'b':
self.assertEqual(rec['created_at'], Timestamp(3).internal)
self.assertEqual(rec['content_type'], 'text/plain')
items = broker2.get_items_since(-1, 1000)
self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEqual(rec['created_at'], Timestamp(4).internal)
if rec['name'] == 'b':
self.assertEqual(rec['created_at'], Timestamp(3).internal)
broker1.put_object('b', Timestamp(5).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEqual(rec['created_at'], Timestamp(4).internal)
if rec['name'] == 'b':
self.assertEqual(rec['created_at'], Timestamp(5).internal)
self.assertEqual(rec['content_type'], 'text/plain')
def test_set_storage_policy_index(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time())))
broker = ContainerBroker(':memory:', account='test_account',
container='test_container')
timestamp = next(ts)
broker.initialize(timestamp, 0)
info = broker.get_info()
self.assertEqual(0, info['storage_policy_index']) # sanity check
self.assertEqual(0, info['object_count'])
self.assertEqual(0, info['bytes_used'])
if self.__class__ in (TestContainerBrokerBeforeMetadata,
TestContainerBrokerBeforeXSync,
TestContainerBrokerBeforeSPI):
self.assertEqual(info['status_changed_at'], '0')
else:
self.assertEqual(timestamp, info['status_changed_at'])
expected = {0: {'object_count': 0, 'bytes_used': 0}}
self.assertEqual(expected, broker.get_policy_stats())
timestamp = next(ts)
broker.set_storage_policy_index(111, timestamp)
self.assertEqual(broker.storage_policy_index, 111)
info = broker.get_info()
self.assertEqual(111, info['storage_policy_index'])
self.assertEqual(0, info['object_count'])
self.assertEqual(0, info['bytes_used'])
self.assertEqual(timestamp, info['status_changed_at'])
expected[111] = {'object_count': 0, 'bytes_used': 0}
self.assertEqual(expected, broker.get_policy_stats())
timestamp = next(ts)
broker.set_storage_policy_index(222, timestamp)
self.assertEqual(broker.storage_policy_index, 222)
info = broker.get_info()
self.assertEqual(222, info['storage_policy_index'])
self.assertEqual(0, info['object_count'])
self.assertEqual(0, info['bytes_used'])
self.assertEqual(timestamp, info['status_changed_at'])
expected[222] = {'object_count': 0, 'bytes_used': 0}
self.assertEqual(expected, broker.get_policy_stats())
old_timestamp, timestamp = timestamp, next(ts)
broker.set_storage_policy_index(222, timestamp) # it's idempotent
info = broker.get_info()
self.assertEqual(222, info['storage_policy_index'])
self.assertEqual(0, info['object_count'])
self.assertEqual(0, info['bytes_used'])
self.assertEqual(old_timestamp, info['status_changed_at'])
self.assertEqual(expected, broker.get_policy_stats())
def test_set_storage_policy_index_empty(self):
# Putting an object may trigger migrations, so test with a
# never-had-an-object container to make sure we handle it
broker = ContainerBroker(':memory:', account='test_account',
container='test_container')
broker.initialize(Timestamp('1').internal, 0)
info = broker.get_info()
self.assertEqual(0, info['storage_policy_index'])
broker.set_storage_policy_index(2)
info = broker.get_info()
self.assertEqual(2, info['storage_policy_index'])
def test_reconciler_sync(self):
broker = ContainerBroker(':memory:', account='test_account',
container='test_container')
broker.initialize(Timestamp('1').internal, 0)
self.assertEqual(-1, broker.get_reconciler_sync())
broker.update_reconciler_sync(10)
self.assertEqual(10, broker.get_reconciler_sync())
@with_tempdir
def test_legacy_pending_files(self, tempdir):
ts = (Timestamp(t).internal for t in
itertools.count(int(time())))
db_path = os.path.join(tempdir, 'container.db')
# first init an acct DB without the policy_stat table present
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(ts), 1)
# manually make some pending entries lacking storage_policy_index
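        # (each legacy entry is written as ':' + base64(pickle(
        #  (name, timestamp, size, content_type, etag, deleted))), i.e. the
        #  pre-storage_policy_index 6-tuple format)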
with open(broker.pending_file, 'a+b') as fp:
for i in range(10):
name, timestamp, size, content_type, etag, deleted = (
'o%s' % i, next(ts), 0, 'c', 'e', 0)
fp.write(':')
fp.write(pickle.dumps(
(name, timestamp, size, content_type, etag, deleted),
protocol=2).encode('base64'))
fp.flush()
# use put_object to append some more entries with different
# values for storage_policy_index
for i in range(10, 30):
name = 'o%s' % i
if i < 20:
size = 1
storage_policy_index = 0
else:
size = 2
storage_policy_index = 1
broker.put_object(name, next(ts), size, 'c', 'e', 0,
storage_policy_index=storage_policy_index)
broker._commit_puts_stale_ok()
# 10 objects with 0 bytes each in the legacy pending entries
# 10 objects with 1 bytes each in storage policy 0
# 10 objects with 2 bytes each in storage policy 1
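        # (legacy 6-tuple entries carry no storage_policy_index, so they are
        # replayed into policy 0 -- hence the 20 objects under policy 0 below)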
expected = {
0: {'object_count': 20, 'bytes_used': 10},
1: {'object_count': 10, 'bytes_used': 20},
}
self.assertEqual(broker.get_policy_stats(), expected)
class TestCommonContainerBroker(TestExampleBroker):
broker_class = ContainerBroker
def setUp(self):
super(TestCommonContainerBroker, self).setUp()
self.policy = random.choice(list(POLICIES))
def put_item(self, broker, timestamp):
broker.put_object('test', timestamp, 0, 'text/plain', 'x',
storage_policy_index=int(self.policy))
def delete_item(self, broker, timestamp):
broker.delete_object('test', timestamp,
storage_policy_index=int(self.policy))
class ContainerBrokerMigrationMixin(object):
"""
Mixin for running ContainerBroker against databases created with
older schemas.
"""
def setUp(self):
self._imported_create_object_table = \
ContainerBroker.create_object_table
ContainerBroker.create_object_table = \
prespi_create_object_table
self._imported_create_container_info_table = \
ContainerBroker.create_container_info_table
ContainerBroker.create_container_info_table = \
premetadata_create_container_info_table
self._imported_create_policy_stat_table = \
ContainerBroker.create_policy_stat_table
ContainerBroker.create_policy_stat_table = lambda *args: None
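        # pre-SPI schemas had no policy_stat table at all, so its creation is
        # stubbed out entirely for these migration tests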
@classmethod
@contextmanager
def old_broker(cls):
cls.runTest = lambda *a, **k: None
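        # unittest.TestCase() needs a runTest method to be instantiable, so a
        # no-op is stubbed in purely to reuse this case's setUp/tearDown
        # schema patching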
case = cls()
case.setUp()
try:
yield ContainerBroker
finally:
case.tearDown()
def tearDown(self):
ContainerBroker.create_container_info_table = \
self._imported_create_container_info_table
ContainerBroker.create_object_table = \
self._imported_create_object_table
ContainerBroker.create_policy_stat_table = \
self._imported_create_policy_stat_table
def premetadata_create_container_info_table(self, conn, put_timestamp,
_spi=None):
"""
Copied from ContainerBroker before the metadata column was
added; used for testing with TestContainerBrokerBeforeMetadata.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
conn.executescript('''
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0'
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
''')
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, Timestamp(time()).internal,
str(uuid4()), put_timestamp))
class TestContainerBrokerBeforeMetadata(ContainerBrokerMigrationMixin,
TestContainerBroker):
"""
Tests for ContainerBroker against databases created before
the metadata column was added.
"""
def setUp(self):
super(TestContainerBrokerBeforeMetadata, self).setUp()
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
exc = None
with broker.get() as conn:
try:
conn.execute('SELECT metadata FROM container_stat')
except BaseException as err:
exc = err
self.assertTrue('no such column: metadata' in str(exc))
def tearDown(self):
super(TestContainerBrokerBeforeMetadata, self).tearDown()
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
conn.execute('SELECT metadata FROM container_stat')
def prexsync_create_container_info_table(self, conn, put_timestamp,
_spi=None):
"""
Copied from ContainerBroker before the
x_container_sync_point[12] columns were added; used for testing with
TestContainerBrokerBeforeXSync.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
conn.executescript("""
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT ''
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
""")
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, Timestamp(time()).internal,
str(uuid4()), put_timestamp))
class TestContainerBrokerBeforeXSync(ContainerBrokerMigrationMixin,
TestContainerBroker):
"""
Tests for ContainerBroker against databases created
before the x_container_sync_point[12] columns were added.
"""
def setUp(self):
super(TestContainerBrokerBeforeXSync, self).setUp()
ContainerBroker.create_container_info_table = \
prexsync_create_container_info_table
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
exc = None
with broker.get() as conn:
try:
conn.execute('''SELECT x_container_sync_point1
FROM container_stat''')
except BaseException as err:
exc = err
self.assertTrue('no such column: x_container_sync_point1' in str(exc))
def tearDown(self):
super(TestContainerBrokerBeforeXSync, self).tearDown()
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
conn.execute('SELECT x_container_sync_point1 FROM container_stat')
def prespi_create_object_table(self, conn, *args, **kwargs):
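    # pre-SPI object schema: there is no storage_policy_index column, and the
    # triggers keep container_stat's counts and hash in sync directly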
conn.executescript("""
CREATE TABLE object (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
created_at TEXT,
size INTEGER,
content_type TEXT,
etag TEXT,
deleted INTEGER DEFAULT 0
);
CREATE INDEX ix_object_deleted_name ON object (deleted, name);
CREATE TRIGGER object_insert AFTER INSERT ON object
BEGIN
UPDATE container_stat
SET object_count = object_count + (1 - new.deleted),
bytes_used = bytes_used + new.size,
hash = chexor(hash, new.name, new.created_at);
END;
CREATE TRIGGER object_update BEFORE UPDATE ON object
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
CREATE TRIGGER object_delete AFTER DELETE ON object
BEGIN
UPDATE container_stat
SET object_count = object_count - (1 - old.deleted),
bytes_used = bytes_used - old.size,
hash = chexor(hash, old.name, old.created_at);
END;
""")
def prespi_create_container_info_table(self, conn, put_timestamp,
_spi=None):
"""
Copied from ContainerBroker before the
storage_policy_index column was added; used for testing with
TestContainerBrokerBeforeSPI.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
conn.executescript("""
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT '',
x_container_sync_point1 INTEGER DEFAULT -1,
x_container_sync_point2 INTEGER DEFAULT -1
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
""")
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, Timestamp(time()).internal,
str(uuid4()), put_timestamp))
class TestContainerBrokerBeforeSPI(ContainerBrokerMigrationMixin,
TestContainerBroker):
"""
Tests for ContainerBroker against databases created
before the storage_policy_index column was added.
"""
def setUp(self):
super(TestContainerBrokerBeforeSPI, self).setUp()
ContainerBroker.create_container_info_table = \
prespi_create_container_info_table
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
exc = None
with broker.get() as conn:
try:
conn.execute('''SELECT storage_policy_index
FROM container_stat''')
except BaseException as err:
exc = err
self.assertTrue('no such column: storage_policy_index' in str(exc))
def tearDown(self):
super(TestContainerBrokerBeforeSPI, self).tearDown()
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
conn.execute('SELECT storage_policy_index FROM container_stat')
@patch_policies
@with_tempdir
def test_object_table_migration(self, tempdir):
db_path = os.path.join(tempdir, 'container.db')
# initialize an un-migrated database
broker = ContainerBroker(db_path, account='a', container='c')
put_timestamp = Timestamp(int(time())).internal
broker.initialize(put_timestamp, None)
with broker.get() as conn:
try:
conn.execute('''
SELECT storage_policy_index FROM object
''').fetchone()[0]
except sqlite3.OperationalError as err:
# confirm that the table doesn't have this column
self.assertTrue('no such column: storage_policy_index' in
str(err))
else:
                self.fail('broker did not raise sqlite3.OperationalError '
                          'trying to select storage_policy_index '
                          'from the object table!')
# manually insert an existing row to avoid automatic migration
obj_put_timestamp = Timestamp(time()).internal
with broker.get() as conn:
conn.execute('''
INSERT INTO object (name, created_at, size,
content_type, etag, deleted)
VALUES (?, ?, ?, ?, ?, ?)
''', ('test_name', obj_put_timestamp, 123,
'text/plain', '8f4c680e75ca4c81dc1917ddab0a0b5c', 0))
conn.commit()
# make sure we can iter objects without performing migration
for o in broker.list_objects_iter(1, None, None, None, None):
self.assertEqual(o, ('test_name', obj_put_timestamp, 123,
'text/plain',
'8f4c680e75ca4c81dc1917ddab0a0b5c'))
# get_info
info = broker.get_info()
expected = {
'account': 'a',
'container': 'c',
'put_timestamp': put_timestamp,
'delete_timestamp': '0',
'status_changed_at': '0',
'bytes_used': 123,
'object_count': 1,
'reported_put_timestamp': '0',
'reported_delete_timestamp': '0',
'reported_object_count': 0,
'reported_bytes_used': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1,
'storage_policy_index': 0,
}
for k, v in expected.items():
self.assertEqual(info[k], v,
'The value for %s was %r not %r' % (
k, info[k], v))
self.assertTrue(
Timestamp(info['created_at']) > Timestamp(put_timestamp))
self.assertNotEqual(int(info['hash'], 16), 0)
orig_hash = info['hash']
# get_replication_info
info = broker.get_replication_info()
# translate object count for replicators
expected['count'] = expected.pop('object_count')
for k, v in expected.items():
self.assertEqual(info[k], v)
self.assertTrue(
Timestamp(info['created_at']) > Timestamp(put_timestamp))
self.assertEqual(info['hash'], orig_hash)
self.assertEqual(info['max_row'], 1)
self.assertEqual(info['metadata'], '')
# get_policy_stats
info = broker.get_policy_stats()
expected = {
0: {'bytes_used': 123, 'object_count': 1}
}
self.assertEqual(info, expected)
# empty & is_deleted
self.assertEqual(broker.empty(), False)
self.assertEqual(broker.is_deleted(), False)
# no migrations have occurred yet
# container_stat table
with broker.get() as conn:
try:
conn.execute('''
SELECT storage_policy_index FROM container_stat
''').fetchone()[0]
except sqlite3.OperationalError as err:
# confirm that the table doesn't have this column
self.assertTrue('no such column: storage_policy_index' in
str(err))
else:
self.fail('broker did not raise sqlite3.OperationalError '
'trying to select from storage_policy_index '
'from container_stat table!')
# object table
with broker.get() as conn:
try:
conn.execute('''
SELECT storage_policy_index FROM object
''').fetchone()[0]
except sqlite3.OperationalError as err:
# confirm that the table doesn't have this column
self.assertTrue('no such column: storage_policy_index' in
str(err))
else:
self.fail('broker did not raise sqlite3.OperationalError '
'trying to select from storage_policy_index '
'from object table!')
# policy_stat table
with broker.get() as conn:
try:
conn.execute('''
SELECT storage_policy_index FROM policy_stat
''').fetchone()[0]
except sqlite3.OperationalError as err:
# confirm that the table does not exist yet
self.assertTrue('no such table: policy_stat' in str(err))
else:
self.fail('broker did not raise sqlite3.OperationalError '
'trying to select from storage_policy_index '
'from policy_stat table!')
# now do a PUT with a different value for storage_policy_index
# which will update the DB schema as well as update policy_stats
# for legacy objects in the DB (those without an SPI)
second_object_put_timestamp = Timestamp(time()).internal
other_policy = [p for p in POLICIES if p.idx != 0][0]
broker.put_object('test_second', second_object_put_timestamp,
456, 'text/plain',
'cbac50c175793513fa3c581551c876ab',
storage_policy_index=other_policy.idx)
broker._commit_puts_stale_ok()
# we are fully migrated and both objects have their
# storage_policy_index
with broker.get() as conn:
storage_policy_index = conn.execute('''
SELECT storage_policy_index FROM container_stat
''').fetchone()[0]
self.assertEqual(storage_policy_index, 0)
rows = conn.execute('''
SELECT name, storage_policy_index FROM object
''').fetchall()
for row in rows:
if row[0] == 'test_name':
self.assertEqual(row[1], 0)
else:
self.assertEqual(row[1], other_policy.idx)
# and all stats tracking is in place
stats = broker.get_policy_stats()
self.assertEqual(len(stats), 2)
self.assertEqual(stats[0]['object_count'], 1)
self.assertEqual(stats[0]['bytes_used'], 123)
self.assertEqual(stats[other_policy.idx]['object_count'], 1)
self.assertEqual(stats[other_policy.idx]['bytes_used'], 456)
# get info still reports on the legacy storage policy
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 123)
# unless you change the storage policy
broker.set_storage_policy_index(other_policy.idx)
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 456)
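# --- Illustrative sketch (not part of the original test module) ---
# The tests above repeatedly probe for a column and branch on
# sqlite3.OperationalError to decide whether the schema has been migrated.
# A minimal, self-contained version of that pattern on a throwaway
# in-memory table (table/column names here are only for illustration):
import sqlite3

def _has_column(conn, table, column):
    """Return True if `table` has `column`, detected via a probe SELECT."""
    try:
        conn.execute('SELECT %s FROM %s LIMIT 1' % (column, table))
    except sqlite3.OperationalError as err:
        if 'no such column' in str(err) or 'no such table' in str(err):
            return False
        raise
    return True

_demo_conn = sqlite3.connect(':memory:')
_demo_conn.execute('CREATE TABLE object (name TEXT)')
assert _has_column(_demo_conn, 'object', 'name')
assert not _has_column(_demo_conn, 'object', 'storage_policy_index')
_demo_conn.close()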
| 45.081839
| 79
| 0.565021
|
283f0601687abbce36dfd840691216f850c7c99e
| 8,923
|
py
|
Python
|
bcs-ui/backend/templatesets/var_mgmt/serializers.py
|
masanqi/bk-bcs
|
70d97b674fbd5beacde21d6ca8be914d7eb56865
|
[
"Apache-2.0"
] | 599
|
2019-06-25T03:20:46.000Z
|
2022-03-31T12:14:33.000Z
|
bcs-ui/backend/templatesets/var_mgmt/serializers.py
|
masanqi/bk-bcs
|
70d97b674fbd5beacde21d6ca8be914d7eb56865
|
[
"Apache-2.0"
] | 537
|
2019-06-27T06:03:44.000Z
|
2022-03-31T12:10:01.000Z
|
bcs-ui/backend/templatesets/var_mgmt/serializers.py
|
masanqi/bk-bcs
|
70d97b674fbd5beacde21d6ca8be914d7eb56865
|
[
"Apache-2.0"
] | 214
|
2019-06-25T03:26:05.000Z
|
2022-03-31T07:52:03.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
import re
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from backend.container_service.clusters.base.utils import get_clusters
from backend.resources.namespace.utils import get_namespaces
from backend.templatesets.legacy_apps.configuration.constants import VARIABLE_PATTERN
from ..legacy_apps.instance.serializers import InstanceNamespaceSLZ
from .constants import VariableCategory, VariableScope
from .models import Variable
from .utils import get_variable_quote_num
logger = logging.getLogger(__name__)
RE_KEY = re.compile(r'^%s{0,63}$' % VARIABLE_PATTERN)
SYS_KEYS = [
'SYS_BCS_ZK',
'SYS_CC_ZK',
'SYS_BCSGROUP',
'SYS_TEMPLATE_ID',
'SYS_VERSION_ID',
'SYS_VERSION',
'SYS_INSTANCE_ID',
'SYS_CREATOR',
'SYS_UPDATOR',
'SYS_OPERATOR',
'SYS_CREATE_TIME',
'SYS_UPDATE_TIME',
]
class SearchVariableSLZ(serializers.Serializer):
type = serializers.CharField(default='with_quote_num')
scope = serializers.CharField(default='')
search_key = serializers.CharField(default='')
limit = serializers.IntegerField(default=10)
offset = serializers.IntegerField(default=0)
class ListVariableSLZ(serializers.ModelSerializer):
default = serializers.DictField(source='get_default_data')
quote_num = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
category_name = serializers.SerializerMethodField()
scope_name = serializers.SerializerMethodField()
class Meta:
model = Variable
fields = (
'id',
'name',
'key',
'default',
'default_value',
'desc',
'category',
'category_name',
'scope',
'scope_name',
'quote_num',
'creator',
'created',
'updated',
'updator',
)
def get_quote_num(self, obj):
search_type = self.context['search_type']
if search_type == 'base':
return 0
return get_variable_quote_num(obj.key, self.context['project_id'])
def get_name(self, obj):
return _(obj.name)
def get_category_name(self, obj):
return _(obj.get_category_display())
def get_scope_name(self, obj):
return _(obj.get_scope_display())
class VariableSLZ(serializers.ModelSerializer):
scope = serializers.ChoiceField(choices=VariableScope.get_choices(), required=True)
name = serializers.CharField(max_length=256, required=True)
key = serializers.RegexField(
RE_KEY, max_length=64, required=True, error_messages={'invalid': _("KEY 只能包含字母、数字和下划线,且以字母开头,最大长度为64个字符")}
)
default = serializers.JSONField(required=False)
desc = serializers.CharField(max_length=256, required=False, allow_blank=True)
project_id = serializers.CharField(max_length=64, required=True)
class Meta:
model = Variable
fields = ('id', 'name', 'key', 'default', 'desc', 'category', 'scope', 'project_id')
# TODO add validate_project_id
def validate_default(self, default):
if not isinstance(default, dict):
raise ValidationError(_("default字段非字典类型"))
if 'value' not in default:
raise ValidationError(_("default字段没有以value作为键值"))
return default
def validate_key(self, key):
if key in SYS_KEYS:
raise ValidationError('KEY[{}]{}'.format(key, _("为系统变量名,不允许添加")))
return key
def to_representation(self, instance):
instance.default = instance.get_default_data()
return super().to_representation(instance)
class CreateVariableSLZ(VariableSLZ):
def create(self, validated_data):
exists = Variable.objects.filter(key=validated_data['key'], project_id=validated_data['project_id']).exists()
if exists:
detail = {'field': ['{}KEY{}{}'.format(_("变量"), validated_data['key'], _("已经存在"))]}
raise ValidationError(detail=detail)
variable = Variable.objects.create(**validated_data)
return variable
class UpdateVariableSLZ(VariableSLZ):
def update(self, instance, validated_data):
if instance.category == VariableCategory.SYSTEM.value:
raise ValidationError(_("系统内置变量不允许操作"))
old_key = instance.key
new_key = validated_data.get('key')
if new_key != old_key:
if get_variable_quote_num(old_key, validated_data.get('project_id')) > 0:
raise ValidationError('KEY{}{}'.format(old_key, _("已经被引用,不能修改KEY")))
if Variable.objects.filter(key=new_key, project_id=validated_data['project_id']).exists():
detail = {'field': ['{}KEY{}{}'.format(_("变量"), validated_data['key'], _("已经存在"))]}
raise ValidationError(detail=detail)
instance.key = new_key
instance.scope = validated_data.get('scope')
instance.name = validated_data.get('name')
instance.default = validated_data.get('default')
instance.desc = validated_data.get('desc')
instance.updator = validated_data.get('updator')
instance.save()
return instance
class SearchVariableWithNamespaceSLZ(InstanceNamespaceSLZ):
namespaces = serializers.CharField(required=True)
def validate(self, data):
return data
def to_internal_value(self, data):
data = super().to_internal_value(data)
data['namespaces'] = data['namespaces'].split(',')
return data
class VariableDeleteSLZ(serializers.Serializer):
id_list = serializers.JSONField(required=True)
class ClusterVariableSLZ(serializers.Serializer):
cluster_vars = serializers.JSONField(required=True)
class NsVariableSLZ(serializers.Serializer):
ns_vars = serializers.JSONField(required=True)
class VariableItemSLZ(serializers.Serializer):
name = serializers.CharField(max_length=256, required=True)
key = serializers.RegexField(
RE_KEY, max_length=64, required=True, error_messages={'invalid': _("KEY 只能包含字母、数字和下划线,且以字母开头,最大长度为64个字符")}
)
value = serializers.CharField(required=True)
desc = serializers.CharField(default='')
scope = serializers.ChoiceField(choices=VariableScope.get_choices(), required=True)
vars = serializers.ListField(child=serializers.JSONField(), required=False)
class ImportVariableSLZ(serializers.Serializer):
variables = serializers.ListField(child=VariableItemSLZ(), min_length=1)
@cached_property
def clusters(self):
data = get_clusters(self.context['access_token'], self.context['project_id'])
if data:
return [c["cluster_id"] for c in data]
return []
@cached_property
def namespaces(self):
data = get_namespaces(self.context['access_token'], self.context['project_id'])
return {f"{n['cluster_id']}/{n['name']}": n['id'] for n in data}
def _validate_cluster_var(self, var):
for c_var in var['vars']:
cluster_id = c_var.get('cluster_id')
if cluster_id not in self.clusters:
raise ValidationError(_("集群变量中, 集群ID({})不存在").format(cluster_id))
if 'value' not in c_var:
raise ValidationError(_("集群变量中, 集群ID({})的value未设置").format(cluster_id))
def _validate_ns_var(self, var):
for n_var in var['vars']:
namespace = f"{n_var.get('cluster_id')}/{n_var.get('namespace')}"
ns_id = self.namespaces.get(namespace)
if not ns_id:
raise ValidationError(_("命名空间变量中, 命名空间({})不存在").format(namespace))
if 'value' not in n_var:
raise ValidationError(_("命名空间变量中, 命名空间({})的value未设置").format(namespace))
n_var['ns_id'] = ns_id
def validate(self, data):
for var in data['variables']:
if var['scope'] == VariableScope.CLUSTER.value:
self._validate_cluster_var(var)
if var['scope'] == VariableScope.NAMESPACE.value:
self._validate_ns_var(var)
return data
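# --- Illustrative sketch (hypothetical, not part of the original module) ---
# Rough shape of how a single variable payload flows through the item
# serializer above. The values are placeholders and this assumes a fully
# configured Django/DRF environment, so it is left as a comment:
#
#     item = VariableItemSLZ(data={
#         'name': 'image tag',                         # display name (made up)
#         'key': 'IMAGE_TAG',                          # must match RE_KEY
#         'value': 'v1.0.0',
#         'scope': VariableScope.get_choices()[0][0],  # any declared scope choice
#     })
#     if item.is_valid():
#         ...  # use item.validated_data
#     else:
#         ...  # inspect item.errors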
| 35.835341
| 117
| 0.674549
|
c9e13f55e7aebb97f9dd5efbae37478aed79a8de
| 1,466
|
py
|
Python
|
client/py_client/modules/funds/models/get_content_list.py
|
thefstock/FirstockPy
|
09b4dcf3470f83de991b43213958d2c6783f997b
|
[
"MIT"
] | 1
|
2022-03-29T06:56:06.000Z
|
2022-03-29T06:56:06.000Z
|
client/py_client/modules/funds/models/get_content_list.py
|
thefstock/FirstockPy
|
09b4dcf3470f83de991b43213958d2c6783f997b
|
[
"MIT"
] | 3
|
2022-01-17T09:31:21.000Z
|
2022-03-11T12:12:08.000Z
|
client/py_client/modules/funds/models/get_content_list.py
|
thefstock/FirstockPy
|
09b4dcf3470f83de991b43213958d2c6783f997b
|
[
"MIT"
] | null | null | null |
"""
Request and response models for get content list request
"""
from typing import Optional
from pydantic import BaseModel
from datetime import datetime
from ....common.enums import ResponseStatus
from ....utils.decoders import build_loader, datetime_decoder
__all__ = ['GetContentListRequestModel', 'GetContentListResponseModel']
class GetContentListRequestModel(BaseModel):
"""
The request model for get content list endpoint
"""
uid: str
"""The user id of the login user"""
exch: str
"""Exchange Name"""
condition_name: str
"""Condition list"""
basket: Optional[str]
"""Basket Name"""
class GetContentListResponseModel(BaseModel):
"""
The response model for get content list endpoint
"""
stat: ResponseStatus
"""The get content list success or failure status"""
request_time: Optional[datetime]
"""It will be present only on successful response."""
tsym: Optional[str]
"""Trading symbol"""
lp: Optional[float]
"""LTP"""
c: Optional[float]
"""Close price"""
h: Optional[float]
"""High price"""
l: Optional[float]
"""Low price"""
ap: Optional[float]
"""Average trade price"""
v: Optional[float]
"""Volume"""
ltt: Optional[str]
"""Last trade time"""
pc: Optional[float]
"""Percentage change """
emsg: Optional[str]
"""Error message if the request failed"""
class Config:
"""model configuration"""
json_loads = build_loader({
"request_time": datetime_decoder()
})
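# --- Illustrative sketch (not part of the original module) ---
# Constructing a request model with placeholder values; `basket` is optional
# and defaults to None, and .json() serialises the model to a JSON string
# (pydantic v1 API). The identifiers below are made-up demo values.
_example_request = GetContentListRequestModel(
    uid='DEMO_UID',               # placeholder user id
    exch='NSE',                   # placeholder exchange name
    condition_name='TOP_GAINERS'  # placeholder condition list name
)
_example_payload = _example_request.json()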
| 24.032787
| 71
| 0.689632
|
0fd7faf4acd714a0457a6d7eed52cf0da65a27ee
| 7,092
|
py
|
Python
|
designate_tempest_plugin/tests/api/v2/test_tld.py
|
mail2nsrajesh/designate-tempest-plugin
|
0e09e0f2deb80f88bc7c929478aa7e1dd1b72296
|
[
"Apache-2.0"
] | null | null | null |
designate_tempest_plugin/tests/api/v2/test_tld.py
|
mail2nsrajesh/designate-tempest-plugin
|
0e09e0f2deb80f88bc7c929478aa7e1dd1b72296
|
[
"Apache-2.0"
] | null | null | null |
designate_tempest_plugin/tests/api/v2/test_tld.py
|
mail2nsrajesh/designate-tempest-plugin
|
0e09e0f2deb80f88bc7c929478aa7e1dd1b72296
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest.lib.common.utils import data_utils
from designate_tempest_plugin.tests import base
LOG = logging.getLogger(__name__)
class BaseTldTest(base.BaseDnsV2Test):
excluded_keys = ['created_at', 'updated_at', 'links']
class TldAdminTest(BaseTldTest):
credentials = ['admin']
@classmethod
def setup_clients(cls):
super(TldAdminTest, cls).setup_clients()
cls.admin_client = cls.os_adm.tld_client
@classmethod
def resource_setup(cls):
super(TldAdminTest, cls).resource_setup()
cls.tld = cls.admin_client.create_tld(tld_name='com',
ignore_errors=lib_exc.Conflict)
@decorators.idempotent_id('52a4bb4b-4eff-4591-9dd3-ad98316806c3')
def test_create_tld(self):
tld_data = {
"name": "org",
"description": "sample tld"}
LOG.info('Create a tld')
_, tld = self.admin_client.create_tld(tld_data['name'],
tld_data['description'])
self.addCleanup(self.admin_client.delete_tld, tld['id'])
self.assertEqual(tld_data["name"], tld['name'])
@decorators.idempotent_id('271af08c-2603-4f61-8eb1-05887b74e25a')
def test_show_tld(self):
tld_data = {
"name": "org",
"description": "sample tld"}
LOG.info('Create a tld')
_, tld = self.admin_client.create_tld(tld_data['name'],
tld_data['description'])
self.addCleanup(self.admin_client.delete_tld, tld['id'])
LOG.info('Fetch the tld')
_, body = self.admin_client.show_tld(tld['id'])
LOG.info('Ensure the fetched response matches the created tld')
self.assertExpected(tld, body, self.excluded_keys)
@decorators.idempotent_id('26708cb8-7126-48a7-9424-1c225e56e609')
def test_delete_tld(self):
LOG.info('Create a tld')
_, tld = self.admin_client.create_tld()
self.addCleanup(self.admin_client.delete_tld, tld['id'],
ignore_errors=lib_exc.NotFound)
LOG.info('Delete the tld')
_, body = self.admin_client.delete_tld(tld['id'])
self.assertRaises(lib_exc.NotFound,
lambda: self.admin_client.show_tld(tld['id']))
@decorators.idempotent_id('95b13759-c85c-4791-829b-9591ca15779d')
def test_list_tlds(self):
LOG.info('List tlds')
_, body = self.admin_client.list_tlds()
self.assertGreater(len(body['tlds']), 0)
@decorators.idempotent_id('1a233812-48d9-4d15-af5e-9961744286ff')
def test_update_tld(self):
_, tld = self.admin_client.create_tld()
self.addCleanup(self.admin_client.delete_tld, tld['id'])
tld_data = {
"name": "org",
"description": "Updated description"
}
LOG.info('Update the tld')
_, patch_tld = self.admin_client.update_tld(tld['id'],
tld_data['name'], tld_data['description'])
self.assertEqual(tld_data["name"], patch_tld["name"])
self.assertEqual(tld_data["description"], patch_tld["description"])
@decorators.idempotent_id('8116dcf5-a329-47d1-90be-5ff32f299c53')
def test_list_tlds_dot_json_fails(self):
uri = self.admin_client.get_uri('tlds.json')
self.assertRaises(lib_exc.NotFound,
lambda: self.admin_client.get(uri))
class TestTldNotFoundAdmin(BaseTldTest):
credentials = ["admin"]
@classmethod
def setup_clients(cls):
super(TestTldNotFoundAdmin, cls).setup_clients()
cls.admin_client = cls.os_adm.tld_client
@decorators.idempotent_id('b237d5ee-0d76-4294-a3b6-c2f8bf4b0e30')
def test_show_tld_404(self):
e = self.assertRaises(lib_exc.NotFound,
self.admin_client.show_tld,
data_utils.rand_uuid())
self.assertTld404(e.resp, e.resp_body)
@decorators.idempotent_id('3d128772-7f52-4473-b569-51ae8294667b')
def test_update_tld_404(self):
e = self.assertRaises(lib_exc.NotFound,
self.admin_client.update_tld,
data_utils.rand_uuid())
self.assertTld404(e.resp, e.resp_body)
@decorators.idempotent_id('18e465e7-5c7d-4775-acef-bd12a8db1095')
def test_delete_tld_404(self):
e = self.assertRaises(lib_exc.NotFound,
self.admin_client.delete_tld,
data_utils.rand_uuid())
self.assertTld404(e.resp, e.resp_body)
def assertTld404(self, resp, resp_body):
self.assertEqual(404, resp.status)
self.assertEqual(404, resp_body['code'])
self.assertEqual("tld_not_found", resp_body['type'])
self.assertEqual("Could not find Tld", resp_body['message'])
class TestTldInvalidIdAdmin(BaseTldTest):
credentials = ["admin"]
@classmethod
def setup_clients(cls):
super(TestTldInvalidIdAdmin, cls).setup_clients()
cls.admin_client = cls.os_adm.tld_client
@decorators.idempotent_id('f9ec0730-57ff-4720-8d06-e11d377c7cfc')
def test_show_tld_invalid_uuid(self):
e = self.assertRaises(lib_exc.BadRequest,
self.admin_client.show_tld,
'foo')
self.assertTldInvalidId(e.resp, e.resp_body)
@decorators.idempotent_id('13dc6518-b479-4502-90f5-f5a5ecc8b1fb')
def test_update_tld_invalid_uuid(self):
e = self.assertRaises(lib_exc.BadRequest,
self.admin_client.update_tld,
'foo')
self.assertTldInvalidId(e.resp, e.resp_body)
@decorators.idempotent_id('6a6fc9db-9a73-4ffc-831a-172e1cbc7394')
def test_delete_tld_invalid_uuid(self):
e = self.assertRaises(lib_exc.BadRequest,
self.admin_client.delete_tld,
'foo')
self.assertTldInvalidId(e.resp, e.resp_body)
def assertTldInvalidId(self, resp, resp_body):
self.assertEqual(400, resp.status)
self.assertEqual(400, resp_body['code'])
self.assertEqual("invalid_uuid", resp_body['type'])
self.assertEqual("Invalid UUID tld_id: foo",
resp_body['message'])
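# --- Illustrative sketch (not part of the original test module) ---
# Shape of the error documents the assert helpers above check against,
# shown as plain dicts (values mirror the assertions in this file):
_EXAMPLE_INVALID_UUID_BODY = {
    'code': 400,
    'type': 'invalid_uuid',
    'message': 'Invalid UUID tld_id: foo',
}
_EXAMPLE_NOT_FOUND_BODY = {
    'code': 404,
    'type': 'tld_not_found',
    'message': 'Could not find Tld',
}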
| 37.13089
| 78
| 0.635646
|
e61cd50038b7a2e0e8de6e219f203a5126911c70
| 5,492
|
py
|
Python
|
bot/cogs/site.py
|
crazygmr101/bot
|
93be87cea7cde7333042e2bb9529867723f567a7
|
[
"MIT"
] | null | null | null |
bot/cogs/site.py
|
crazygmr101/bot
|
93be87cea7cde7333042e2bb9529867723f567a7
|
[
"MIT"
] | null | null | null |
bot/cogs/site.py
|
crazygmr101/bot
|
93be87cea7cde7333042e2bb9529867723f567a7
|
[
"MIT"
] | null | null | null |
import logging
from discord import Colour, Embed
from discord.ext.commands import Cog, Context, group
from bot.bot import Bot
from bot.constants import URLs
from bot.pagination import LinePaginator
log = logging.getLogger(__name__)
PAGES_URL = f"{URLs.site_schema}{URLs.site}/pages"
class Site(Cog):
"""Commands for linking to different parts of the site."""
def __init__(self, bot: Bot):
self.bot = bot
@group(name="site", aliases=("s",), invoke_without_command=True)
async def site_group(self, ctx: Context) -> None:
"""Commands for getting info about our website."""
await ctx.send_help(ctx.command)
@site_group.command(name="home", aliases=("about",))
async def site_main(self, ctx: Context) -> None:
"""Info about the website itself."""
url = f"{URLs.site_schema}{URLs.site}/"
embed = Embed(title="Python Discord website")
embed.set_footer(text=url)
embed.colour = Colour.blurple()
embed.description = (
f"[Our official website]({url}) is an open-source community project "
"created with Python and Flask. It contains information about the server "
"itself, lets you sign up for upcoming events, has its own wiki, contains "
"a list of valuable learning resources, and much more."
)
await ctx.send(embed=embed)
@site_group.command(name="resources")
async def site_resources(self, ctx: Context) -> None:
"""Info about the site's Resources page."""
learning_url = f"{PAGES_URL}/resources"
embed = Embed(title="Resources")
embed.set_footer(text=f"{learning_url}")
embed.colour = Colour.blurple()
embed.description = (
f"The [Resources page]({learning_url}) on our website contains a "
"list of hand-selected learning resources that we regularly recommend "
f"to both beginners and experts."
)
await ctx.send(embed=embed)
@site_group.command(name="tools")
async def site_tools(self, ctx: Context) -> None:
"""Info about the site's Tools page."""
tools_url = f"{PAGES_URL}/resources/tools"
embed = Embed(title="Tools")
embed.set_footer(text=f"{tools_url}")
embed.colour = Colour.blurple()
embed.description = (
f"The [Tools page]({tools_url}) on our website contains a "
f"couple of the most popular tools for programming in Python."
)
await ctx.send(embed=embed)
@site_group.command(name="help")
async def site_help(self, ctx: Context) -> None:
"""Info about the site's Getting Help page."""
url = f"{PAGES_URL}/resources/guides/asking-good-questions"
embed = Embed(title="Asking Good Questions")
embed.set_footer(text=url)
embed.colour = Colour.blurple()
embed.description = (
"Asking the right question about something that's new to you can sometimes be tricky. "
f"To help with this, we've created a [guide to asking good questions]({url}) on our website. "
"It contains everything you need to get the very best help from our community."
)
await ctx.send(embed=embed)
@site_group.command(name="faq")
async def site_faq(self, ctx: Context) -> None:
"""Info about the site's FAQ page."""
url = f"{PAGES_URL}/frequently-asked-questions"
embed = Embed(title="FAQ")
embed.set_footer(text=url)
embed.colour = Colour.blurple()
embed.description = (
"As the largest Python community on Discord, we get hundreds of questions every day. "
"Many of these questions have been asked before. We've compiled a list of the most "
"frequently asked questions along with their answers, which can be found on "
f"our [FAQ page]({url})."
)
await ctx.send(embed=embed)
@site_group.command(aliases=['r', 'rule'], name='rules')
async def site_rules(self, ctx: Context, *rules: int) -> None:
"""Provides a link to all rules or, if specified, displays specific rule(s)."""
rules_embed = Embed(title='Rules', color=Colour.blurple())
rules_embed.url = f"{PAGES_URL}/rules"
if not rules:
# Rules were not submitted. Return the default description.
rules_embed.description = (
"The rules and guidelines that apply to this community can be found on"
f" our [rules page]({PAGES_URL}/rules). We expect"
" all members of the community to have read and understood these."
)
await ctx.send(embed=rules_embed)
return
full_rules = await self.bot.api_client.get('rules', params={'link_format': 'md'})
invalid_indices = tuple(
pick
for pick in rules
if pick < 1 or pick > len(full_rules)
)
if invalid_indices:
indices = ', '.join(map(str, invalid_indices))
await ctx.send(f":x: Invalid rule indices: {indices}")
return
for rule in rules:
self.bot.stats.incr(f"rule_uses.{rule}")
final_rules = tuple(f"**{pick}.** {full_rules[pick - 1]}" for pick in rules)
await LinePaginator.paginate(final_rules, ctx, rules_embed, max_lines=3)
def setup(bot: Bot) -> None:
"""Load the Site cog."""
bot.add_cog(Site(bot))
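# --- Illustrative sketch (not part of the original cog) ---
# The rule-index validation in site_rules() boils down to this check:
# rule numbers are 1-based, so with three rules, 0 and 4 are rejected
# while 1-3 are accepted (dummy data, for illustration only).
_demo_rules = ["rule one", "rule two", "rule three"]
_demo_requested = (1, 4, 0)
_demo_invalid = tuple(p for p in _demo_requested if p < 1 or p > len(_demo_rules))
assert _demo_invalid == (4, 0)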
| 37.360544
| 106
| 0.619993
|
2b3713cab68dd487a6763f2553e1565f6b8346ee
| 14,862
|
py
|
Python
|
tracmass_scripts/export_tracmass_to_nc.py
|
oj-tooth/lt_toolbox
|
e6521fc0c243d1ff84599c418ae43271c6bd746c
|
[
"MIT"
] | 5
|
2021-02-03T09:42:47.000Z
|
2021-02-05T02:15:35.000Z
|
tracmass_scripts/export_tracmass_to_nc.py
|
oj-tooth/lt_toolbox
|
e6521fc0c243d1ff84599c418ae43271c6bd746c
|
[
"MIT"
] | 2
|
2020-12-15T12:20:30.000Z
|
2020-12-22T14:14:32.000Z
|
tracmass_scripts/export_tracmass_to_nc.py
|
oj-tooth/lt_toolbox
|
e6521fc0c243d1ff84599c418ae43271c6bd746c
|
[
"MIT"
] | null | null | null |
################################################################
# export_tracmass_to_nc.py
# --------------------------------------------------------------
# Description: Script to transform TRACMASS model
# output to standard NCEI_NetCDF_Trajectory format.
#
# User Input: Locations where user modification is required
# are indicated beneath NOTE statements in the script.
#
# --------------------------------------------------------------
# Date Created: 2021-01-03
#
# Author: Ollie Tooth
###############################################################
# Import packages.
import os
import numpy as np
import pandas as pd
import xarray as xr
from scipy import interpolate
from tqdm import tqdm
from export_utils import add_seed
# ---------------------------------------------------------------------------
# Stage 1:
# Opening the raw ouput files from TRACMASS.
# NOTE: change directory path to TRACMASS output data as required.
# os.chdir('OUTPUT_DIR_PATH')
os.chdir('/Users/ollietooth/Desktop/D.Phil./Tracmass/projects/NEMO/data/output/')
# Read Tracmass output_run.csv output file to pandas DataFrame
# with specified headers.
# NOTE: change the raw data file name and variable names to
# correspond with your run as required. 'FILENAME_run.csv'
df_run = pd.read_csv('ORCA1_output_run.csv',
names=[
'ntrac', # Trajectory no.
'x', # Position in zonal direction.
'y', # Position in meridional direction.
'z', # Position in the vertical direction.
'subvol', # Transport of particle.
'time_s', # Time since start of simulation (s).
'To_C', # Temperature (read - degrees C) .
'S_psu', # Salinity (read - g per kg).
'sigma0_kgm-3' # Density (computed - kg per m3).
])
# Read Tracmass output_out.csv output file to pandas DataFrame
# with specified headers.
# NOTE: change the raw data file name and variable names to
# correspond with your run as required. 'FILENAME_out.csv'
df_out = pd.read_csv('ORCA1_output_out.csv',
names=[
'ntrac', # Trajectory no.
'x', # Position in zonal direction.
'y', # Position in meridional direction.
'z', # Position in the vertical direction.
'subvol', # Transport of particle.
'time_s', # Time since start of simulation (s).
'To_C', # Temperature (read - degrees C) .
'S_psu', # Salinity (read - g per kg).
'sigma0_kgm-3' # Density (computed - kg per m3).
])
# Concatenate pandas DataFrames, df_run and df_out.
# Since indexes overlap, ignore the index in the concatenation.
df = pd.concat([df_run, df_out], ignore_index=True, sort=False)
# drop_duplicates() returns a new DataFrame, so reassign to keep the result.
df = df.drop_duplicates()
# Update user at command line.
print("Completed: Loading and Concantenating DataFrames.")
# ---------------------------------------------------------------------------
# Stage 2:
# Defining time and obs variables.
# Create new time column where time_s is stored in timedelta64 format.
# timedelta64 has units of nanoseconds.
df['time'] = pd.to_timedelta(df['time_s'], unit='s')
# NOTE: specify TRACMASS output time step for your simulation -
# modify unit to min/hours/days as required.
t_step = pd.Timedelta(30, unit='D').total_seconds()
# Create obs variable to store the observation no., equivalent
# to the time-level of output in the model.
df['obs'] = np.ceil((df['time_s']/t_step))
# Ensure the obs variable is of int64 type.
df = df.astype({'obs': 'int64'})
# Update user at command line.
print("Completed: Added time variable.")
# ---------------------------------------------------------------------------
# Stage 3:
# Transform output variables to numpy arrays with dimensions traj x obs.
# Transform particle positions into (traj x obs) pandas DataFrames.
X = df.pivot(index='ntrac', columns='obs', values='x')
Y = df.pivot(index='ntrac', columns='obs', values='y')
Z = df.pivot(index='ntrac', columns='obs', values='z')
# Transform subvol into (traj x obs) pandas DataFrames.
Volume = df.pivot(index='ntrac', columns='obs', values='subvol')
# Transform tracers into (traj x obs) pandas DataFrames.
# NOTE: modify the number of tracers as required.
Temp = df.pivot(index='ntrac', columns='obs', values='To_C')
Sal = df.pivot(index='ntrac', columns='obs', values='S_psu')
Sigma0 = df.pivot(index='ntrac', columns='obs', values='sigma0_kgm-3')
# Transform time and ntrac into (traj x obs) pandas DataFrames.
Time = df.pivot(index='ntrac', columns='obs', values='time')
Traj = df.pivot(index='ntrac', columns='obs', values='ntrac')
# Update user at command line.
print("Completed: Pivoted output variables into (traj x obs) DataFrames.")
# ---------------------------------------------------------------------------
# Stage 4:
# Converting all of our pandas DataFrames to np arrays.
# Position arrays.
# The suffix _index is included to differentiate the position
# arrays of grid indexes from those of latitude, longitude and depth.
x_index = X.to_numpy()
y_index = Y.to_numpy()
z_index = Z.to_numpy()
# Transport arrays.
vol = Volume.to_numpy()
# Tracer arrays.
# NOTE: modify the number of tracers as required.
temp = Temp.to_numpy()
sal = Sal.to_numpy()
sigma0 = Sigma0.to_numpy()
# Time/ID arrays.
time = Time.to_numpy()
trajectory = Traj.to_numpy()
# Update user at command line.
print("Completed: Converted DataFrames to arrays.")
# ---------------------------------------------------------------------------
# Stage 5:
# Interpolating depth using deptht field from NEMO input data and z_index.
# Move to fields input data directory.
# NOTE: change directory path to lat/lon/depth files as required.
# os.chdir('FIELD_DIR_PATH')
os.chdir('/Users/ollietooth/Desktop/D.Phil./Tracmass/projects/NEMO/data/fields/')
# Set field file name containing nav_lat/nav_lon/depth data.
# NOTE: change field file name as required.
field_file = "ORCA1-N406_2000T.nc4" # 'FIELD_FILE.nc'
# Import deptht/u/v variable from input fields to TRACMASS.
# NOTE: change the depth variable as required - deptht/u/v.
depth = xr.open_dataset(field_file).deptht
# Inserting a value for the sea surface (0 m) for use with
# interpolation indexes < 1.
depth = np.insert(depth, 0, 0)
# Storing the index values for depth.
index = np.arange(0, len(depth))
# Utilise Scipy interp1d for linear interpolation function of depth.
f_depth = interpolate.interp1d(index, depth)
# Store the dimension sizes of our output matrix, equal to those of z_index.
nrows, ncols = np.shape(z_index)
# Configuring the size of our empty array for z, the particle depths.
z = np.zeros([nrows, ncols])
# For loop to interpolate the particle depth from f_depth using z_index.
for i in np.arange(0, nrows):
# Defining z to be negative since the z-axis is traditionally
# positive-upwards in physical oceanography.
z[i, :] = - f_depth(z_index[i, :])
# Update user at command line.
print("Completed: Depth interpolation.")
# ---------------------------------------------------------------------------
# Stage 6:
# Interpolating particle latitudes and longtidues using position indexes.
# Import nav_lat and nav_lon variables from input field_file.
lat_mdl = xr.open_dataset(field_file).nav_lat
lon_mdl = xr.open_dataset(field_file).nav_lon
# Configuring the size of our empty array for lat and lon.
# Since all output data are stored in traj x obs,
# nrows and ncols are consistent across all arrays.
lat = np.zeros([nrows, ncols])
lon = np.zeros([nrows, ncols])
# For loop to interpolate particle positions in lat/lon space from
# (x_index, y_index) pairs.
print("Position Interpolation Progress:")
# Uses tqdm package for progress bar of linear interpolation loop.
for i in tqdm(np.arange(0, nrows)):
lat[i, :] = lat_mdl.interp(
x=xr.DataArray(x_index[i, :], dims="z"),
y=xr.DataArray(y_index[i, :], dims="z")
)
lon[i, :] = lon_mdl.interp(
x=xr.DataArray(x_index[i, :], dims="z"),
y=xr.DataArray(y_index[i, :], dims="z")
)
# Update user at command line.
print("Completed: Position interpolation.")
# ---------------------------------------------------------------------------
# Stage 7:
# Creating a NCEI_NetCDF_Trajectory file storing our transformed
# TRACMASS output.
# Using xarray to generate a DataSet with data variables and attributes.
dataset = xr.Dataset(
data_vars={
"trajectory": (["traj", "obs"], trajectory),
"time": (["traj", "obs"], time),
"lat": (["traj", "obs"], lat),
"lon": (["traj", "obs"], lon),
"z": (["traj", "obs"], z),
"vol": (["traj", "obs"], vol),
"temp": (["traj", "obs"], temp),
"sal": (["traj", "obs"], sal),
"sigma0": (["traj", "obs"], sigma0),
},
# NOTE: modify dataset attributes below to include important features
# of your simulation.
attrs={
"ncei_template_version": "NCEI_NetCDF_Trajectory_Template_v2.0",
"featureType": "trajectory",
"title": "ORCA1 Trial Sim",
"summary": "Trial simulation of ORCA1 - seeding particles southwards on x-z plane at ~70N",
"TRACMASS_version": "v7 (2020-10-28)",
"Conventions": "CF-1.6/CF-1.7",
"date_created": "2020-12-29", # Use ISO 8601:2004 for date.
"creator_name": "Ollie Tooth",
"creator_email": "oliver_tooth@env-res.ox.ac.uk",
"project": "ORCA1_Sim01",
"creator_type": "person",
"creator_institution": "University of Oxford",
"product_version": "1.0",
"references": "TRACMASS - https://github.com/TRACMASS",
}
)
# Specifying variable attributes according to the NCEI_NetCDF_Trajectory_Template_v2.0.
# See: https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/trajectoryIncomplete.cdl
# trajectory
dataset.trajectory.attrs = {
'long_name': "Unique identifier for each particle",
'cf_role': "trajectory_id"
}
# time
dataset.time.attrs = {
'long_name': "time since begining of the simulation",
'standard_name': "time",
'unit': 'nanoseconds',
'calendar': "none"
}
# lat
dataset.lat.attrs = {
'long_name': "latitude",
'standard_name': "latitude",
'units': "degrees_north"
}
# lon
dataset.lon.attrs = {
'long_name': "longitude",
'standard_name': "longitude",
'units': "degrees_east"
}
# z
dataset.z.attrs = {
'long_name': "depth",
'standard_name': "depth",
'units': "meters",
"positive": "upward"
}
# vol
dataset.vol.attrs = {
'long_name': "particle transport",
'standard_name': "volume",
'units': "m3"
}
# NOTE: modify tracer attributes below as required.
# temp
dataset.temp.attrs = {
'long_name': "temperature",
'standard_name': "temperature",
'units': "C"
}
# sal
dataset.sal.attrs = {
'long_name': "salinity",
'standard_name': "salinity",
'units': "PSU"
}
# sigma0
dataset.sigma0.attrs = {
'long_name': "sigma0",
'standard_name': "sigma0",
'units': "kg/m3"
}
# Update user at command line.
print("Completed: Generated output DataSet.")
# ---------------------------------------------------------------------------
# Stage 8:
# Saving our NCEI_NetCDF_Trajectory file as a netCDF file.
# NOTE: change directory path to where we would like output .nc
# files to be stored.
# os.chdir('OUTPUT_DIR_PATH')
os.chdir('/Users/ollietooth/Desktop/D.Phil./PrelimPhase/data/')
# NOTE: set the multifile variable (boolean) to True to write multiple
# .nc output files, one per seed-level, or False to write a single
# .nc output file.
multifile = False
if multifile is True:
# -----------------------------------------
# Subroutine for multiple .nc output files.
# -----------------------------------------
# Defining output file name prefix and suffix.
fn_prefix = "ORCA1-N406_TRACMASS_seed"
fn_suffix = ".nc"
# -----------------------------
# Adding seed_level to DataSet.
# -----------------------------
# Returning the seed-levels with add_seed().
seed_level = add_seed(dataset)
# Append seed_level DataArray to original DataSet.
dataset['seed_level'] = xr.DataArray(seed_level, dims=["traj"])
# Adding attributes to seed_level DataArray.
dataset.seed_level.attrs = {'long_name': "seeding level",
'standard_name': "seed_level",
'units': "none"
}
# --------------------------------------------------------
# Subsetting dataset by seed_level and saving as .nc file.
# --------------------------------------------------------
# Minimum seed_level.
min_seed = np.min(seed_level)
# Maximum seed_level.
max_seed = np.max(seed_level)
# Iterate over seed-levels - subset and save DataSet to .nc files.
print("Saving Files Progress:")
# Uses tqdm package for a progress bar over the file-saving loop.
for seed in tqdm(np.arange(min_seed, max_seed + 1)):
# Find rows where seed-level equals seed.
rows = np.where(seed_level == seed)[0]
# Subset the DataSet with rows and return as output_data.
output_data = dataset.isel(traj=xr.DataArray(rows, dims=["traj"]))
# Save dataset to netCDF format -
# Defining output_filename with the prefix and suffix specified above.
output_filename = fn_prefix + str(seed) + fn_suffix
# Use lossless compression for the .nc files via per-variable encoding.
output_data.to_netcdf(output_filename, encoding={var: {'zlib': True, 'complevel': 4} for var in output_data.data_vars}, format="NETCDF4")
# Update user at command line.
print("Completed: Saved Dataset in multiple .nc files.")
else:
# ---------------------------------------
# Subroutine for single .nc output file.
# ---------------------------------------
# Save dataset to netCDF format -
# NOTE: modify the output file name as required for your simulation.
# dataset.to_netcdf('OUTPUT_FILENAME', format="NETCDF4")
# Use lossless compression for the .nc file via per-variable encoding.
dataset.to_netcdf('ORCA1-N406_TRACMASS_complete.nc', encoding={var: {'zlib': True, 'complevel': 4} for var in dataset.data_vars}, format="NETCDF4")
# Update user at command line.
print("Completed: Saved Dataset in single .nc file.")
| 36.24878
| 140
| 0.594133
|
d73da28fdbcd57c0a5ea47224197acc719f21237
| 10,478
|
py
|
Python
|
gym_pybullet_drones/envs/CtrlAviary.py
|
ziyangli/gym-pybullet-drones
|
5593ec16a53c299f5300c62f6dff14b15247fcf5
|
[
"MIT"
] | null | null | null |
gym_pybullet_drones/envs/CtrlAviary.py
|
ziyangli/gym-pybullet-drones
|
5593ec16a53c299f5300c62f6dff14b15247fcf5
|
[
"MIT"
] | null | null | null |
gym_pybullet_drones/envs/CtrlAviary.py
|
ziyangli/gym-pybullet-drones
|
5593ec16a53c299f5300c62f6dff14b15247fcf5
|
[
"MIT"
] | 1
|
2020-10-12T20:30:45.000Z
|
2020-10-12T20:30:45.000Z
|
import numpy as np
from gym import error, spaces, utils
from gym_pybullet_drones.envs.BaseAviary import DroneModel, Physics, BaseAviary
######################################################################################################################################################
#### Multi-drone environment class for control applications ##########################################################################################
######################################################################################################################################################
class CtrlAviary(BaseAviary):
####################################################################################################
#### Initialize the environment ####################################################################
####################################################################################################
#### Arguments #####################################################################################
#### - drone_model (DroneModel) desired drone type (associated to an .urdf file) ###########
#### - num_drones (int) desired number of drones in the aviary #####################
#### - visibility_radius (float) used to compute the drones' adjacency matrix, in meters ####
#### - initial_xyzs ((3,1) array) initial XYZ position of the drones #########################
#### - initial_rpys ((3,1) array) initial orientations of the drones (radians) ###############
#### - physics (Physics) desired implementation of physics/dynamics #################
#### - freq (int) the frequency (Hz) at which the physics engine advances ####
#### - aggregate_phy_steps (int) number of physics updates within one call of .step() #######
#### - gui (bool) whether to use PyBullet's GUI ##############################
#### - record (bool) whether to save a video of the simulation ##################
#### - obstacles (bool) whether to add obstacles to the simulation #################
####################################################################################################
def __init__(self, drone_model: DroneModel=DroneModel.CF2X, num_drones: int=1, \
visibility_radius: float=np.inf, initial_xyzs=None, initial_rpys=None, \
physics: Physics=Physics.PYB, freq: int=240, aggregate_phy_steps: int=1, \
gui=False, record=False, obstacles=False):
super().__init__(drone_model=drone_model, num_drones=num_drones, visibility_radius=visibility_radius, \
initial_xyzs=initial_xyzs, initial_rpys=initial_rpys, physics=physics, freq=freq, aggregate_phy_steps=aggregate_phy_steps, \
gui=gui, record=record, obstacles=obstacles)
####################################################################################################
#### Return the action space of the environment, a Dict of Box(4,) with NUM_DRONES entries #########
####################################################################################################
def _actionSpace(self):
#### Action vector ######## P0 P1 P2 P3
act_lower_bound = np.array([0., 0., 0., 0.])
act_upper_bound = np.array([self.MAX_RPM, self.MAX_RPM, self.MAX_RPM, self.MAX_RPM])
return spaces.Dict({ str(i): spaces.Box(low=act_lower_bound, high=act_upper_bound, dtype=np.float32) for i in range(self.NUM_DRONES) })
####################################################################################################
#### Return the observation space of the environment, a Dict with NUM_DRONES entries of Dict of ####
#### { Box(4,), MultiBinary(NUM_DRONES) } ##########################################################
####################################################################################################
def _observationSpace(self):
#### Observation vector ### X Y Z Q1 Q2 Q3 Q4 R P Y VX VY VZ WR WP WY P0 P1 P2 P3
obs_lower_bound = np.array([-np.inf, -np.inf, 0., -1., -1., -1., -1., -np.pi, -np.pi, -np.pi, -np.inf, -np.inf, -np.inf, -np.inf, -np.inf, -np.inf, 0., 0., 0., 0.])
obs_upper_bound = np.array([np.inf, np.inf, np.inf, 1., 1., 1., 1., np.pi, np.pi, np.pi, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, self.MAX_RPM, self.MAX_RPM, self.MAX_RPM, self.MAX_RPM])
return spaces.Dict({ str(i): spaces.Dict ({"state": spaces.Box(low=obs_lower_bound, high=obs_upper_bound, dtype=np.float32), \
"neighbors": spaces.MultiBinary(self.NUM_DRONES) }) for i in range(self.NUM_DRONES) })
####################################################################################################
#### Return the current observation of the environment #############################################
####################################################################################################
#### Returns #######################################################################################
#### - obs (dict) {"0":{"state": np.arr(20,),"neighbors": np.arr(NUM_DRONES)},
#### .. "NUM_DRONES-1": {..} } ##################################
#### for the "state"'s content see _observationSpace() ##########
#### "neighbors" is the drone's row of the adjacency matrix #####
####################################################################################################
def _computeObs(self):
adjacency_mat = self._getAdjacencyMatrix()
return {str(i): {"state": self._getDroneState(i), "neighbors": adjacency_mat[i,:] } for i in range(self.NUM_DRONES) }
####################################################################################################
#### Preprocess the action passed to step() ########################################################
####################################################################################################
#### Arguments #####################################################################################
#### - action (dict of (4,1) array) unclipped RPMs commanded to the 4 motors of each drone #####
####################################################################################################
#### Returns #######################################################################################
#### - clip_action ((N_DRONES,4,1) arr) clipped RPMs commanded to the 4 motors of each drone #######
####################################################################################################
def _preprocessAction(self, action):
clipped_action = np.zeros((self.NUM_DRONES,4))
for k, v in action.items():
clipped_action[int(k),:] = np.clip(np.array(v), 0, self.MAX_RPM)
return clipped_action
####################################################################################################
#### Compute the current reward value(s) ###########################################################
####################################################################################################
#### Arguments #####################################################################################
#### - obs (..) the return of _computeObs() ################################
####################################################################################################
#### Returns #######################################################################################
#### - reward (..) the reward(s) associated to the current obs/state ##########
####################################################################################################
def _computeReward(self, obs):
return -1
####################################################################################################
#### Compute the current done value(s) #############################################################
####################################################################################################
#### Arguments #####################################################################################
#### - obs (..) the return of _computeObs() ################################
####################################################################################################
#### Returns #######################################################################################
#### - done (..) the done value(s) associated to the current obs/state ######
####################################################################################################
def _computeDone(self, obs):
return False
####################################################################################################
#### Compute the current info dict(s) ##############################################################
####################################################################################################
#### Arguments #####################################################################################
#### - obs (..) the return of _computeObs() ################################
####################################################################################################
#### Returns #######################################################################################
#### - info (..) the info dict(s) associated to the current obs/state #######
####################################################################################################
def _computeInfo(self, obs):
return {"answer": 42} #### Calculated by the Deep Thought supercomputer in 7.5M years
| 86.595041
| 216
| 0.307597
|
c407d974fb81cedb8837d3cf72f5d1be84b6cbc2
| 40,179
|
py
|
Python
|
RegRCNN/models/mrcnn.py
|
HannahElisa/RegRCNN
|
1aa69d00c61bd36685213248bb30d4ba30ac5a06
|
[
"Apache-2.0"
] | null | null | null |
RegRCNN/models/mrcnn.py
|
HannahElisa/RegRCNN
|
1aa69d00c61bd36685213248bb30d4ba30ac5a06
|
[
"Apache-2.0"
] | null | null | null |
RegRCNN/models/mrcnn.py
|
HannahElisa/RegRCNN
|
1aa69d00c61bd36685213248bb30d4ba30ac5a06
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Parts are based on https://github.com/multimodallearning/pytorch-mask-rcnn
published under MIT license.
"""
import os
from multiprocessing import Pool
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
import RegRCNN.utils.model_utils as mutils
import RegRCNN.utils.exp_utils as utils
class RPN(nn.Module):
"""
Region Proposal Network.
"""
def __init__(self, cf, conv):
super(RPN, self).__init__()
self.dim = conv.dim
self.conv_shared = conv(cf.end_filts, cf.n_rpn_features, ks=3, stride=cf.rpn_anchor_stride, pad=1, relu=cf.relu)
self.conv_class = conv(cf.n_rpn_features, 2 * len(cf.rpn_anchor_ratios), ks=1, stride=1, relu=None)
self.conv_bbox = conv(cf.n_rpn_features, 2 * self.dim * len(cf.rpn_anchor_ratios), ks=1, stride=1, relu=None)
def forward(self, x):
"""
:param x: input feature maps (b, in_channels, y, x, (z))
:return: rpn_class_logits (b, 2, n_anchors)
:return: rpn_probs_logits (b, 2, n_anchors)
:return: rpn_bbox (b, 2 * dim, n_anchors)
"""
# Shared convolutional base of the RPN.
x = self.conv_shared(x)
# Anchor Score. (batch, anchors per location * 2, y, x, (z)).
rpn_class_logits = self.conv_class(x)
# Reshape to (batch, 2, anchors)
axes = (0, 2, 3, 1) if self.dim == 2 else (0, 2, 3, 4, 1)
rpn_class_logits = rpn_class_logits.permute(*axes)
rpn_class_logits = rpn_class_logits.contiguous()
rpn_class_logits = rpn_class_logits.view(x.size()[0], -1, 2)
# Softmax on last dimension (fg vs. bg).
rpn_probs = F.softmax(rpn_class_logits, dim=2)
# Bounding box refinement. (batch, anchors_per_location * (y, x, (z), log(h), log(w), (log(d)), y, x, (z))
rpn_bbox = self.conv_bbox(x)
# Reshape to (batch, 2*dim, anchors)
rpn_bbox = rpn_bbox.permute(*axes)
rpn_bbox = rpn_bbox.contiguous()
rpn_bbox = rpn_bbox.view(x.size()[0], -1, self.dim * 2)
return [rpn_class_logits, rpn_probs, rpn_bbox]
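# --- Illustrative sketch (not part of the original model code) ---
# The (batch, 2 * anchors_per_location, y, x) -> (batch, n_anchors, 2)
# reshape used in RPN.forward, shown on a tiny CPU tensor (2D case with
# 3 anchor ratios on a 4x4 feature map):
_demo_logits = torch.zeros(1, 2 * 3, 4, 4)
_demo_logits = _demo_logits.permute(0, 2, 3, 1).contiguous().view(1, -1, 2)
assert _demo_logits.shape == (1, 4 * 4 * 3, 2)
del _demo_logits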
class Classifier(nn.Module):
"""
Head network for classification and bounding box refinement. Performs RoiAlign, processes resulting features through a
shared convolutional base and finally branches off the classifier- and regression head.
"""
def __init__(self, cf, conv):
super(Classifier, self).__init__()
self.cf = cf
self.dim = conv.dim
self.in_channels = cf.end_filts
self.pool_size = cf.pool_size
self.pyramid_levels = cf.pyramid_levels
# instance_norm does not work with spatial dims (1, 1, (1))
norm = cf.norm if cf.norm != 'instance_norm' else None
self.conv1 = conv(cf.end_filts, cf.end_filts * 4, ks=self.pool_size, stride=1, norm=norm, relu=cf.relu)
self.conv2 = conv(cf.end_filts * 4, cf.end_filts * 4, ks=1, stride=1, norm=norm, relu=cf.relu)
self.linear_bbox = nn.Linear(cf.end_filts * 4, cf.head_classes * 2 * self.dim)
if 'regression' in self.cf.prediction_tasks:
self.linear_regressor = nn.Linear(cf.end_filts * 4, cf.head_classes * cf.regression_n_features)
self.rg_n_feats = cf.regression_n_features
#classify into bins of regression values
elif 'regression_bin' in self.cf.prediction_tasks:
self.linear_regressor = nn.Linear(cf.end_filts * 4, cf.head_classes * len(cf.bin_labels))
self.rg_n_feats = len(cf.bin_labels)
else:
self.linear_regressor = lambda x: torch.zeros((x.shape[0], cf.head_classes * 1), dtype=torch.float32).fill_(float('NaN')).cuda()
self.rg_n_feats = 1 #cf.regression_n_features
if 'class' in self.cf.prediction_tasks:
self.linear_class = nn.Linear(cf.end_filts * 4, cf.head_classes)
else:
assert cf.head_classes == 2, "#head classes {} needs to be 2 (bg/fg) when not predicting classes".format(cf.head_classes)
self.linear_class = lambda x: torch.zeros((x.shape[0], cf.head_classes), dtype=torch.float64).cuda()
def forward(self, x, rois):
"""
:param x: input feature maps (b, in_channels, y, x, (z))
:param rois: normalized box coordinates as proposed by the RPN to be forwarded through
the second stage (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ix). Proposals of all batch elements
have been merged to one vector, while the origin info has been stored for re-allocation.
:return: mrcnn_class_logits (n_proposals, n_head_classes)
:return: mrcnn_bbox (n_proposals, n_head_classes, 2 * dim) predicted corrections to be applied to proposals for refinement.
"""
x = mutils.pyramid_roi_align(x, rois, self.pool_size, self.pyramid_levels, self.dim)
x = self.conv1(x)
x = self.conv2(x)
x = x.view(-1, self.in_channels * 4)
mrcnn_bbox = self.linear_bbox(x)
mrcnn_bbox = mrcnn_bbox.view(mrcnn_bbox.size()[0], -1, self.dim * 2)
mrcnn_class_logits = self.linear_class(x)
mrcnn_regress = self.linear_regressor(x)
mrcnn_regress = mrcnn_regress.view(mrcnn_regress.size()[0], -1, self.rg_n_feats)
return [mrcnn_bbox, mrcnn_class_logits, mrcnn_regress]
class Mask(nn.Module):
"""
Head network for proposal-based mask segmentation. Performs RoiAlign, some convolutions and applies sigmoid on the
output logits to allow for overlapping classes.
"""
def __init__(self, cf, conv):
super(Mask, self).__init__()
self.pool_size = cf.mask_pool_size
self.pyramid_levels = cf.pyramid_levels
self.dim = conv.dim
self.conv1 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
self.conv2 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
self.conv3 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
self.conv4 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
if conv.dim == 2:
self.deconv = nn.ConvTranspose2d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2) # todo why no norm here?
else:
self.deconv = nn.ConvTranspose3d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
self.relu = nn.ReLU(inplace=True) if cf.relu == 'relu' else nn.LeakyReLU(inplace=True)
self.conv5 = conv(cf.end_filts, cf.head_classes, ks=1, stride=1, relu=None)
self.sigmoid = nn.Sigmoid()
def forward(self, x, rois):
"""
:param x: input feature maps (b, in_channels, y, x, (z))
:param rois: normalized box coordinates as proposed by the RPN to be forwarded through
the second stage (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ix). Proposals of all batch elements
have been merged to one vector, while the origin info has been stored for re-allocation.
:return: x: masks (n_sampled_proposals (n_detections in inference), n_classes, y, x, (z))
"""
x = mutils.pyramid_roi_align(x, rois, self.pool_size, self.pyramid_levels, self.dim)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.relu(self.deconv(x))
x = self.conv5(x)
x = self.sigmoid(x)
return x
############################################################
# Loss Functions
############################################################
def compute_rpn_class_loss(rpn_class_logits, rpn_match, shem_poolsize):
"""
:param rpn_match: (n_anchors). [-1, 0, 1] for negative, neutral, and positive matched anchors.
:param rpn_class_logits: (n_anchors, 2). logits from RPN classifier.
:param shem_poolsize: int. factor of top-k candidates to draw from per negative sample (stochastic hard example mining).
:return: loss: torch tensor
:return: np_neg_ix: 1D array containing indices of the neg_roi_logits, which have been sampled for training.
"""
# Filter out neutral anchors
pos_indices = torch.nonzero(rpn_match == 1)
neg_indices = torch.nonzero(rpn_match == -1)
# loss for positive samples
if not 0 in pos_indices.size():
pos_indices = pos_indices.squeeze(1)
roi_logits_pos = rpn_class_logits[pos_indices]
pos_loss = F.cross_entropy(roi_logits_pos, torch.LongTensor([1] * pos_indices.shape[0]).cuda())
else:
pos_loss = torch.FloatTensor([0]).cuda()
# loss for negative samples: draw hard negative examples (SHEM)
# that match the number of positive samples, but at least 1.
if not 0 in neg_indices.size():
neg_indices = neg_indices.squeeze(1)
roi_logits_neg = rpn_class_logits[neg_indices]
negative_count = np.max((1, pos_indices.cpu().data.numpy().size))
roi_probs_neg = F.softmax(roi_logits_neg, dim=1)
neg_ix = mutils.shem(roi_probs_neg, negative_count, shem_poolsize)
neg_loss = F.cross_entropy(roi_logits_neg[neg_ix], torch.LongTensor([0] * neg_ix.shape[0]).cuda())
np_neg_ix = neg_ix.cpu().data.numpy()
#print("pos, neg count", pos_indices.cpu().data.numpy().size, negative_count)
else:
neg_loss = torch.FloatTensor([0]).cuda()
np_neg_ix = np.array([]).astype('int32')
loss = (pos_loss + neg_loss) / 2
return loss, np_neg_ix
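# The negative sampling above delegates to mutils.shem. The sketch below illustrates
# only the general idea of stochastic hard example mining (an assumption about the
# technique, not a copy of mutils.shem's actual implementation): rank negatives by
# their foreground probability, keep the hardest k * poolsize, then randomly draw k
# from that pool so mining stays hard but retains some stochasticity.
def _shem_sketch(neg_fg_probs, k, poolsize):
    """Illustrative only. neg_fg_probs: 1D fg-probabilities of the negative rois (e.g. roi_probs_neg[:, 1])."""
    pool_size = min(k * poolsize, neg_fg_probs.shape[0])
    hard_pool = torch.topk(neg_fg_probs, pool_size)[1]  # indices of the hardest negatives
    picked = torch.randperm(hard_pool.shape[0])[:k]     # random draw from the hard pool
    return hard_pool[picked]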
def compute_rpn_bbox_loss(rpn_pred_deltas, rpn_target_deltas, rpn_match):
"""
:param rpn_target_deltas: (b, n_positive_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd)))).
    Uses 0 padding to fill in unused bbox deltas.
:param rpn_pred_deltas: predicted deltas from RPN. (b, n_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param rpn_match: (n_anchors). [-1, 0, 1] for negative, neutral, and positive matched anchors.
:return: loss: torch 1D tensor.
"""
if not 0 in torch.nonzero(rpn_match == 1).size():
indices = torch.nonzero(rpn_match == 1).squeeze(1)
# Pick bbox deltas that contribute to the loss
rpn_pred_deltas = rpn_pred_deltas[indices]
# Trim target bounding box deltas to the same length as rpn_bbox.
target_deltas = rpn_target_deltas[:rpn_pred_deltas.size()[0], :]
# Smooth L1 loss
loss = F.smooth_l1_loss(rpn_pred_deltas, target_deltas)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
def compute_mrcnn_bbox_loss(mrcnn_pred_deltas, mrcnn_target_deltas, target_class_ids):
"""
    :param mrcnn_target_deltas: (n_sampled_rois, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
    :param mrcnn_pred_deltas: (n_sampled_rois, n_classes, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param target_class_ids: (n_sampled_rois)
:return: loss: torch 1D tensor.
"""
if not 0 in torch.nonzero(target_class_ids > 0).size():
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix].long()
target_bbox = mrcnn_target_deltas[positive_roi_ix, :].detach()
pred_bbox = mrcnn_pred_deltas[positive_roi_ix, positive_roi_class_ids, :]
loss = F.smooth_l1_loss(pred_bbox, target_bbox)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
def compute_mrcnn_mask_loss(pred_masks, target_masks, target_class_ids):
"""
:param target_masks: (n_sampled_rois, y, x, (z)) A float32 tensor of values 0 or 1. Uses zero padding to fill array.
:param pred_masks: (n_sampled_rois, n_classes, y, x, (z)) float32 tensor with values between [0, 1].
:param target_class_ids: (n_sampled_rois)
:return: loss: torch 1D tensor.
"""
#print("targ masks", target_masks.unique(return_counts=True))
if not 0 in torch.nonzero(target_class_ids > 0).size():
# Only positive ROIs contribute to the loss. And only
# the class-specific mask of each ROI.
positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_class_ids = target_class_ids[positive_ix].long()
y_true = target_masks[positive_ix, :, :].detach()
y_pred = pred_masks[positive_ix, positive_class_ids, :, :]
loss = F.binary_cross_entropy(y_pred, y_true)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
def compute_mrcnn_class_loss(tasks, pred_class_logits, target_class_ids):
"""
:param pred_class_logits: (n_sampled_rois, n_classes)
:param target_class_ids: (n_sampled_rois) batch dimension was merged into roi dimension.
:return: loss: torch 1D tensor.
"""
if 'class' in tasks and not 0 in target_class_ids.size():
loss = F.cross_entropy(pred_class_logits, target_class_ids.long())
else:
loss = torch.FloatTensor([0.]).cuda()
return loss
def compute_mrcnn_regression_loss(tasks, pred, target, target_class_ids):
"""regression loss is a distance metric between target vector and predicted regression vector.
    :param pred: (n_sampled_rois, n_classes, [n_rg_feats if real regression or n_rg_bins if rg_bin task])
:param target: (n_sampled_rois, [n_rg_feats or n_rg_bins])
:return: differentiable loss, torch 1D tensor on cuda
"""
if not 0 in target.shape and not 0 in torch.nonzero(target_class_ids > 0).shape:
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix].long()
target = target[positive_roi_ix].detach()
pred = pred[positive_roi_ix, positive_roi_class_ids]
if "regression_bin" in tasks:
loss = F.cross_entropy(pred, target.long())
else:
loss = F.smooth_l1_loss(pred, target)
#loss = F.mse_loss(pred, target)
else:
loss = torch.FloatTensor([0.]).cuda()
return loss
############################################################
# Detection Layer
############################################################
def compute_roi_scores(tasks, batch_rpn_proposals, mrcnn_cl_logits):
    """ Depending on the prediction tasks: if no class prediction beyond fg/bg (--> means no additional class
    head was applied) use RPN objectness scores as roi scores, otherwise class head scores.
    :param tasks: configured prediction tasks (cf.prediction_tasks).
    :param batch_rpn_proposals: (b, n_proposals, n_coords + 1); the last entry per proposal is the RPN objectness score.
    :param mrcnn_cl_logits: (n_proposals, n_classes) logits from the classifier head.
    :return: scores: (n_proposals, 2) pseudo bg/fg probabilities or (n_proposals, n_classes) softmax scores.
"""
if not 'class' in tasks:
scores = batch_rpn_proposals[:, :, -1].view(-1, 1)
scores = torch.cat((1 - scores, scores), dim=1)
else:
scores = F.softmax(mrcnn_cl_logits, dim=1)
return scores
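# Worked example with toy numbers: if no 'class' task is configured and a proposal's
# RPN objectness score is 0.8, the code above expands it to (1 - 0.8, 0.8) = (0.2, 0.8),
# so the two columns mimic (background, foreground) class scores and can be consumed
# by the same downstream detection filtering as real classifier-head scores.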
############################################################
# MaskRCNN Class
############################################################
class net(nn.Module):
def __init__(self, cf, logger):
super(net, self).__init__()
self.cf = cf
self.logger = logger
self.build()
loss_order = ['rpn_class', 'rpn_bbox', 'mrcnn_bbox', 'mrcnn_mask', 'mrcnn_class', 'mrcnn_rg']
if hasattr(cf, "mrcnn_loss_weights"):
# bring into right order
self.loss_weights = np.array([cf.mrcnn_loss_weights[k] for k in loss_order])
else:
self.loss_weights = np.array([1.]*len(loss_order))
if self.cf.weight_init=="custom":
logger.info("Tried to use custom weight init which is not defined. Using pytorch default.")
elif self.cf.weight_init:
mutils.initialize_weights(self)
else:
logger.info("using default pytorch weight init")
def build(self):
"""Build Mask R-CNN architecture."""
        # Image size must be divisible by 2 multiple times.
h, w = self.cf.patch_size[:2]
if h / 2**5 != int(h / 2**5) or w / 2**5 != int(w / 2**5):
            raise Exception("Image size must be divisible by 2 at least 5 times "
                            "to avoid fractions when downscaling and upscaling. "
                            "For example, use 256, 288, 320, 384, 448, 512, ... etc., i.e., "
                            "any number x*32 will do!")
# instantiate abstract multi-dimensional conv generator and load backbone module.
backbone = utils.import_module('bbone', self.cf.backbone_path)
self.logger.info("loaded backbone from {}".format(self.cf.backbone_path))
conv = backbone.ConvGenerator(self.cf.dim)
# build Anchors, FPN, RPN, Classifier / Bbox-Regressor -head, Mask-head
self.np_anchors = mutils.generate_pyramid_anchors(self.logger, self.cf)
self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
self.fpn = backbone.FPN(self.cf, conv, relu_enc=self.cf.relu, operate_stride1=False).cuda()
self.rpn = RPN(self.cf, conv)
self.classifier = Classifier(self.cf, conv)
self.mask = Mask(self.cf, conv)
def forward(self, img, is_training=True):
"""
:param img: input images (b, c, y, x, (z)).
:return: rpn_pred_logits: (b, n_anchors, 2)
:return: rpn_pred_deltas: (b, n_anchors, (y, x, (z), log(h), log(w), (log(d))))
:return: batch_proposal_boxes: (b, n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ix)) only for monitoring/plotting.
:return: detections: (n_final_detections, (y1, x1, y2, x2, (z1), (z2), batch_ix, pred_class_id, pred_score)
:return: detection_masks: (n_final_detections, n_classes, y, x, (z)) raw molded masks as returned by mask-head.
"""
# extract features.
fpn_outs = self.fpn(img)
rpn_feature_maps = [fpn_outs[i] for i in self.cf.pyramid_levels]
self.mrcnn_feature_maps = rpn_feature_maps
# loop through pyramid layers and apply RPN.
layer_outputs = [ self.rpn(p_feats) for p_feats in rpn_feature_maps ]
# concatenate layer outputs.
# convert from list of lists of level outputs to list of lists of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
outputs = list(zip(*layer_outputs))
outputs = [torch.cat(list(o), dim=1) for o in outputs]
rpn_pred_logits, rpn_pred_probs, rpn_pred_deltas = outputs
        # generate proposals: apply predicted deltas to anchors and filter by foreground scores from RPN classifier.
proposal_count = self.cf.post_nms_rois_training if is_training else self.cf.post_nms_rois_inference
batch_normed_props, batch_unnormed_props = mutils.refine_proposals(rpn_pred_probs, rpn_pred_deltas,
proposal_count, self.anchors, self.cf)
# merge batch dimension of proposals while storing allocation info in coordinate dimension.
batch_ixs = torch.arange(
batch_normed_props.shape[0]).cuda().unsqueeze(1).repeat(1,batch_normed_props.shape[1]).view(-1).float()
rpn_rois = batch_normed_props[:, :, :-1].view(-1, batch_normed_props[:, :, :-1].shape[2])
self.rpn_rois_batch_info = torch.cat((rpn_rois, batch_ixs.unsqueeze(1)), dim=1)
# this is the first of two forward passes in the second stage, where no activations are stored for backprop.
# here, all proposals are forwarded (with virtual_batch_size = batch_size * post_nms_rois.)
# for inference/monitoring as well as sampling of rois for the loss functions.
        # processed in chunks of roi_chunk_size to fit into gpu memory.
chunked_rpn_rois = self.rpn_rois_batch_info.split(self.cf.roi_chunk_size)
bboxes_list, class_logits_list, regressions_list = [], [], []
with torch.no_grad():
for chunk in chunked_rpn_rois:
chunk_bboxes, chunk_class_logits, chunk_regressions = self.classifier(self.mrcnn_feature_maps, chunk)
bboxes_list.append(chunk_bboxes)
class_logits_list.append(chunk_class_logits)
regressions_list.append(chunk_regressions)
mrcnn_bbox = torch.cat(bboxes_list, 0)
mrcnn_class_logits = torch.cat(class_logits_list, 0)
mrcnn_regressions = torch.cat(regressions_list, 0)
self.mrcnn_roi_scores = compute_roi_scores(self.cf.prediction_tasks, batch_normed_props, mrcnn_class_logits)
# refine classified proposals, filter and return final detections.
# returns (cf.max_inst_per_batch_element, n_coords+1+...)
detections = mutils.refine_detections(self.cf, batch_ixs, rpn_rois, mrcnn_bbox, self.mrcnn_roi_scores,
mrcnn_regressions)
# forward remaining detections through mask-head to generate corresponding masks.
scale = [img.shape[2]] * 4 + [img.shape[-1]] * 2
scale = torch.from_numpy(np.array(scale[:self.cf.dim * 2] + [1])[None]).float().cuda()
# first self.cf.dim * 2 entries on axis 1 are always the box coords, +1 is batch_ix
detection_boxes = detections[:, :self.cf.dim * 2 + 1] / scale
with torch.no_grad():
detection_masks = self.mask(self.mrcnn_feature_maps, detection_boxes)
return [rpn_pred_logits, rpn_pred_deltas, batch_unnormed_props, detections, detection_masks]
def loss_samples_forward(self, batch_gt_boxes, batch_gt_masks, batch_gt_class_ids, batch_gt_regressions=None):
"""
this is the second forward pass through the second stage (features from stage one are re-used).
samples few rois in loss_example_mining and forwards only those for loss computation.
:param batch_gt_class_ids: list over batch elements. Each element is a list over the corresponding roi target labels.
:param batch_gt_boxes: list over batch elements. Each element is a list over the corresponding roi target coordinates.
:param batch_gt_masks: (b, n(b), c, y, x (,z)) list over batch elements. Each element holds n_gt_rois(b)
(i.e., dependent on the batch element) binary masks of shape (c, y, x, (z)).
:return: sample_logits: (n_sampled_rois, n_classes) predicted class scores.
:return: sample_deltas: (n_sampled_rois, n_classes, 2 * dim) predicted corrections to be applied to proposals for refinement.
:return: sample_mask: (n_sampled_rois, n_classes, y, x, (z)) predicted masks per class and proposal.
:return: sample_target_class_ids: (n_sampled_rois) target class labels of sampled proposals.
:return: sample_target_deltas: (n_sampled_rois, 2 * dim) target deltas of sampled proposals for box refinement.
:return: sample_target_masks: (n_sampled_rois, y, x, (z)) target masks of sampled proposals.
:return: sample_proposals: (n_sampled_rois, 2 * dim) RPN output for sampled proposals. only for monitoring/plotting.
"""
# sample rois for loss and get corresponding targets for all Mask R-CNN head network losses.
sample_ics, sample_target_deltas, sample_target_mask, sample_target_class_ids, sample_target_regressions = \
mutils.loss_example_mining(self.cf, self.rpn_rois_batch_info, batch_gt_boxes, batch_gt_masks,
self.mrcnn_roi_scores, batch_gt_class_ids, batch_gt_regressions)
# re-use feature maps and RPN output from first forward pass.
sample_proposals = self.rpn_rois_batch_info[sample_ics]
if not 0 in sample_proposals.size():
sample_deltas, sample_logits, sample_regressions = self.classifier(self.mrcnn_feature_maps, sample_proposals)
sample_mask = self.mask(self.mrcnn_feature_maps, sample_proposals)
else:
sample_logits = torch.FloatTensor().cuda()
sample_deltas = torch.FloatTensor().cuda()
sample_regressions = torch.FloatTensor().cuda()
sample_mask = torch.FloatTensor().cuda()
return [sample_deltas, sample_mask, sample_logits, sample_regressions, sample_proposals,
sample_target_deltas, sample_target_mask, sample_target_class_ids, sample_target_regressions]
def get_results(self, img_shape, detections, detection_masks, box_results_list=None, return_masks=True):
"""
Restores batch dimension of merged detections, unmolds detections, creates and fills results dict.
:param img_shape:
:param detections: shape (n_final_detections, len(info)), where
info=( y1, x1, y2, x2, (z1,z2), batch_ix, pred_class_id, pred_score )
:param detection_masks: (n_final_detections, n_classes, y, x, (z)) raw molded masks as returned by mask-head.
:param box_results_list: None or list of output boxes for monitoring/plotting.
each element is a list of boxes per batch element.
:param return_masks: boolean. If True, full resolution masks are returned for all proposals (speed trade-off).
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, 1] only fg. vs. bg for now.
class-specific return of masks will come with implementation of instance segmentation evaluation.
"""
detections = detections.cpu().data.numpy()
if self.cf.dim == 2:
detection_masks = detection_masks.permute(0, 2, 3, 1).cpu().data.numpy()
else:
detection_masks = detection_masks.permute(0, 2, 3, 4, 1).cpu().data.numpy()
# det masks shape now (n_dets, y,x(,z), n_classes)
# restore batch dimension of merged detections using the batch_ix info.
batch_ixs = detections[:, self.cf.dim*2]
detections = [detections[batch_ixs == ix] for ix in range(img_shape[0])]
mrcnn_mask = [detection_masks[batch_ixs == ix] for ix in range(img_shape[0])]
# mrcnn_mask: shape (b_size, variable, variable, n_classes), variable bc depends on single instance mask size
        if box_results_list is None:  # for test_forward, where no previous list exists.
box_results_list = [[] for _ in range(img_shape[0])]
# seg_logits == seg_probs in mrcnn since mask head finishes with sigmoid (--> image space = [0,1])
seg_probs = []
# loop over batch and unmold detections.
for ix in range(img_shape[0]):
# final masks are one-hot encoded (b, n_classes, y, x, (z))
final_masks = np.zeros((self.cf.num_classes + 1, *img_shape[2:]))
            # +1 for bg; the mask head outputs per-pixel fg probabilities in [0, 1], so bg is anything < 0.5
if self.cf.num_classes + 1 != self.cf.num_seg_classes:
                self.logger.warning("n of roi-classifier head classes {} doesn't match cf.num_seg_classes {}".format(
self.cf.num_classes + 1, self.cf.num_seg_classes))
if not 0 in detections[ix].shape:
boxes = detections[ix][:, :self.cf.dim*2].astype(np.int32)
class_ids = detections[ix][:, self.cf.dim*2 + 1].astype(np.int32)
scores = detections[ix][:, self.cf.dim*2 + 2]
masks = mrcnn_mask[ix][np.arange(boxes.shape[0]), ..., class_ids]
regressions = detections[ix][:,self.cf.dim*2+3:]
# Filter out detections with zero area. Often only happens in early
# stages of training when the network weights are still a bit random.
if self.cf.dim == 2:
exclude_ix = np.where((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
else:
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 5] - boxes[:, 4]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
regressions = np.delete(regressions, exclude_ix, axis=0)
# Resize masks to original image size and set boundary threshold.
if return_masks:
for i in range(masks.shape[0]): #masks per this batch instance/element/image
# Convert neural network mask to full size mask
if self.cf.dim == 2:
full_mask = mutils.unmold_mask_2D(masks[i], boxes[i], img_shape[2:])
else:
full_mask = mutils.unmold_mask_3D(masks[i], boxes[i], img_shape[2:])
# take the maximum seg_logits per class of instances in that class, i.e., a pixel in a class
# has the max seg_logit value over all instances of that class in one sample
final_masks[class_ids[i]] = np.max((final_masks[class_ids[i]], full_mask), axis=0)
final_masks[0] = np.full(final_masks[0].shape, 0.49999999) #effectively min_det_thres at 0.5 per pixel
# add final predictions to results.
if not 0 in boxes.shape:
for ix2, coords in enumerate(boxes):
box = {'box_coords': coords, 'box_type': 'det', 'box_score': scores[ix2],
'box_pred_class_id': class_ids[ix2]}
#if (hasattr(self.cf, "convert_cl_to_rg") and self.cf.convert_cl_to_rg):
if "regression_bin" in self.cf.prediction_tasks:
# in this case, regression preds are actually the rg_bin_ids --> map to rg value the bin represents
box['rg_bin'] = regressions[ix2].argmax()
box['regression'] = self.cf.bin_id2rg_val[box['rg_bin']]
else:
box['regression'] = regressions[ix2]
if hasattr(self.cf, "rg_val_to_bin_id") and \
any(['regression' in task for task in self.cf.prediction_tasks]):
box.update({'rg_bin': self.cf.rg_val_to_bin_id(regressions[ix2])})
box_results_list[ix].append(box)
# if no detections were made--> keep full bg mask (zeros).
seg_probs.append(final_masks)
# create and fill results dictionary.
results_dict = {}
results_dict['boxes'] = box_results_list
results_dict['seg_preds'] = np.array(seg_probs)
return results_dict
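        # Illustrative example of one entry in results_dict['boxes'][b] (numbers made up):
        #     {'box_coords': np.array([12, 30, 58, 71]), 'box_type': 'det',
        #      'box_score': 0.91, 'box_pred_class_id': 1, 'regression': np.array([0.4])}
        # Monitoring entries ('gt', 'prop', 'pos_anchor', 'neg_anchor') follow the same
        # pattern but typically carry only 'box_coords' and 'box_type'.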
def train_forward(self, batch, is_validation=False):
"""
train method (also used for validation monitoring). wrapper around forward pass of network. prepares input data
for processing, computes losses, and stores outputs in a dictionary.
:param batch: dictionary containing 'data', 'seg', etc.
            batch['roi_masks']: (b, n(b), c, h(n), w(n) (z(n))) list like roi_labels but with arrays (masks) in place of
integers. c==channels of the raw segmentation.
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, n_classes].
'torch_loss': 1D torch tensor for backprop.
'class_loss': classification loss for monitoring.
"""
img = batch['data']
gt_boxes = batch['bb_target']
#axes = (0, 2, 3, 1) if self.cf.dim == 2 else (0, 2, 3, 4, 1)
#gt_masks = [np.transpose(batch['roi_masks'][ii], axes=axes) for ii in range(len(batch['roi_masks']))]
gt_masks = batch['roi_masks']
gt_class_ids = batch['class_targets']
if 'regression' in self.cf.prediction_tasks:
gt_regressions = batch["regression_targets"]
elif 'regression_bin' in self.cf.prediction_tasks:
gt_regressions = batch["rg_bin_targets"]
else:
gt_regressions = None
img = torch.from_numpy(img).cuda().float()
batch_rpn_class_loss = torch.FloatTensor([0]).cuda()
batch_rpn_bbox_loss = torch.FloatTensor([0]).cuda()
# list of output boxes for monitoring/plotting. each element is a list of boxes per batch element.
box_results_list = [[] for _ in range(img.shape[0])]
#forward passes. 1. general forward pass, where no activations are saved in second stage (for performance
# monitoring and loss sampling). 2. second stage forward pass of sampled rois with stored activations for backprop.
rpn_class_logits, rpn_pred_deltas, proposal_boxes, detections, detection_masks = self.forward(img)
mrcnn_pred_deltas, mrcnn_pred_mask, mrcnn_class_logits, mrcnn_regressions, sample_proposals, \
mrcnn_target_deltas, target_mask, target_class_ids, target_regressions = \
self.loss_samples_forward(gt_boxes, gt_masks, gt_class_ids, gt_regressions)
# loop over batch
for b in range(img.shape[0]):
if len(gt_boxes[b]) > 0:
# add gt boxes to output list
for tix in range(len(gt_boxes[b])):
gt_box = {'box_type': 'gt', 'box_coords': batch['bb_target'][b][tix]}
for name in self.cf.roi_items:
gt_box.update({name: batch[name][b][tix]})
box_results_list[b].append(gt_box)
# match gt boxes with anchors to generate targets for RPN losses.
rpn_match, rpn_target_deltas = mutils.gt_anchor_matching(self.cf, self.np_anchors, gt_boxes[b])
# add positive anchors used for loss to output list for monitoring.
pos_anchors = mutils.clip_boxes_numpy(self.np_anchors[np.argwhere(rpn_match == 1)][:, 0], img.shape[2:])
for p in pos_anchors:
box_results_list[b].append({'box_coords': p, 'box_type': 'pos_anchor'})
else:
rpn_match = np.array([-1]*self.np_anchors.shape[0])
rpn_target_deltas = np.array([0])
rpn_match_gpu = torch.from_numpy(rpn_match).cuda()
rpn_target_deltas = torch.from_numpy(rpn_target_deltas).float().cuda()
# compute RPN losses.
rpn_class_loss, neg_anchor_ix = compute_rpn_class_loss(rpn_class_logits[b], rpn_match_gpu, self.cf.shem_poolsize)
rpn_bbox_loss = compute_rpn_bbox_loss(rpn_pred_deltas[b], rpn_target_deltas, rpn_match_gpu)
batch_rpn_class_loss += rpn_class_loss /img.shape[0]
batch_rpn_bbox_loss += rpn_bbox_loss /img.shape[0]
# add negative anchors used for loss to output list for monitoring.
# neg_anchor_ix = neg_ix come from shem and mark positions in roi_probs_neg = rpn_class_logits[neg_indices]
# with neg_indices = rpn_match == -1
neg_anchors = mutils.clip_boxes_numpy(self.np_anchors[rpn_match == -1][neg_anchor_ix], img.shape[2:])
for n in neg_anchors:
box_results_list[b].append({'box_coords': n, 'box_type': 'neg_anchor'})
# add highest scoring proposals to output list for monitoring.
rpn_proposals = proposal_boxes[b][proposal_boxes[b, :, -1].argsort()][::-1]
for r in rpn_proposals[:self.cf.n_plot_rpn_props, :-1]:
box_results_list[b].append({'box_coords': r, 'box_type': 'prop'})
# add positive and negative roi samples used for mrcnn losses to output list for monitoring.
if not 0 in sample_proposals.shape:
rois = mutils.clip_to_window(self.cf.window, sample_proposals).cpu().data.numpy()
for ix, r in enumerate(rois):
box_results_list[int(r[-1])].append({'box_coords': r[:-1] * self.cf.scale,
'box_type': 'pos_class' if target_class_ids[ix] > 0 else 'neg_class'})
# compute mrcnn losses.
mrcnn_class_loss = compute_mrcnn_class_loss(self.cf.prediction_tasks, mrcnn_class_logits, target_class_ids)
mrcnn_bbox_loss = compute_mrcnn_bbox_loss(mrcnn_pred_deltas, mrcnn_target_deltas, target_class_ids)
mrcnn_regressions_loss = compute_mrcnn_regression_loss(self.cf.prediction_tasks, mrcnn_regressions, target_regressions, target_class_ids)
# mrcnn can be run without pixelwise annotations available (Faster R-CNN mode).
# In this case, the mask_loss is taken out of training.
if self.cf.frcnn_mode:
mrcnn_mask_loss = torch.FloatTensor([0]).cuda()
else:
mrcnn_mask_loss = compute_mrcnn_mask_loss(mrcnn_pred_mask, target_mask, target_class_ids)
loss = batch_rpn_class_loss + batch_rpn_bbox_loss +\
mrcnn_bbox_loss + mrcnn_mask_loss + mrcnn_class_loss + mrcnn_regressions_loss
# run unmolding of predictions for monitoring and merge all results to one dictionary.
return_masks = self.cf.return_masks_in_val if is_validation else self.cf.return_masks_in_train
results_dict = self.get_results(img.shape, detections, detection_masks, box_results_list,
return_masks=return_masks)
results_dict['seg_preds'] = results_dict['seg_preds'].argmax(axis=1).astype('uint8')[:,np.newaxis]
if 'dice' in self.cf.metrics:
results_dict['batch_dices'] = mutils.dice_per_batch_and_class(
results_dict['seg_preds'], batch["seg"], self.cf.num_seg_classes, convert_to_ohe=True)
results_dict['torch_loss'] = loss
results_dict['class_loss'] = mrcnn_class_loss.item()
results_dict['bbox_loss'] = mrcnn_bbox_loss.item()
results_dict['mask_loss'] = mrcnn_mask_loss.item()
results_dict['rg_loss'] = mrcnn_regressions_loss.item()
results_dict['rpn_class_loss'] = rpn_class_loss.item()
results_dict['rpn_bbox_loss'] = rpn_bbox_loss.item()
return results_dict
def test_forward(self, batch, return_masks=True):
"""
test method. wrapper around forward pass of network without usage of any ground truth information.
prepares input data for processing and stores outputs in a dictionary.
:param batch: dictionary containing 'data'
:param return_masks: boolean. If True, full resolution masks are returned for all proposals (speed trade-off).
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, n_classes]
"""
img = batch['data']
img = torch.from_numpy(img).float().cuda()
_, _, _, detections, detection_masks = self.forward(img)
results_dict = self.get_results(img.shape, detections, detection_masks, return_masks=return_masks)
return results_dict
| 53.429521
| 145
| 0.638866
|
fb4f174c8d0bef1c009855ee6036cae221270216
| 1,784
|
py
|
Python
|
altimeter/aws/resource/ec2/transit_gateway_vpc_attachment.py
|
jparten/altimeter
|
956cf7f7c2fe443751b8da393a764f8a7bb82348
|
[
"MIT"
] | null | null | null |
altimeter/aws/resource/ec2/transit_gateway_vpc_attachment.py
|
jparten/altimeter
|
956cf7f7c2fe443751b8da393a764f8a7bb82348
|
[
"MIT"
] | null | null | null |
altimeter/aws/resource/ec2/transit_gateway_vpc_attachment.py
|
jparten/altimeter
|
956cf7f7c2fe443751b8da393a764f8a7bb82348
|
[
"MIT"
] | null | null | null |
"""Resource for Transit Gateway VPC Attachments"""
from typing import Type
from botocore.client import BaseClient
from altimeter.aws.resource.resource_spec import ListFromAWSResult
from altimeter.aws.resource.ec2 import EC2ResourceSpec
from altimeter.core.graph.schema import Schema
from altimeter.core.graph.field.dict_field import AnonymousDictField
from altimeter.core.graph.field.list_field import ListField
from altimeter.core.graph.field.scalar_field import EmbeddedScalarField, ScalarField
class TransitGatewayVpcAttachmentResourceSpec(EC2ResourceSpec):
"""Resource for Transit Gateway VPC Attachments"""
type_name = "transit-gateway-vpc-attachment"
schema = Schema(
ScalarField("TransitGatewayAttachmentId"),
ScalarField("TransitGatewayId"),
ScalarField("VpcId"),
ScalarField("VpcOwnerId"),
ScalarField("State"),
ScalarField("CreationTime"),
ListField("SubnetIds", EmbeddedScalarField(), alti_key="subnet_id"),
AnonymousDictField("Options", ScalarField("DnsSupport"), ScalarField("Ipv6Support")),
)
@classmethod
def list_from_aws(
cls: Type["TransitGatewayVpcAttachmentResourceSpec"],
client: BaseClient,
account_id: str,
region: str,
) -> ListFromAWSResult:
paginator = client.get_paginator("describe_transit_gateway_vpc_attachments")
attachments = {}
for resp in paginator.paginate():
for attachment in resp.get("TransitGatewayVpcAttachments", []):
resource_arn = cls.generate_arn(
account_id, region, attachment["TransitGatewayAttachmentId"]
)
attachments[resource_arn] = attachment
return ListFromAWSResult(resources=attachments)
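# Hypothetical usage sketch -- the boto3 client construction below is an assumption and
# not part of this module; it only shows how list_from_aws might be driven directly:
#
#     import boto3
#     ec2_client = boto3.client("ec2", region_name="us-east-1")
#     result = TransitGatewayVpcAttachmentResourceSpec.list_from_aws(
#         ec2_client, account_id="123456789012", region="us-east-1"
#     )
#     # result.resources maps attachment ARNs to the raw API dicts collected above.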
| 39.644444
| 93
| 0.714686
|
cf1bb838cc0c2c5bbcb60ff2203bb1623c7d0bcb
| 34,334
|
py
|
Python
|
pymatgen/command_line/critic2_caller.py
|
mt-huebsch/pymatgen
|
92da4a6a3d7c7a2f4cfed19a49794d59f15b42e7
|
[
"MIT"
] | null | null | null |
pymatgen/command_line/critic2_caller.py
|
mt-huebsch/pymatgen
|
92da4a6a3d7c7a2f4cfed19a49794d59f15b42e7
|
[
"MIT"
] | null | null | null |
pymatgen/command_line/critic2_caller.py
|
mt-huebsch/pymatgen
|
92da4a6a3d7c7a2f4cfed19a49794d59f15b42e7
|
[
"MIT"
] | null | null | null |
"""
This module implements an interface to the critic2 Bader analysis code.
For most Bader analysis purposes, users are referred to
pymatgen.command_line.bader_caller instead; this module is for advanced
usage requiring identification of critical points in the charge density.
This module depends on a compiled critic2 executable available in the path.
Please follow the instructions at https://github.com/aoterodelaroza/critic2
to compile.
New users are *strongly* encouraged to read the critic2 manual first.
In brief,
* critic2 searches for critical points in charge density
* a critical point can be one of four types: nucleus, bond, ring
or cage
* it does this by seeding locations for likely critical points
and then searching in these regions
* there are two lists of critical points in the output, a list
of non-equivalent points (with in-depth information about the
field at those points), and a full list of points generated
by the appropriate symmetry operations
* connectivity between these points is also provided when
appropriate (e.g. the two nucleus critical points linked to
a bond critical point)
* critic2 can do many other things besides
If you use this module, please cite the following:
A. Otero-de-la-Roza, E. R. Johnson and V. Luaña,
Comput. Phys. Commun. 185, 1007-1018 (2014)
(http://dx.doi.org/10.1016/j.cpc.2013.10.026)
A. Otero-de-la-Roza, M. A. Blanco, A. Martín Pendás and
V. Luaña, Comput. Phys. Commun. 180, 157–166 (2009)
(http://dx.doi.org/10.1016/j.cpc.2008.07.018)
"""
import logging
import os
import subprocess
import warnings
from enum import Enum
import numpy as np
from monty.dev import requires
from monty.json import MSONable
from monty.os.path import which
from monty.serialization import loadfn
from monty.tempfile import ScratchDir
from scipy.spatial import KDTree
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.command_line.bader_caller import get_filepath
from pymatgen.core.periodic_table import DummySpecies
from pymatgen.io.vasp.inputs import Potcar
from pymatgen.io.vasp.outputs import Chgcar, VolumetricData
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
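# Illustrative workflow (the directory path is an assumption; the class docstrings
# below remain the authoritative reference): point the caller at a finished VASP run
# and inspect the resulting topology of the charge density, e.g.
#
#     caller = Critic2Caller.from_path("/path/to/vasp_run")
#     analysis = caller.output
#     for cp in analysis.critical_points:
#         print(cp)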
class Critic2Caller:
"""
Class to call critic2 and store standard output for further processing.
"""
@requires(
        which("critic2"),
        "Critic2Caller requires the executable critic2 to be in the path. "
"Please follow the instructions at https://github.com/aoterodelaroza/critic2.",
)
def __init__(
self,
structure,
chgcar=None,
chgcar_ref=None,
user_input_settings=None,
write_cml=False,
write_json=True,
zpsp=None,
):
"""
Run Critic2 in automatic mode on a supplied structure, charge
density (chgcar) and reference charge density (chgcar_ref).
The reason for a separate reference field is that in
VASP, the CHGCAR charge density only contains valence
electrons and may be missing substantial charge at
nuclei leading to misleading results. Thus, a reference
field is commonly constructed from the sum of AECCAR0
and AECCAR2 which is the total charge density, but then
the valence charge density is used for the final analysis.
If chgcar_ref is not supplied, chgcar will be used as the
reference field. If chgcar is not supplied, the promolecular
charge density will be used as the reference field -- this can
often still give useful results if only topological information
is wanted.
User settings is a dictionary that can contain:
* GRADEPS, float (field units), gradient norm threshold
* CPEPS, float (Bohr units in crystals), minimum distance between
critical points for them to be equivalent
* NUCEPS, same as CPEPS but specifically for nucleus critical
          points (critic2 default is dependent on grid dimensions)
* NUCEPSH, same as NUCEPS but specifically for hydrogen nuclei
since associated charge density can be significantly displaced
from hydrogen nucleus
* EPSDEGEN, float (field units), discard critical point if any
element of the diagonal of the Hessian is below this value,
useful for discarding points in vacuum regions
* DISCARD, float (field units), discard critical points with field
value below this value, useful for discarding points in vacuum
regions
* SEED, list of strings, strategies for seeding points, default
is ['WS 1', 'PAIR 10'] which seeds critical points by
sub-dividing the Wigner-Seitz cell and between every atom pair
closer than 10 Bohr, see critic2 manual for more options
:param structure: Structure to analyze
:param chgcar: Charge density to use for analysis. If None, will
use promolecular density. Should be a Chgcar object or path (string).
:param chgcar_ref: Reference charge density. If None, will use
chgcar as reference. Should be a Chgcar object or path (string).
:param user_input_settings (dict): as explained above
:param write_cml (bool): Useful for debug, if True will write all
critical points to a file 'table.cml' in the working directory
useful for visualization
:param write_json (bool): Whether to write out critical points
and YT json. YT integration will be performed with this setting.
:param zpsp (dict): Dict of element/symbol name to number of electrons
(ZVAL in VASP pseudopotential), with which to properly augment core regions
and calculate charge transfer. Optional.
"""
settings = {"CPEPS": 0.1, "SEED": ["WS", "PAIR DIST 10"]}
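        # Illustrative example (values chosen for illustration only, not recommended
        # defaults): a user_input_settings dict that tightens critical-point merging
        # and discards near-vacuum points might look like
        #     {"CPEPS": 0.5, "DISCARD": 1e-13, "SEED": ["WS 1", "PAIR 10"]}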
if user_input_settings:
settings.update(user_input_settings)
# Load crystal structure
input_script = ["crystal POSCAR"]
# Load data to use as reference field
if chgcar_ref:
input_script += ["load ref.CHGCAR id chg_ref", "reference chg_ref"]
# Load data to use for analysis
if chgcar:
input_script += ["load int.CHGCAR id chg_int", "integrable chg_int"]
if zpsp:
zpsp_str = " zpsp " + " ".join(
["{} {}".format(symbol, int(zval)) for symbol, zval in zpsp.items()]
)
input_script[-2] += zpsp_str
# Command to run automatic analysis
auto = "auto "
for k, v in settings.items():
if isinstance(v, list):
for item in v:
auto += "{} {} ".format(k, item)
else:
auto += "{} {} ".format(k, v)
input_script += [auto]
if write_cml:
input_script += ["cpreport ../table.cml cell border graph"]
if write_json:
input_script += ["cpreport cpreport.json"]
if write_json and chgcar:
# requires gridded data to work
input_script += ["yt"]
input_script += ["yt JSON yt.json"]
input_script = "\n".join(input_script)
# store if examining the input script is useful,
# not otherwise used
self._input_script = input_script
with ScratchDir(".") as temp_dir:
os.chdir(temp_dir)
with open("input_script.cri", "w") as f:
f.write(input_script)
structure.to(filename="POSCAR")
if chgcar and isinstance(chgcar, VolumetricData):
chgcar.write_file("int.CHGCAR")
elif chgcar:
os.symlink(chgcar, "int.CHGCAR")
if chgcar_ref and isinstance(chgcar_ref, VolumetricData):
chgcar_ref.write_file("ref.CHGCAR")
elif chgcar_ref:
os.symlink(chgcar_ref, "ref.CHGCAR")
args = ["critic2", "input_script.cri"]
rs = subprocess.Popen(
args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, close_fds=True
)
stdout, stderr = rs.communicate()
stdout = stdout.decode()
if stderr:
stderr = stderr.decode()
warnings.warn(stderr)
if rs.returncode != 0:
raise RuntimeError(
"critic2 exited with return code {}: {}".format(
rs.returncode, stdout
)
)
self._stdout = stdout
self._stderr = stderr
if os.path.exists("cpreport.json"):
cpreport = loadfn("cpreport.json")
else:
cpreport = None
if os.path.exists("yt.json"):
yt = loadfn("yt.json")
else:
yt = None
self.output = Critic2Analysis(
structure,
stdout=stdout,
stderr=stderr,
cpreport=cpreport,
yt=yt,
zpsp=zpsp,
)
@classmethod
def from_path(cls, path, suffix="", zpsp=None):
"""
Convenience method to run critic2 analysis on a folder containing
typical VASP output files.
This method will:
        1. Look for files CHGCAR, AECCAR0, AECCAR2, POTCAR or their gzipped
counterparts.
2. If AECCAR* files are present, constructs a temporary reference
file as AECCAR0 + AECCAR2.
3. Runs critic2 analysis twice: once for charge, and a second time
for the charge difference (magnetization density).
:param path: path to folder to search in
:param suffix: specific suffix to look for (e.g. '.relax1' for
'CHGCAR.relax1.gz')
:param zpsp: manually specify ZPSP if POTCAR not present
:return:
"""
chgcar_path = get_filepath("CHGCAR", "Could not find CHGCAR!", path, suffix)
chgcar = Chgcar.from_file(chgcar_path)
chgcar_ref = None
if not zpsp:
potcar_path = get_filepath(
"POTCAR",
"Could not find POTCAR, will not be able to calculate charge transfer.",
path,
suffix,
)
if potcar_path:
potcar = Potcar.from_file(potcar_path)
zpsp = {p.element: p.zval for p in potcar}
if not zpsp:
# try and get reference "all-electron-like" charge density if zpsp not present
aeccar0_path = get_filepath(
"AECCAR0",
"Could not find AECCAR0, interpret Bader results with caution.",
path,
suffix,
)
aeccar0 = Chgcar.from_file(aeccar0_path) if aeccar0_path else None
aeccar2_path = get_filepath(
"AECCAR2",
"Could not find AECCAR2, interpret Bader results with caution.",
path,
suffix,
)
aeccar2 = Chgcar.from_file(aeccar2_path) if aeccar2_path else None
chgcar_ref = aeccar0.linear_add(aeccar2) if (aeccar0 and aeccar2) else None
return cls(chgcar.structure, chgcar, chgcar_ref, zpsp=zpsp)
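    # Hypothetical alternative to from_path (file names are assumptions): build the
    # reference field from AECCAR0 + AECCAR2 explicitly and pass Chgcar objects in.
    #
    #     chgcar = Chgcar.from_file("CHGCAR")
    #     chgcar_ref = Chgcar.from_file("AECCAR0").linear_add(Chgcar.from_file("AECCAR2"))
    #     caller = Critic2Caller(chgcar.structure, chgcar=chgcar, chgcar_ref=chgcar_ref)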
class CriticalPointType(Enum):
"""
Enum type for the different varieties of critical point.
"""
nucleus = "nucleus" # (3, -3)
bond = "bond" # (3, -1)
ring = "ring" # (3, 1)
cage = "cage" # (3, 3)
nnattr = "nnattr" # (3, -3), non-nuclear attractor
class CriticalPoint(MSONable):
"""
Access information about a critical point and the field values at that point.
"""
def __init__(
self,
index,
type,
frac_coords,
point_group,
multiplicity,
field,
field_gradient,
coords=None,
field_hessian=None,
):
"""
Class to characterise a critical point from a topological
analysis of electron charge density.
Note this class is usually associated with a Structure, so
has information on multiplicity/point group symmetry.
:param index: index of point
:param type: type of point, given as a string
:param coords: Cartesian co-ordinates in Angstroms
:param frac_coords: fractional co-ordinates
:param point_group: point group associated with critical point
:param multiplicity: number of equivalent critical points
:param field: value of field at point (f)
:param field_gradient: gradient of field at point (grad f)
:param field_hessian: hessian of field at point (del^2 f)
"""
self.index = index
self._type = type
self.coords = coords
self.frac_coords = frac_coords
self.point_group = point_group
self.multiplicity = multiplicity
self.field = field
self.field_gradient = field_gradient
self.field_hessian = field_hessian
@property
def type(self):
"""
Returns: Instance of CriticalPointType
"""
return CriticalPointType(self._type)
def __str__(self):
return "Critical Point: {} ({})".format(self.type.name, self.frac_coords)
@property
def laplacian(self):
"""
Returns: The Laplacian of the field at the critical point
"""
return np.trace(self.field_hessian)
@property
def ellipticity(self):
"""
Most meaningful for bond critical points,
can be physically interpreted as e.g. degree
of pi-bonding in organic molecules. Consult
literature for more information.
        Returns: The ellipticity of the field at the critical point
"""
eig, _ = np.linalg.eig(self.field_hessian)
eig.sort()
return eig[0] / eig[1] - 1
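    # Worked example (made-up Hessian eigenvalues): after sorting, eigenvalues of
    # (-1.2, -1.0, 3.5) at a bond critical point give an ellipticity of
    # (-1.2) / (-1.0) - 1 = 0.2, i.e. the bond charge density is mildly elliptical.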
class Critic2Analysis(MSONable):
"""
Class to process the standard output from critic2 into pymatgen-compatible objects.
"""
def __init__(
self, structure, stdout=None, stderr=None, cpreport=None, yt=None, zpsp=None
):
"""
This class is used to store results from the Critic2Caller.
To explore the bond graph, use the "structure_graph"
method, which returns a user-friendly StructureGraph
class with bonding information. By default, this returns
a StructureGraph with edge weights as bond lengths, but
can optionally return a graph with edge weights as any
property supported by the `CriticalPoint` class, such as
bond ellipticity.
This class also provides an interface to explore just the
non-symmetrically-equivalent critical points via the
`critical_points` attribute, and also all critical
points (via nodes dict) and connections between them
(via edges dict). The user should be familiar with critic2
before trying to understand these.
Indexes of nucleus critical points in the nodes dict are the
same as the corresponding sites in structure, with indices of
other critical points arbitrarily assigned.
Only one of (stdout, cpreport) required, with cpreport preferred
since this is a new, native JSON output from critic2.
:param structure: associated Structure
:param stdout: stdout from running critic2 in automatic
mode
:param stderr: stderr from running critic2 in automatic
mode
:param cpreport: json output from CPREPORT command
:param yt: json output from YT command
:param zpsp (dict): Dict of element/symbol name to number of electrons
(ZVAL in VASP pseudopotential), with which to calculate charge transfer.
Optional.
"""
self.structure = structure
self._stdout = stdout
self._stderr = stderr
self._cpreport = cpreport
self._yt = yt
self._zpsp = zpsp
self.nodes = {}
self.edges = {}
if yt:
self.structure = self._annotate_structure_with_yt(yt, structure, zpsp)
if cpreport:
self._parse_cpreport(cpreport)
elif stdout:
self._parse_stdout(stdout)
else:
raise ValueError("One of cpreport or stdout required.")
self._remap_indices()
def structure_graph(self, include_critical_points=("bond", "ring", "cage")):
"""
A StructureGraph object describing bonding information
in the crystal.
Args:
include_critical_points: add DummySpecies for
the critical points themselves, a list of
"nucleus", "bond", "ring", "cage", set to None
to disable
Returns: a StructureGraph
"""
structure = self.structure.copy()
point_idx_to_struct_idx = {}
if include_critical_points:
# atoms themselves don't have field information
# so set to 0
for prop in ("ellipticity", "laplacian", "field"):
structure.add_site_property(prop, [0] * len(structure))
for idx, node in self.nodes.items():
cp = self.critical_points[node["unique_idx"]]
if cp.type.value in include_critical_points:
specie = DummySpecies(
"X{}cp".format(cp.type.value[0]), oxidation_state=None
)
structure.append(
specie,
node["frac_coords"],
properties={
"ellipticity": cp.ellipticity,
"laplacian": cp.laplacian,
"field": cp.field,
},
)
point_idx_to_struct_idx[idx] = len(structure) - 1
edge_weight = "bond_length"
edge_weight_units = "Å"
sg = StructureGraph.with_empty_graph(
structure,
name="bonds",
edge_weight_name=edge_weight,
edge_weight_units=edge_weight_units,
)
edges = self.edges.copy()
idx_to_delete = []
# check for duplicate bonds
for idx, edge in edges.items():
unique_idx = self.nodes[idx]["unique_idx"]
# only check edges representing bonds, not rings
if self.critical_points[unique_idx].type == CriticalPointType.bond:
if idx not in idx_to_delete:
for idx2, edge2 in edges.items():
if idx != idx2 and edge == edge2:
idx_to_delete.append(idx2)
warnings.warn(
"Duplicate edge detected, try re-running "
"critic2 with custom parameters to fix this. "
"Mostly harmless unless user is also "
"interested in rings/cages."
)
logger.debug(
"Duplicate edge between points {} (unique point {})"
"and {} ({}).".format(
idx,
self.nodes[idx]["unique_idx"],
idx2,
self.nodes[idx2]["unique_idx"],
)
)
# and remove any duplicate bonds present
for idx in idx_to_delete:
del edges[idx]
for idx, edge in edges.items():
unique_idx = self.nodes[idx]["unique_idx"]
# only add edges representing bonds, not rings
if self.critical_points[unique_idx].type == CriticalPointType.bond:
from_idx = edge["from_idx"]
to_idx = edge["to_idx"]
# have to also check bond is between nuclei if non-nuclear
# attractors not in structure
skip_bond = False
if include_critical_points and "nnattr" not in include_critical_points:
from_type = self.critical_points[
self.nodes[from_idx]["unique_idx"]
].type
to_type = self.critical_points[
                        self.nodes[to_idx]["unique_idx"]
].type
skip_bond = (from_type != CriticalPointType.nucleus) or (
to_type != CriticalPointType.nucleus
)
if not skip_bond:
from_lvec = edge["from_lvec"]
to_lvec = edge["to_lvec"]
relative_lvec = np.subtract(to_lvec, from_lvec)
# for edge case of including nnattrs in bonding graph when other critical
# points also included, indices may get mixed
struct_from_idx = point_idx_to_struct_idx.get(from_idx, from_idx)
struct_to_idx = point_idx_to_struct_idx.get(to_idx, to_idx)
weight = self.structure.get_distance(
struct_from_idx, struct_to_idx, jimage=relative_lvec
)
crit_point = self.critical_points[unique_idx]
edge_properties = {
"field": crit_point.field,
"laplacian": crit_point.laplacian,
"ellipticity": crit_point.ellipticity,
"frac_coords": self.nodes[idx]["frac_coords"],
}
sg.add_edge(
struct_from_idx,
struct_to_idx,
from_jimage=from_lvec,
to_jimage=to_lvec,
weight=weight,
edge_properties=edge_properties,
)
return sg
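    # Illustrative follow-up (edge-attribute access assumes pymatgen's StructureGraph
    # exposes its underlying networkx graph as .graph and stores the keys set above;
    # treat the exact keys as assumptions): bond weights and critic2-derived properties
    # could then be inspected with something like
    #     sg = analysis.structure_graph()
    #     for u, v, data in sg.graph.edges(data=True):
    #         print(u, v, data.get("weight"), data.get("field"))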
def get_critical_point_for_site(self, n):
"""
Args:
n: Site index n
Returns: A CriticalPoint instance
"""
return self.critical_points[self.nodes[n]["unique_idx"]]
def get_volume_and_charge_for_site(self, n):
"""
Args:
n: Site index n
Returns: A dict containing "volume" and "charge" keys,
or None if YT integration not performed
"""
# pylint: disable=E1101
if not self._node_values:
return None
return self._node_values[n]
def _parse_cpreport(self, cpreport):
def get_type(signature: int, is_nucleus: bool):
if signature == 3:
return "cage"
if signature == 1:
return "ring"
if signature == -1:
return "bond"
if signature == -3:
if is_nucleus:
return "nucleus"
return "nnattr"
return None
bohr_to_angstrom = 0.529177
self.critical_points = [
CriticalPoint(
p["id"] - 1,
get_type(p["signature"], p["is_nucleus"]),
p["fractional_coordinates"],
p["point_group"],
p["multiplicity"],
p["field"],
p["gradient"],
coords=[x * bohr_to_angstrom for x in p["cartesian_coordinates"]]
if cpreport["units"] == "bohr"
else None,
field_hessian=p["hessian"],
)
for p in cpreport["critical_points"]["nonequivalent_cps"]
]
for idx, p in enumerate(cpreport["critical_points"]["cell_cps"]):
self._add_node(
idx=p["id"] - 1,
unique_idx=p["nonequivalent_id"] - 1,
frac_coords=p["fractional_coordinates"],
)
if "attractors" in p:
self._add_edge(
idx=p["id"] - 1,
from_idx=int(p["attractors"][0]["cell_id"]) - 1,
from_lvec=p["attractors"][0]["lvec"],
to_idx=int(p["attractors"][1]["cell_id"]) - 1,
to_lvec=p["attractors"][1]["lvec"],
)
def _remap_indices(self):
"""
Re-maps indices on self.nodes and self.edges such that node indices match
that of structure, and then sorts self.nodes by index.
"""
# Order of nuclei provided by critic2 doesn't
# necessarily match order of sites in Structure.
# This is because critic2 performs a symmetrization step.
# We perform a mapping from one to the other,
# and re-index all nodes accordingly.
node_mapping = {} # critic2_index:structure_index
# ensure frac coords are in [0,1] range
frac_coords = np.array(self.structure.frac_coords) % 1
kd = KDTree(frac_coords)
node_mapping = {}
for idx, node in self.nodes.items():
if (
self.critical_points[node["unique_idx"]].type
== CriticalPointType.nucleus
):
node_mapping[idx] = kd.query(node["frac_coords"])[1]
if len(node_mapping) != len(self.structure):
warnings.warn(
"Check that all sites in input structure ({}) have "
"been detected by critic2 ({}).".format(
len(self.structure), len(node_mapping)
)
)
self.nodes = {
node_mapping.get(idx, idx): node for idx, node in self.nodes.items()
}
for edge in self.edges.values():
edge["from_idx"] = node_mapping.get(edge["from_idx"], edge["from_idx"])
edge["to_idx"] = node_mapping.get(edge["to_idx"], edge["to_idx"])
@staticmethod
def _annotate_structure_with_yt(yt, structure, zpsp):
volume_idx = None
charge_idx = None
for prop in yt["integration"]["properties"]:
if prop["label"] == "Volume":
volume_idx = prop["id"] - 1 # 1-indexed, change to 0
elif prop["label"] == "$chg_int":
charge_idx = prop["id"] - 1
def get_volume_and_charge(nonequiv_idx):
attractor = yt["integration"]["attractors"][nonequiv_idx - 1]
if attractor["id"] != nonequiv_idx:
raise ValueError(
"List of attractors may be un-ordered (wanted id={}): {}".format(
nonequiv_idx, attractor
)
)
return (
attractor["integrals"][volume_idx],
attractor["integrals"][charge_idx],
)
volumes = []
charges = []
charge_transfer = []
for idx, site in enumerate(yt["structure"]["cell_atoms"]):
if not np.allclose(
structure[idx].frac_coords, site["fractional_coordinates"]
):
raise IndexError(
"Site in structure doesn't seem to match site in YT integration:\n{}\n{}".format(
structure[idx], site
)
)
volume, charge = get_volume_and_charge(site["nonequivalent_id"])
volumes.append(volume)
charges.append(charge)
if zpsp:
if structure[idx].species_string in zpsp:
charge_transfer.append(charge - zpsp[structure[idx].species_string])
else:
raise ValueError(
"ZPSP argument does not seem compatible with species in structure ({}): {}".format(
structure[idx].species_string, zpsp
)
)
structure = structure.copy()
structure.add_site_property("bader_volume", volumes)
structure.add_site_property("bader_charge", charges)
if zpsp:
if len(charge_transfer) != len(charges):
warnings.warn(
"Something went wrong calculating charge transfer: {}".format(
charge_transfer
)
)
else:
structure.add_site_property("bader_charge_transfer", charge_transfer)
return structure
def _parse_stdout(self, stdout):
warnings.warn(
"Parsing critic2 standard output is deprecated and will not be maintained, "
"please use the native JSON output in future."
)
stdout = stdout.split("\n")
# NOTE WE ARE USING 0-BASED INDEXING:
# This is different from critic2 which
# uses 1-based indexing, so all parsed
# indices have 1 subtracted.
# Parsing happens in two stages:
# 1. We construct a list of unique critical points
# (i.e. non-equivalent by the symmetry of the crystal)
# and the properties of the field at those points
# 2. We construct a list of nodes and edges describing
# all critical points in the crystal
# Steps 1. and 2. are essentially independent, except
# that the critical points in 2. have a pointer to their
# associated unique critical point in 1. so that more
# information on that point can be retrieved if necessary.
unique_critical_points = []
# parse unique critical points
for i, line in enumerate(stdout):
if "mult name f |grad| lap" in line:
start_i = i + 1
elif "* Analysis of system bonds" in line:
end_i = i - 2
# if start_i and end_i haven't been found, we
# need to re-evaluate assumptions in this parser!
for i, line in enumerate(stdout):
if start_i <= i <= end_i:
l = line.replace("(", "").replace(")", "").split()
unique_idx = int(l[0]) - 1
point_group = l[1]
# type = l[2] # type from definition of critical point e.g. (3, -3)
critical_point_type = l[3] # type from name, e.g. nucleus
frac_coords = [float(l[4]), float(l[5]), float(l[6])]
multiplicity = float(l[7])
# name = float(l[8])
field = float(l[9])
field_gradient = float(l[10])
# laplacian = float(l[11])
point = CriticalPoint(
unique_idx,
critical_point_type,
frac_coords,
point_group,
multiplicity,
field,
field_gradient,
)
unique_critical_points.append(point)
for i, line in enumerate(stdout):
if "+ Critical point no." in line:
unique_idx = int(line.split()[4]) - 1
elif "Hessian:" in line:
l1 = list(map(float, stdout[i + 1].split()))
l2 = list(map(float, stdout[i + 2].split()))
l3 = list(map(float, stdout[i + 3].split()))
hessian = [
[l1[0], l1[1], l1[2]],
[l2[0], l2[1], l2[2]],
[l3[0], l3[1], l3[2]],
]
unique_critical_points[unique_idx].field_hessian = hessian
self.critical_points = unique_critical_points
# parse graph connecting critical points
for i, line in enumerate(stdout):
if "#cp ncp typ position " in line:
start_i = i + 1
elif "* Attractor connectivity matrix" in line:
end_i = i - 2
# if start_i and end_i haven't been found, we
# need to re-evaluate assumptions in this parser!
for i, line in enumerate(stdout):
if start_i <= i <= end_i:
l = line.replace("(", "").replace(")", "").split()
idx = int(l[0]) - 1
unique_idx = int(l[1]) - 1
frac_coords = [float(l[3]), float(l[4]), float(l[5])]
self._add_node(idx, unique_idx, frac_coords)
if len(l) > 6:
from_idx = int(l[6]) - 1
to_idx = int(l[10]) - 1
self._add_edge(
idx,
from_idx=from_idx,
from_lvec=(int(l[7]), int(l[8]), int(l[9])),
to_idx=to_idx,
to_lvec=(int(l[11]), int(l[12]), int(l[13])),
)
def _add_node(self, idx, unique_idx, frac_coords):
"""
Add information about a node describing a critical point.
:param idx: index
:param unique_idx: index of unique CriticalPoint,
used to look up more information of point (field etc.)
        :param frac_coords: fractional co-ordinates of point
:return:
"""
self.nodes[idx] = {"unique_idx": unique_idx, "frac_coords": frac_coords}
def _add_edge(self, idx, from_idx, from_lvec, to_idx, to_lvec):
"""
Add information about an edge linking two critical points.
This actually describes two edges:
from_idx ------ idx ------ to_idx
However, in practice, from_idx and to_idx will typically be
atom nuclei, with the center node (idx) referring to a bond
critical point. Thus, it will be more convenient to model
this as a single edge linking nuclei with the properties
of the bond critical point stored as an edge attribute.
:param idx: index of node
:param from_idx: from index of node
:param from_lvec: vector of lattice image the from node is in
as tuple of ints
:param to_idx: to index of node
:param to_lvec: vector of lattice image the to node is in as
tuple of ints
:return:
"""
self.edges[idx] = {
"from_idx": from_idx,
"from_lvec": from_lvec,
"to_idx": to_idx,
"to_lvec": to_lvec,
}
| 37.037756
| 107
| 0.56096
|
49191d6d875773ee488e9326de578065d9ba8c20
| 2,042
|
py
|
Python
|
invocations/console.py
|
daobook/invocations
|
9d64e17a7446713c045646ef28ef8562dd05aea1
|
[
"BSD-2-Clause"
] | 114
|
2015-01-07T18:03:03.000Z
|
2022-03-29T02:57:04.000Z
|
invocations/console.py
|
daobook/invocations
|
9d64e17a7446713c045646ef28ef8562dd05aea1
|
[
"BSD-2-Clause"
] | 20
|
2015-02-01T21:52:10.000Z
|
2022-03-27T20:11:28.000Z
|
invocations/console.py
|
daobook/invocations
|
9d64e17a7446713c045646ef28ef8562dd05aea1
|
[
"BSD-2-Clause"
] | 23
|
2015-09-21T00:03:20.000Z
|
2022-01-27T08:14:40.000Z
|
"""
Text console UI helpers and patterns, e.g. 'Y/n' prompts and the like.
"""
from __future__ import unicode_literals, print_function
import sys
from invoke.vendor.six.moves import input
# NOTE: originally cribbed from fab 1's contrib.console.confirm
def confirm(question, assume_yes=True):
"""
Ask user a yes/no question and return their response as a boolean.
``question`` should be a simple, grammatically complete question such as
"Do you wish to continue?", and will have a string similar to ``" [Y/n] "``
appended automatically. This function will *not* append a question mark for
you.
By default, when the user presses Enter without typing anything, "yes" is
assumed. This can be changed by specifying ``assume_yes=False``.
.. note::
If the user does not supply input that is (case-insensitively) equal to
"y", "yes", "n" or "no", they will be re-prompted until they do.
:param str question: The question part of the prompt.
:param bool assume_yes:
Whether to assume the affirmative answer by default. Default value:
``True``.
:returns: A `bool`.
"""
# Set up suffix
if assume_yes:
suffix = "Y/n"
else:
suffix = "y/N"
# Loop till we get something we like
# TODO: maybe don't do this? It can be annoying. Turn into 'q'-for-quit?
while True:
# TODO: ensure that this is Ctrl-C friendly, ISTR issues with
# raw_input/input on some Python versions blocking KeyboardInterrupt.
response = input("{} [{}] ".format(question, suffix))
response = response.lower().strip() # Normalize
# Default
if not response:
return assume_yes
# Yes
if response in ["y", "yes"]:
return True
# No
if response in ["n", "no"]:
return False
# Didn't get empty, yes or no, so complain and loop
err = "I didn't understand you. Please specify '(y)es' or '(n)o'."
print(err, file=sys.stderr)
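# Illustrative usage (the surrounding cleanup task is an assumption, not part of this
# module):
#
#     if confirm("Really delete all build artifacts?", assume_yes=False):
#         do_the_deletion()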
| 34.033333
| 79
| 0.632713
|
9d27ecb74e8fff731beb1499bdc401aec9b17030
| 4,471
|
py
|
Python
|
server/image_browse_resource.py
|
HailLab/HistomicsTK
|
83af8312851c2719fbf475635ad494ee5f885582
|
[
"Apache-2.0"
] | null | null | null |
server/image_browse_resource.py
|
HailLab/HistomicsTK
|
83af8312851c2719fbf475635ad494ee5f885582
|
[
"Apache-2.0"
] | null | null | null |
server/image_browse_resource.py
|
HailLab/HistomicsTK
|
83af8312851c2719fbf475635ad494ee5f885582
|
[
"Apache-2.0"
] | null | null | null |
from girder.api import access
from girder.api.v1.item import Item as ItemResource
from girder.api.describe import autoDescribeRoute, Description
from girder.constants import AccessType
from girder.exceptions import RestException
from girder.models.folder import Folder
import random
def _isLargeImageItem(item):
return item.get('largeImage', {}).get('fileId') is not None
class ImageBrowseResource(ItemResource):
"""Extends the "item" resource to iterate through images im a folder."""
def __init__(self, apiRoot):
# Don't call the parent (Item) constructor, to avoid redefining routes,
# but do call the grandparent (Resource) constructor
super(ItemResource, self).__init__()
self.resourceName = 'item'
apiRoot.item.route('GET', (':id', 'next_image'), self.getNextImage)
apiRoot.item.route('GET', (':id', 'previous_image'), self.getPreviousImage)
def getAdjacentImages(self, currentImage, currentFolder=None):
user = self.getCurrentUser()
groups = [str(g) for g in user.get('groups', [])]
expert_group = '5e3102c0e3c0d89a0744bf50'
folderModel = Folder()
if currentFolder:
folder = currentFolder
else:
folder = folderModel.load(
currentImage['folderId'], user=user, level=AccessType.READ)
if folder.get('isVirtual'):
children = folderModel.childItems(folder, includeVirtual=True)
else:
children = folderModel.childItems(folder)
allImages = [item for item in children if _isLargeImageItem(item)]
if expert_group not in groups:
random.seed(user.get('_id'))
random.shuffle(allImages)
try:
index = allImages.index(currentImage)
except ValueError:
raise RestException('Id is not an image', 404)
if index >= len(allImages) - 1 and str(folder['_id']) == '5f0dc45cc9f8c18253ae949b':
user_email = user.get('email', 'unknown_user').lower()
nextImage = {u'size': 3016797, u'_id': u'https://redcap.vanderbilt.edu/surveys/?s=HH3D3PMNM8&skin_email=' + user_email, u'description': u'', u'baseParentType': u'collection', u'baseParentId': u'5e4719631c7080564deb44e5', u'creatorId': u'5e2f35c7e7a8d01deb3964f3', u'folderId': u'5f0dc45cc9f8c18253ae949b', u'lowerName': u'survey.jpg', u'name': u'survey.JPG'}
elif index >= len(allImages) - 1 and str(folder['_id']) == '5f0dc449c9f8c18253ae949a':
user_email = user.get('email', 'unknown_user').lower()
nextImage = {u'size': 3016797, u'_id': u'https://redcap.vanderbilt.edu/surveys/?s=RARCDR4N443KDYHR&user_id=' + user_email, u'description': u'', u'baseParentType': u'collection', u'baseParentId': u'5e4719631c7080564deb44e5', u'creatorId': u'5e2f35c7e7a8d01deb3964f3', u'folderId': u'5f0dc449c9f8c18253ae949a', u'lowerName': u'survey.jpg', u'name': u'survey.JPG'}
else:
nextImage = allImages[(index + 1) % len(allImages)]
return {
'previous': allImages[index - 1],
'next': nextImage
}
@access.public
@autoDescribeRoute(
Description('Get the next image in the same folder as the given item.')
.modelParam('id', 'The current image ID',
model='item', destName='image', paramType='path', level=AccessType.READ)
.modelParam('folderId', 'The (virtual) folder ID the image is located in',
model='folder', destName='folder', paramType='query', level=AccessType.READ,
required=False)
.errorResponse()
.errorResponse('Image not found', code=404)
)
def getNextImage(self, image, folder):
return self.getAdjacentImages(image, folder)['next']
@access.public
@autoDescribeRoute(
Description('Get the previous image in the same folder as the given item.')
.modelParam('id', 'The current item ID',
model='item', destName='image', paramType='path', level=AccessType.READ)
.modelParam('folderId', 'The (virtual) folder ID the image is located in',
model='folder', destName='folder', paramType='query', level=AccessType.READ,
required=False)
.errorResponse()
.errorResponse('Image not found', code=404)
)
def getPreviousImage(self, image, folder):
return self.getAdjacentImages(image, folder)['previous']
| 49.131868
| 373
| 0.648848
|
1b806a393b639ef1888583aaf3a7eb9e51071239
| 885
|
py
|
Python
|
setup.py
|
roansong/osu-replay-parser
|
70a206622b51bb8443d423f6da671bb005cb32f7
|
[
"MIT"
] | 1
|
2019-12-08T07:22:56.000Z
|
2019-12-08T07:22:56.000Z
|
setup.py
|
roansong/osu-replay-parser
|
70a206622b51bb8443d423f6da671bb005cb32f7
|
[
"MIT"
] | null | null | null |
setup.py
|
roansong/osu-replay-parser
|
70a206622b51bb8443d423f6da671bb005cb32f7
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
from setuptools import find_packages
version = '3.0.0'
setup(
name = 'osrparse',
version = version,
description = "Python implementation of osu! rhythm game replay parser.",
classifiers = [
'Topic :: Games/Entertainment',
'Topic :: Software Development :: Libraries',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
keywords = ['osu!, osr, replay, replays, parsing, parser, python'],
author = 'Kevin Lim',
author_email = 'kszlim@gmail.com',
url = 'https://github.com/kszlim/osu-replay-parser',
download_url = 'https://github.com/kszlim/osu-replay-parser/tarball/' + version,
license = 'MIT',
test_suite="tests",
packages = find_packages()
)
| 32.777778
| 84
| 0.639548
|
dcc7b4f726e1d0b585747e0aa2c9cd7d082bef4f
| 2,603
|
py
|
Python
|
Chapter07/code/csv_loader.py
|
bdonkey/Amazon-SageMaker-Best-Practices
|
7d1afe63c03e73b00b9d332026b81a9cdd5075e6
|
[
"MIT"
] | 11
|
2021-03-22T23:37:39.000Z
|
2022-02-02T07:37:46.000Z
|
Chapter07/code/csv_loader.py
|
bdonkey/Amazon-SageMaker-Best-Practices
|
7d1afe63c03e73b00b9d332026b81a9cdd5075e6
|
[
"MIT"
] | null | null | null |
Chapter07/code/csv_loader.py
|
bdonkey/Amazon-SageMaker-Best-Practices
|
7d1afe63c03e73b00b9d332026b81a9cdd5075e6
|
[
"MIT"
] | 6
|
2021-12-17T03:00:59.000Z
|
2022-03-16T07:52:13.000Z
|
import os
from torch.utils.data import Dataset
import glob
import torch
import sys
import logging
import collections
import bisect
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
class CsvDataset(Dataset):
def __init__(self, csv_path):
self.csv_path = csv_path
if os.path.isfile(csv_path):
self.folder = False
self.count, fmap, self.line_offset = self.get_line_count(csv_path)
logger.debug(f"For {csv_path}, count = {self.count}")
else:
self.folder = True
self.count, fmap, self.line_offset = self.get_folder_line_count(csv_path)
self.fmap = collections.OrderedDict(sorted(fmap.items()))
def get_folder_line_count(self, d):
cnt = 0
all_map = {}
all_lc = {}
for f in glob.glob(os.path.join(d, '*.csv')):
fcnt, _, line_offset = self.get_line_count(f)
cnt = cnt + fcnt
all_map[cnt] = f
all_lc.update(line_offset)
return cnt, all_map, all_lc
def get_line_count(self, f):
with open(f) as F:
line_offset = []
offset = 0
count = 0
for line in F:
line_offset.append(offset)
offset += len(line)
count = count + 1
return count, {count: f}, {f: line_offset}
def __len__(self):
return self.count
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
logger.debug(f"Indices: {idx}")
# This gives us the index in the line counts greater than or equal to the desired index.
# The map value for this line count is the file name containing that row.
klist = list(self.fmap.keys())
idx_m = bisect.bisect_left(klist, idx+1)
        # Grab the ending count of this file
cur_idx = klist[idx_m]
# grab the ending count of the previous file
if idx_m > 0:
prev_idx = klist[idx_m-1]
else:
prev_idx = 0
# grab the file name for the desired row count
fname = self.fmap[cur_idx]
loff = self.line_offset[fname]
with open(fname) as F:
F.seek(loff[idx - prev_idx])
idx_line = F.readline()
idx_parts = idx_line.split(',')
return tuple([torch.tensor( [float(f) for f in idx_parts[1:]] ), torch.tensor(float(idx_parts[0]))])
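# A minimal usage sketch (not part of the original module): it assumes a file or
# folder of CSV files whose first column is the label and whose rows all have the
# same number of columns; "./data/train" is a hypothetical path.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    dataset = CsvDataset("./data/train")   # single .csv file or folder of *.csv
    print("rows:", len(dataset))
    features, label = dataset[0]           # (feature tensor, label tensor)
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    for batch_features, batch_labels in loader:
        print(batch_features.shape, batch_labels.shape)
        break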
| 30.623529
| 108
| 0.56627
|
756c73c66617bb16f49291d11916d3b63db280ec
| 4,174
|
py
|
Python
|
src/modules/minecraft/stats/bedwars_stats.py
|
teymour-aldridge/robo
|
e31fcf14120822ec5732c465f75728e1a02b79be
|
[
"MIT"
] | null | null | null |
src/modules/minecraft/stats/bedwars_stats.py
|
teymour-aldridge/robo
|
e31fcf14120822ec5732c465f75728e1a02b79be
|
[
"MIT"
] | null | null | null |
src/modules/minecraft/stats/bedwars_stats.py
|
teymour-aldridge/robo
|
e31fcf14120822ec5732c465f75728e1a02b79be
|
[
"MIT"
] | null | null | null |
import discord
from exceptions import StatsNotFoundError
hypixel_logo_url = "https://pbs.twimg.com/profile_images/1346968969849171970/DdNypQdN_400x400.png"
corresponding_gamemodes_and_gamecodes = {
"eight_one": "Solo", "eight_two": "Doubles",
"four_three": "Threes", "four_four": "Fours"
}
def embed_bedwars_stats(base_player_data, page_number) -> discord.Embed:
stats_embed = discord.Embed(
title=base_player_data["username"],
colour=discord.Colour.gold()
)
    # get only the data I need from the dict with all the data
if page_number == 0:
stats_data, gamemode = get_overall_bedwars_stats(
base_player_data["gamemode_specific_data"], base_player_data["player_rank"])
else:
gamecode = page_number_to_gamecode(page_number)
stats_data, gamemode = get_mode_specific_bedwars_stats(
base_player_data["gamemode_specific_data"], base_player_data["player_rank"], gamecode)
stats_embed.set_author(icon_url=hypixel_logo_url,
name=f" {gamemode} bedwars stats")
stats_embed.set_thumbnail(url=base_player_data["user_avatar_url"])
stats_embed.set_footer(text=base_player_data["first_and_last_login"])
for stat in stats_data.keys():
stats_embed.add_field(
name=stat, value=f"`{stats_data[stat]}`", inline=True)
return stats_embed
def get_overall_bedwars_stats(data, player_rank):
gamemode = "Overall"
try:
overall_stats_data = {
"Rank": "[{}]".format(player_rank), "Coins": data["coins"], "Winstreak": data["winstreak"],
"Wins": data["wins_bedwars"], "Losses": data["losses_bedwars"], "WLR": round(data["wins_bedwars"]/data["losses_bedwars"], 2),
"Kills": data["kills_bedwars"], "Deaths": data["deaths_bedwars"], "K/D": round(data["kills_bedwars"]/data["deaths_bedwars"], 2),
"Final Kills": data["final_kills_bedwars"], "Final Deaths": data["final_deaths_bedwars"], "FKDR": round(data["final_kills_bedwars"]/data["final_deaths_bedwars"], 2),
"Beds Broken": data["beds_broken_bedwars"], "Beds Lost": data["beds_lost_bedwars"], "BBBLR": round(data["beds_broken_bedwars"]/data["beds_lost_bedwars"], 2),
"Games Played": data["games_played_bedwars"], "Diamonds": data["diamond_resources_collected_bedwars"], "Emeralds": data["emerald_resources_collected_bedwars"]
}
    except (KeyError, ZeroDivisionError):
raise StatsNotFoundError
return overall_stats_data, gamemode
def get_mode_specific_bedwars_stats(data, player_rank, gamecode):
gamemode = corresponding_gamemodes_and_gamecodes[gamecode]
try:
overall_stats_data = {
"Rank": "[{}]".format(player_rank), "Coins": data["coins"], "Winstreak": data[f"{gamecode}_winstreak"],
"Wins": data[f"{gamecode}_wins_bedwars"], "Losses": data[f"{gamecode}_losses_bedwars"], "WLR": round(data[f"{gamecode}_wins_bedwars"]/data[f"{gamecode}_losses_bedwars"], 2),
"Kills": data[f"{gamecode}_kills_bedwars"], "Deaths": data["deaths_bedwars"], "K/D": round(data[f"{gamecode}_kills_bedwars"]/data[f"{gamecode}_deaths_bedwars"], 2),
"Final Kills": data[f"{gamecode}_final_kills_bedwars"], "Final Deaths": data[f"{gamecode}_final_deaths_bedwars"], "FKDR": round(data[f"{gamecode}_final_kills_bedwars"]/data[f"{gamecode}_final_deaths_bedwars"], 2),
"Beds Broken": data[f"{gamecode}_beds_broken_bedwars"], "Beds Lost": data[f"{gamecode}_beds_lost_bedwars"], "BBBLR": round(data[f"{gamecode}_beds_broken_bedwars"]/data[f"{gamecode}_beds_lost_bedwars"], 2),
"Games Played": data[f"{gamecode}_games_played_bedwars"], "Diamonds": data[f"{gamecode}_diamond_resources_collected_bedwars"], "Emeralds": data[f"{gamecode}_emerald_resources_collected_bedwars"]
}
    except (KeyError, ZeroDivisionError):
raise StatsNotFoundError
return overall_stats_data, gamemode
def page_number_to_gamecode(page_number):
corresponding_page_numbers_and_gamecodes = {
1: "eight_one", 2: "eight_two",
3: "four_three", 4: "four_four"
}
return corresponding_page_numbers_and_gamecodes[page_number]
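# A small illustrative check (not part of the original module): it only exercises
# the page-number-to-gamemode mapping defined above, since building a full
# `base_player_data` dict would require real Hypixel API output.
if __name__ == "__main__":
    for page in range(1, 5):
        code = page_number_to_gamecode(page)
        print(page, code, corresponding_gamemodes_and_gamecodes[code])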
| 50.289157
| 225
| 0.69885
|
6a6a6f79d8067be0597c0dd0b317baa58e9ba76e
| 3,167
|
py
|
Python
|
prereise/gather/demanddata/eia/tests/test_map_ba.py
|
SEL-Columbia/PreREISE-building
|
527cc02e6867a879c7e68e8e3fc5dc843de20580
|
[
"MIT"
] | null | null | null |
prereise/gather/demanddata/eia/tests/test_map_ba.py
|
SEL-Columbia/PreREISE-building
|
527cc02e6867a879c7e68e8e3fc5dc843de20580
|
[
"MIT"
] | null | null | null |
prereise/gather/demanddata/eia/tests/test_map_ba.py
|
SEL-Columbia/PreREISE-building
|
527cc02e6867a879c7e68e8e3fc5dc843de20580
|
[
"MIT"
] | 1
|
2022-02-23T20:43:36.000Z
|
2022-02-23T20:43:36.000Z
|
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from prereise.gather.demanddata.eia.map_ba import (
aggregate_ba_demand,
get_demand_in_loadzone,
map_buses_to_ba,
map_buses_to_county,
)
def test_get_demand_in_loadzone_case():
bus_map, agg_demand = create_loadzone_dataframe()
zone_demand = get_demand_in_loadzone(agg_demand, bus_map)
assert zone_demand.values.T.tolist() == [
[(1 / 4) + (2 / 3)] * 3,
[(3 / 4) + (4 / 3) + 3] * 3,
]
def test_get_demand_in_loadzone_has_equal_total_demand():
bus_map, agg_demand = create_loadzone_dataframe()
zone_demand = get_demand_in_loadzone(agg_demand, bus_map)
assert_series_equal(
agg_demand.sum(axis=1), zone_demand.sum(axis=1), check_dtype=False
)
def test_aggregate_ba_demand_sums_first_three_columns():
initial_df = create_ba_to_region_dataframe()
mapping = {"ABC": ["A", "B", "C"]}
result = aggregate_ba_demand(initial_df, mapping)
assert result["ABC"].tolist() == list(range(30, 60, 3))
def test_aggregate_ba_demand_sums_first_columns_pairs():
initial_df = create_ba_to_region_dataframe()
mapping = {"AB": ["A", "B"], "CD": ["C", "D"]}
result = aggregate_ba_demand(initial_df, mapping)
assert result["AB"].tolist() == list(range(10, 30, 2))
assert result["CD"].tolist() == list(range(50, 70, 2))
def create_loadzone_dataframe():
bus_map_data = {
"BA": ["A", "A", "B", "A", "B", "C"],
"zone_name": ["X", "X", "X", "Y", "Y", "Y"],
"Pd": range(0, 6),
}
bus_map = pd.DataFrame.from_dict(bus_map_data)
agg_demand = pd.DataFrame({"A": [1] * 3, "B": [2] * 3, "C": [3] * 3})
agg_demand.set_index(
pd.date_range(start="2016-01-01", periods=3, freq="H"), inplace=True
)
return bus_map, agg_demand
def create_ba_to_region_dataframe():
start_data = {
"A": range(0, 10),
"B": range(10, 20),
"C": range(20, 30),
"D": range(30, 40),
"E": range(40, 50),
}
initial_df = pd.DataFrame.from_dict(start_data)
return initial_df
@pytest.mark.skip(reason="Currently failing due to issues with the geo.fcc.gov API")
def test_map_buses_to_county():
bus_df = pd.DataFrame(
{
"lat": [34.0522, 29.7604, 40.7128],
"lon": [-118.2437, -95.3698, -74.0060],
},
index=["CA", "Texas", "NY"],
)
expected_res = ["Los Angeles__CA", "Harris__TX", "New York__NY"]
bus_county, bus_no_county_match = map_buses_to_county(bus_df)
assert bus_county["County"].tolist() == expected_res
assert bus_no_county_match == []
@pytest.mark.skip(reason="Currently failing due to issues with the geo.fcc.gov API")
def test_map_buses_to_ba():
bus_df = pd.DataFrame(
{
"lat": [34.0522, 29.7604, 39.9042],
"lon": [-118.2437, -95.3698, 116.4074],
},
index=["CA", "Texas", "Beijing"],
)
expected_res = ["LDWP", "ERCOT", "LDWP"]
bus_ba, bus_no_county_match = map_buses_to_ba(bus_df)
assert bus_ba["BA"].tolist() == expected_res
assert bus_no_county_match == ["Beijing"]
| 31.989899
| 84
| 0.627092
|
0eb01e3bc8e15aa217f2ca780460dac03226eb88
| 4,142
|
py
|
Python
|
python3/koans/about_multiple_inheritance.py
|
bjmccotter7192/PythonKoan
|
92759c90824583e7bdd3355c9b5615a67dd69abf
|
[
"MIT"
] | null | null | null |
python3/koans/about_multiple_inheritance.py
|
bjmccotter7192/PythonKoan
|
92759c90824583e7bdd3355c9b5615a67dd69abf
|
[
"MIT"
] | null | null | null |
python3/koans/about_multiple_inheritance.py
|
bjmccotter7192/PythonKoan
|
92759c90824583e7bdd3355c9b5615a67dd69abf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Slightly based on AboutModules in the Ruby Koans
#
from runner.koan import *
class AboutMultipleInheritance(Koan):
class Nameable:
def __init__(self):
self._name = None
def set_name(self, new_name):
self._name = new_name
def here(self):
return "In Nameable class"
class Animal:
def legs(self):
return 4
def can_climb_walls(self):
return False
def here(self):
return "In Animal class"
class Pig(Animal):
def __init__(self):
super().__init__()
self._name = "Jasper"
@property
def name(self):
return self._name
def speak(self):
return "OINK"
def color(self):
return 'pink'
def here(self):
return "In Pig class"
class Spider(Animal):
def __init__(self):
super().__init__()
self._name = "Boris"
def can_climb_walls(self):
return True
def legs(self):
return 8
def color(self):
return 'black'
def here(self):
return "In Spider class"
class Spiderpig(Pig, Spider, Nameable):
def __init__(self):
super(AboutMultipleInheritance.Pig, self).__init__()
super(AboutMultipleInheritance.Nameable, self).__init__()
self._name = "Jeff"
def speak(self):
return "This looks like a job for Spiderpig!"
def here(self):
return "In Spiderpig class"
#
# Hierarchy:
# Animal
# / \
# Pig Spider Nameable
# \ | /
# Spiderpig
#
# ------------------------------------------------------------------
def test_normal_methods_are_available_in_the_object(self):
jeff = self.Spiderpig()
self.assertRegex(jeff.speak(), "This looks like a job for Spiderpig!")
def test_base_class_methods_are_also_available_in_the_object(self):
jeff = self.Spiderpig()
try:
jeff.set_name("Rover")
except:
self.fail("This should not happen")
self.assertEqual(True, jeff.can_climb_walls())
def test_base_class_methods_can_affect_instance_variables_in_the_object(self):
jeff = self.Spiderpig()
self.assertEqual("Jeff", jeff.name)
jeff.set_name("Rover")
self.assertEqual("Rover", jeff.name)
def test_left_hand_side_inheritance_tends_to_be_higher_priority(self):
jeff = self.Spiderpig()
self.assertEqual("pink", jeff.color())
def test_super_class_methods_are_higher_priority_than_super_super_classes(self):
jeff = self.Spiderpig()
self.assertEqual(8, jeff.legs())
def test_we_can_inspect_the_method_resolution_order(self):
#
# MRO = Method Resolution Order
#
mro = type(self.Spiderpig()).mro()
self.assertEqual('Spiderpig', mro[0].__name__)
self.assertEqual('Pig', mro[1].__name__)
self.assertEqual('Spider', mro[2].__name__)
self.assertEqual('Animal', mro[3].__name__)
self.assertEqual('Nameable', mro[4].__name__)
self.assertEqual('object', mro[5].__name__)
def test_confirm_the_mro_controls_the_calling_order(self):
jeff = self.Spiderpig()
self.assertRegex(jeff.here(), 'Spiderpig')
next = super(AboutMultipleInheritance.Spiderpig, jeff)
self.assertRegex(next.here(), 'Pig')
next = super(AboutMultipleInheritance.Pig, jeff)
self.assertRegex(next.here(), "In Spider class")
# Hang on a minute?!? That last class name might be a super class of
        # the 'jeff' object, but it's hardly a superclass of Pig, is it?
#
# To avoid confusion it may help to think of super() as next_mro().
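# A standalone illustration (not part of the original koan): a tiny diamond
# hierarchy, unrelated to the classes above, showing that super() follows the
# MRO rather than jumping straight to a literal "parent" class.
if __name__ == "__main__":
    class A:
        def who(self): return "A"

    class B(A):
        def who(self): return "B -> " + super().who()

    class C(A):
        def who(self): return "C -> " + super().who()

    class D(B, C):
        def who(self): return "D -> " + super().who()

    print([cls.__name__ for cls in D.mro()])  # ['D', 'B', 'C', 'A', 'object']
    print(D().who())                          # D -> B -> C -> A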
| 29.375887
| 85
| 0.556012
|
5ca3f3d1c504cbdd4cb67084b046d7fe3c625bac
| 369
|
py
|
Python
|
dongguan/dongguan/items.py
|
GongkunJiang/MySpider
|
8c088f696679b13568843af521279f9f25f40314
|
[
"MIT"
] | null | null | null |
dongguan/dongguan/items.py
|
GongkunJiang/MySpider
|
8c088f696679b13568843af521279f9f25f40314
|
[
"MIT"
] | null | null | null |
dongguan/dongguan/items.py
|
GongkunJiang/MySpider
|
8c088f696679b13568843af521279f9f25f40314
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class DongguanItem(scrapy.Item):
    # title of each post
    title = scrapy.Field()
    # number (ID) of each post
    number = scrapy.Field()
    # text content of each post
    content = scrapy.Field()
    # url of each post
    url = scrapy.Field()
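# A minimal usage sketch (not part of the original project): items are normally
# populated and yielded from a spider's parse() callback; the field values here
# are dummy placeholders.
if __name__ == "__main__":
    item = DongguanItem()
    item['title'] = 'example title'
    item['number'] = '12345'
    item['content'] = 'example body text'
    item['url'] = 'http://example.com/post/12345'
    print(dict(item))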
| 19.421053
| 52
| 0.653117
|
4c7076d82bed8f88ec8cb8464384343c63c64af4
| 5,336
|
py
|
Python
|
train/custom_train_2.py
|
bjw806/Crypto-Deep-Learning-test1
|
9a3dcdaa0e106f1d1a0d8425e864b8bf0007811d
|
[
"Apache-2.0"
] | null | null | null |
train/custom_train_2.py
|
bjw806/Crypto-Deep-Learning-test1
|
9a3dcdaa0e106f1d1a0d8425e864b8bf0007811d
|
[
"Apache-2.0"
] | null | null | null |
train/custom_train_2.py
|
bjw806/Crypto-Deep-Learning-test1
|
9a3dcdaa0e106f1d1a0d8425e864b8bf0007811d
|
[
"Apache-2.0"
] | 1
|
2022-02-19T08:53:32.000Z
|
2022-02-19T08:53:32.000Z
|
import os
import sys
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
#tf.debugging.set_log_device_placement(True)
epochs = 10
train_data_dir = './data/train/'
validation_data_dir = './data/validation/'
def tester():
    img_width, img_height = 356, 295  # 489, 405; at most about 350x350 in size... not enough VRAM...
train_samples = 7836
validation_samples = 2000
filters1 = 64 #32
filters2 = 64 #32
filters3 = 128 #64
conv1_size = 5 #3
conv2_size = 3 #2
conv3_size = 7 #5
pool_size = 2
# We have 2 classes, long and short
classes_num = 2
batch_size = 32 # 128
#1
model = models.Sequential()
model.add(layers.Conv2D(filters1, (conv1_size, conv1_size), padding='same', input_shape=(img_height, img_width , 3)))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(layers.Dropout(0.25))
#2
model.add(layers.Conv2D(filters2, (conv2_size, conv2_size), padding="same"))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(pool_size=(pool_size, pool_size),data_format='channels_last'))
model.add(layers.Dropout(0.25))
#5
model.add(layers.Conv2D(filters3, (conv3_size, conv3_size), padding='same'))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(pool_size=(pool_size, pool_size),data_format='channels_last'))
model.add(layers.Dropout(0.25))
#1024 neuron hidden layer
model.add(layers.Flatten())
model.add(layers.Dense(1024))
model.add(layers.Activation('relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(classes_num, activation='softmax'))#10
model.summary()
    # compile the model
#sgd = tf.keras.optimizers.SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
#model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=["acc"])#accuracy
model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='categorical_crossentropy', metrics=['acc'])#lr=0.0001, decay=1e-6
#model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.001, decay=1e-6, momentum=0.9), loss='categorical_crossentropy', metrics=['acc'])#SGD
#model.compile(loss='categorical_crossentropy',optimizer=sgd, metrics=['acc'])
train_datagen = ImageDataGenerator(#rescale=1. / 255,
horizontal_flip=False)
test_datagen = ImageDataGenerator(#rescale=1. / 255,
horizontal_flip=False)
#validation_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=False)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_height, img_width),
shuffle=True,
batch_size=batch_size,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
shuffle=True,
class_mode='categorical')
    # checkpoint
metric = 'val_acc'
target_dir = "./models/weights-improvement/"
if not os.path.exists(target_dir):
os.mkdir(target_dir)
model.save('./models/model.h5')
model.save_weights('./models/weights.h5')
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath= target_dir + 'weights-improvement-{epoch:02d}-{acc:.2f}.h5',#
monitor=metric, verbose=2, save_best_only=True, mode='max')
    # use TensorBoard
#logdir = "./logs/" +datetime.now().strftime("%Y%m%d-%H%M%S")
#tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
    # train the model
#model.fit(train_images, train_labels, epochs=10)
callbacks_list = [checkpoint]
    # load the model
#if(target_dir):
# model.load_weights(target_dir)
model.fit(
train_generator,
steps_per_epoch=train_samples // batch_size,
epochs=epochs,
shuffle=True,
validation_data=validation_generator,
callbacks=callbacks_list,#[checkpoint],
#callbacks=[tensorboard_callback],#텐서보드
validation_steps=validation_samples // batch_size)
    # evaluate the model
#
#test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
#print(test_acc)
model.save('./models/model.h5')
model.save_weights('./models/weights.h5')
############################################################################
if __name__ == '__main__':
#1
#physical_devices = tf.config.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(physical_devices[0], True)
#2
#gpus = tf.config.experimental.list_physical_devices('GPU')
#if gpus:
# try:
# tf.config.experimental.set_memory_growth(gpus[0], True)
# except RuntimeError as e:
    #        # memory growth must be set at program startup
# print(e)
tester()
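# A hedged inference sketch (not part of the original script): it assumes the
# model saved above at './models/model.h5' and an image path supplied by the
# caller; target_size matches the (img_height, img_width) used for training.
def predict_single_image(image_path, model_path='./models/model.h5'):
    model = load_model(model_path)
    img = tf.keras.preprocessing.image.load_img(image_path, target_size=(295, 356))
    arr = tf.keras.preprocessing.image.img_to_array(img)
    batch = tf.expand_dims(arr, 0)    # shape (1, 295, 356, 3)
    probs = model.predict(batch)[0]   # softmax over the 2 classes
    return probs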
| 37.314685
| 148
| 0.642616
|
54b50e480d423146ddec16c94b62e022c71ffde6
| 13,271
|
py
|
Python
|
pysumma/calibration/ostrich.py
|
synapticarbors/pysumma
|
7480a12d40742d58629f2742ae7abc92131f831e
|
[
"BSD-3-Clause"
] | 9
|
2017-10-24T21:07:32.000Z
|
2021-11-18T22:03:05.000Z
|
pysumma/calibration/ostrich.py
|
synapticarbors/pysumma
|
7480a12d40742d58629f2742ae7abc92131f831e
|
[
"BSD-3-Clause"
] | 52
|
2018-02-03T20:15:11.000Z
|
2021-11-11T12:38:30.000Z
|
pysumma/calibration/ostrich.py
|
synapticarbors/pysumma
|
7480a12d40742d58629f2742ae7abc92131f831e
|
[
"BSD-3-Clause"
] | 19
|
2017-10-24T21:23:08.000Z
|
2021-07-21T21:41:55.000Z
|
import os
import pandas as pd
import numpy as np
import shutil
import stat
import inspect
import subprocess
from functools import partial
from pathlib import Path
from pkg_resources import resource_filename as resource
from pysumma import Simulation
from string import Template
from typing import List, Dict
def read_template(path):
with open(path, 'r') as f:
OST_FILE= f.read()
return Template(OST_FILE)
resource = partial(resource, __name__)
# Paths to template files
INPT_FILE = resource('meta/ostIn.template')
EXEC_FILE = resource('meta/model_executable.template')
SAVE_FILE = resource('meta/save_parameters.template')
# Templates
INPT_META = read_template(INPT_FILE)
EXEC_META = read_template(EXEC_FILE)
SAVE_META = read_template(SAVE_FILE)
class Ostrich():
"""
Provides a high level interface to the OSTRICH optimization package.
This class can currently only be used for single-objective optimization
using the DDS algorithm as defined in the template file. Currently the
metrics calculated are KGE, MAE, and MSE as defined in the evaluation
package, though more metrics can be implemmented quite easily.
A basic workflow for this object is:
::
import pysumma as ps
summa_exe = './summa.exe'
ostrich_exe = './ostrich.exe'
file_manager = './file_manager.txt'
python_exe = '/pool0/data/andrbenn/.conda/all/bin/python'
ostrich = ps.Ostrich(ostrich_exe, summa_exe, file_manager, python_path=python_exe)
        ostrich.calib_params = [
            ps.OstrichParam('paramName', startValue, (minValue, maxValue)),
        ]
        ostrich.obs_data_file = 'obs_data.nc'
        ostrich.sim_calib_vars = 'sim_varname'
        ostrich.obs_calib_vars = 'obs_varname'
ostrich.write_config()
ostrich.run()
Attributes
----------
ostrich:
Path to OSTRICH executable
python_path:
Path to Python executable used for the ``run_script``
summa:
Path to the SUMMA executable
template:
OSTRICH configuration file template
save_template:
Template for script to save best parameters
run_template:
Template for script to run and evaluate SUMMA
config_path:
Path to location of calibration runs/logs
simulation:
pysumma Simulation object used as template
file_manager:
File manager file for SUMMA simulation
seed:
Random seed for calibration
errval:
Error value for OSTRICH
perturb_val:
Strength of parameter perturbations during calibration
max_iters:
Number of calibration trial runs
cost_function:
Metric to use when ranking calibration runs
maximize:
Whether to maximize the ``cost_function``
simulation_kwargs:
Keyword arguments to pass to the simulation run function
"""
def __init__(self, ostrich_executable, summa_executable, file_manager, python_path='python'):
"""Initialize a new Ostrich object"""
self.available_metrics: np.ndarray = np.array(['KGE', 'MAE', 'MSE', 'RMSE', 'NSE'])
self.ostrich: str = ostrich_executable
self.python_path: str = python_path
self.summa: str = summa_executable
self.template: Template = INPT_META
self.save_template: Template = SAVE_META
self.run_template: Template = EXEC_META
self.config_path: Path = Path(os.path.abspath(file_manager)).parent / 'calibration'
self.simulation = Simulation(summa_executable, file_manager,
config_dir=self.config_path)
self.file_manager = self.simulation.manager
self.run_script: Path = self.config_path / 'run_script.py'
self.save_script: Path = self.config_path / 'save_script.py'
self.metrics_file: Path = self.config_path / 'metrics.txt'
self.metrics_log: Path = self.config_path / 'metrics_log.csv'
        self.import_strings: str = ''
        self.conversion_function: callable = lambda x: x
        self.filter_function: callable = lambda x, y: (x, y)
        self.preserve_output: str = 'no'
self.seed: int = 42
self.errval: float = -9999
self.perturb_val: float = 0.2
self.max_iters: int = 100
self.calib_params: List[OstrichParam] = []
self.tied_params: List[OstrichTiedParam] = []
self.cost_function: str = 'KGE'
self.objective_function: str = 'gcop'
self.maximize: bool = True
self.simulation_kwargs: Dict = {}
self.allow_failures: bool = False
def run(self, prerun_cmds=[], monitor=True):
"""Start calibration run"""
if len(prerun_cmds):
preprocess_cmd = " && ".join(prerun_cmds) + " && "
else:
preprocess_cmd = ""
cmd = preprocess_cmd + f'cd {str(self.config_path)} && ./ostrich'
self.cmd = cmd
self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
if monitor:
self.stdout, self.stderr = self.process.communicate()
if isinstance(self.stdout, bytes):
self.stderr = self.stderr.decode('utf-8', 'ignore')
self.stdout = self.stdout.decode('utf-8', 'ignore')
def monitor(self):
if not self.process:
return
else:
self.stdout, self.stderr = self.process.communicate()
if isinstance(self.stdout, bytes):
self.stderr = self.stderr.decode('utf-8', 'ignore')
self.stdout = self.stdout.decode('utf-8', 'ignore')
return self.stdout, self.stderr
def write_config(self):
"""Writes all necessary files for calibration"""
if not os.path.exists(self.config_path):
os.mkdir(self.config_path)
# Substitue templates and save
self.weightTemplateFile = self.write_weight_template_section()
self.weightValueFile = self.write_weight_value_section()
with open(self.config_path / 'ostIn.txt', 'w') as f:
f.write(self.template.substitute(self.map_vars_to_template))
with open(self.save_script, 'w') as f:
f.write(self.save_template.substitute(self.map_vars_to_save_template))
self.simulation._write_configuration()
with open(self.run_script, 'w') as f:
f.write(self.run_template.substitute(self.map_vars_to_run_template))
shutil.copy(self.ostrich, self.config_path / 'ostrich')
# Make sure we set permissions for execution
st = os.stat(self.config_path / 'ostrich')
os.chmod(self.config_path / 'ostrich', st.st_mode | stat.S_IEXEC)
st = os.stat(self.run_script)
os.chmod(self.run_script, st.st_mode | stat.S_IEXEC)
st = os.stat(self.save_script)
os.chmod(self.save_script, st.st_mode | stat.S_IEXEC)
def write_weight_template_section(self, file_name=Path('param_mapping.tpl')) -> Path:
"""Write the parameter name mapping for OSTRICH"""
with open(self.config_path / file_name, 'w') as f:
for cp in self.calib_params:
if cp.weightname.endswith('mtp'):
f.write(f'{cp.realname} | {cp.weightname}\n')
for tp in self.tied_params:
if tp.realname.endswith('mtp'):
f.write(f'{tp.realname.replace("_mtp", "")} | {tp.realname}\n')
return Path('.') / file_name
def write_weight_value_section(self, file_name='param_weights.txt') -> Path:
"""Write the parameter values for OSTRICH"""
with open(self.config_path / file_name, 'w') as f:
f.write('\n'.join([f'{cp.realname} | {cp.value}'
for cp in self.calib_params]) + '\n')
return Path('.') / file_name
def add_tied_param(self, param_name, lower_bound, upper_bound, initial_value=0.5):
self.calib_params.append(OstrichParam(f'{param_name}', initial_value, (0.01, 0.99), weightname=f'{param_name}_scale'))
self.tied_params.append(OstrichTiedParam(param_name, lower_bound, upper_bound))
@property
def param_section(self) -> str:
"""Write list of calibration parameters"""
return '\n'.join(str(param) for param in self.calib_params)
@property
def tied_param_section(self) -> str:
"""Write list of tied calibration parameters"""
if len(self.tied_params):
return '\n'.join(str(param) for param in self.tied_params)
else:
return '# nothing to do here'
@property
def response_section(self) -> str:
"""Write section of OSTRICH configuration for selecting metric"""
metric_row = np.argwhere(self.cost_function == self.available_metrics)[0][0]
return f"{self.cost_function} {self.metrics_file}; OST_NULL {metric_row} 1 ' '"
@property
def tied_response_section(self) -> str:
"""Write section for determining if we are maximizing or minimizing the metric"""
if self.maximize:
return f'neg{self.cost_function} 1 {self.cost_function} wsum -1.00'
else:
return '# nothing to do here'
@property
def map_vars_to_template(self):
"""For completion of the OSTRICH input template"""
return {'runScript': self.run_script,
'objectiveFun': self.objective_function,
'saveScript': self.save_script,
'preserveOutput': self.preserve_output,
'seed': self.seed,
'errval': self.errval,
'perturbVal': self.perturb_val,
'maxIters': self.max_iters,
'paramSection': self.param_section,
'tiedParamSection': self.tied_param_section,
'responseSection': self.response_section,
'tiedResponseSection': self.tied_response_section,
'costFunction': f'neg{self.cost_function}' if self.maximize else self.cost_function,
'weightTemplateFile': self.weightTemplateFile,
'weightValueFile': self.weightValueFile
}
@property
def map_vars_to_save_template(self):
"""For completion of the parameter saving template"""
return {
'pythonPath': self.python_path,
'saveDir': self.config_path.parent / 'best_calibration',
'modelDir': self.config_path}
@property
def map_vars_to_run_template(self):
"""For completion of the model run script template"""
return {
'pythonPath': self.python_path,
'summaExe': self.summa,
'fileManager': self.simulation.manager_path,
'obsDataFile': self.obs_data_file,
'simVarList': self.sim_calib_vars,
'obsVarList': self.obs_calib_vars,
'outFile': self.metrics_file,
'metricsLog': self.metrics_log,
'importStrings': self.import_strings,
'conversionFunc': "=".join(inspect.getsource(self.conversion_function).split('=')[1:]),
'filterFunc': "=".join(inspect.getsource(self.filter_function).split('=')[1:]),
'paramMappingFile': self.weightTemplateFile,
'paramWeightFile': self.weightValueFile,
'simulationArgs': self.simulation_kwargs,
'allowFailures': self.allow_failures,
'paramFile': (self.simulation.manager['settingsPath'].value
+ self.simulation.manager['trialParamFile'].value),
}
class OstrichParam():
"""
Definition of a SUMMA parameter to be optimized by OSTRICH
Parameters
----------
realname:
Parameter name as seen by SUMMA
weightname:
Parameter name as seen by OSTRICH
value:
Default value
lower:
Lower bound for parameter value
upper:
Upper bound for parameter value
"""
def __init__(self, name, value, val_range, weightname=''):
self.realname = name
if not weightname:
self.weightname = f'{name}_mtp'
else:
self.weightname = weightname
self.value = value
self.lower, self.upper = val_range
def __str__(self):
return f"{self.weightname} {self.value} {self.lower} {self.upper} none none none free"
class OstrichTiedParam():
def __init__(self, name, lower_param, upper_param):
self.realname = f'{name}_mtp'
self.weightname = f'{name}_scale'
self.lower_param = f'{lower_param}_mtp'
self.upper_param = f'{upper_param}_mtp'
@property
def type_data(self):
"""This corresponds to the equation y = x2 + x1x3 - x1x2"""
if self.lower_param and self.upper_param:
return "ratio 0 -1 1 0 0 1 0 0 0 0 0 0 0 0 0 1 free"
        elif self.lower_param:
            raise NotImplementedError()
        elif self.upper_param:
            raise NotImplementedError()
def __str__(self):
return f"{self.realname} 3 {self.weightname} {self.lower_param} {self.upper_param} {self.type_data}"
| 39.497024
| 126
| 0.627684
|
5bccfe5fa63f38f157af4e077a3da1639a454ac9
| 246
|
py
|
Python
|
Tests/image_tests/renderpasses/test_ForwardRendering.py
|
SvenHinze/SpatioTemporalReprojection
|
3abc2964ef3adfeb10a64dfc6d06bc2ab87a5081
|
[
"BSD-3-Clause"
] | 62
|
2022-02-04T10:34:29.000Z
|
2022-03-31T19:41:20.000Z
|
Tests/image_tests/renderpasses/test_ForwardRendering.py
|
SvenHinze/SpatioTemporalReprojection
|
3abc2964ef3adfeb10a64dfc6d06bc2ab87a5081
|
[
"BSD-3-Clause"
] | 2
|
2021-03-02T10:16:06.000Z
|
2021-08-13T10:10:21.000Z
|
Tests/image_tests/renderpasses/test_ForwardRendering.py
|
SvenHinze/SpatioTemporalReprojection
|
3abc2964ef3adfeb10a64dfc6d06bc2ab87a5081
|
[
"BSD-3-Clause"
] | 4
|
2022-02-04T16:08:30.000Z
|
2022-03-09T09:39:41.000Z
|
from helpers import render_frames
from graphs.ForwardRendering import ForwardRendering as g
from falcor import *
m.addGraph(g)
m.loadScene('Arcade/Arcade.fscene')
ctx = locals()
# default
render_frames(ctx, 'default', frames=[1,16,64])
exit()
| 18.923077
| 57
| 0.768293
|
2fa8124bb856e7c64f7043bcbbb250f7adbf4581
| 2,417
|
py
|
Python
|
code/07Rock_paper_scissors.py
|
JohnZhong2021/Python-practice-project
|
0a39e3b32197a888814ff3ec9425acbd21f6fa5d
|
[
"MIT"
] | null | null | null |
code/07Rock_paper_scissors.py
|
JohnZhong2021/Python-practice-project
|
0a39e3b32197a888814ff3ec9425acbd21f6fa5d
|
[
"MIT"
] | null | null | null |
code/07Rock_paper_scissors.py
|
JohnZhong2021/Python-practice-project
|
0a39e3b32197a888814ff3ec9425acbd21f6fa5d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 19:40:19 2021
@author: JohnZhong
"""
from random import choice
def user_input():
user_input = input("Let's play rock paper and scissors with the computer!\
\nR for rock\nP for paper\nS for scissors\nYour decision:")
return user_input
def check_user_move(user_input):
if user_input in ["r", "R", "rock", "Rock", "ROCK"]:
return "r"
elif user_input in ["p", "P", "paper", "Paper", "PAPER"]:
return "p"
elif user_input in ["s", "S", "Scissors", "scissors", "SCISSORS"]:
return "s"
else:
return "Invalid input! Please check..."
def decide_winner(computer_move, user_move):
if computer_move == user_move:
return "It's a tie"
elif computer_move == "s" and user_move == "p":
return "Computer wins"
elif computer_move == "s" and user_move == "r":
return "You win"
elif computer_move == "r" and user_move == "s":
return "Computer wins"
elif computer_move == "r" and user_move == "p":
return "You win"
elif computer_move == "p" and user_move == "r":
return "Computer wins"
elif computer_move == "p" and user_move == "s":
return "You win"
def score_keeper(result_per_move):
    computer_score = 0
    user_score = 0
    if result_per_move == "Computer wins":
        computer_score += 1
    elif result_per_move == "You win":
        user_score += 1
    return [computer_score, user_score]
i = 0
computer_final_score = 0
user_final_score = 0
while i <= 5:
computer_move = choice(['r', 'p', 's'])
user_move = check_user_move(user_input())
    result_per_move = decide_winner(computer_move, user_move)
computer_final_score += score_keeper(result_per_move)[0]
user_final_score += score_keeper(result_per_move)[1]
i += 1
if computer_final_score > user_final_score:
print("\nSorry you lose...better luck next time")
print("\nThe computer score is:", computer_final_score)
print("Your score is:", user_final_score)
elif computer_final_score < user_final_score:
print("\nCongretulations! You are the winner!!!")
print("\nThe computer score is:", computer_final_score)
print("Your score is:", user_final_score)
else:
print("\nIt's a tie")
print("\nThe computer score is:", computer_final_score)
print("Your score is:", user_final_score)
| 30.987179
| 78
| 0.65453
|
078e3822459d5e5b80647ca85f3ab7f0eeefae77
| 2,808
|
py
|
Python
|
dataloaders/datasets/combine_dbs.py
|
rucnyz/pytorch-deeplab
|
684e770dc82406f9572b09b2cd60d8f7b597a702
|
[
"MIT"
] | null | null | null |
dataloaders/datasets/combine_dbs.py
|
rucnyz/pytorch-deeplab
|
684e770dc82406f9572b09b2cd60d8f7b597a702
|
[
"MIT"
] | null | null | null |
dataloaders/datasets/combine_dbs.py
|
rucnyz/pytorch-deeplab
|
684e770dc82406f9572b09b2cd60d8f7b597a702
|
[
"MIT"
] | null | null | null |
import torch.utils.data as data
class CombineDBs(data.Dataset):
NUM_CLASSES = 21
def __init__(self, dataloaders, excluded = None):
self.dataloaders = dataloaders
self.excluded = excluded
self.im_ids = []
# Combine object lists
for dl in dataloaders:
for elem in dl.im_ids:
if elem not in self.im_ids:
self.im_ids.append(elem)
# Exclude
if excluded:
for dl in excluded:
for elem in dl.im_ids:
if elem in self.im_ids:
self.im_ids.remove(elem)
# Get object pointers
self.cat_list = []
self.im_list = []
new_im_ids = []
num_images = 0
for ii, dl in enumerate(dataloaders):
for jj, curr_im_id in enumerate(dl.im_ids):
if (curr_im_id in self.im_ids) and (curr_im_id not in new_im_ids):
num_images += 1
new_im_ids.append(curr_im_id)
self.cat_list.append({'db_ii': ii, 'cat_ii': jj})
self.im_ids = new_im_ids
print('Combined number of images: {:d}'.format(num_images))
def __getitem__(self, index):
_db_ii = self.cat_list[index]["db_ii"]
_cat_ii = self.cat_list[index]['cat_ii']
sample = self.dataloaders[_db_ii].__getitem__(_cat_ii)
if 'meta' in sample.keys():
sample['meta']['db'] = str(self.dataloaders[_db_ii])
return sample
def __len__(self):
return len(self.cat_list)
def __str__(self):
include_db = [str(db) for db in self.dataloaders]
exclude_db = [str(db) for db in self.excluded]
return 'Included datasets:' + str(include_db) + '\n' + 'Excluded datasets:' + str(exclude_db)
if __name__ == "__main__":
import matplotlib.pyplot as plt
    from dataloaders.datasets import pascal, sbd
import torch
import numpy as np
from dataloaders.utils import decode_segmap
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
pascal_voc_val = pascal.VOCSegmentation(args, split = 'val')
sbd = sbd.SBDSegmentation(args, split = ['train', 'val'])
pascal_voc_train = pascal.VOCSegmentation(args, split = 'train')
dataset = CombineDBs([pascal_voc_train, sbd], excluded = [pascal_voc_val])
dataloader = torch.utils.data.DataLoader(dataset, batch_size = 2, shuffle = True, num_workers = 0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset = 'pascal')
img_tmp = np.transpose(img[jj], axes = [1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block = True)
| 27.529412
| 99
| 0.688746
|
67b87efa1d6072fc78ccbaf4e2947e4f79ae9158
| 2,117
|
py
|
Python
|
data_capture/admin.py
|
davezen1/calc
|
410d114f01e84e9fc6363f58853a4d9451a00ef2
|
[
"CC0-1.0"
] | null | null | null |
data_capture/admin.py
|
davezen1/calc
|
410d114f01e84e9fc6363f58853a4d9451a00ef2
|
[
"CC0-1.0"
] | 3
|
2021-03-19T23:45:25.000Z
|
2022-03-21T22:21:12.000Z
|
data_capture/admin.py
|
davezen1/calc
|
410d114f01e84e9fc6363f58853a4d9451a00ef2
|
[
"CC0-1.0"
] | null | null | null |
from django.contrib import admin
from django.db import models
from django import forms
from django.utils.safestring import mark_safe
from .schedules import registry
from .models import SubmittedPriceList, SubmittedPriceListRow
class SubmittedPriceListRowInline(admin.TabularInline):
model = SubmittedPriceListRow
can_delete = False
exclude = ('contract_model',)
formfield_overrides = {
models.TextField: {'widget': forms.TextInput}
}
def has_add_permission(self, request):
return False
@admin.register(SubmittedPriceList)
class SubmittedPriceListAdmin(admin.ModelAdmin):
list_display = ('contract_number', 'vendor_name', 'submitter',
'is_approved')
exclude = ('serialized_gleaned_data', 'schedule')
readonly_fields = ('schedule_title', 'current_status')
inlines = [
SubmittedPriceListRowInline
]
def current_status(self, instance):
if instance.is_approved:
return mark_safe(
"<span style=\"color: green\">"
"This price list has been approved, so its data is now "
"in CALC. Uncheck the <strong>Is approved</strong> box to "
"remove its data from CALC.</span>"
)
        return mark_safe(
            "<span style=\"color: red\">"
            "This price list is not currently approved, so its data is "
            "not in CALC. Check the <strong>Is approved</strong> box to "
            "add its data to CALC.</span>"
        )
def schedule_title(self, instance):
return registry.get_class(instance.schedule).title
schedule_title.short_description = 'Schedule'
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
def save_model(self, request, obj, form, change):
original = SubmittedPriceList.objects.get(pk=obj.id)
if original.is_approved != obj.is_approved:
if obj.is_approved:
obj.approve()
else:
obj.unapprove()
obj.save()
| 29.402778
| 75
| 0.640057
|
b2bf3f4cbbdfdc8bd5492ec139e41b4827f4073b
| 496
|
py
|
Python
|
app/kobo/migrations/0014_auto_20180921_1443.py
|
dianedetoeuf/django_kobo
|
d437a289e1952bb55fb7004fddbff6b978aa15d6
|
[
"MIT"
] | 1
|
2018-12-20T07:59:55.000Z
|
2018-12-20T07:59:55.000Z
|
app/kobo/migrations/0014_auto_20180921_1443.py
|
dianedetoeuf/django_kobo
|
d437a289e1952bb55fb7004fddbff6b978aa15d6
|
[
"MIT"
] | 9
|
2018-11-06T01:51:28.000Z
|
2018-12-21T22:19:42.000Z
|
app/kobo/migrations/0014_auto_20180921_1443.py
|
dianedetoeuf/django_kobo
|
d437a289e1952bb55fb7004fddbff6b978aa15d6
|
[
"MIT"
] | 2
|
2018-11-21T15:13:32.000Z
|
2020-02-19T08:39:37.000Z
|
# Generated by Django 2.0.5 on 2018-09-21 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kobo', '0013_auto_20180921_1437'),
]
operations = [
migrations.RemoveField(
model_name='kobodata',
name='legacy',
),
migrations.AddField(
model_name='kobodata',
name='kobo_managed',
field=models.BooleanField(default=True),
),
]
| 21.565217
| 52
| 0.574597
|
0f304a446eb457b19fa7e0a96e533920b868e1fa
| 18,418
|
py
|
Python
|
src/python/module/z5py/dataset.py
|
gdkrmr/z5
|
578bb64c4481191d6359b8ee3335ec8a166db4e0
|
[
"MIT"
] | null | null | null |
src/python/module/z5py/dataset.py
|
gdkrmr/z5
|
578bb64c4481191d6359b8ee3335ec8a166db4e0
|
[
"MIT"
] | null | null | null |
src/python/module/z5py/dataset.py
|
gdkrmr/z5
|
578bb64c4481191d6359b8ee3335ec8a166db4e0
|
[
"MIT"
] | null | null | null |
import numbers
import json
import numpy as np
from . import _z5py
from .attribute_manager import AttributeManager
from .shape_utils import normalize_slices, rectify_shape, get_default_chunks
AVAILABLE_COMPRESSORS = _z5py.get_available_codecs()
COMPRESSORS_ZARR = ('raw', 'blosc', 'zlib', 'bzip2', 'gzip')
COMPRESSORS_N5 = ('raw', 'gzip', 'bzip2', 'xz', 'lz4')
class Dataset:
""" Dataset for access to data on disc.
Should not be instantiated directly, but rather
be created or opened via ``create_dataset``, ``require_dataset`` or
the ``[]`` operator of File or Group.
"""
_dtype_dict = {np.dtype('uint8'): 'uint8',
np.dtype('uint16'): 'uint16',
np.dtype('uint32'): 'uint32',
np.dtype('uint64'): 'uint64',
np.dtype('int8'): 'int8',
np.dtype('int16'): 'int16',
np.dtype('int32'): 'int32',
np.dtype('int64'): 'int64',
np.dtype('float32'): 'float32',
np.dtype('float64'): 'float64'}
# Compression libraries supported by zarr format
compressors_zarr = tuple(comp for comp in COMPRESSORS_ZARR if AVAILABLE_COMPRESSORS[comp])
# Default compression for zarr format
zarr_default_compressor = 'blosc' if AVAILABLE_COMPRESSORS['blosc'] else 'raw'
# Compression libraries supported by n5 format
compressors_n5 = tuple(comp for comp in COMPRESSORS_N5 if AVAILABLE_COMPRESSORS[comp])
# Default compression for n5 format
n5_default_compressor = 'gzip' if AVAILABLE_COMPRESSORS['gzip'] else 'raw'
def __init__(self, dset_impl, handle, n_threads=1):
self._impl = dset_impl
self._handle = handle
self._attrs = AttributeManager(self._handle)
self.n_threads = n_threads
@staticmethod
def _to_zarr_compression_options(compression, compression_options):
if compression == 'blosc':
default_opts = {'codec': 'lz4', 'clevel': 5, 'shuffle': 1}
elif compression == 'zlib':
default_opts = {'id': 'zlib', 'level': 5}
elif compression == 'gzip':
default_opts = {'id': 'gzip', 'level': 5}
elif compression == 'bzip2':
default_opts = {'level': 5}
elif compression == 'raw':
default_opts = {}
else:
raise RuntimeError("Compression %s is not supported in zarr format" % compression)
# check for invalid options
extra_args = set(compression_options) - set(default_opts)
if extra_args:
raise RuntimeError("Invalid options for %s compression: %s" % (compression, ' '.join(list(extra_args))))
# return none for raw compression
if not default_opts:
return {}
# update the default options
default_opts.update(compression_options)
return default_opts
@staticmethod
def _to_n5_compression_options(compression, compression_options):
if compression == 'gzip':
default_opts = {'level': 5}
elif compression == 'bzip2':
default_opts = {'level': 5}
elif compression == 'raw':
default_opts = {}
elif compression == 'xz':
default_opts = {'level': 6}
elif compression == 'lz4':
default_opts = {'level': 6}
else:
raise RuntimeError("Compression %s is not supported in n5 format" % compression)
# check for invalid options
extra_args = set(compression_options) - set(default_opts)
if extra_args:
raise RuntimeError("Invalid options for %s compression: %s" % (compression, ' '.join(list(extra_args))))
# update the default options
default_opts.update(compression_options)
return default_opts
# NOTE in contrast to h5py, we also check that the chunks match
# this is crucial, because different chunks can lead to subsequent incorrect
# code when relying on chunk-aligned access for parallel writing
@classmethod
def _require_dataset(cls, group, name,
shape, dtype,
chunks, n_threads, **kwargs):
if group.has(name):
if group.is_sub_group(name):
raise TypeError("Incompatible object (Group) already exists")
handle = group.get_dataset_handle(name)
ds = cls(_z5py.open_dataset(group, name), handle, n_threads)
if shape != ds.shape:
raise TypeError("Shapes do not match (existing (%s) vs new (%s))" % (', '.join(map(str, ds.shape)),
', '.join(map(str, shape))))
if chunks is not None:
if chunks != ds.chunks:
raise TypeError("Chunks do not match (existing (%s) vs new (%s))" % (', '.join(map(str, ds.chunks)),
', '.join(map(str, chunks))))
if dtype is not None:
if np.dtype(dtype) != ds.dtype:
raise TypeError("Datatypes do not match (existing %s vs new %s)" % str(ds.dtype), str(dtype))
return ds
else:
# pop all kwargs that are not compression options
data = kwargs.pop('data', None)
compression = kwargs.pop('compression', None)
fillvalue = kwargs.pop('fillvalue', 0)
return cls._create_dataset(group, name, shape, dtype, data=data,
chunks=chunks, compression=compression,
fillvalue=fillvalue, n_threads=n_threads,
compression_options=kwargs)
@classmethod
def _create_dataset(cls, group, name,
shape=None, dtype=None,
data=None, chunks=None,
compression=None,
fillvalue=0, n_threads=1,
compression_options={}):
# check shape, dtype and data
have_data = data is not None
if have_data:
if shape is None:
shape = data.shape
elif shape != data.shape:
raise ValueError("Shape is incompatible with data")
if dtype is None:
dtype = data.dtype
# NOTE In contrast to h5py, we don't do automatic type conversion
elif np.dtype(dtype) != data.dtype:
raise ValueError("Datatype is incompatible with data")
else:
if shape is None:
raise TypeError("One of shape or data must be specified")
# NOTE In contrast to h5py (and numpy), we DO NOT have float64
# as default data type, but require a data type if no data is given
if dtype is None:
raise TypeError("One of dtype or data must be specified")
parsed_dtype = np.dtype(dtype)
# get default chunks if necessary
# NOTE in contrast to h5py, datasets are chunked
# by default, with chunk size ~ 64**3
if chunks is None:
chunks = get_default_chunks(shape)
# check chunks have the same len than the shape
if len(chunks) != len(shape):
raise RuntimeError("Chunks %s must have same length as shape %s" % (str(chunks),
str(shape)))
# limit chunks to shape
chunks = tuple(min(ch, sh) for ch, sh in zip(chunks, shape))
is_zarr = group.is_zarr()
# check compression / get default compression
# if no compression is given
if compression is None:
compression = cls.zarr_default_compressor if is_zarr else cls.n5_default_compressor
else:
valid_compression = compression in cls.compressors_zarr if is_zarr else\
compression in cls.compressors_n5
if not valid_compression:
raise ValueError("Compression filter \"%s\" is unavailable" % compression)
# get and check compression
if is_zarr and compression not in cls.compressors_zarr:
compression = cls.zarr_default_compressor
elif not is_zarr and compression not in cls.compressors_n5:
compression = cls.n5_default_compressor
if parsed_dtype not in cls._dtype_dict:
raise ValueError("Invalid data type {} for N5 dataset".format(repr(dtype)))
# update the compression options
if is_zarr:
copts = cls._to_zarr_compression_options(compression, compression_options)
else:
copts = cls._to_n5_compression_options(compression, compression_options)
# convert the copts to json parseable string
copts = json.dumps(copts)
# get the dataset and write data if necessary
impl = _z5py.create_dataset(group, name, cls._dtype_dict[parsed_dtype],
shape, chunks, compression, copts, fillvalue)
handle = group.get_dataset_handle(name)
ds = cls(impl, handle, n_threads)
if have_data:
ds[:] = data
return ds
@classmethod
def _open_dataset(cls, group, name):
ds = _z5py.open_dataset(group, name)
handle = group.get_dataset_handle(name)
return cls(ds, handle)
@property
def is_zarr(self):
""" Flag to indicate zarr or n5 format of this dataset.
"""
return self._impl.is_zarr
@property
def attrs(self):
""" The ``AttributeManager`` of this dataset.
"""
return self._attrs
@property
def shape(self):
""" Shape of this dataset.
"""
return tuple(self._impl.shape)
@property
def ndim(self):
""" Number of dimensions of this dataset.
"""
return self._impl.ndim
@property
def size(self):
""" Size (total number of elements) of this dataset.
"""
return self._impl.size
@property
def chunks(self):
""" Chunks of this dataset.
"""
return tuple(self._impl.chunks)
@property
def dtype(self):
""" Datatype of this dataset.
"""
return np.dtype(self._impl.dtype)
@property
def chunks_per_dimension(self):
""" Number of chunks in each dimension of this dataset.
"""
return self._impl.chunks_per_dimension
@property
def number_of_chunks(self):
""" Total number of chunks of this dataset.
"""
return self._impl.number_of_chunks
@property
def compression(self):
return self._impl.compressor
@property
def compression_opts(self):
""" Compression library options of this dataset.
"""
copts = self._impl.compression_options
# decode to json
copts = json.loads(copts)
return copts
def __len__(self):
return self._impl.len
def index_to_roi(self, index):
""" Convert index to region of interest.
Convert an index, which can be a slice or a tuple of slices / ellipsis to a
region of interest. The roi consists of the region offset and the region shape.
Args:
index (slice or tuple): index into dataset.
Returns:
tuple: offset of the region of interest.
tuple: shape of the region of interest.
tuple: which dimensions should be squeezed out
"""
normalized, to_squeeze = normalize_slices(index, self.shape)
return (
tuple(norm.start for norm in normalized),
tuple(
0 if norm.start is None else norm.stop - norm.start for norm in normalized
),
to_squeeze
)
# most checks are done in c++
def __getitem__(self, index):
roi_begin, shape, to_squeeze = self.index_to_roi(index)
out = np.empty(shape, dtype=self.dtype)
if 0 not in shape:
_z5py.read_subarray(self._impl,
out, roi_begin,
n_threads=self.n_threads)
# todo: this probably has more copies than necessary
if len(to_squeeze) == len(shape):
return out.flatten()[0]
elif to_squeeze:
return out.squeeze(to_squeeze)
else:
return out
# most checks are done in c++
def __setitem__(self, index, item):
roi_begin, shape, _ = self.index_to_roi(index)
if 0 in shape:
return
# broadcast scalar
if isinstance(item, (numbers.Number, np.number)):
_z5py.write_scalar(self._impl, roi_begin,
list(shape), item,
str(self.dtype), self.n_threads)
return
try:
item_arr = np.asarray(item, self.dtype, order='C')
except ValueError as e:
if any(s in str(e) for s in ('invalid literal for ', 'could not convert')):
bad_dtype = np.asarray(item).dtype
raise TypeError("No conversion path for dtype: " + repr(bad_dtype))
else:
raise
except TypeError as e:
if 'argument must be' in str(e):
raise OSError("Can't prepare for writing data (no appropriate function for conversion path)")
else:
raise
item_arr = rectify_shape(item_arr, shape)
_z5py.write_subarray(self._impl,
item_arr,
roi_begin,
n_threads=self.n_threads)
def read_direct(self, dest, source_sel=None, dest_sel=None):
""" Wrapper to improve similarity to h5py. Reads from the dataset to ``dest``, using ``read_subarray``.
Args:
dest (array): destination object into which the read data is written.
dest_sel (slice array): selection of ``dest`` to write to. Defaults to the whole range of ``dest``.
source_sel (slice array): selection in the dataset to read from. Defaults to the whole range of the dataset.
The regions defined by ``source_sel`` and ``dest_sel`` must have the same size but do not need to have the
same offset.
"""
if source_sel is None:
source_sel = tuple(slice(0, sh) for sh in self.shape)
if dest_sel is None:
dest_sel = tuple(slice(0, sh) for sh in dest.shape)
start = [s.start for s in source_sel]
stop = [s.stop for s in source_sel]
dest[dest_sel] = self.read_subarray(start, stop)
def write_direct(self, source, source_sel=None, dest_sel=None):
""" Wrapper to improve similarity to h5py. Writes to the dataset from ``source``, using ``write_subarray``.
Args:
source (array): source object from which the written data is obtained.
source_sel (slice array): selection of ``source`` to write from. Defaults to the whole range of
``source``.
dest_sel (slice array): selection in the dataset to write to. Defaults to the whole range of the dataset.
The regions defined by ``source_sel`` and ``dest_sel`` must have the same size but do not need to have the
same offset.
"""
if dest_sel is None:
dest_sel = tuple(slice(0, sh) for sh in self.shape)
if source_sel is None:
source_sel = tuple(slice(0, sh) for sh in source.shape)
start = [s.start for s in dest_sel]
self.write_subarray(start, source[source_sel])
# expose the impl write subarray functionality
def write_subarray(self, start, data):
""" Write subarray to dataset.
``data`` is written to region of interest, defined by ``start``
and the shape of ``data``. The region of interest must be in
bounds of the dataset and the datatype must agree with the dataset.
Args:
start (tuple): offset of the roi to write.
data (np.ndarray): data to write; shape determines the roi shape.
"""
_z5py.write_subarray(self._impl,
np.require(data, requirements='C'),
list(start),
n_threads=self.n_threads)
# expose the impl read subarray functionality
def read_subarray(self, start, stop):
""" Read subarray from region of interest.
Region of interest is defined by ``start`` and ``stop``
and must be in bounds of the dataset.
Args:
start (tuple): start coordinates of the roi.
stop (tuple): stop coordinates of the roi.
Returns:
np.ndarray
"""
shape = tuple(sto - sta for sta, sto in zip(start, stop))
out = np.empty(shape, dtype=self.dtype)
_z5py.read_subarray(self._impl, out, start, n_threads=self.n_threads)
return out
def chunk_exists(self, chunk_indices):
""" Check if chunk has data.
Check whether the chunk at the given indices contains data.
Args:
chunk_indices (tuple): chunk indices.
Returns:
bool
"""
return self._impl.chunkExists(chunk_indices)
def write_chunk(self, chunk_indices, data, varlen=False):
""" Write single chunk
Args:
chunk_indices (tuple): indices of the chunk to write to
data (np.ndarray): data to be written
varlen (bool): write this chunk in varlen mode; only supported in n5
(default: False)
"""
if self.is_zarr and varlen:
raise RuntimeError("Varlength chunks are not supported in zarr")
_z5py.write_chunk(self._impl, chunk_indices, data, varlen)
def read_chunk(self, chunk_indices):
""" Read single chunk
Args:
chunk_indices (tuple): indices of the chunk to read from
Returns:
np.ndarray or None - chunk data; returns None if the chunk is empty
"""
if not self._impl.chunkExists(chunk_indices):
return None
chunk_reader = getattr(_z5py, 'read_chunk_%s' % self._impl.dtype)
return chunk_reader(self._impl, chunk_indices)
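# Hedged usage sketch appended for illustration; it is not part of the original
# module. ``z5py.File`` and ``File.create_dataset`` are assumed to exist as in
# the upstream z5py package - everything else uses only methods defined above.
def _example_usage():  # pragma: no cover - illustrative only
    import numpy
    import z5py  # assumed top-level package

    f = z5py.File('example.n5')                          # assumed entry point
    ds = f.create_dataset('data', shape=(100, 100),      # assumed signature
                          chunks=(10, 10), dtype='float32')
    ds[:10, :10] = numpy.ones((10, 10), dtype='float32')  # __setitem__ above
    block = ds.read_subarray((0, 0), (10, 10))             # read_subarray above
    return ds.chunk_exists((0, 0)), block.shape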
| 38.774737
| 120
| 0.581822
|
b57a538092b047be02ac35963b89ddfd3e9de54d
| 3,966
|
py
|
Python
|
tests/unit_tests/repobee_plug/test_plug_config.py
|
DD2480-Group-18/repobee
|
1dd79bb6ace3c00b920ef0b32664847cd5b12f84
|
[
"MIT"
] | 39
|
2019-04-02T15:53:23.000Z
|
2022-03-07T02:38:41.000Z
|
tests/unit_tests/repobee_plug/test_plug_config.py
|
DD2480-Group-18/repobee
|
1dd79bb6ace3c00b920ef0b32664847cd5b12f84
|
[
"MIT"
] | 788
|
2019-03-31T13:55:53.000Z
|
2022-03-29T20:41:02.000Z
|
tests/unit_tests/repobee_plug/test_plug_config.py
|
slarse/repobee
|
03fcf90dc0244e0274a890d2a897752889c70326
|
[
"MIT"
] | 18
|
2020-06-15T11:49:50.000Z
|
2022-03-06T19:05:53.000Z
|
import pytest
from repobee_plug import config
from repobee_plug import exceptions
class TestConfig:
"""Tests for the Config class."""
def test_detects_cyclic_inheritance(self, tmp_path):
# arrange
grandparent_path = tmp_path / "otherdir" / "grandparent.ini"
parent_path = tmp_path / "dir" / "parent.ini"
child_path = tmp_path / "repobee.ini"
grandparent = config.Config(grandparent_path)
parent = config.Config(parent_path)
parent.parent = grandparent
child = config.Config(child_path)
child.parent = parent
# act/assert
with pytest.raises(exceptions.PlugError) as exc_info:
grandparent.parent = child
cycle = " -> ".join(
map(
str,
[grandparent_path, child_path, parent_path, grandparent_path],
)
)
assert f"Cyclic inheritance detected in config: {cycle}" in str(
exc_info.value
)
def test_get_option_from_parent(self, tmp_path):
# arrange
parent_path = tmp_path / "dir" / "parent.ini"
child_path = tmp_path / "repobee.ini"
parent = config.Config(parent_path)
parent_sec = "some-section"
parent_opt = "some-option"
parent_val = "some-value"
parent.create_section(parent_sec)
parent[parent_sec][parent_opt] = parent_val
# act
child = config.Config(child_path)
child.parent = parent
fetched_val = child.get(parent_sec, parent_opt)
# assert
assert fetched_val == parent_val
def test_resolves_section_from_parent(self, tmp_path):
# arrange
parent_path = tmp_path / "dir" / "parent.ini"
child_path = tmp_path / "repobee.ini"
parent = config.Config(parent_path)
parent_sec = "some-section"
parent_opt = "some-option"
parent_val = "some-value"
parent.create_section(parent_sec)
parent[parent_sec][parent_opt] = parent_val
# act
child = config.Config(child_path)
child.parent = parent
fetched_section = child[parent_sec]
# assert
assert parent_opt in fetched_section
assert fetched_section[parent_opt] == parent_val
def test_section_contains_values_from_parent_and_child(self, tmp_path):
# arrange
parent_path = tmp_path / "dir" / "parent.ini"
child_path = tmp_path / "repobee.ini"
parent = config.Config(parent_path)
parent_sec = "some-section"
parent_opt = "some-option"
parent_val = "some-value"
parent.create_section(parent_sec)
parent[parent_sec][parent_opt] = parent_val
child_opt = "other-option"
child_val = "other-value"
child = config.Config(child_path)
child.parent = parent
child.create_section("some-section")
child[parent_sec][child_opt] = child_val
# act
section = child[parent_sec]
fetched_parent_value = section[parent_opt]
fetched_child_value = section[child_opt]
# assert
assert fetched_parent_value == parent_val
assert fetched_child_value == child_val
def test_key_error_on_getitem_for_non_existent_key(self, tmp_path):
# arrange
parent_path = tmp_path / "dir" / "parent.ini"
child_path = tmp_path / "repobee.ini"
parent = config.Config(parent_path)
parent_sec = "some-section"
parent_opt = "some-option"
parent_val = "some-value"
parent.create_section(parent_sec)
parent[parent_sec][parent_opt] = parent_val
child = config.Config(child_path)
child.parent = parent
# act/assert
non_existing_key = "thiskeydoesntexist"
with pytest.raises(KeyError) as exc_info:
child[parent_sec][non_existing_key]
assert non_existing_key in str(exc_info.value)
| 30.507692
| 78
| 0.629349
|
83e837ae73c20f0c5f6dacaa5d3cb5c62d149b43
| 16,274
|
py
|
Python
|
tests/conftest.py
|
Pacman1984/etna
|
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
|
[
"Apache-2.0"
] | 96
|
2021-09-05T06:29:34.000Z
|
2021-11-07T15:22:54.000Z
|
tests/conftest.py
|
Pacman1984/etna
|
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
|
[
"Apache-2.0"
] | 188
|
2021-09-06T15:59:58.000Z
|
2021-11-17T09:34:16.000Z
|
tests/conftest.py
|
Pacman1984/etna
|
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
|
[
"Apache-2.0"
] | 8
|
2021-09-06T09:18:35.000Z
|
2021-11-11T21:18:39.000Z
|
from copy import deepcopy
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from etna.datasets import generate_const_df
from etna.datasets.tsdataset import TSDataset
@pytest.fixture(autouse=True)
def random_seed():
"""Fixture to fix random state for every test case."""
import random
import torch
SEED = 121 # noqa: N806
torch.manual_seed(SEED)
random.seed(SEED)
np.random.seed(SEED)
@pytest.fixture()
def example_df(random_seed):
df1 = pd.DataFrame()
df1["timestamp"] = pd.date_range(start="2020-01-01", end="2020-02-01", freq="H")
df1["segment"] = "segment_1"
df1["target"] = np.arange(len(df1)) + 2 * np.random.normal(size=len(df1))
df2 = pd.DataFrame()
df2["timestamp"] = pd.date_range(start="2020-01-01", end="2020-02-01", freq="H")
df2["segment"] = "segment_2"
df2["target"] = np.sqrt(np.arange(len(df2)) + 2 * np.cos(np.arange(len(df2))))
return pd.concat([df1, df2], ignore_index=True)
@pytest.fixture
def two_dfs_with_different_timestamps(random_seed):
"""Generate two dataframes with the same segments and different timestamps"""
def generate_df(start_time):
df = pd.DataFrame()
for i in range(5):
tmp = pd.DataFrame({"timestamp": pd.date_range(start_time, "2021-01-01")})
tmp["segment"] = f"segment_{i + 1}"
tmp["target"] = np.random.uniform(0, 10, len(tmp))
df = df.append(tmp)
df = df.pivot(index="timestamp", columns="segment")
df = df.reorder_levels([1, 0], axis=1)
df = df.sort_index(axis=1)
df.columns.names = ["segment", "feature"]
return TSDataset(df, freq="1D")
df1 = generate_df(start_time="2020-01-01")
df2 = generate_df(start_time="2019-01-01")
return df1, df2
@pytest.fixture
def two_dfs_with_different_segments_sets(random_seed):
"""Generate two dataframes with the same timestamps and different segments"""
def generate_df(n_segments):
df = pd.DataFrame()
for i in range(n_segments):
tmp = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", "2021-01-01")})
tmp["segment"] = f"segment_{i + 1}"
tmp["target"] = np.random.uniform(0, 10, len(tmp))
df = df.append(tmp)
df = df.pivot(index="timestamp", columns="segment")
df = df.reorder_levels([1, 0], axis=1)
df = df.sort_index(axis=1)
df.columns.names = ["segment", "feature"]
return TSDataset(df, freq="1D")
df1 = generate_df(n_segments=5)
df2 = generate_df(n_segments=10)
return df1, df2
@pytest.fixture
def train_test_dfs(random_seed):
"""Generate two dataframes with the same segments and the same timestamps"""
def generate_df():
df = pd.DataFrame()
for i in range(5):
tmp = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", "2021-01-01")})
tmp["segment"] = f"segment_{i + 1}"
tmp["target"] = np.random.uniform(0, 10, len(tmp))
df = df.append(tmp)
df = df.pivot(index="timestamp", columns="segment")
df = df.reorder_levels([1, 0], axis=1)
df = df.sort_index(axis=1)
df.columns.names = ["segment", "feature"]
return TSDataset(df, freq="1D")
df1 = generate_df()
df2 = generate_df()
return df1, df2
@pytest.fixture
def simple_df() -> TSDataset:
"""Generate dataset with simple values without any noise"""
history = 49
df1 = pd.DataFrame()
df1["target"] = np.arange(history)
df1["segment"] = "A"
df1["timestamp"] = pd.date_range(start="2020-01-01", periods=history)
df2 = pd.DataFrame()
df2["target"] = [0, 2, 4, 6, 8, 10, 12] * 7
df2["segment"] = "B"
df2["timestamp"] = pd.date_range(start="2020-01-01", periods=history)
df = pd.concat([df1, df2]).reset_index(drop=True)
df = TSDataset.to_dataset(df)
tsds = TSDataset(df, freq="1d")
return tsds
@pytest.fixture()
def outliers_df():
timestamp1 = np.arange(np.datetime64("2021-01-01"), np.datetime64("2021-02-01"))
target1 = [np.sin(i) for i in range(len(timestamp1))]
target1[10] += 10
timestamp2 = np.arange(np.datetime64("2021-01-01"), np.datetime64("2021-02-10"))
target2 = [np.sin(i) for i in range(len(timestamp2))]
target2[8] += 8
target2[15] = 2
target2[26] -= 12
df1 = pd.DataFrame({"timestamp": timestamp1, "target": target1, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp2, "target": target2, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
return df
@pytest.fixture
def example_df_(random_seed) -> pd.DataFrame:
periods = 100
df1 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df1["segment"] = ["segment_1"] * periods
df1["target"] = np.random.uniform(10, 20, size=periods)
df1["target_no_change"] = df1["target"]
df2 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df2["segment"] = ["segment_2"] * periods
df2["target"] = np.random.uniform(-15, 5, size=periods)
df2["target_no_change"] = df2["target"]
df = pd.concat((df1, df2))
df = df.pivot(index="timestamp", columns="segment").reorder_levels([1, 0], axis=1).sort_index(axis=1)
df.columns.names = ["segment", "feature"]
return df
@pytest.fixture
def example_tsds(random_seed) -> TSDataset:
periods = 100
df1 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df1["segment"] = "segment_1"
df1["target"] = np.random.uniform(10, 20, size=periods)
df2 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df2["segment"] = "segment_2"
df2["target"] = np.random.uniform(-15, 5, size=periods)
df = pd.concat([df1, df2]).reset_index(drop=True)
df = TSDataset.to_dataset(df)
tsds = TSDataset(df, freq="D")
return tsds
@pytest.fixture
def example_reg_tsds(random_seed) -> TSDataset:
periods = 100
df1 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df1["segment"] = "segment_1"
df1["target"] = np.random.uniform(10, 20, size=periods)
df2 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df2["segment"] = "segment_2"
df2["target"] = np.random.uniform(-15, 5, size=periods)
exog_weekend_1 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods + 7)})
exog_weekend_1["segment"] = "segment_1"
exog_weekend_1["regressor_exog_weekend"] = ((exog_weekend_1.timestamp.dt.dayofweek) // 5 == 1).astype("category")
exog_weekend_2 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods + 7)})
exog_weekend_2["segment"] = "segment_2"
exog_weekend_2["regressor_exog_weekend"] = ((exog_weekend_2.timestamp.dt.dayofweek) // 5 == 1).astype("category")
df = pd.concat([df1, df2]).reset_index(drop=True)
exog = pd.concat([exog_weekend_1, exog_weekend_2]).reset_index(drop=True)
df = TSDataset.to_dataset(df)
exog = TSDataset.to_dataset(exog)
tsds = TSDataset(df, freq="D", df_exog=exog)
return tsds
@pytest.fixture()
def outliers_tsds():
timestamp1 = np.arange(np.datetime64("2021-01-01"), np.datetime64("2021-02-01"))
target1 = [np.sin(i) for i in range(len(timestamp1))]
target1[10] += 10
timestamp2 = np.arange(np.datetime64("2021-01-01"), np.datetime64("2021-02-10"))
target2 = [np.sin(i) for i in range(len(timestamp2))]
target2[8] += 8
target2[15] = 2
target2[26] -= 12
df1 = pd.DataFrame({"timestamp": timestamp1, "target": target1, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp2, "target": target2, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = df.pivot(index="timestamp", columns="segment")
df = df.reorder_levels([1, 0], axis=1)
df = df.sort_index(axis=1)
df.columns.names = ["segment", "feature"]
exog = df.copy()
exog.columns = pd.MultiIndex.from_arrays([["1", "2"], ["exog", "exog"]])
tsds = TSDataset(df, "1d", exog)
return tsds
@pytest.fixture
def outliers_df_with_two_columns() -> TSDataset:
timestamp1 = np.arange(np.datetime64("2021-01-01"), np.datetime64("2021-02-10"))
target1 = [np.sin(i) for i in range(len(timestamp1))]
feature1 = [np.cos(i) for i in range(len(timestamp1))]
target1[10] += 10
feature1[7] += 10
timestamp2 = np.arange(np.datetime64("2021-01-01"), np.datetime64("2021-02-10"))
target2 = [np.sin(i) for i in range(len(timestamp2))]
feature2 = [np.cos(i) for i in range(len(timestamp2))]
target2[8] += 8
target2[15] = 2
target2[26] -= 12
feature2[25] += 10
df1 = pd.DataFrame({"timestamp": timestamp1, "target": target1, "feature": feature1, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp2, "target": target2, "feature": feature2, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = df.pivot(index="timestamp", columns="segment")
df = df.reorder_levels([1, 0], axis=1)
df = df.sort_index(axis=1)
df.columns.names = ["segment", "feature"]
tsds = TSDataset(df, "1d")
return tsds
@pytest.fixture
def multitrend_df() -> pd.DataFrame:
"""Generate one segment pd.DataFrame with multiple linear trend."""
df = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", "2021-05-31")})
ns = [100, 150, 80, 187]
ks = [0.4, -0.3, 0.8, -0.6]
x = np.zeros(shape=(len(df)))
left = 0
right = 0
for i, (n, k) in enumerate(zip(ns, ks)):
right += n
x[left:right] = np.arange(0, n, 1) * k
for _n, _k in zip(ns[:i], ks[:i]):
x[left:right] += _n * _k
left = right
df["target"] = x
df["segment"] = "segment_1"
df = TSDataset.to_dataset(df=df)
return df
@pytest.fixture
def ts_with_different_series_length(example_df: pd.DataFrame) -> TSDataset:
"""Generate TSDataset with different lengths series."""
df = TSDataset.to_dataset(example_df)
df.loc[:4, pd.IndexSlice["segment_1", "target"]] = None
ts = TSDataset(df=df, freq="H")
return ts
@pytest.fixture
def imbalanced_tsdf(random_seed) -> TSDataset:
"""Generate two series with big time range difference"""
df1 = pd.DataFrame({"timestamp": pd.date_range("2021-01-25", "2021-02-01", freq="D")})
df1["segment"] = "segment_1"
df1["target"] = np.random.uniform(0, 5, len(df1))
df2 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", "2021-02-01", freq="D")})
df2["segment"] = "segment_2"
df2["target"] = np.random.uniform(0, 5, len(df2))
df = df1.append(df2)
df = df.pivot(index="timestamp", columns="segment").reorder_levels([1, 0], axis=1).sort_index(axis=1)
df.columns.names = ["segment", "feature"]
ts = TSDataset(df, freq="D")
return ts
@pytest.fixture
def example_tsdf(random_seed) -> TSDataset:
df1 = pd.DataFrame()
df1["timestamp"] = pd.date_range(start="2020-01-01", end="2020-02-01", freq="H")
df1["segment"] = "segment_1"
df1["target"] = np.arange(len(df1)) + 2 * np.random.normal(size=len(df1))
df2 = pd.DataFrame()
df2["timestamp"] = pd.date_range(start="2020-01-01", end="2020-02-01", freq="H")
df2["segment"] = "segment_2"
df2["target"] = np.sqrt(np.arange(len(df2)) + 2 * np.cos(np.arange(len(df2))))
df = pd.concat([df1, df2], ignore_index=True)
df = df.pivot(index="timestamp", columns="segment").reorder_levels([1, 0], axis=1).sort_index(axis=1)
df.columns.names = ["segment", "feature"]
df = TSDataset(df, freq="H")
return df
@pytest.fixture
def big_daily_example_tsdf(random_seed) -> TSDataset:
df1 = pd.DataFrame()
df1["timestamp"] = pd.date_range(start="2019-01-01", end="2020-04-01", freq="D")
df1["segment"] = "segment_1"
df1["target"] = np.arange(len(df1)) + 2 * np.random.normal(size=len(df1))
df2 = pd.DataFrame()
df2["timestamp"] = pd.date_range(start="2019-06-01", end="2020-04-01", freq="D")
df2["segment"] = "segment_2"
df2["target"] = np.sqrt(np.arange(len(df2)) + 2 * np.cos(np.arange(len(df2))))
df = pd.concat([df1, df2], ignore_index=True)
df = df.pivot(index="timestamp", columns="segment").reorder_levels([1, 0], axis=1).sort_index(axis=1)
df.columns.names = ["segment", "feature"]
df = TSDataset(df, freq="D")
return df
@pytest.fixture
def big_example_tsdf(random_seed) -> TSDataset:
df1 = pd.DataFrame()
df1["timestamp"] = pd.date_range(start="2020-01-01", end="2021-02-01", freq="D")
df1["segment"] = "segment_1"
df1["target"] = np.arange(len(df1)) + 2 * np.random.normal(size=len(df1))
df2 = pd.DataFrame()
df2["timestamp"] = pd.date_range(start="2020-01-01", end="2021-02-01", freq="D")
df2["segment"] = "segment_2"
df2["target"] = np.sqrt(np.arange(len(df2)) + 2 * np.cos(np.arange(len(df2))))
df = pd.concat([df1, df2], ignore_index=True)
df = df.pivot(index="timestamp", columns="segment").reorder_levels([1, 0], axis=1).sort_index(axis=1)
df.columns.names = ["segment", "feature"]
df = TSDataset(df, freq="D")
return df
@pytest.fixture
def simple_df_relevance() -> Tuple[pd.DataFrame, pd.DataFrame]:
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": np.arange(32), "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": np.arange(5, 32), "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
regr1_2 = np.sin(-np.arange(len(timestamp) - 5))
regr2_2 = np.log(np.arange(1, len(timestamp) - 4))
df_1 = pd.DataFrame(
{
"timestamp": timestamp,
"regressor_1": np.arange(len(timestamp)),
"regressor_2": np.zeros(len(timestamp)),
"segment": "1",
}
)
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_1": regr1_2, "regressor_2": regr2_2, "segment": "2"})
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog = TSDataset.to_dataset(df_exog)
return df, df_exog
@pytest.fixture
def const_ts_anomal() -> TSDataset:
df = generate_const_df(periods=15, start_time="2020-01-01", scale=1.0, n_segments=2)
ts = TSDataset(TSDataset.to_dataset(df), freq="D")
return ts
@pytest.fixture
def ts_diff_endings(example_reg_tsds):
ts = deepcopy(example_reg_tsds)
ts.loc[ts.index[-5] :, pd.IndexSlice["segment_1", "target"]] = np.NAN
return ts
@pytest.fixture
def df_with_nans_in_tails(example_df):
df = TSDataset.to_dataset(example_df)
df.loc[:4, pd.IndexSlice["segment_1", "target"]] = None
df.loc[-3:, pd.IndexSlice["segment_1", "target"]] = None
return df
@pytest.fixture
def df_with_nans(df_with_nans_in_tails):
df = df_with_nans_in_tails
df.loc[[df.index[5], df.index[8]], pd.IndexSlice["segment_1", "target"]] = None
return df
@pytest.fixture
def toy_dataset_equal_targets_and_quantiles():
n_periods = 5
n_segments = 2
time = list(pd.date_range("2020-01-01", periods=n_periods, freq="1D"))
df = {
"timestamp": time * n_segments,
"segment": ["a"] * n_periods + ["b"] * n_periods,
"target": np.concatenate((np.array((2, 3, 4, 5, 5)), np.array((3, 3, 3, 5, 2)))).astype(np.float64),
"target_0.01": np.concatenate((np.array((2, 3, 4, 5, 5)), np.array((3, 3, 3, 5, 2)))).astype(np.float64),
}
return TSDataset.to_dataset(pd.DataFrame(df))
@pytest.fixture
def toy_dataset_with_mean_shift_in_target():
mean_1 = 10
mean_2 = 20
n_periods = 5
n_segments = 2
time = list(pd.date_range("2020-01-01", periods=n_periods, freq="1D"))
df = {
"timestamp": time * n_segments,
"segment": ["a"] * n_periods + ["b"] * n_periods,
"target": np.concatenate((np.array((-1, 3, 3, -4, -1)) + mean_1, np.array((-2, 3, -4, 5, -2)) + mean_2)).astype(
np.float64
),
"target_0.01": np.concatenate((np.array((-1, 3, 3, -4, -1)), np.array((-2, 3, -4, 5, -2)))).astype(np.float64),
}
return TSDataset.to_dataset(pd.DataFrame(df))
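# Minimal sketch added for illustration (not one of the original fixtures): the
# long-format -> wide-format conversion done by hand in several fixtures above
# can also be expressed with TSDataset.to_dataset, which most later fixtures use.
def _example_tsdataset_from_long_df() -> TSDataset:
    long_df = pd.DataFrame(
        {
            "timestamp": list(pd.date_range("2020-01-01", periods=3)) * 2,
            "segment": ["segment_1"] * 3 + ["segment_2"] * 3,
            "target": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
        }
    )
    wide_df = TSDataset.to_dataset(long_df)  # MultiIndex columns: (segment, feature)
    return TSDataset(wide_df, freq="D")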
| 34.40592
| 120
| 0.628303
|
3e8c69007245d593a2a7966160e5cd3d52fa86b2
| 13,233
|
py
|
Python
|
datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/metadata.py
|
ypisetsky/integrations-core
|
f7153d3f896827c3325c7f0ec088bc17d088a894
|
[
"BSD-3-Clause"
] | 1
|
2021-06-06T23:49:17.000Z
|
2021-06-06T23:49:17.000Z
|
datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/metadata.py
|
ypisetsky/integrations-core
|
f7153d3f896827c3325c7f0ec088bc17d088a894
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/metadata.py
|
ypisetsky/integrations-core
|
f7153d3f896827c3325c7f0ec088bc17d088a894
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import re
from collections import defaultdict
import click
from ...testing import process_checks_option
from ...utils import complete_valid_checks, get_metadata_file, load_manifest, normalize_display_name, read_metadata_rows
from ..console import CONTEXT_SETTINGS, abort, echo_debug, echo_failure, echo_info, echo_success, echo_warning
REQUIRED_HEADERS = {'metric_name', 'metric_type', 'orientation', 'integration'}
OPTIONAL_HEADERS = {'description', 'interval', 'unit_name', 'per_unit_name', 'short_name'}
ALL_HEADERS = REQUIRED_HEADERS | OPTIONAL_HEADERS
VALID_METRIC_TYPE = {'count', 'gauge', 'rate'}
VALID_ORIENTATION = {'0', '1', '-1'}
EXCLUDE_INTEGRATIONS = [
'disk',
'go-expvar', # This has a special case externally
'go-metro',
'hdfs_datanode',
'hdfs_namenode',
'http',
'kafka_consumer',
'kubelet',
'kubernetes',
'kubernetes_api_server_metrics',
'kubernetes_state',
'mesos_master',
'mesos_slave',
'network',
'ntp',
'process',
'riak_cs',
'system_core',
'system_swap',
'tcp',
]
# To easily derive these again in future, copy the contents of `integration/system/units_catalog.csv` then run:
#
# pyperclip.copy('\n'.join(" '{}',".format(line.split(',')[2]) for line in pyperclip.paste().splitlines()))
VALID_UNIT_NAMES = {
'name',
'bit',
'byte',
'kibibyte',
'mebibyte',
'gibibyte',
'tebibyte',
'pebibyte',
'exbibyte',
'microsecond',
'millisecond',
'second',
'minute',
'hour',
'day',
'week',
'fraction',
'percent',
'connection',
'request',
'process',
'file',
'buffer',
'inode',
'sector',
'block',
'packet',
'segment',
'response',
'message',
'payload',
'core',
'thread',
'table',
'index',
'lock',
'transaction',
'query',
'row',
'hit',
'miss',
'eviction',
'dollar',
'cent',
'error',
'host',
'node',
'key',
'command',
'offset',
'page',
'read',
'write',
'occurrence',
'event',
'time',
'unit',
'operation',
'item',
'record',
'object',
'cursor',
'assertion',
'fault',
'percent_nano',
'get',
'set',
'scan',
'nanosecond',
'service',
'task',
'worker',
'resource',
'document',
'shard',
'flush',
'merge',
'refresh',
'fetch',
'garbage collection',
'timeout',
'hertz',
'kilohertz',
'megahertz',
'gigahertz',
'email',
'datagram',
'column',
'apdex',
'instance',
'sample',
'commit',
'wait',
'ticket',
'split',
'stage',
'monitor',
'location',
'check',
'question',
'route',
'session',
'entry',
'attempt',
'cpu',
'device',
'update',
'method',
'job',
'container',
'execution',
'throttle',
'invocation',
'user',
'degree celsius',
'degree fahrenheit',
'success',
'nanocore',
'microcore',
'millicore',
'kilocore',
'megacore',
'gigacore',
'teracore',
'petacore',
'exacore',
'build',
'prediction',
'heap',
'volume',
'watt',
'kilowatt',
'megawatt',
'gigawatt',
'terawatt',
'view',
'microdollar',
'euro',
'pound',
'penny',
'yen',
'milliwatt',
'microwatt',
'nanowatt',
'ampere',
'milliampere',
'volt',
'millivolt',
'deciwatt',
'decidegree celsius',
'span',
'exception',
'run',
}
ALLOWED_PREFIXES = ['system', 'jvm', 'http', 'datadog', 'sftp']
PROVIDER_INTEGRATIONS = {'openmetrics', 'prometheus'}
MAX_DESCRIPTION_LENGTH = 400
METRIC_REPLACEMENT = re.compile(r"([^a-zA-Z0-9_.]+)|(^[^a-zA-Z]+)")
METRIC_DOTUNDERSCORE_CLEANUP = re.compile(r"_*\._*")
def normalize_metric_name(metric_name):
"""Copy pasted from the backend normalization code.
Extracted from dogweb/datalayer/metrics/query/metadata.py:normalize_metric_name
Metrics in metadata.csv need to be formatted this way otherwise, syncing metadata will fail.
Function not exported as a util, as this is different than AgentCheck.normalize. This function just makes sure
that whatever is in the metadata.csv is understandable by the backend.
"""
if not isinstance(metric_name, str):
metric_name = str(metric_name)
metric_name = METRIC_REPLACEMENT.sub("_", metric_name)
return METRIC_DOTUNDERSCORE_CLEANUP.sub(".", metric_name).strip("_")
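# Illustrative examples (added; not in the original file) of what the two
# regexes above produce, assuming standard `re` semantics:
#   normalize_metric_name("my metric!name") -> "my_metric_name"  (runs of invalid chars become "_")
#   normalize_metric_name("metric_.name")   -> "metric.name"     (underscores around dots are dropped)
#   normalize_metric_name("_metric_")       -> "metric"          (leading/trailing "_" stripped)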
def check_duplicate_values(current_check, line, row, header_name, duplicates, fail=None):
"""Check if the given column value has been seen before.
If so, output a failure and return True when ``fail`` is set, otherwise output a warning and return False.
"""
if row[header_name] and row[header_name] not in duplicates:
duplicates.add(row[header_name])
elif row[header_name] != '':
message = f"{current_check}:{line} `{row[header_name]}` is a duplicate {header_name}"
if fail:
echo_failure(message)
return True
else:
echo_warning(message)
return False
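# A minimal sketch (added; not part of the original file) of how the helper
# above is driven while iterating metadata rows; `rows` stands for a
# hypothetical iterable of (line, row) pairs such as read_metadata_rows yields:
#
#     seen_names = set()
#     for line, row in rows:
#         errors |= check_duplicate_values('my_check', line, row, 'metric_name', seen_names, fail=True)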
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Validate `metadata.csv` files')
@click.option(
'--check-duplicates', is_flag=True, help='Output warnings if there are duplicate short names and descriptions'
)
@click.option('--show-warnings', '-w', is_flag=True, help='Show warnings in addition to failures')
@click.argument('check', autocompletion=complete_valid_checks, required=False)
def metadata(check, check_duplicates, show_warnings):
"""Validates metadata.csv files
If `check` is specified, only that check will be validated; a check value of 'changed' will only apply to
changed checks, and an 'all' or empty `check` value will validate the metadata files of all checks.
"""
checks = process_checks_option(check, source='metrics', validate=True)
echo_info(f"Validating metadata for {len(checks)} checks ...")
# If a check is specified, abort if it doesn't have a metadata file
if check not in ('all', 'changed') and not checks:
abort(f'Metadata file for {check} not found.')
errors = False
for current_check in checks:
if current_check.startswith('datadog_checks_'):
continue
# get any manifest info needed for validation
manifest = load_manifest(current_check)
try:
metric_prefix = manifest['metric_prefix'].rstrip('.')
except KeyError:
metric_prefix = None
display_name = manifest['display_name']
metadata_file = get_metadata_file(current_check)
echo_debug(f"Checking {metadata_file}")
# To make logging less verbose, common errors are counted for the current check
metric_prefix_count = defaultdict(int)
empty_count = defaultdict(int)
empty_warning_count = defaultdict(int)
duplicate_name_set = set()
duplicate_short_name_set = set()
duplicate_description_set = set()
metric_prefix_error_shown = False
if os.stat(metadata_file).st_size == 0:
errors = True
echo_failure(f"{current_check} metadata file is empty. This file needs the header row at minimum")
for line, row in read_metadata_rows(metadata_file):
# determine if number of columns is complete by checking for None values (DictReader populates missing columns with None https://docs.python.org/3.8/library/csv.html#csv.DictReader) # noqa
if None in row.values():
errors = True
echo_failure(f"{current_check}:{line} {row['metric_name']} Has the wrong amount of columns")
continue
# all headers exist, no invalid headers
all_keys = set(row)
if all_keys != ALL_HEADERS:
invalid_headers = all_keys.difference(ALL_HEADERS)
if invalid_headers:
errors = True
echo_failure(f'{current_check}:{line} Invalid column {invalid_headers}')
missing_headers = ALL_HEADERS.difference(all_keys)
if missing_headers:
errors = True
echo_failure(f'{current_check}:{line} Missing columns {missing_headers}')
continue
errors = errors or check_duplicate_values(
current_check, line, row, 'metric_name', duplicate_name_set, fail=True
)
if check_duplicates:
check_duplicate_values(current_check, line, row, 'short_name', duplicate_short_name_set)
check_duplicate_values(current_check, line, row, 'description', duplicate_description_set)
normalized_metric_name = normalize_metric_name(row['metric_name'])
if row['metric_name'] != normalized_metric_name:
errors = True
echo_failure(
f"{current_check}:{line} Metric name '{row['metric_name']}' is not valid, "
f"it should be normalized as {normalized_metric_name}"
)
# metric_name header
if metric_prefix:
prefix = row['metric_name'].split('.')[0]
if prefix not in ALLOWED_PREFIXES:
if not row['metric_name'].startswith(metric_prefix):
metric_prefix_count[prefix] += 1
else:
errors = True
if not metric_prefix_error_shown and current_check not in PROVIDER_INTEGRATIONS:
metric_prefix_error_shown = True
echo_failure(f'{current_check}:{line} metric_prefix does not exist in manifest')
# metric_type header
if row['metric_type'] and row['metric_type'] not in VALID_METRIC_TYPE:
errors = True
echo_failure(f"{current_check}:{line} `{row['metric_type']}` is an invalid metric_type.")
# unit_name header
if row['unit_name'] and row['unit_name'] not in VALID_UNIT_NAMES:
errors = True
echo_failure(f"{current_check}:{line} `{row['unit_name']}` is an invalid unit_name.")
# per_unit_name header
if row['per_unit_name'] and row['per_unit_name'] not in VALID_UNIT_NAMES:
errors = True
echo_failure(f"{current_check}:{line} `{row['per_unit_name']}` is an invalid per_unit_name.")
# integration header
integration = row['integration']
normalized_integration = normalize_display_name(display_name)
if integration != normalized_integration and normalized_integration not in EXCLUDE_INTEGRATIONS:
errors = True
echo_failure(
f"{current_check}:{line} integration: `{row['integration']}` should be: {normalized_integration}"
)
# orientation header
if row['orientation'] and row['orientation'] not in VALID_ORIENTATION:
errors = True
echo_failure(f"{current_check}:{line} `{row['orientation']}` is an invalid orientation.")
# empty required fields
for header in REQUIRED_HEADERS:
if not row[header]:
empty_count[header] += 1
# empty description field, description is recommended
if not row['description']:
empty_warning_count['description'] += 1
elif "|" in row['description']:
errors = True
echo_failure(f"{current_check}:{line} `{row['metric_name']}` contains a `|`.")
# check if there is unicode
elif any(not content.isascii() for _, content in row.items()):
errors = True
echo_failure(f"{current_check}:{line} `{row['metric_name']}` contains unicode characters.")
# exceeds max allowed length of description
elif len(row['description']) > MAX_DESCRIPTION_LENGTH:
errors = True
echo_failure(
f"{current_check}:{line} `{row['metric_name']}` exceeds the max length: "
f"{MAX_DESCRIPTION_LENGTH} for descriptions."
)
if row['interval'] and not row['interval'].isdigit():
errors = True
echo_failure(f"{current_check}:{line} interval should be an int, found '{row['interval']}'.")
for header, count in empty_count.items():
errors = True
echo_failure(f'{current_check}: {header} is empty in {count} rows.')
for prefix, count in metric_prefix_count.items():
echo_failure(
f"{current_check}: `{prefix}` appears {count} time(s) and does not match metric_prefix "
"defined in the manifest."
)
if show_warnings:
for header, count in empty_warning_count.items():
echo_warning(f'{current_check}: {header} is empty in {count} rows.')
if errors:
abort()
echo_success('Validated!')
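# Hedged usage note (added): this command is exposed through the ddev CLI; the
# exact invocations below are assumptions based on the command declaration
# above, not taken from this file:
#
#     ddev validate metadata <check>
#     ddev validate metadata --check-duplicates --show-warnings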
| 31.507143
| 200
| 0.6013
|
750cb4ab62fbae289b38cca2774cf1d454986bb8
| 17
|
py
|
Python
|
mak/libs/ircc/ir_ast/ir_reference.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | 4
|
2015-05-13T16:28:36.000Z
|
2017-05-24T15:34:14.000Z
|
mak/libs/ircc/ir_ast/ir_reference.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
mak/libs/ircc/ir_ast/ir_reference.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | 1
|
2017-03-21T08:28:07.000Z
|
2017-03-21T08:28:07.000Z
|
IrReference = str
| 17
| 17
| 0.823529
|
3f4aa820f05b822fe2d62d7f332b36886deca11a
| 448
|
py
|
Python
|
core/migrations/0006_auto_20210406_0647.py
|
SejaMuchhal/PizzaStore
|
268ee7df8040616fc8cd6f59a74440b8428db000
|
[
"MIT"
] | 1
|
2021-04-06T17:01:52.000Z
|
2021-04-06T17:01:52.000Z
|
core/migrations/0006_auto_20210406_0647.py
|
SejaMuchhal/PizzaStore
|
268ee7df8040616fc8cd6f59a74440b8428db000
|
[
"MIT"
] | null | null | null |
core/migrations/0006_auto_20210406_0647.py
|
SejaMuchhal/PizzaStore
|
268ee7df8040616fc8cd6f59a74440b8428db000
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-06 06:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20210406_0631'),
]
operations = [
migrations.AlterField(
model_name='pizza',
name='type',
field=models.CharField(choices=[('regular', 'Regular pizza'), ('square', 'Square pizza')], max_length=13),
),
]
| 23.578947
| 118
| 0.598214
|
dea5d1cc17f9cc854bffa302f93b0e0dd79079db
| 797
|
py
|
Python
|
google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py
|
dizcology/python-aiplatform
|
1a135775966c8a2303ded529eba514dcf9db7205
|
[
"Apache-2.0"
] | 180
|
2020-09-23T17:21:15.000Z
|
2022-03-30T17:25:47.000Z
|
google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py
|
pompipo/python-aiplatform
|
3612b05c62dfb46822cd2c1798fd47349dba33bc
|
[
"Apache-2.0"
] | 601
|
2020-09-23T16:23:44.000Z
|
2022-03-31T19:08:23.000Z
|
google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py
|
pompipo/python-aiplatform
|
3612b05c62dfb46822cd2c1798fd47349dba33bc
|
[
"Apache-2.0"
] | 109
|
2020-09-23T16:22:04.000Z
|
2022-03-28T21:18:29.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import SpecialistPoolServiceClient
from .async_client import SpecialistPoolServiceAsyncClient
__all__ = (
"SpecialistPoolServiceClient",
"SpecialistPoolServiceAsyncClient",
)
| 34.652174
| 74
| 0.766625
|
4820441f978ed43270848593b75a7e2c928def0c
| 7,064
|
py
|
Python
|
scenes/benchmark_dam.py
|
WenyinWei/fluimu
|
ea9c076719ce31b64125708c0e150e2ab6b9a8c5
|
[
"Apache-2.0"
] | 95
|
2019-12-04T21:39:51.000Z
|
2022-03-12T01:03:36.000Z
|
scenes/benchmark_dam.py
|
WenyinWei/fluimu
|
ea9c076719ce31b64125708c0e150e2ab6b9a8c5
|
[
"Apache-2.0"
] | 4
|
2019-12-21T15:08:54.000Z
|
2021-02-28T19:40:08.000Z
|
scenes/benchmark_dam.py
|
WenyinWei/fluimu
|
ea9c076719ce31b64125708c0e150e2ab6b9a8c5
|
[
"Apache-2.0"
] | 26
|
2020-01-21T00:48:47.000Z
|
2022-01-14T06:04:20.000Z
|
# ----------------------------------------------------------------------------
#
# MantaFlow fluid solver framework
# Copyright 2018 Kiwon Um, Nils Thuerey
#
# This program is free software, distributed under the terms of the
# Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Benchmark case for dam with fluid-implicit-particle (FLIP) simulation
#
# ----------------------------------------------------------------------------
guion = True
pause = True
# default solver parameters
params = {}
params['dim'] = 3 # dimension
params['sres'] = 2 # sub-resolution per cell
params['dx'] = 1.0/params['sres'] # particle spacing (= 2 x radius)
params['res'] = 25 # reference resolution
params['len'] = 1.0 # reference length
params['bnd'] = 4 # boundary cells
params['gref'] = -9.8 # real-world gravity
params['cgaccuracy'] = 1e-3 # cg solver's threshold
params['jitter'] = 0.5 # jittering particles
params['gfm'] = True # 2nd order fluid-empty BC
params['fps'] = 30 # frames per second
params['t_end'] = 5.0 # quit simulation
params['sdt'] = None # fix timestep size
# scale unit in regard to the manta world
scaleToManta = float(params['res'])/params['len']
# NOTE: the original test uses 3.22, but here it is slightly modified for convenience in discretization
params['gs'] = [round(float(params['res'])*3.2)+params['bnd']*2, params['res']*3+params['bnd']*2, params['res']+params['bnd']*2 if params['dim']==3 else 1]
params['grav'] = params['gref']*scaleToManta
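# Worked example of the scaling above (added for clarity): with res == 25 and
# len == 1.0, scaleToManta == 25.0, so params['grav'] == -9.8 * 25 == -245.0 in
# grid units, and in 3D params['gs'] == [88, 83, 33]
# (round(25 * 3.2) + 8, 25 * 3 + 8, 25 + 8).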
s = Solver(name="FLIP", gridSize=vec3(params['gs'][0], params['gs'][1], params['gs'][2]), dim=params['dim'])
s.cfl = 1
s.frameLength = 1.0/float(params['fps'])
s.timestepMin = 0
s.timestepMax = s.frameLength
s.timestep = s.frameLength
# prepare grids and particles
gFlags = s.create(FlagGrid)
gV = s.create(MACGrid)
gVold = s.create(MACGrid)
gP = s.create(RealGrid)
gPhiSld = s.create(LevelsetGrid)
pp = s.create(BasicParticleSystem)
pT = pp.create(PdataInt)
pV = pp.create(PdataVec3)
pVtmp = pp.create(PdataVec3)
mesh = s.create(name='mesh', type=Mesh) if (params['dim']==3 and guion) else None
paramSolvePressure = dict(flags=gFlags, vel=gV, pressure=gP, cgAccuracy=params['cgaccuracy'])
if params['gfm']: # for the free-surface boundary condition
gPhi = s.create(LevelsetGrid)
gIdxSys = s.create(ParticleIndexSystem)
gIdx = s.create(IntGrid)
paramSolvePressure.update(phi=gPhi)
# boundary setup
gFlags.initDomain(params['bnd']-1)
bndBox = s.create(Box, p0=vec3(0), p1=vec3(params['gs'][0], params['gs'][1], params['gs'][2]))
inBox = s.create(Box, p0=vec3(params['bnd'], params['bnd'], params['bnd'] if params['dim']==3 else 0), p1=vec3(params['gs'][0]-params['bnd'], params['gs'][1]-params['bnd'], (params['gs'][0]-params['bnd']) if params['dim']==3 else 1))
gPhiSld.join(bndBox.computeLevelset(notiming=True), notiming=True)
gPhiSld.subtract(inBox.computeLevelset(notiming=True), notiming=True)
# obstacle
a = vec3(0.744*scaleToManta+params['bnd'], 0.161*0.5*scaleToManta+params['bnd'], 0.5*params['gs'][2] if (params['dim']==3) else 0)
b = vec3(0.161*0.5*scaleToManta, 0.161*0.5*scaleToManta, 0.403*0.5*scaleToManta if (params['dim']==3) else params['gs'][2])
obs = s.create(Box, center=a, size=b)
obs.applyToGrid(grid=gFlags, value=FlagObstacle, respectFlags=gFlags)
gPhiSld.join(obs.computeLevelset(notiming=True), notiming=True)
# fluid setup: dam
dam_c = [2.606, 0.275, 0.5]
dam_s = [1.228*0.5, 0.55*0.5, 0.5]
a = vec3(dam_c[0]*scaleToManta+params['bnd'], dam_c[1]*scaleToManta+params['bnd'], dam_c[2]*scaleToManta+params['bnd'] if (params['dim']==3) else 0)
b = vec3(dam_s[0]*scaleToManta, dam_s[1]*scaleToManta, dam_s[2]*scaleToManta if (params['dim']==3) else params['gs'][2])
fld = s.create(Box, center=a, size=b)
fld.applyToGrid(grid=gFlags, value=FlagFluid, respectFlags=gFlags)
begin = pp.pySize()
sampleShapeWithParticles(shape=fld, flags=gFlags, parts=pp, discretization=params['sres'], randomness=0, notiming=True)
end = pp.pySize()
pT.setConstRange(s=FlagFluid, begin=begin, end=end, notiming=True)
if guion:
gui = Gui()
gui.show()
if pause: gui.pause()
while (s.timeTotal<params['t_end']): # main loop
mapPartsToMAC(vel=gV, flags=gFlags, velOld=gVold, parts=pp, partVel=pV, ptype=pT, exclude=FlagEmpty)
if params['sdt'] is None: s.adaptTimestep(gV.getMaxAbs())
else: s.adaptTimestepByDt(params['sdt'])
addGravityNoScale(flags=gFlags, vel=gV, gravity=vec3(0, params['grav'], 0))
if params['gfm']:
gridParticleIndex(parts=pp, flags=gFlags, indexSys=gIdxSys, index=gIdx)
unionParticleLevelset(parts=pp, indexSys=gIdxSys, flags=gFlags, index=gIdx, phi=gPhi, radiusFactor=1.0)
extrapolateLsSimple(phi=gPhi, distance=4, inside=True)
setWallBcs(flags=gFlags, vel=gV)
solvePressure(**paramSolvePressure)
setWallBcs(flags=gFlags, vel=gV)
extrapolateMACSimple(flags=gFlags, vel=gV)
# update velocity (general update from FLIP and individual update for Lagrangian particles)
flipVelocityUpdate(vel=gV, velOld=gVold, flags=gFlags, parts=pp, partVel=pV, flipRatio=0.97, ptype=pT, exclude=FlagEmpty)
addForcePvel(vel=pV, a=vec3(0, params['grav'], 0), dt=s.timestep, ptype=pT, exclude=FlagFluid)
# update position
pp.getPosPdata(target=pVtmp)
pp.advectInGrid(flags=gFlags, vel=gV, integrationMode=IntRK4, deleteInObstacle=False, ptype=pT, exclude=FlagEmpty)
eulerStep(parts=pp, vel=pV, ptype=pT, exclude=FlagFluid)
pp.projectOutOfBnd(flags=gFlags, bnd=params['bnd']+params['dx']*0.5, plane='xXyYzZ', ptype=pT)
pushOutofObs(parts=pp, flags=gFlags, phiObs=gPhiSld, thresh=params['dx']*0.5, ptype=pT)
# update velocity of the Lagrangian particles
updateVelocityFromDeltaPos(parts=pp, vel=pV, x_prev=pVtmp, dt=s.timestep, ptype=pT, exclude=FlagFluid)
# We don't need to solve the pressure for isolated cells.
markFluidCells(parts=pp, flags=gFlags, ptype=pT)
setPartType(parts=pp, ptype=pT, mark=FlagFluid, stype=FlagEmpty, flags=gFlags, cflag=FlagFluid)
markIsolatedFluidCell(flags=gFlags, mark=FlagEmpty)
setPartType(parts=pp, ptype=pT, mark=FlagEmpty, stype=FlagFluid, flags=gFlags, cflag=FlagEmpty)
if params['dim']==3 and guion:
gridParticleIndex(parts=pp, flags=gFlags, indexSys=gIdxSys, index=gIdx)
unionParticleLevelset(parts=pp, indexSys=gIdxSys, flags=gFlags, index=gIdx, phi=gPhi, radiusFactor=1.0)
extrapolateLsSimple(phi=gPhi, distance=4, inside=True)
gPhi.createMesh(mesh)
s.step()
| 49.055556
| 234
| 0.640147
|
7be1c0a31de835a4ac6ea4af114b8227428bead5
| 272
|
py
|
Python
|
webfront_service/api/exceptions.py
|
halo-framework/halo-webapp
|
279db4726124662f601e20e55e2019cd2c0a3673
|
[
"MIT"
] | null | null | null |
webfront_service/api/exceptions.py
|
halo-framework/halo-webapp
|
279db4726124662f601e20e55e2019cd2c0a3673
|
[
"MIT"
] | null | null | null |
webfront_service/api/exceptions.py
|
halo-framework/halo-webapp
|
279db4726124662f601e20e55e2019cd2c0a3673
|
[
"MIT"
] | null | null | null |
from halo_app.exceptions import HaloException
class IllegalProviderException(HaloException):
pass
class IllegalRuntimeException(HaloException):
pass
class IllegalServiceDomainException(HaloException):
pass
class IllegalIdException(HaloException):
pass
| 19.428571
| 51
| 0.819853
|
e39d934815464869c8f90be92d4dcec38fb50ecb
| 404
|
py
|
Python
|
biokit/services/text/faidx.py
|
JLSteenwyk/BioKIT
|
9ca31d8003dc845bf56b2c56c87820c0b05021c4
|
[
"MIT"
] | 8
|
2021-10-03T21:08:33.000Z
|
2021-12-02T17:15:32.000Z
|
biokit/services/text/faidx.py
|
JLSteenwyk/BioKIT
|
9ca31d8003dc845bf56b2c56c87820c0b05021c4
|
[
"MIT"
] | null | null | null |
biokit/services/text/faidx.py
|
JLSteenwyk/BioKIT
|
9ca31d8003dc845bf56b2c56c87820c0b05021c4
|
[
"MIT"
] | 5
|
2021-10-05T06:25:03.000Z
|
2022-01-04T11:01:09.000Z
|
from Bio import SeqIO
from .base import Text
class Faidx(Text):
def __init__(self, args) -> None:
super().__init__(**self.process_args(args))
def run(self):
record_dict = SeqIO.index(self.fasta, "fasta")
print(f">{record_dict[self.entry].name}\n{record_dict[self.entry].seq}")
def process_args(self, args):
return dict(fasta=args.fasta, entry=args.entry)
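# Hedged usage sketch (added): any object exposing ``fasta`` and ``entry``
# attributes works here, e.g. an argparse.Namespace:
#
#     from argparse import Namespace
#     Faidx(Namespace(fasta="genome.fa", entry="chr1")).run()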
| 25.25
| 80
| 0.658416
|
97b6d509d724101d39faa0c45104c76e471e8322
| 3,684
|
py
|
Python
|
netroids/player_managers.py
|
FarmCodeGary/Netroids
|
a3370fb7cbfc01a05b57cbf6871a3c63827ad5ac
|
[
"MIT"
] | null | null | null |
netroids/player_managers.py
|
FarmCodeGary/Netroids
|
a3370fb7cbfc01a05b57cbf6871a3c63827ad5ac
|
[
"MIT"
] | null | null | null |
netroids/player_managers.py
|
FarmCodeGary/Netroids
|
a3370fb7cbfc01a05b57cbf6871a3c63827ad5ac
|
[
"MIT"
] | null | null | null |
from interface import (
DOWN_PRESSED_EVENT, DOWN_RELEASED_EVENT, UP_PRESSED_EVENT,
UP_RELEASED_EVENT, LEFT_PRESSED_EVENT, LEFT_RELEASED_EVENT,
RIGHT_PRESSED_EVENT, RIGHT_RELEASED_EVENT, SPACE_PRESSED_EVENT,
SPACE_RELEASED_EVENT)
class LocalPlayerManager:
def __init__(self, gui):
self.entity_id = None
self.down_pressed = False
self.up_pressed = False
self.left_pressed = False
self.right_pressed = False
self.space_pressed = False
self.fired_this_frame = False
gui.set_event_handler(DOWN_PRESSED_EVENT, self.on_down_pressed)
gui.set_event_handler(DOWN_RELEASED_EVENT, self.on_down_released)
gui.set_event_handler(UP_PRESSED_EVENT, self.on_up_pressed)
gui.set_event_handler(UP_RELEASED_EVENT, self.on_up_released)
gui.set_event_handler(LEFT_PRESSED_EVENT, self.on_left_pressed)
gui.set_event_handler(LEFT_RELEASED_EVENT, self.on_left_released)
gui.set_event_handler(RIGHT_PRESSED_EVENT, self.on_right_pressed)
gui.set_event_handler(RIGHT_RELEASED_EVENT, self.on_right_released)
gui.set_event_handler(SPACE_PRESSED_EVENT, self.on_space_pressed)
gui.set_event_handler(SPACE_RELEASED_EVENT, self.on_space_released)
def set_entity(self, entity_id):
self.entity_id = entity_id
def clear_fired_this_frame(self):
self.fired_this_frame = False
def on_down_pressed(self):
self.down_pressed = True
def on_down_released(self):
self.down_pressed = False
def on_up_pressed(self):
self.up_pressed = True
def on_up_released(self):
self.up_pressed = False
def on_left_pressed(self):
self.left_pressed = True
def on_left_released(self):
self.left_pressed = False
def on_right_pressed(self):
self.right_pressed = True
def on_right_released(self):
self.right_pressed = False
def on_space_pressed(self):
self.space_pressed = True
self.fired_this_frame = True
def on_space_released(self):
self.space_pressed = False
def get_rotation_status(self):
if self.left_pressed and self.right_pressed:
return "off"
elif self.left_pressed:
return "left"
elif self.right_pressed:
return "right"
else:
return "off"
def get_throttle_status(self):
if self.up_pressed and self.down_pressed:
return "off"
elif self.up_pressed:
return "forward"
elif self.down_pressed:
return "backward"
else:
return "off"
def get_shooting_status(self):
if self.space_pressed or self.fired_this_frame:
return "on"
else:
return "off"
def generate_control_message(self):
if self.entity_id is None:
return None
else:
# TODO: Replace with use of string.format
return ("CONTROL\n"+str(self.entity_id)+"\nThrottle:" +
self.get_throttle_status()+"\nRotating:" +
self.get_rotation_status()+"\nShooting:" +
self.get_shooting_status())
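# Example (added for illustration): with entity_id == 7 and only up and space
# pressed, generate_control_message() returns
# "CONTROL\n7\nThrottle:forward\nRotating:off\nShooting:on".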
class RemotePlayerManager:
def __init__(self, ip_address, name, color, ship):
self.ip_address = ip_address
self.name = name
self.color = color
self.ship = ship
self.score = 0
ship.player_manager = self
self.time_last_heard_from = None
def resetScore(self):
self.score = 0
| 32.315789
| 76
| 0.637079
|
60a0bd9165c960dcd89c5a3f08e9d6083d75f843
| 1,258
|
py
|
Python
|
sqlalchemy_paginate/__init__.py
|
sungmin-park/sqlalchemy-paginate
|
a0512a1f277ebe25a38b874f0acb6714f2c36851
|
[
"MIT"
] | 1
|
2016-07-06T09:53:51.000Z
|
2016-07-06T09:53:51.000Z
|
sqlalchemy_paginate/__init__.py
|
sungmin-park/sqlalchemy-paginate
|
a0512a1f277ebe25a38b874f0acb6714f2c36851
|
[
"MIT"
] | null | null | null |
sqlalchemy_paginate/__init__.py
|
sungmin-park/sqlalchemy-paginate
|
a0512a1f277ebe25a38b874f0acb6714f2c36851
|
[
"MIT"
] | null | null | null |
def int_ceil(x, y):
"""
Equivalent to math.ceil(x / y) using integer arithmetic only.
:param x: dividend
:param y: divisor
:return: the smallest integer greater than or equal to x / y
"""
q, r = divmod(x, y)
if r:
q += 1
return q
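# Illustrative check (added): int_ceil(7, 3) == 3 while int_ceil(6, 3) == 2;
# divmod(7, 3) -> (2, 1), and the non-zero remainder bumps the quotient to 3.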
class Pagination(object):
def __init__(self, query, page=1, per_page=10, per_nav=10,
map_=lambda x: x):
self.first = 1
self.total = query.count()
if self.total == 0:
self.last = 1
else:
self.last = int_ceil(self.total, per_page)
self.page = max(min(self.last, page), 1)
self.prev = max(self.page - 1, 1)
self.has_prev = self.prev != self.page
self.next = min(self.page + 1, self.last)
self.has_next = self.next != self.page
self.nav_head = per_nav * (int_ceil(self.page, per_nav) - 1) + 1
self.nav_tail = min(self.last, self.nav_head + per_nav - 1)
self.nav_prev = max(self.page - per_nav, 1)
self.has_nav_prev = self.nav_prev < self.nav_head
self.nav_next = min(self.page + per_nav, self.last)
self.has_nav_next = self.nav_next > self.nav_tail
self.pages = range(self.nav_head, self.nav_tail + 1)
start = (self.page - 1) * per_page
self.items = map(map_, query[start: start + per_page])
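# Worked example (added for illustration, not part of the original module):
# with total == 95, per_page == 10, per_nav == 10 and page == 7 the attributes
# resolve to last == 10, page == 7, prev == 6, next == 8, nav_head == 1,
# nav_tail == 10 and pages == range(1, 11); has_nav_prev and has_nav_next are
# both False because the single navigation window already covers every page.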
| 29.952381
| 72
| 0.562003
|
1d3ab77f6a8bb2479c14ba9b9de1e53bd5d91996
| 80,334
|
py
|
Python
|
generated/nidigital/nidigital/unit_tests/_mock_helper.py
|
kurtp-ni/nimi-python
|
4f0bccce67a69ca9f46a8ab9b07dc26ca0049729
|
[
"MIT"
] | 88
|
2017-08-03T18:07:27.000Z
|
2022-01-28T13:55:06.000Z
|
generated/nidigital/nidigital/unit_tests/_mock_helper.py
|
kurtp-ni/nimi-python
|
4f0bccce67a69ca9f46a8ab9b07dc26ca0049729
|
[
"MIT"
] | 1,310
|
2017-07-11T18:42:44.000Z
|
2022-03-28T21:03:57.000Z
|
generated/nidigital/nidigital/unit_tests/_mock_helper.py
|
kurtp-ni/nimi-python
|
4f0bccce67a69ca9f46a8ab9b07dc26ca0049729
|
[
"MIT"
] | 70
|
2017-07-25T14:52:53.000Z
|
2022-03-31T14:14:23.000Z
|
# -*- coding: utf-8 -*-
# This file was generated
import sys # noqa: F401 - Not all mock_helpers will need this
class MockFunctionCallError(Exception):
def __init__(self, function, param=None):
self.function = function
self.param = param
msg = "{0} called without setting side_effect".format(self.function)
if param is not None:
msg += " or setting the {0} parameter return value".format(self.param)
super(Exception, self).__init__(msg)
class SideEffectsHelper(object):
def __init__(self):
self._defaults = {}
self._defaults['Abort'] = {}
self._defaults['Abort']['return'] = 0
self._defaults['AbortKeepAlive'] = {}
self._defaults['AbortKeepAlive']['return'] = 0
self._defaults['ApplyLevelsAndTiming'] = {}
self._defaults['ApplyLevelsAndTiming']['return'] = 0
self._defaults['ApplyTDROffsets'] = {}
self._defaults['ApplyTDROffsets']['return'] = 0
self._defaults['BurstPattern'] = {}
self._defaults['BurstPattern']['return'] = 0
self._defaults['ClockGenerator_Abort'] = {}
self._defaults['ClockGenerator_Abort']['return'] = 0
self._defaults['ClockGenerator_GenerateClock'] = {}
self._defaults['ClockGenerator_GenerateClock']['return'] = 0
self._defaults['Commit'] = {}
self._defaults['Commit']['return'] = 0
self._defaults['ConfigureActiveLoadLevels'] = {}
self._defaults['ConfigureActiveLoadLevels']['return'] = 0
self._defaults['ConfigurePatternBurstSites'] = {}
self._defaults['ConfigurePatternBurstSites']['return'] = 0
self._defaults['ConfigureTimeSetCompareEdgesStrobe'] = {}
self._defaults['ConfigureTimeSetCompareEdgesStrobe']['return'] = 0
self._defaults['ConfigureTimeSetCompareEdgesStrobe2x'] = {}
self._defaults['ConfigureTimeSetCompareEdgesStrobe2x']['return'] = 0
self._defaults['ConfigureTimeSetDriveEdges'] = {}
self._defaults['ConfigureTimeSetDriveEdges']['return'] = 0
self._defaults['ConfigureTimeSetDriveEdges2x'] = {}
self._defaults['ConfigureTimeSetDriveEdges2x']['return'] = 0
self._defaults['ConfigureTimeSetDriveFormat'] = {}
self._defaults['ConfigureTimeSetDriveFormat']['return'] = 0
self._defaults['ConfigureTimeSetEdge'] = {}
self._defaults['ConfigureTimeSetEdge']['return'] = 0
self._defaults['ConfigureTimeSetEdgeMultiplier'] = {}
self._defaults['ConfigureTimeSetEdgeMultiplier']['return'] = 0
self._defaults['ConfigureTimeSetPeriod'] = {}
self._defaults['ConfigureTimeSetPeriod']['return'] = 0
self._defaults['ConfigureVoltageLevels'] = {}
self._defaults['ConfigureVoltageLevels']['return'] = 0
self._defaults['CreateCaptureWaveformFromFileDigicapture'] = {}
self._defaults['CreateCaptureWaveformFromFileDigicapture']['return'] = 0
self._defaults['CreateCaptureWaveformParallel'] = {}
self._defaults['CreateCaptureWaveformParallel']['return'] = 0
self._defaults['CreateCaptureWaveformSerial'] = {}
self._defaults['CreateCaptureWaveformSerial']['return'] = 0
self._defaults['CreateSourceWaveformFromFileTDMS'] = {}
self._defaults['CreateSourceWaveformFromFileTDMS']['return'] = 0
self._defaults['CreateSourceWaveformParallel'] = {}
self._defaults['CreateSourceWaveformParallel']['return'] = 0
self._defaults['CreateSourceWaveformSerial'] = {}
self._defaults['CreateSourceWaveformSerial']['return'] = 0
self._defaults['CreateTimeSet'] = {}
self._defaults['CreateTimeSet']['return'] = 0
self._defaults['DeleteAllTimeSets'] = {}
self._defaults['DeleteAllTimeSets']['return'] = 0
self._defaults['DisableSites'] = {}
self._defaults['DisableSites']['return'] = 0
self._defaults['EnableSites'] = {}
self._defaults['EnableSites']['return'] = 0
self._defaults['FetchHistoryRAMCycleInformation'] = {}
self._defaults['FetchHistoryRAMCycleInformation']['return'] = 0
self._defaults['FetchHistoryRAMCycleInformation']['patternIndex'] = None
self._defaults['FetchHistoryRAMCycleInformation']['timeSetIndex'] = None
self._defaults['FetchHistoryRAMCycleInformation']['vectorNumber'] = None
self._defaults['FetchHistoryRAMCycleInformation']['cycleNumber'] = None
self._defaults['FetchHistoryRAMCycleInformation']['numDutCycles'] = None
self._defaults['FetchHistoryRAMCyclePinData'] = {}
self._defaults['FetchHistoryRAMCyclePinData']['return'] = 0
self._defaults['FetchHistoryRAMCyclePinData']['actualNumPinData'] = None
self._defaults['FetchHistoryRAMCyclePinData']['expectedPinStates'] = None
self._defaults['FetchHistoryRAMCyclePinData']['actualPinStates'] = None
self._defaults['FetchHistoryRAMCyclePinData']['perPinPassFail'] = None
self._defaults['FetchHistoryRAMScanCycleNumber'] = {}
self._defaults['FetchHistoryRAMScanCycleNumber']['return'] = 0
self._defaults['FetchHistoryRAMScanCycleNumber']['scanCycleNumber'] = None
self._defaults['FrequencyCounter_MeasureFrequency'] = {}
self._defaults['FrequencyCounter_MeasureFrequency']['return'] = 0
self._defaults['FrequencyCounter_MeasureFrequency']['actualNumFrequencies'] = None
self._defaults['FrequencyCounter_MeasureFrequency']['frequencies'] = None
self._defaults['GetAttributeViBoolean'] = {}
self._defaults['GetAttributeViBoolean']['return'] = 0
self._defaults['GetAttributeViBoolean']['value'] = None
self._defaults['GetAttributeViInt32'] = {}
self._defaults['GetAttributeViInt32']['return'] = 0
self._defaults['GetAttributeViInt32']['value'] = None
self._defaults['GetAttributeViInt64'] = {}
self._defaults['GetAttributeViInt64']['return'] = 0
self._defaults['GetAttributeViInt64']['value'] = None
self._defaults['GetAttributeViReal64'] = {}
self._defaults['GetAttributeViReal64']['return'] = 0
self._defaults['GetAttributeViReal64']['value'] = None
self._defaults['GetAttributeViString'] = {}
self._defaults['GetAttributeViString']['return'] = 0
self._defaults['GetAttributeViString']['value'] = None
self._defaults['GetChannelNameFromString'] = {}
self._defaults['GetChannelNameFromString']['return'] = 0
self._defaults['GetChannelNameFromString']['names'] = None
self._defaults['GetError'] = {}
self._defaults['GetError']['return'] = 0
self._defaults['GetError']['errorCode'] = None
self._defaults['GetError']['errorDescription'] = None
self._defaults['GetFailCount'] = {}
self._defaults['GetFailCount']['return'] = 0
self._defaults['GetFailCount']['actualNumRead'] = None
self._defaults['GetFailCount']['failureCount'] = None
self._defaults['GetHistoryRAMSampleCount'] = {}
self._defaults['GetHistoryRAMSampleCount']['return'] = 0
self._defaults['GetHistoryRAMSampleCount']['sampleCount'] = None
self._defaults['GetPatternName'] = {}
self._defaults['GetPatternName']['return'] = 0
self._defaults['GetPatternName']['name'] = None
self._defaults['GetPatternPinList'] = {}
self._defaults['GetPatternPinList']['return'] = 0
self._defaults['GetPatternPinList']['pinList'] = None
self._defaults['GetPinName'] = {}
self._defaults['GetPinName']['return'] = 0
self._defaults['GetPinName']['name'] = None
self._defaults['GetPinResultsPinInformation'] = {}
self._defaults['GetPinResultsPinInformation']['return'] = 0
self._defaults['GetPinResultsPinInformation']['actualNumValues'] = None
self._defaults['GetPinResultsPinInformation']['pinIndexes'] = None
self._defaults['GetPinResultsPinInformation']['siteNumbers'] = None
self._defaults['GetPinResultsPinInformation']['channelIndexes'] = None
self._defaults['GetSitePassFail'] = {}
self._defaults['GetSitePassFail']['return'] = 0
self._defaults['GetSitePassFail']['actualNumSites'] = None
self._defaults['GetSitePassFail']['passFail'] = None
self._defaults['GetSiteResultsSiteNumbers'] = {}
self._defaults['GetSiteResultsSiteNumbers']['return'] = 0
self._defaults['GetSiteResultsSiteNumbers']['actualNumSiteNumbers'] = None
self._defaults['GetSiteResultsSiteNumbers']['siteNumbers'] = None
self._defaults['GetTimeSetDriveFormat'] = {}
self._defaults['GetTimeSetDriveFormat']['return'] = 0
self._defaults['GetTimeSetDriveFormat']['format'] = None
self._defaults['GetTimeSetEdge'] = {}
self._defaults['GetTimeSetEdge']['return'] = 0
self._defaults['GetTimeSetEdge']['time'] = None
self._defaults['GetTimeSetEdgeMultiplier'] = {}
self._defaults['GetTimeSetEdgeMultiplier']['return'] = 0
self._defaults['GetTimeSetEdgeMultiplier']['edgeMultiplier'] = None
self._defaults['GetTimeSetName'] = {}
self._defaults['GetTimeSetName']['return'] = 0
self._defaults['GetTimeSetName']['name'] = None
self._defaults['GetTimeSetPeriod'] = {}
self._defaults['GetTimeSetPeriod']['return'] = 0
self._defaults['GetTimeSetPeriod']['period'] = None
self._defaults['InitWithOptions'] = {}
self._defaults['InitWithOptions']['return'] = 0
self._defaults['InitWithOptions']['newVi'] = None
self._defaults['Initiate'] = {}
self._defaults['Initiate']['return'] = 0
self._defaults['IsDone'] = {}
self._defaults['IsDone']['return'] = 0
self._defaults['IsDone']['done'] = None
self._defaults['IsSiteEnabled'] = {}
self._defaults['IsSiteEnabled']['return'] = 0
self._defaults['IsSiteEnabled']['enable'] = None
self._defaults['LoadLevels'] = {}
self._defaults['LoadLevels']['return'] = 0
self._defaults['LoadPattern'] = {}
self._defaults['LoadPattern']['return'] = 0
self._defaults['LoadPinMap'] = {}
self._defaults['LoadPinMap']['return'] = 0
self._defaults['LoadSpecifications'] = {}
self._defaults['LoadSpecifications']['return'] = 0
self._defaults['LoadTiming'] = {}
self._defaults['LoadTiming']['return'] = 0
self._defaults['LockSession'] = {}
self._defaults['LockSession']['return'] = 0
self._defaults['LockSession']['callerHasLock'] = None
self._defaults['PPMU_Measure'] = {}
self._defaults['PPMU_Measure']['return'] = 0
self._defaults['PPMU_Measure']['actualNumRead'] = None
self._defaults['PPMU_Measure']['measurements'] = None
self._defaults['PPMU_Source'] = {}
self._defaults['PPMU_Source']['return'] = 0
self._defaults['ReadSequencerFlag'] = {}
self._defaults['ReadSequencerFlag']['return'] = 0
self._defaults['ReadSequencerFlag']['value'] = None
self._defaults['ReadSequencerRegister'] = {}
self._defaults['ReadSequencerRegister']['return'] = 0
self._defaults['ReadSequencerRegister']['value'] = None
self._defaults['ReadStatic'] = {}
self._defaults['ReadStatic']['return'] = 0
self._defaults['ReadStatic']['actualNumRead'] = None
self._defaults['ReadStatic']['data'] = None
self._defaults['ResetDevice'] = {}
self._defaults['ResetDevice']['return'] = 0
self._defaults['SelfCalibrate'] = {}
self._defaults['SelfCalibrate']['return'] = 0
self._defaults['SendSoftwareEdgeTrigger'] = {}
self._defaults['SendSoftwareEdgeTrigger']['return'] = 0
self._defaults['SetAttributeViBoolean'] = {}
self._defaults['SetAttributeViBoolean']['return'] = 0
self._defaults['SetAttributeViInt32'] = {}
self._defaults['SetAttributeViInt32']['return'] = 0
self._defaults['SetAttributeViInt64'] = {}
self._defaults['SetAttributeViInt64']['return'] = 0
self._defaults['SetAttributeViReal64'] = {}
self._defaults['SetAttributeViReal64']['return'] = 0
self._defaults['SetAttributeViString'] = {}
self._defaults['SetAttributeViString']['return'] = 0
self._defaults['TDR'] = {}
self._defaults['TDR']['return'] = 0
self._defaults['TDR']['actualNumOffsets'] = None
self._defaults['TDR']['offsets'] = None
self._defaults['UnloadAllPatterns'] = {}
self._defaults['UnloadAllPatterns']['return'] = 0
self._defaults['UnloadSpecifications'] = {}
self._defaults['UnloadSpecifications']['return'] = 0
self._defaults['UnlockSession'] = {}
self._defaults['UnlockSession']['return'] = 0
self._defaults['UnlockSession']['callerHasLock'] = None
self._defaults['WaitUntilDone'] = {}
self._defaults['WaitUntilDone']['return'] = 0
self._defaults['WriteSequencerFlag'] = {}
self._defaults['WriteSequencerFlag']['return'] = 0
self._defaults['WriteSequencerRegister'] = {}
self._defaults['WriteSequencerRegister']['return'] = 0
self._defaults['WriteSourceWaveformBroadcastU32'] = {}
self._defaults['WriteSourceWaveformBroadcastU32']['return'] = 0
self._defaults['WriteSourceWaveformDataFromFileTDMS'] = {}
self._defaults['WriteSourceWaveformDataFromFileTDMS']['return'] = 0
self._defaults['WriteSourceWaveformSiteUniqueU32'] = {}
self._defaults['WriteSourceWaveformSiteUniqueU32']['return'] = 0
self._defaults['WriteStatic'] = {}
self._defaults['WriteStatic']['return'] = 0
self._defaults['close'] = {}
self._defaults['close']['return'] = 0
self._defaults['error_message'] = {}
self._defaults['error_message']['return'] = 0
self._defaults['error_message']['errorMessage'] = None
self._defaults['reset'] = {}
self._defaults['reset']['return'] = 0
self._defaults['self_test'] = {}
self._defaults['self_test']['return'] = 0
self._defaults['self_test']['testResult'] = None
self._defaults['self_test']['testMessage'] = None
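    # The defaults table can be read and written with dict-style access, e.g.
    # (illustrative) helper['GetAttributeViInt32']['value'] = 5 makes the next
    # niDigital_GetAttributeViInt32 call report 5 through its output pointer.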
def __getitem__(self, func):
return self._defaults[func]
def __setitem__(self, func, val):
self._defaults[func] = val
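    # Each niDigital_* method below stands in for the matching C entry point of the
    # driver library: it first propagates any non-zero status configured for that
    # function, then copies the configured output defaults into the ctypes output
    # arguments, raising MockFunctionCallError when a test has not supplied one.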
def niDigital_Abort(self, vi): # noqa: N802
if self._defaults['Abort']['return'] != 0:
return self._defaults['Abort']['return']
return self._defaults['Abort']['return']
def niDigital_AbortKeepAlive(self, vi): # noqa: N802
if self._defaults['AbortKeepAlive']['return'] != 0:
return self._defaults['AbortKeepAlive']['return']
return self._defaults['AbortKeepAlive']['return']
def niDigital_ApplyLevelsAndTiming(self, vi, site_list, levels_sheet, timing_sheet, initial_state_high_pins, initial_state_low_pins, initial_state_tristate_pins): # noqa: N802
if self._defaults['ApplyLevelsAndTiming']['return'] != 0:
return self._defaults['ApplyLevelsAndTiming']['return']
return self._defaults['ApplyLevelsAndTiming']['return']
def niDigital_ApplyTDROffsets(self, vi, channel_list, num_offsets, offsets): # noqa: N802
if self._defaults['ApplyTDROffsets']['return'] != 0:
return self._defaults['ApplyTDROffsets']['return']
return self._defaults['ApplyTDROffsets']['return']
def niDigital_BurstPattern(self, vi, site_list, start_label, select_digital_function, wait_until_done, timeout): # noqa: N802
if self._defaults['BurstPattern']['return'] != 0:
return self._defaults['BurstPattern']['return']
return self._defaults['BurstPattern']['return']
def niDigital_ClockGenerator_Abort(self, vi, channel_list): # noqa: N802
if self._defaults['ClockGenerator_Abort']['return'] != 0:
return self._defaults['ClockGenerator_Abort']['return']
return self._defaults['ClockGenerator_Abort']['return']
def niDigital_ClockGenerator_GenerateClock(self, vi, channel_list, frequency, select_digital_function): # noqa: N802
if self._defaults['ClockGenerator_GenerateClock']['return'] != 0:
return self._defaults['ClockGenerator_GenerateClock']['return']
return self._defaults['ClockGenerator_GenerateClock']['return']
def niDigital_Commit(self, vi): # noqa: N802
if self._defaults['Commit']['return'] != 0:
return self._defaults['Commit']['return']
return self._defaults['Commit']['return']
def niDigital_ConfigureActiveLoadLevels(self, vi, channel_list, iol, ioh, vcom): # noqa: N802
if self._defaults['ConfigureActiveLoadLevels']['return'] != 0:
return self._defaults['ConfigureActiveLoadLevels']['return']
return self._defaults['ConfigureActiveLoadLevels']['return']
def niDigital_ConfigurePatternBurstSites(self, vi, site_list): # noqa: N802
if self._defaults['ConfigurePatternBurstSites']['return'] != 0:
return self._defaults['ConfigurePatternBurstSites']['return']
return self._defaults['ConfigurePatternBurstSites']['return']
def niDigital_ConfigureTimeSetCompareEdgesStrobe(self, vi, pin_list, time_set_name, strobe_edge): # noqa: N802
if self._defaults['ConfigureTimeSetCompareEdgesStrobe']['return'] != 0:
return self._defaults['ConfigureTimeSetCompareEdgesStrobe']['return']
return self._defaults['ConfigureTimeSetCompareEdgesStrobe']['return']
def niDigital_ConfigureTimeSetCompareEdgesStrobe2x(self, vi, pin_list, time_set_name, strobe_edge, strobe2_edge): # noqa: N802
if self._defaults['ConfigureTimeSetCompareEdgesStrobe2x']['return'] != 0:
return self._defaults['ConfigureTimeSetCompareEdgesStrobe2x']['return']
return self._defaults['ConfigureTimeSetCompareEdgesStrobe2x']['return']
def niDigital_ConfigureTimeSetDriveEdges(self, vi, pin_list, time_set_name, format, drive_on_edge, drive_data_edge, drive_return_edge, drive_off_edge): # noqa: N802
if self._defaults['ConfigureTimeSetDriveEdges']['return'] != 0:
return self._defaults['ConfigureTimeSetDriveEdges']['return']
return self._defaults['ConfigureTimeSetDriveEdges']['return']
def niDigital_ConfigureTimeSetDriveEdges2x(self, vi, pin_list, time_set_name, format, drive_on_edge, drive_data_edge, drive_return_edge, drive_off_edge, drive_data2_edge, drive_return2_edge): # noqa: N802
if self._defaults['ConfigureTimeSetDriveEdges2x']['return'] != 0:
return self._defaults['ConfigureTimeSetDriveEdges2x']['return']
return self._defaults['ConfigureTimeSetDriveEdges2x']['return']
def niDigital_ConfigureTimeSetDriveFormat(self, vi, pin_list, time_set_name, drive_format): # noqa: N802
if self._defaults['ConfigureTimeSetDriveFormat']['return'] != 0:
return self._defaults['ConfigureTimeSetDriveFormat']['return']
return self._defaults['ConfigureTimeSetDriveFormat']['return']
def niDigital_ConfigureTimeSetEdge(self, vi, pin_list, time_set_name, edge, time): # noqa: N802
if self._defaults['ConfigureTimeSetEdge']['return'] != 0:
return self._defaults['ConfigureTimeSetEdge']['return']
return self._defaults['ConfigureTimeSetEdge']['return']
def niDigital_ConfigureTimeSetEdgeMultiplier(self, vi, pin_list, time_set_name, edge_multiplier): # noqa: N802
if self._defaults['ConfigureTimeSetEdgeMultiplier']['return'] != 0:
return self._defaults['ConfigureTimeSetEdgeMultiplier']['return']
return self._defaults['ConfigureTimeSetEdgeMultiplier']['return']
def niDigital_ConfigureTimeSetPeriod(self, vi, time_set_name, period): # noqa: N802
if self._defaults['ConfigureTimeSetPeriod']['return'] != 0:
return self._defaults['ConfigureTimeSetPeriod']['return']
return self._defaults['ConfigureTimeSetPeriod']['return']
def niDigital_ConfigureVoltageLevels(self, vi, channel_list, vil, vih, vol, voh, vterm): # noqa: N802
if self._defaults['ConfigureVoltageLevels']['return'] != 0:
return self._defaults['ConfigureVoltageLevels']['return']
return self._defaults['ConfigureVoltageLevels']['return']
def niDigital_CreateCaptureWaveformFromFileDigicapture(self, vi, waveform_name, waveform_file_path): # noqa: N802
if self._defaults['CreateCaptureWaveformFromFileDigicapture']['return'] != 0:
return self._defaults['CreateCaptureWaveformFromFileDigicapture']['return']
return self._defaults['CreateCaptureWaveformFromFileDigicapture']['return']
def niDigital_CreateCaptureWaveformParallel(self, vi, pin_list, waveform_name): # noqa: N802
if self._defaults['CreateCaptureWaveformParallel']['return'] != 0:
return self._defaults['CreateCaptureWaveformParallel']['return']
return self._defaults['CreateCaptureWaveformParallel']['return']
def niDigital_CreateCaptureWaveformSerial(self, vi, pin_list, waveform_name, sample_width, bit_order): # noqa: N802
if self._defaults['CreateCaptureWaveformSerial']['return'] != 0:
return self._defaults['CreateCaptureWaveformSerial']['return']
return self._defaults['CreateCaptureWaveformSerial']['return']
def niDigital_CreateSourceWaveformFromFileTDMS(self, vi, waveform_name, waveform_file_path, write_waveform_data): # noqa: N802
if self._defaults['CreateSourceWaveformFromFileTDMS']['return'] != 0:
return self._defaults['CreateSourceWaveformFromFileTDMS']['return']
return self._defaults['CreateSourceWaveformFromFileTDMS']['return']
def niDigital_CreateSourceWaveformParallel(self, vi, pin_list, waveform_name, data_mapping): # noqa: N802
if self._defaults['CreateSourceWaveformParallel']['return'] != 0:
return self._defaults['CreateSourceWaveformParallel']['return']
return self._defaults['CreateSourceWaveformParallel']['return']
def niDigital_CreateSourceWaveformSerial(self, vi, pin_list, waveform_name, data_mapping, sample_width, bit_order): # noqa: N802
if self._defaults['CreateSourceWaveformSerial']['return'] != 0:
return self._defaults['CreateSourceWaveformSerial']['return']
return self._defaults['CreateSourceWaveformSerial']['return']
def niDigital_CreateTimeSet(self, vi, name): # noqa: N802
if self._defaults['CreateTimeSet']['return'] != 0:
return self._defaults['CreateTimeSet']['return']
return self._defaults['CreateTimeSet']['return']
def niDigital_DeleteAllTimeSets(self, vi): # noqa: N802
if self._defaults['DeleteAllTimeSets']['return'] != 0:
return self._defaults['DeleteAllTimeSets']['return']
return self._defaults['DeleteAllTimeSets']['return']
def niDigital_DisableSites(self, vi, site_list): # noqa: N802
if self._defaults['DisableSites']['return'] != 0:
return self._defaults['DisableSites']['return']
return self._defaults['DisableSites']['return']
def niDigital_EnableSites(self, vi, site_list): # noqa: N802
if self._defaults['EnableSites']['return'] != 0:
return self._defaults['EnableSites']['return']
return self._defaults['EnableSites']['return']
def niDigital_FetchHistoryRAMCycleInformation(self, vi, site, sample_index, pattern_index, time_set_index, vector_number, cycle_number, num_dut_cycles): # noqa: N802
if self._defaults['FetchHistoryRAMCycleInformation']['return'] != 0:
return self._defaults['FetchHistoryRAMCycleInformation']['return']
# pattern_index
if self._defaults['FetchHistoryRAMCycleInformation']['patternIndex'] is None:
raise MockFunctionCallError("niDigital_FetchHistoryRAMCycleInformation", param='patternIndex')
if pattern_index is not None:
pattern_index.contents.value = self._defaults['FetchHistoryRAMCycleInformation']['patternIndex']
# time_set_index
if self._defaults['FetchHistoryRAMCycleInformation']['timeSetIndex'] is None:
raise MockFunctionCallError("niDigital_FetchHistoryRAMCycleInformation", param='timeSetIndex')
if time_set_index is not None:
time_set_index.contents.value = self._defaults['FetchHistoryRAMCycleInformation']['timeSetIndex']
# vector_number
if self._defaults['FetchHistoryRAMCycleInformation']['vectorNumber'] is None:
raise MockFunctionCallError("niDigital_FetchHistoryRAMCycleInformation", param='vectorNumber')
if vector_number is not None:
vector_number.contents.value = self._defaults['FetchHistoryRAMCycleInformation']['vectorNumber']
# cycle_number
if self._defaults['FetchHistoryRAMCycleInformation']['cycleNumber'] is None:
raise MockFunctionCallError("niDigital_FetchHistoryRAMCycleInformation", param='cycleNumber')
if cycle_number is not None:
cycle_number.contents.value = self._defaults['FetchHistoryRAMCycleInformation']['cycleNumber']
# num_dut_cycles
if self._defaults['FetchHistoryRAMCycleInformation']['numDutCycles'] is None:
raise MockFunctionCallError("niDigital_FetchHistoryRAMCycleInformation", param='numDutCycles')
if num_dut_cycles is not None:
num_dut_cycles.contents.value = self._defaults['FetchHistoryRAMCycleInformation']['numDutCycles']
return self._defaults['FetchHistoryRAMCycleInformation']['return']
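    # Array-output mocks below follow the driver's two-pass size-query convention:
    # when the caller passes a buffer size of zero, the mock returns the number of
    # configured elements instead of filling the buffer, so bindings can allocate
    # and call again; otherwise the configured values are copied element by element.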
def niDigital_FetchHistoryRAMCyclePinData(self, vi, site, pin_list, sample_index, dut_cycle_index, pin_data_buffer_size, expected_pin_states, actual_pin_states, per_pin_pass_fail, actual_num_pin_data): # noqa: N802
if self._defaults['FetchHistoryRAMCyclePinData']['return'] != 0:
return self._defaults['FetchHistoryRAMCyclePinData']['return']
# actual_num_pin_data
if self._defaults['FetchHistoryRAMCyclePinData']['actualNumPinData'] is None:
raise MockFunctionCallError("niDigital_FetchHistoryRAMCyclePinData", param='actualNumPinData')
if actual_num_pin_data is not None:
actual_num_pin_data.contents.value = self._defaults['FetchHistoryRAMCyclePinData']['actualNumPinData']
if self._defaults['FetchHistoryRAMCyclePinData']['expectedPinStates'] is None:
raise MockFunctionCallError("niDigital_FetchHistoryRAMCyclePinData", param='expectedPinStates')
if pin_data_buffer_size.value == 0:
return len(self._defaults['FetchHistoryRAMCyclePinData']['expectedPinStates'])
try:
expected_pin_states_ref = expected_pin_states.contents
except AttributeError:
expected_pin_states_ref = expected_pin_states
for i in range(len(self._defaults['FetchHistoryRAMCyclePinData']['expectedPinStates'])):
expected_pin_states_ref[i] = self._defaults['FetchHistoryRAMCyclePinData']['expectedPinStates'][i]
if self._defaults['FetchHistoryRAMCyclePinData']['actualPinStates'] is None:
raise MockFunctionCallError("niDigital_FetchHistoryRAMCyclePinData", param='actualPinStates')
if pin_data_buffer_size.value == 0:
return len(self._defaults['FetchHistoryRAMCyclePinData']['actualPinStates'])
try:
actual_pin_states_ref = actual_pin_states.contents
except AttributeError:
actual_pin_states_ref = actual_pin_states
for i in range(len(self._defaults['FetchHistoryRAMCyclePinData']['actualPinStates'])):
actual_pin_states_ref[i] = self._defaults['FetchHistoryRAMCyclePinData']['actualPinStates'][i]
if self._defaults['FetchHistoryRAMCyclePinData']['perPinPassFail'] is None:
raise MockFunctionCallError("niDigital_FetchHistoryRAMCyclePinData", param='perPinPassFail')
if pin_data_buffer_size.value == 0:
return len(self._defaults['FetchHistoryRAMCyclePinData']['perPinPassFail'])
try:
per_pin_pass_fail_ref = per_pin_pass_fail.contents
except AttributeError:
per_pin_pass_fail_ref = per_pin_pass_fail
for i in range(len(self._defaults['FetchHistoryRAMCyclePinData']['perPinPassFail'])):
per_pin_pass_fail_ref[i] = self._defaults['FetchHistoryRAMCyclePinData']['perPinPassFail'][i]
return self._defaults['FetchHistoryRAMCyclePinData']['return']
def niDigital_FetchHistoryRAMScanCycleNumber(self, vi, site, sample_index, scan_cycle_number): # noqa: N802
if self._defaults['FetchHistoryRAMScanCycleNumber']['return'] != 0:
return self._defaults['FetchHistoryRAMScanCycleNumber']['return']
# scan_cycle_number
if self._defaults['FetchHistoryRAMScanCycleNumber']['scanCycleNumber'] is None:
raise MockFunctionCallError("niDigital_FetchHistoryRAMScanCycleNumber", param='scanCycleNumber')
if scan_cycle_number is not None:
scan_cycle_number.contents.value = self._defaults['FetchHistoryRAMScanCycleNumber']['scanCycleNumber']
return self._defaults['FetchHistoryRAMScanCycleNumber']['return']
def niDigital_FrequencyCounter_MeasureFrequency(self, vi, channel_list, frequencies_buffer_size, frequencies, actual_num_frequencies): # noqa: N802
if self._defaults['FrequencyCounter_MeasureFrequency']['return'] != 0:
return self._defaults['FrequencyCounter_MeasureFrequency']['return']
# actual_num_frequencies
if self._defaults['FrequencyCounter_MeasureFrequency']['actualNumFrequencies'] is None:
raise MockFunctionCallError("niDigital_FrequencyCounter_MeasureFrequency", param='actualNumFrequencies')
if actual_num_frequencies is not None:
actual_num_frequencies.contents.value = self._defaults['FrequencyCounter_MeasureFrequency']['actualNumFrequencies']
if self._defaults['FrequencyCounter_MeasureFrequency']['frequencies'] is None:
raise MockFunctionCallError("niDigital_FrequencyCounter_MeasureFrequency", param='frequencies')
if frequencies_buffer_size.value == 0:
return len(self._defaults['FrequencyCounter_MeasureFrequency']['frequencies'])
try:
frequencies_ref = frequencies.contents
except AttributeError:
frequencies_ref = frequencies
for i in range(len(self._defaults['FrequencyCounter_MeasureFrequency']['frequencies'])):
frequencies_ref[i] = self._defaults['FrequencyCounter_MeasureFrequency']['frequencies'][i]
return self._defaults['FrequencyCounter_MeasureFrequency']['return']
def niDigital_GetAttributeViBoolean(self, vi, channel_name, attribute, value): # noqa: N802
if self._defaults['GetAttributeViBoolean']['return'] != 0:
return self._defaults['GetAttributeViBoolean']['return']
# value
if self._defaults['GetAttributeViBoolean']['value'] is None:
raise MockFunctionCallError("niDigital_GetAttributeViBoolean", param='value')
if value is not None:
value.contents.value = self._defaults['GetAttributeViBoolean']['value']
return self._defaults['GetAttributeViBoolean']['return']
def niDigital_GetAttributeViInt32(self, vi, channel_name, attribute, value): # noqa: N802
if self._defaults['GetAttributeViInt32']['return'] != 0:
return self._defaults['GetAttributeViInt32']['return']
# value
if self._defaults['GetAttributeViInt32']['value'] is None:
raise MockFunctionCallError("niDigital_GetAttributeViInt32", param='value')
if value is not None:
value.contents.value = self._defaults['GetAttributeViInt32']['value']
return self._defaults['GetAttributeViInt32']['return']
def niDigital_GetAttributeViInt64(self, vi, channel_name, attribute, value): # noqa: N802
if self._defaults['GetAttributeViInt64']['return'] != 0:
return self._defaults['GetAttributeViInt64']['return']
# value
if self._defaults['GetAttributeViInt64']['value'] is None:
raise MockFunctionCallError("niDigital_GetAttributeViInt64", param='value')
if value is not None:
value.contents.value = self._defaults['GetAttributeViInt64']['value']
return self._defaults['GetAttributeViInt64']['return']
def niDigital_GetAttributeViReal64(self, vi, channel_name, attribute, value): # noqa: N802
if self._defaults['GetAttributeViReal64']['return'] != 0:
return self._defaults['GetAttributeViReal64']['return']
# value
if self._defaults['GetAttributeViReal64']['value'] is None:
raise MockFunctionCallError("niDigital_GetAttributeViReal64", param='value')
if value is not None:
value.contents.value = self._defaults['GetAttributeViReal64']['value']
return self._defaults['GetAttributeViReal64']['return']
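    # String-output mocks use the same size-query convention and write the
    # configured Python string into the ctypes buffer as ASCII bytes.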
def niDigital_GetAttributeViString(self, vi, channel_name, attribute, buffer_size, value): # noqa: N802
if self._defaults['GetAttributeViString']['return'] != 0:
return self._defaults['GetAttributeViString']['return']
if self._defaults['GetAttributeViString']['value'] is None:
raise MockFunctionCallError("niDigital_GetAttributeViString", param='value')
if buffer_size.value == 0:
return len(self._defaults['GetAttributeViString']['value'])
value.value = self._defaults['GetAttributeViString']['value'].encode('ascii')
return self._defaults['GetAttributeViString']['return']
def niDigital_GetChannelNameFromString(self, vi, indices, name_buffer_size, names): # noqa: N802
if self._defaults['GetChannelNameFromString']['return'] != 0:
return self._defaults['GetChannelNameFromString']['return']
if self._defaults['GetChannelNameFromString']['names'] is None:
raise MockFunctionCallError("niDigital_GetChannelNameFromString", param='names')
if name_buffer_size.value == 0:
return len(self._defaults['GetChannelNameFromString']['names'])
names.value = self._defaults['GetChannelNameFromString']['names'].encode('ascii')
return self._defaults['GetChannelNameFromString']['return']
def niDigital_GetError(self, vi, error_code, error_description_buffer_size, error_description): # noqa: N802
if self._defaults['GetError']['return'] != 0:
return self._defaults['GetError']['return']
# error_code
if self._defaults['GetError']['errorCode'] is None:
raise MockFunctionCallError("niDigital_GetError", param='errorCode')
if error_code is not None:
error_code.contents.value = self._defaults['GetError']['errorCode']
if self._defaults['GetError']['errorDescription'] is None:
raise MockFunctionCallError("niDigital_GetError", param='errorDescription')
if error_description_buffer_size.value == 0:
return len(self._defaults['GetError']['errorDescription'])
error_description.value = self._defaults['GetError']['errorDescription'].encode('ascii')
return self._defaults['GetError']['return']
def niDigital_GetFailCount(self, vi, channel_list, buffer_size, failure_count, actual_num_read): # noqa: N802
if self._defaults['GetFailCount']['return'] != 0:
return self._defaults['GetFailCount']['return']
# actual_num_read
if self._defaults['GetFailCount']['actualNumRead'] is None:
raise MockFunctionCallError("niDigital_GetFailCount", param='actualNumRead')
if actual_num_read is not None:
actual_num_read.contents.value = self._defaults['GetFailCount']['actualNumRead']
if self._defaults['GetFailCount']['failureCount'] is None:
raise MockFunctionCallError("niDigital_GetFailCount", param='failureCount')
if buffer_size.value == 0:
return len(self._defaults['GetFailCount']['failureCount'])
try:
failure_count_ref = failure_count.contents
except AttributeError:
failure_count_ref = failure_count
for i in range(len(self._defaults['GetFailCount']['failureCount'])):
failure_count_ref[i] = self._defaults['GetFailCount']['failureCount'][i]
return self._defaults['GetFailCount']['return']
def niDigital_GetHistoryRAMSampleCount(self, vi, site, sample_count): # noqa: N802
if self._defaults['GetHistoryRAMSampleCount']['return'] != 0:
return self._defaults['GetHistoryRAMSampleCount']['return']
# sample_count
if self._defaults['GetHistoryRAMSampleCount']['sampleCount'] is None:
raise MockFunctionCallError("niDigital_GetHistoryRAMSampleCount", param='sampleCount')
if sample_count is not None:
sample_count.contents.value = self._defaults['GetHistoryRAMSampleCount']['sampleCount']
return self._defaults['GetHistoryRAMSampleCount']['return']
def niDigital_GetPatternName(self, vi, pattern_index, name_buffer_size, name): # noqa: N802
if self._defaults['GetPatternName']['return'] != 0:
return self._defaults['GetPatternName']['return']
if self._defaults['GetPatternName']['name'] is None:
raise MockFunctionCallError("niDigital_GetPatternName", param='name')
if name_buffer_size.value == 0:
return len(self._defaults['GetPatternName']['name'])
name.value = self._defaults['GetPatternName']['name'].encode('ascii')
return self._defaults['GetPatternName']['return']
def niDigital_GetPatternPinList(self, vi, start_label, pin_list_buffer_size, pin_list): # noqa: N802
if self._defaults['GetPatternPinList']['return'] != 0:
return self._defaults['GetPatternPinList']['return']
if self._defaults['GetPatternPinList']['pinList'] is None:
raise MockFunctionCallError("niDigital_GetPatternPinList", param='pinList')
if pin_list_buffer_size.value == 0:
return len(self._defaults['GetPatternPinList']['pinList'])
pin_list.value = self._defaults['GetPatternPinList']['pinList'].encode('ascii')
return self._defaults['GetPatternPinList']['return']
def niDigital_GetPinName(self, vi, pin_index, name_buffer_size, name): # noqa: N802
if self._defaults['GetPinName']['return'] != 0:
return self._defaults['GetPinName']['return']
if self._defaults['GetPinName']['name'] is None:
raise MockFunctionCallError("niDigital_GetPinName", param='name')
if name_buffer_size.value == 0:
return len(self._defaults['GetPinName']['name'])
name.value = self._defaults['GetPinName']['name'].encode('ascii')
return self._defaults['GetPinName']['return']
def niDigital_GetPinResultsPinInformation(self, vi, channel_list, buffer_size, pin_indexes, site_numbers, channel_indexes, actual_num_values): # noqa: N802
if self._defaults['GetPinResultsPinInformation']['return'] != 0:
return self._defaults['GetPinResultsPinInformation']['return']
# actual_num_values
if self._defaults['GetPinResultsPinInformation']['actualNumValues'] is None:
raise MockFunctionCallError("niDigital_GetPinResultsPinInformation", param='actualNumValues')
if actual_num_values is not None:
actual_num_values.contents.value = self._defaults['GetPinResultsPinInformation']['actualNumValues']
if self._defaults['GetPinResultsPinInformation']['pinIndexes'] is None:
raise MockFunctionCallError("niDigital_GetPinResultsPinInformation", param='pinIndexes')
if buffer_size.value == 0:
return len(self._defaults['GetPinResultsPinInformation']['pinIndexes'])
try:
pin_indexes_ref = pin_indexes.contents
except AttributeError:
pin_indexes_ref = pin_indexes
for i in range(len(self._defaults['GetPinResultsPinInformation']['pinIndexes'])):
pin_indexes_ref[i] = self._defaults['GetPinResultsPinInformation']['pinIndexes'][i]
if self._defaults['GetPinResultsPinInformation']['siteNumbers'] is None:
raise MockFunctionCallError("niDigital_GetPinResultsPinInformation", param='siteNumbers')
if buffer_size.value == 0:
return len(self._defaults['GetPinResultsPinInformation']['siteNumbers'])
try:
site_numbers_ref = site_numbers.contents
except AttributeError:
site_numbers_ref = site_numbers
for i in range(len(self._defaults['GetPinResultsPinInformation']['siteNumbers'])):
site_numbers_ref[i] = self._defaults['GetPinResultsPinInformation']['siteNumbers'][i]
if self._defaults['GetPinResultsPinInformation']['channelIndexes'] is None:
raise MockFunctionCallError("niDigital_GetPinResultsPinInformation", param='channelIndexes')
if buffer_size.value == 0:
return len(self._defaults['GetPinResultsPinInformation']['channelIndexes'])
try:
channel_indexes_ref = channel_indexes.contents
except AttributeError:
channel_indexes_ref = channel_indexes
for i in range(len(self._defaults['GetPinResultsPinInformation']['channelIndexes'])):
channel_indexes_ref[i] = self._defaults['GetPinResultsPinInformation']['channelIndexes'][i]
return self._defaults['GetPinResultsPinInformation']['return']
def niDigital_GetSitePassFail(self, vi, site_list, pass_fail_buffer_size, pass_fail, actual_num_sites): # noqa: N802
if self._defaults['GetSitePassFail']['return'] != 0:
return self._defaults['GetSitePassFail']['return']
# actual_num_sites
if self._defaults['GetSitePassFail']['actualNumSites'] is None:
raise MockFunctionCallError("niDigital_GetSitePassFail", param='actualNumSites')
if actual_num_sites is not None:
actual_num_sites.contents.value = self._defaults['GetSitePassFail']['actualNumSites']
if self._defaults['GetSitePassFail']['passFail'] is None:
raise MockFunctionCallError("niDigital_GetSitePassFail", param='passFail')
if pass_fail_buffer_size.value == 0:
return len(self._defaults['GetSitePassFail']['passFail'])
try:
pass_fail_ref = pass_fail.contents
except AttributeError:
pass_fail_ref = pass_fail
for i in range(len(self._defaults['GetSitePassFail']['passFail'])):
pass_fail_ref[i] = self._defaults['GetSitePassFail']['passFail'][i]
return self._defaults['GetSitePassFail']['return']
def niDigital_GetSiteResultsSiteNumbers(self, vi, site_list, site_result_type, site_numbers_buffer_size, site_numbers, actual_num_site_numbers): # noqa: N802
if self._defaults['GetSiteResultsSiteNumbers']['return'] != 0:
return self._defaults['GetSiteResultsSiteNumbers']['return']
# actual_num_site_numbers
if self._defaults['GetSiteResultsSiteNumbers']['actualNumSiteNumbers'] is None:
raise MockFunctionCallError("niDigital_GetSiteResultsSiteNumbers", param='actualNumSiteNumbers')
if actual_num_site_numbers is not None:
actual_num_site_numbers.contents.value = self._defaults['GetSiteResultsSiteNumbers']['actualNumSiteNumbers']
if self._defaults['GetSiteResultsSiteNumbers']['siteNumbers'] is None:
raise MockFunctionCallError("niDigital_GetSiteResultsSiteNumbers", param='siteNumbers')
if site_numbers_buffer_size.value == 0:
return len(self._defaults['GetSiteResultsSiteNumbers']['siteNumbers'])
try:
site_numbers_ref = site_numbers.contents
except AttributeError:
site_numbers_ref = site_numbers
for i in range(len(self._defaults['GetSiteResultsSiteNumbers']['siteNumbers'])):
site_numbers_ref[i] = self._defaults['GetSiteResultsSiteNumbers']['siteNumbers'][i]
return self._defaults['GetSiteResultsSiteNumbers']['return']
def niDigital_GetTimeSetDriveFormat(self, vi, pin, time_set_name, format): # noqa: N802
if self._defaults['GetTimeSetDriveFormat']['return'] != 0:
return self._defaults['GetTimeSetDriveFormat']['return']
# format
if self._defaults['GetTimeSetDriveFormat']['format'] is None:
raise MockFunctionCallError("niDigital_GetTimeSetDriveFormat", param='format')
if format is not None:
format.contents.value = self._defaults['GetTimeSetDriveFormat']['format']
return self._defaults['GetTimeSetDriveFormat']['return']
def niDigital_GetTimeSetEdge(self, vi, pin, time_set_name, edge, time): # noqa: N802
if self._defaults['GetTimeSetEdge']['return'] != 0:
return self._defaults['GetTimeSetEdge']['return']
# time
if self._defaults['GetTimeSetEdge']['time'] is None:
raise MockFunctionCallError("niDigital_GetTimeSetEdge", param='time')
if time is not None:
time.contents.value = self._defaults['GetTimeSetEdge']['time']
return self._defaults['GetTimeSetEdge']['return']
def niDigital_GetTimeSetEdgeMultiplier(self, vi, pin, time_set_name, edge_multiplier): # noqa: N802
if self._defaults['GetTimeSetEdgeMultiplier']['return'] != 0:
return self._defaults['GetTimeSetEdgeMultiplier']['return']
# edge_multiplier
if self._defaults['GetTimeSetEdgeMultiplier']['edgeMultiplier'] is None:
raise MockFunctionCallError("niDigital_GetTimeSetEdgeMultiplier", param='edgeMultiplier')
if edge_multiplier is not None:
edge_multiplier.contents.value = self._defaults['GetTimeSetEdgeMultiplier']['edgeMultiplier']
return self._defaults['GetTimeSetEdgeMultiplier']['return']
def niDigital_GetTimeSetName(self, vi, time_set_index, name_buffer_size, name): # noqa: N802
if self._defaults['GetTimeSetName']['return'] != 0:
return self._defaults['GetTimeSetName']['return']
if self._defaults['GetTimeSetName']['name'] is None:
raise MockFunctionCallError("niDigital_GetTimeSetName", param='name')
if name_buffer_size.value == 0:
return len(self._defaults['GetTimeSetName']['name'])
name.value = self._defaults['GetTimeSetName']['name'].encode('ascii')
return self._defaults['GetTimeSetName']['return']
def niDigital_GetTimeSetPeriod(self, vi, time_set_name, period): # noqa: N802
if self._defaults['GetTimeSetPeriod']['return'] != 0:
return self._defaults['GetTimeSetPeriod']['return']
# period
if self._defaults['GetTimeSetPeriod']['period'] is None:
raise MockFunctionCallError("niDigital_GetTimeSetPeriod", param='period')
if period is not None:
period.contents.value = self._defaults['GetTimeSetPeriod']['period']
return self._defaults['GetTimeSetPeriod']['return']
def niDigital_InitWithOptions(self, resource_name, id_query, reset_device, option_string, new_vi): # noqa: N802
if self._defaults['InitWithOptions']['return'] != 0:
return self._defaults['InitWithOptions']['return']
# new_vi
if self._defaults['InitWithOptions']['newVi'] is None:
raise MockFunctionCallError("niDigital_InitWithOptions", param='newVi')
if new_vi is not None:
new_vi.contents.value = self._defaults['InitWithOptions']['newVi']
return self._defaults['InitWithOptions']['return']
def niDigital_Initiate(self, vi): # noqa: N802
if self._defaults['Initiate']['return'] != 0:
return self._defaults['Initiate']['return']
return self._defaults['Initiate']['return']
def niDigital_IsDone(self, vi, done): # noqa: N802
if self._defaults['IsDone']['return'] != 0:
return self._defaults['IsDone']['return']
# done
if self._defaults['IsDone']['done'] is None:
raise MockFunctionCallError("niDigital_IsDone", param='done')
if done is not None:
done.contents.value = self._defaults['IsDone']['done']
return self._defaults['IsDone']['return']
def niDigital_IsSiteEnabled(self, vi, site, enable): # noqa: N802
if self._defaults['IsSiteEnabled']['return'] != 0:
return self._defaults['IsSiteEnabled']['return']
# enable
if self._defaults['IsSiteEnabled']['enable'] is None:
raise MockFunctionCallError("niDigital_IsSiteEnabled", param='enable')
if enable is not None:
enable.contents.value = self._defaults['IsSiteEnabled']['enable']
return self._defaults['IsSiteEnabled']['return']
def niDigital_LoadLevels(self, vi, file_path): # noqa: N802
if self._defaults['LoadLevels']['return'] != 0:
return self._defaults['LoadLevels']['return']
return self._defaults['LoadLevels']['return']
def niDigital_LoadPattern(self, vi, file_path): # noqa: N802
if self._defaults['LoadPattern']['return'] != 0:
return self._defaults['LoadPattern']['return']
return self._defaults['LoadPattern']['return']
def niDigital_LoadPinMap(self, vi, file_path): # noqa: N802
if self._defaults['LoadPinMap']['return'] != 0:
return self._defaults['LoadPinMap']['return']
return self._defaults['LoadPinMap']['return']
def niDigital_LoadSpecifications(self, vi, file_path): # noqa: N802
if self._defaults['LoadSpecifications']['return'] != 0:
return self._defaults['LoadSpecifications']['return']
return self._defaults['LoadSpecifications']['return']
def niDigital_LoadTiming(self, vi, file_path): # noqa: N802
if self._defaults['LoadTiming']['return'] != 0:
return self._defaults['LoadTiming']['return']
return self._defaults['LoadTiming']['return']
def niDigital_LockSession(self, vi, caller_has_lock): # noqa: N802
if self._defaults['LockSession']['return'] != 0:
return self._defaults['LockSession']['return']
# caller_has_lock
if self._defaults['LockSession']['callerHasLock'] is None:
raise MockFunctionCallError("niDigital_LockSession", param='callerHasLock')
if caller_has_lock is not None:
caller_has_lock.contents.value = self._defaults['LockSession']['callerHasLock']
return self._defaults['LockSession']['return']
def niDigital_PPMU_Measure(self, vi, channel_list, measurement_type, buffer_size, measurements, actual_num_read): # noqa: N802
if self._defaults['PPMU_Measure']['return'] != 0:
return self._defaults['PPMU_Measure']['return']
# actual_num_read
if self._defaults['PPMU_Measure']['actualNumRead'] is None:
raise MockFunctionCallError("niDigital_PPMU_Measure", param='actualNumRead')
if actual_num_read is not None:
actual_num_read.contents.value = self._defaults['PPMU_Measure']['actualNumRead']
if self._defaults['PPMU_Measure']['measurements'] is None:
raise MockFunctionCallError("niDigital_PPMU_Measure", param='measurements')
if buffer_size.value == 0:
return len(self._defaults['PPMU_Measure']['measurements'])
try:
measurements_ref = measurements.contents
except AttributeError:
measurements_ref = measurements
for i in range(len(self._defaults['PPMU_Measure']['measurements'])):
measurements_ref[i] = self._defaults['PPMU_Measure']['measurements'][i]
return self._defaults['PPMU_Measure']['return']
def niDigital_PPMU_Source(self, vi, channel_list): # noqa: N802
if self._defaults['PPMU_Source']['return'] != 0:
return self._defaults['PPMU_Source']['return']
return self._defaults['PPMU_Source']['return']
def niDigital_ReadSequencerFlag(self, vi, flag, value): # noqa: N802
if self._defaults['ReadSequencerFlag']['return'] != 0:
return self._defaults['ReadSequencerFlag']['return']
# value
if self._defaults['ReadSequencerFlag']['value'] is None:
raise MockFunctionCallError("niDigital_ReadSequencerFlag", param='value')
if value is not None:
value.contents.value = self._defaults['ReadSequencerFlag']['value']
return self._defaults['ReadSequencerFlag']['return']
def niDigital_ReadSequencerRegister(self, vi, reg, value): # noqa: N802
if self._defaults['ReadSequencerRegister']['return'] != 0:
return self._defaults['ReadSequencerRegister']['return']
# value
if self._defaults['ReadSequencerRegister']['value'] is None:
raise MockFunctionCallError("niDigital_ReadSequencerRegister", param='value')
if value is not None:
value.contents.value = self._defaults['ReadSequencerRegister']['value']
return self._defaults['ReadSequencerRegister']['return']
def niDigital_ReadStatic(self, vi, channel_list, buffer_size, data, actual_num_read): # noqa: N802
if self._defaults['ReadStatic']['return'] != 0:
return self._defaults['ReadStatic']['return']
# actual_num_read
if self._defaults['ReadStatic']['actualNumRead'] is None:
raise MockFunctionCallError("niDigital_ReadStatic", param='actualNumRead')
if actual_num_read is not None:
actual_num_read.contents.value = self._defaults['ReadStatic']['actualNumRead']
if self._defaults['ReadStatic']['data'] is None:
raise MockFunctionCallError("niDigital_ReadStatic", param='data')
if buffer_size.value == 0:
return len(self._defaults['ReadStatic']['data'])
try:
data_ref = data.contents
except AttributeError:
data_ref = data
for i in range(len(self._defaults['ReadStatic']['data'])):
data_ref[i] = self._defaults['ReadStatic']['data'][i]
return self._defaults['ReadStatic']['return']
def niDigital_ResetDevice(self, vi): # noqa: N802
if self._defaults['ResetDevice']['return'] != 0:
return self._defaults['ResetDevice']['return']
return self._defaults['ResetDevice']['return']
def niDigital_SelfCalibrate(self, vi): # noqa: N802
if self._defaults['SelfCalibrate']['return'] != 0:
return self._defaults['SelfCalibrate']['return']
return self._defaults['SelfCalibrate']['return']
def niDigital_SendSoftwareEdgeTrigger(self, vi, trigger, trigger_identifier): # noqa: N802
if self._defaults['SendSoftwareEdgeTrigger']['return'] != 0:
return self._defaults['SendSoftwareEdgeTrigger']['return']
return self._defaults['SendSoftwareEdgeTrigger']['return']
def niDigital_SetAttributeViBoolean(self, vi, channel_name, attribute, value): # noqa: N802
if self._defaults['SetAttributeViBoolean']['return'] != 0:
return self._defaults['SetAttributeViBoolean']['return']
return self._defaults['SetAttributeViBoolean']['return']
def niDigital_SetAttributeViInt32(self, vi, channel_name, attribute, value): # noqa: N802
if self._defaults['SetAttributeViInt32']['return'] != 0:
return self._defaults['SetAttributeViInt32']['return']
return self._defaults['SetAttributeViInt32']['return']
def niDigital_SetAttributeViInt64(self, vi, channel_name, attribute, value): # noqa: N802
if self._defaults['SetAttributeViInt64']['return'] != 0:
return self._defaults['SetAttributeViInt64']['return']
return self._defaults['SetAttributeViInt64']['return']
def niDigital_SetAttributeViReal64(self, vi, channel_name, attribute, value): # noqa: N802
if self._defaults['SetAttributeViReal64']['return'] != 0:
return self._defaults['SetAttributeViReal64']['return']
return self._defaults['SetAttributeViReal64']['return']
def niDigital_SetAttributeViString(self, vi, channel_name, attribute, value): # noqa: N802
if self._defaults['SetAttributeViString']['return'] != 0:
return self._defaults['SetAttributeViString']['return']
return self._defaults['SetAttributeViString']['return']
def niDigital_TDR(self, vi, channel_list, apply_offsets, offsets_buffer_size, offsets, actual_num_offsets): # noqa: N802
if self._defaults['TDR']['return'] != 0:
return self._defaults['TDR']['return']
# actual_num_offsets
if self._defaults['TDR']['actualNumOffsets'] is None:
raise MockFunctionCallError("niDigital_TDR", param='actualNumOffsets')
if actual_num_offsets is not None:
actual_num_offsets.contents.value = self._defaults['TDR']['actualNumOffsets']
if self._defaults['TDR']['offsets'] is None:
raise MockFunctionCallError("niDigital_TDR", param='offsets')
if offsets_buffer_size.value == 0:
return len(self._defaults['TDR']['offsets'])
try:
offsets_ref = offsets.contents
except AttributeError:
offsets_ref = offsets
for i in range(len(self._defaults['TDR']['offsets'])):
offsets_ref[i] = self._defaults['TDR']['offsets'][i]
return self._defaults['TDR']['return']
def niDigital_UnloadAllPatterns(self, vi, unload_keep_alive_pattern): # noqa: N802
if self._defaults['UnloadAllPatterns']['return'] != 0:
return self._defaults['UnloadAllPatterns']['return']
return self._defaults['UnloadAllPatterns']['return']
def niDigital_UnloadSpecifications(self, vi, file_path): # noqa: N802
if self._defaults['UnloadSpecifications']['return'] != 0:
return self._defaults['UnloadSpecifications']['return']
return self._defaults['UnloadSpecifications']['return']
def niDigital_UnlockSession(self, vi, caller_has_lock): # noqa: N802
if self._defaults['UnlockSession']['return'] != 0:
return self._defaults['UnlockSession']['return']
# caller_has_lock
if self._defaults['UnlockSession']['callerHasLock'] is None:
raise MockFunctionCallError("niDigital_UnlockSession", param='callerHasLock')
if caller_has_lock is not None:
caller_has_lock.contents.value = self._defaults['UnlockSession']['callerHasLock']
return self._defaults['UnlockSession']['return']
def niDigital_WaitUntilDone(self, vi, timeout): # noqa: N802
if self._defaults['WaitUntilDone']['return'] != 0:
return self._defaults['WaitUntilDone']['return']
return self._defaults['WaitUntilDone']['return']
def niDigital_WriteSequencerFlag(self, vi, flag, value): # noqa: N802
if self._defaults['WriteSequencerFlag']['return'] != 0:
return self._defaults['WriteSequencerFlag']['return']
return self._defaults['WriteSequencerFlag']['return']
def niDigital_WriteSequencerRegister(self, vi, reg, value): # noqa: N802
if self._defaults['WriteSequencerRegister']['return'] != 0:
return self._defaults['WriteSequencerRegister']['return']
return self._defaults['WriteSequencerRegister']['return']
def niDigital_WriteSourceWaveformBroadcastU32(self, vi, waveform_name, waveform_size, waveform_data): # noqa: N802
if self._defaults['WriteSourceWaveformBroadcastU32']['return'] != 0:
return self._defaults['WriteSourceWaveformBroadcastU32']['return']
return self._defaults['WriteSourceWaveformBroadcastU32']['return']
def niDigital_WriteSourceWaveformDataFromFileTDMS(self, vi, waveform_name, waveform_file_path): # noqa: N802
if self._defaults['WriteSourceWaveformDataFromFileTDMS']['return'] != 0:
return self._defaults['WriteSourceWaveformDataFromFileTDMS']['return']
return self._defaults['WriteSourceWaveformDataFromFileTDMS']['return']
def niDigital_WriteSourceWaveformSiteUniqueU32(self, vi, site_list, waveform_name, num_waveforms, samples_per_waveform, waveform_data): # noqa: N802
if self._defaults['WriteSourceWaveformSiteUniqueU32']['return'] != 0:
return self._defaults['WriteSourceWaveformSiteUniqueU32']['return']
return self._defaults['WriteSourceWaveformSiteUniqueU32']['return']
def niDigital_WriteStatic(self, vi, channel_list, state): # noqa: N802
if self._defaults['WriteStatic']['return'] != 0:
return self._defaults['WriteStatic']['return']
return self._defaults['WriteStatic']['return']
def niDigital_close(self, vi): # noqa: N802
if self._defaults['close']['return'] != 0:
return self._defaults['close']['return']
return self._defaults['close']['return']
def niDigital_error_message(self, vi, error_code, error_message): # noqa: N802
if self._defaults['error_message']['return'] != 0:
return self._defaults['error_message']['return']
# error_message
if self._defaults['error_message']['errorMessage'] is None:
raise MockFunctionCallError("niDigital_error_message", param='errorMessage')
test_value = self._defaults['error_message']['errorMessage']
if type(test_value) is str:
test_value = test_value.encode('ascii')
assert len(error_message) >= len(test_value)
for i in range(len(test_value)):
error_message[i] = test_value[i]
return self._defaults['error_message']['return']
def niDigital_reset(self, vi): # noqa: N802
if self._defaults['reset']['return'] != 0:
return self._defaults['reset']['return']
return self._defaults['reset']['return']
def niDigital_self_test(self, vi, test_result, test_message): # noqa: N802
if self._defaults['self_test']['return'] != 0:
return self._defaults['self_test']['return']
# test_result
if self._defaults['self_test']['testResult'] is None:
raise MockFunctionCallError("niDigital_self_test", param='testResult')
if test_result is not None:
test_result.contents.value = self._defaults['self_test']['testResult']
# test_message
if self._defaults['self_test']['testMessage'] is None:
raise MockFunctionCallError("niDigital_self_test", param='testMessage')
test_value = self._defaults['self_test']['testMessage']
if type(test_value) is str:
test_value = test_value.encode('ascii')
assert len(test_message) >= len(test_value)
for i in range(len(test_value)):
test_message[i] = test_value[i]
return self._defaults['self_test']['return']
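    # Sketch of typical use from a unit test (names are illustrative; the helper
    # instance and the patched ctypes library come from the test's own fixtures):
    #   side_effects_helper.set_side_effects_and_return_values(patched_library)
    #   patched_library.niDigital_IsDone.side_effect = side_effects_helper.niDigital_IsDone
    #   side_effects_helper['IsDone']['done'] = 1
    #   # ...exercise the code under test, which now sees IsDone reporting done == 1.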
    # Helper function to set up the Mock object with default side effects and return values
def set_side_effects_and_return_values(self, mock_library):
mock_library.niDigital_Abort.side_effect = MockFunctionCallError("niDigital_Abort")
mock_library.niDigital_Abort.return_value = 0
mock_library.niDigital_AbortKeepAlive.side_effect = MockFunctionCallError("niDigital_AbortKeepAlive")
mock_library.niDigital_AbortKeepAlive.return_value = 0
mock_library.niDigital_ApplyLevelsAndTiming.side_effect = MockFunctionCallError("niDigital_ApplyLevelsAndTiming")
mock_library.niDigital_ApplyLevelsAndTiming.return_value = 0
mock_library.niDigital_ApplyTDROffsets.side_effect = MockFunctionCallError("niDigital_ApplyTDROffsets")
mock_library.niDigital_ApplyTDROffsets.return_value = 0
mock_library.niDigital_BurstPattern.side_effect = MockFunctionCallError("niDigital_BurstPattern")
mock_library.niDigital_BurstPattern.return_value = 0
mock_library.niDigital_ClockGenerator_Abort.side_effect = MockFunctionCallError("niDigital_ClockGenerator_Abort")
mock_library.niDigital_ClockGenerator_Abort.return_value = 0
mock_library.niDigital_ClockGenerator_GenerateClock.side_effect = MockFunctionCallError("niDigital_ClockGenerator_GenerateClock")
mock_library.niDigital_ClockGenerator_GenerateClock.return_value = 0
mock_library.niDigital_Commit.side_effect = MockFunctionCallError("niDigital_Commit")
mock_library.niDigital_Commit.return_value = 0
mock_library.niDigital_ConfigureActiveLoadLevels.side_effect = MockFunctionCallError("niDigital_ConfigureActiveLoadLevels")
mock_library.niDigital_ConfigureActiveLoadLevels.return_value = 0
mock_library.niDigital_ConfigurePatternBurstSites.side_effect = MockFunctionCallError("niDigital_ConfigurePatternBurstSites")
mock_library.niDigital_ConfigurePatternBurstSites.return_value = 0
mock_library.niDigital_ConfigureTimeSetCompareEdgesStrobe.side_effect = MockFunctionCallError("niDigital_ConfigureTimeSetCompareEdgesStrobe")
mock_library.niDigital_ConfigureTimeSetCompareEdgesStrobe.return_value = 0
mock_library.niDigital_ConfigureTimeSetCompareEdgesStrobe2x.side_effect = MockFunctionCallError("niDigital_ConfigureTimeSetCompareEdgesStrobe2x")
mock_library.niDigital_ConfigureTimeSetCompareEdgesStrobe2x.return_value = 0
mock_library.niDigital_ConfigureTimeSetDriveEdges.side_effect = MockFunctionCallError("niDigital_ConfigureTimeSetDriveEdges")
mock_library.niDigital_ConfigureTimeSetDriveEdges.return_value = 0
mock_library.niDigital_ConfigureTimeSetDriveEdges2x.side_effect = MockFunctionCallError("niDigital_ConfigureTimeSetDriveEdges2x")
mock_library.niDigital_ConfigureTimeSetDriveEdges2x.return_value = 0
mock_library.niDigital_ConfigureTimeSetDriveFormat.side_effect = MockFunctionCallError("niDigital_ConfigureTimeSetDriveFormat")
mock_library.niDigital_ConfigureTimeSetDriveFormat.return_value = 0
mock_library.niDigital_ConfigureTimeSetEdge.side_effect = MockFunctionCallError("niDigital_ConfigureTimeSetEdge")
mock_library.niDigital_ConfigureTimeSetEdge.return_value = 0
mock_library.niDigital_ConfigureTimeSetEdgeMultiplier.side_effect = MockFunctionCallError("niDigital_ConfigureTimeSetEdgeMultiplier")
mock_library.niDigital_ConfigureTimeSetEdgeMultiplier.return_value = 0
mock_library.niDigital_ConfigureTimeSetPeriod.side_effect = MockFunctionCallError("niDigital_ConfigureTimeSetPeriod")
mock_library.niDigital_ConfigureTimeSetPeriod.return_value = 0
mock_library.niDigital_ConfigureVoltageLevels.side_effect = MockFunctionCallError("niDigital_ConfigureVoltageLevels")
mock_library.niDigital_ConfigureVoltageLevels.return_value = 0
mock_library.niDigital_CreateCaptureWaveformFromFileDigicapture.side_effect = MockFunctionCallError("niDigital_CreateCaptureWaveformFromFileDigicapture")
mock_library.niDigital_CreateCaptureWaveformFromFileDigicapture.return_value = 0
mock_library.niDigital_CreateCaptureWaveformParallel.side_effect = MockFunctionCallError("niDigital_CreateCaptureWaveformParallel")
mock_library.niDigital_CreateCaptureWaveformParallel.return_value = 0
mock_library.niDigital_CreateCaptureWaveformSerial.side_effect = MockFunctionCallError("niDigital_CreateCaptureWaveformSerial")
mock_library.niDigital_CreateCaptureWaveformSerial.return_value = 0
mock_library.niDigital_CreateSourceWaveformFromFileTDMS.side_effect = MockFunctionCallError("niDigital_CreateSourceWaveformFromFileTDMS")
mock_library.niDigital_CreateSourceWaveformFromFileTDMS.return_value = 0
mock_library.niDigital_CreateSourceWaveformParallel.side_effect = MockFunctionCallError("niDigital_CreateSourceWaveformParallel")
mock_library.niDigital_CreateSourceWaveformParallel.return_value = 0
mock_library.niDigital_CreateSourceWaveformSerial.side_effect = MockFunctionCallError("niDigital_CreateSourceWaveformSerial")
mock_library.niDigital_CreateSourceWaveformSerial.return_value = 0
mock_library.niDigital_CreateTimeSet.side_effect = MockFunctionCallError("niDigital_CreateTimeSet")
mock_library.niDigital_CreateTimeSet.return_value = 0
mock_library.niDigital_DeleteAllTimeSets.side_effect = MockFunctionCallError("niDigital_DeleteAllTimeSets")
mock_library.niDigital_DeleteAllTimeSets.return_value = 0
mock_library.niDigital_DisableSites.side_effect = MockFunctionCallError("niDigital_DisableSites")
mock_library.niDigital_DisableSites.return_value = 0
mock_library.niDigital_EnableSites.side_effect = MockFunctionCallError("niDigital_EnableSites")
mock_library.niDigital_EnableSites.return_value = 0
mock_library.niDigital_FetchHistoryRAMCycleInformation.side_effect = MockFunctionCallError("niDigital_FetchHistoryRAMCycleInformation")
mock_library.niDigital_FetchHistoryRAMCycleInformation.return_value = 0
mock_library.niDigital_FetchHistoryRAMCyclePinData.side_effect = MockFunctionCallError("niDigital_FetchHistoryRAMCyclePinData")
mock_library.niDigital_FetchHistoryRAMCyclePinData.return_value = 0
mock_library.niDigital_FetchHistoryRAMScanCycleNumber.side_effect = MockFunctionCallError("niDigital_FetchHistoryRAMScanCycleNumber")
mock_library.niDigital_FetchHistoryRAMScanCycleNumber.return_value = 0
mock_library.niDigital_FrequencyCounter_MeasureFrequency.side_effect = MockFunctionCallError("niDigital_FrequencyCounter_MeasureFrequency")
mock_library.niDigital_FrequencyCounter_MeasureFrequency.return_value = 0
mock_library.niDigital_GetAttributeViBoolean.side_effect = MockFunctionCallError("niDigital_GetAttributeViBoolean")
mock_library.niDigital_GetAttributeViBoolean.return_value = 0
mock_library.niDigital_GetAttributeViInt32.side_effect = MockFunctionCallError("niDigital_GetAttributeViInt32")
mock_library.niDigital_GetAttributeViInt32.return_value = 0
mock_library.niDigital_GetAttributeViInt64.side_effect = MockFunctionCallError("niDigital_GetAttributeViInt64")
mock_library.niDigital_GetAttributeViInt64.return_value = 0
mock_library.niDigital_GetAttributeViReal64.side_effect = MockFunctionCallError("niDigital_GetAttributeViReal64")
mock_library.niDigital_GetAttributeViReal64.return_value = 0
mock_library.niDigital_GetAttributeViString.side_effect = MockFunctionCallError("niDigital_GetAttributeViString")
mock_library.niDigital_GetAttributeViString.return_value = 0
mock_library.niDigital_GetChannelNameFromString.side_effect = MockFunctionCallError("niDigital_GetChannelNameFromString")
mock_library.niDigital_GetChannelNameFromString.return_value = 0
mock_library.niDigital_GetError.side_effect = MockFunctionCallError("niDigital_GetError")
mock_library.niDigital_GetError.return_value = 0
mock_library.niDigital_GetFailCount.side_effect = MockFunctionCallError("niDigital_GetFailCount")
mock_library.niDigital_GetFailCount.return_value = 0
mock_library.niDigital_GetHistoryRAMSampleCount.side_effect = MockFunctionCallError("niDigital_GetHistoryRAMSampleCount")
mock_library.niDigital_GetHistoryRAMSampleCount.return_value = 0
mock_library.niDigital_GetPatternName.side_effect = MockFunctionCallError("niDigital_GetPatternName")
mock_library.niDigital_GetPatternName.return_value = 0
mock_library.niDigital_GetPatternPinList.side_effect = MockFunctionCallError("niDigital_GetPatternPinList")
mock_library.niDigital_GetPatternPinList.return_value = 0
mock_library.niDigital_GetPinName.side_effect = MockFunctionCallError("niDigital_GetPinName")
mock_library.niDigital_GetPinName.return_value = 0
mock_library.niDigital_GetPinResultsPinInformation.side_effect = MockFunctionCallError("niDigital_GetPinResultsPinInformation")
mock_library.niDigital_GetPinResultsPinInformation.return_value = 0
mock_library.niDigital_GetSitePassFail.side_effect = MockFunctionCallError("niDigital_GetSitePassFail")
mock_library.niDigital_GetSitePassFail.return_value = 0
mock_library.niDigital_GetSiteResultsSiteNumbers.side_effect = MockFunctionCallError("niDigital_GetSiteResultsSiteNumbers")
mock_library.niDigital_GetSiteResultsSiteNumbers.return_value = 0
mock_library.niDigital_GetTimeSetDriveFormat.side_effect = MockFunctionCallError("niDigital_GetTimeSetDriveFormat")
mock_library.niDigital_GetTimeSetDriveFormat.return_value = 0
mock_library.niDigital_GetTimeSetEdge.side_effect = MockFunctionCallError("niDigital_GetTimeSetEdge")
mock_library.niDigital_GetTimeSetEdge.return_value = 0
mock_library.niDigital_GetTimeSetEdgeMultiplier.side_effect = MockFunctionCallError("niDigital_GetTimeSetEdgeMultiplier")
mock_library.niDigital_GetTimeSetEdgeMultiplier.return_value = 0
mock_library.niDigital_GetTimeSetName.side_effect = MockFunctionCallError("niDigital_GetTimeSetName")
mock_library.niDigital_GetTimeSetName.return_value = 0
mock_library.niDigital_GetTimeSetPeriod.side_effect = MockFunctionCallError("niDigital_GetTimeSetPeriod")
mock_library.niDigital_GetTimeSetPeriod.return_value = 0
mock_library.niDigital_InitWithOptions.side_effect = MockFunctionCallError("niDigital_InitWithOptions")
mock_library.niDigital_InitWithOptions.return_value = 0
mock_library.niDigital_Initiate.side_effect = MockFunctionCallError("niDigital_Initiate")
mock_library.niDigital_Initiate.return_value = 0
mock_library.niDigital_IsDone.side_effect = MockFunctionCallError("niDigital_IsDone")
mock_library.niDigital_IsDone.return_value = 0
mock_library.niDigital_IsSiteEnabled.side_effect = MockFunctionCallError("niDigital_IsSiteEnabled")
mock_library.niDigital_IsSiteEnabled.return_value = 0
mock_library.niDigital_LoadLevels.side_effect = MockFunctionCallError("niDigital_LoadLevels")
mock_library.niDigital_LoadLevels.return_value = 0
mock_library.niDigital_LoadPattern.side_effect = MockFunctionCallError("niDigital_LoadPattern")
mock_library.niDigital_LoadPattern.return_value = 0
mock_library.niDigital_LoadPinMap.side_effect = MockFunctionCallError("niDigital_LoadPinMap")
mock_library.niDigital_LoadPinMap.return_value = 0
mock_library.niDigital_LoadSpecifications.side_effect = MockFunctionCallError("niDigital_LoadSpecifications")
mock_library.niDigital_LoadSpecifications.return_value = 0
mock_library.niDigital_LoadTiming.side_effect = MockFunctionCallError("niDigital_LoadTiming")
mock_library.niDigital_LoadTiming.return_value = 0
mock_library.niDigital_LockSession.side_effect = MockFunctionCallError("niDigital_LockSession")
mock_library.niDigital_LockSession.return_value = 0
mock_library.niDigital_PPMU_Measure.side_effect = MockFunctionCallError("niDigital_PPMU_Measure")
mock_library.niDigital_PPMU_Measure.return_value = 0
mock_library.niDigital_PPMU_Source.side_effect = MockFunctionCallError("niDigital_PPMU_Source")
mock_library.niDigital_PPMU_Source.return_value = 0
mock_library.niDigital_ReadSequencerFlag.side_effect = MockFunctionCallError("niDigital_ReadSequencerFlag")
mock_library.niDigital_ReadSequencerFlag.return_value = 0
mock_library.niDigital_ReadSequencerRegister.side_effect = MockFunctionCallError("niDigital_ReadSequencerRegister")
mock_library.niDigital_ReadSequencerRegister.return_value = 0
mock_library.niDigital_ReadStatic.side_effect = MockFunctionCallError("niDigital_ReadStatic")
mock_library.niDigital_ReadStatic.return_value = 0
mock_library.niDigital_ResetDevice.side_effect = MockFunctionCallError("niDigital_ResetDevice")
mock_library.niDigital_ResetDevice.return_value = 0
mock_library.niDigital_SelfCalibrate.side_effect = MockFunctionCallError("niDigital_SelfCalibrate")
mock_library.niDigital_SelfCalibrate.return_value = 0
mock_library.niDigital_SendSoftwareEdgeTrigger.side_effect = MockFunctionCallError("niDigital_SendSoftwareEdgeTrigger")
mock_library.niDigital_SendSoftwareEdgeTrigger.return_value = 0
mock_library.niDigital_SetAttributeViBoolean.side_effect = MockFunctionCallError("niDigital_SetAttributeViBoolean")
mock_library.niDigital_SetAttributeViBoolean.return_value = 0
mock_library.niDigital_SetAttributeViInt32.side_effect = MockFunctionCallError("niDigital_SetAttributeViInt32")
mock_library.niDigital_SetAttributeViInt32.return_value = 0
mock_library.niDigital_SetAttributeViInt64.side_effect = MockFunctionCallError("niDigital_SetAttributeViInt64")
mock_library.niDigital_SetAttributeViInt64.return_value = 0
mock_library.niDigital_SetAttributeViReal64.side_effect = MockFunctionCallError("niDigital_SetAttributeViReal64")
mock_library.niDigital_SetAttributeViReal64.return_value = 0
mock_library.niDigital_SetAttributeViString.side_effect = MockFunctionCallError("niDigital_SetAttributeViString")
mock_library.niDigital_SetAttributeViString.return_value = 0
mock_library.niDigital_TDR.side_effect = MockFunctionCallError("niDigital_TDR")
mock_library.niDigital_TDR.return_value = 0
mock_library.niDigital_UnloadAllPatterns.side_effect = MockFunctionCallError("niDigital_UnloadAllPatterns")
mock_library.niDigital_UnloadAllPatterns.return_value = 0
mock_library.niDigital_UnloadSpecifications.side_effect = MockFunctionCallError("niDigital_UnloadSpecifications")
mock_library.niDigital_UnloadSpecifications.return_value = 0
mock_library.niDigital_UnlockSession.side_effect = MockFunctionCallError("niDigital_UnlockSession")
mock_library.niDigital_UnlockSession.return_value = 0
mock_library.niDigital_WaitUntilDone.side_effect = MockFunctionCallError("niDigital_WaitUntilDone")
mock_library.niDigital_WaitUntilDone.return_value = 0
mock_library.niDigital_WriteSequencerFlag.side_effect = MockFunctionCallError("niDigital_WriteSequencerFlag")
mock_library.niDigital_WriteSequencerFlag.return_value = 0
mock_library.niDigital_WriteSequencerRegister.side_effect = MockFunctionCallError("niDigital_WriteSequencerRegister")
mock_library.niDigital_WriteSequencerRegister.return_value = 0
mock_library.niDigital_WriteSourceWaveformBroadcastU32.side_effect = MockFunctionCallError("niDigital_WriteSourceWaveformBroadcastU32")
mock_library.niDigital_WriteSourceWaveformBroadcastU32.return_value = 0
mock_library.niDigital_WriteSourceWaveformDataFromFileTDMS.side_effect = MockFunctionCallError("niDigital_WriteSourceWaveformDataFromFileTDMS")
mock_library.niDigital_WriteSourceWaveformDataFromFileTDMS.return_value = 0
mock_library.niDigital_WriteSourceWaveformSiteUniqueU32.side_effect = MockFunctionCallError("niDigital_WriteSourceWaveformSiteUniqueU32")
mock_library.niDigital_WriteSourceWaveformSiteUniqueU32.return_value = 0
mock_library.niDigital_WriteStatic.side_effect = MockFunctionCallError("niDigital_WriteStatic")
mock_library.niDigital_WriteStatic.return_value = 0
mock_library.niDigital_close.side_effect = MockFunctionCallError("niDigital_close")
mock_library.niDigital_close.return_value = 0
mock_library.niDigital_error_message.side_effect = MockFunctionCallError("niDigital_error_message")
mock_library.niDigital_error_message.return_value = 0
mock_library.niDigital_reset.side_effect = MockFunctionCallError("niDigital_reset")
mock_library.niDigital_reset.return_value = 0
mock_library.niDigital_self_test.side_effect = MockFunctionCallError("niDigital_self_test")
mock_library.niDigital_self_test.return_value = 0
| 64.2672
| 219
| 0.713708
|
cc86481f20c9b0db449dfbbb5699f3be5e4f934e
| 10,776
|
py
|
Python
|
io_mod.py
|
yufengliang/mbxaspy
|
d92a15c5b0feca11e9260bfdbc0ca7fb07d426ff
|
[
"Apache-2.0"
] | 5
|
2018-03-23T01:54:00.000Z
|
2020-12-04T18:31:25.000Z
|
io_mod.py
|
yufengliang/mbxaspy
|
d92a15c5b0feca11e9260bfdbc0ca7fb07d426ff
|
[
"Apache-2.0"
] | null | null | null |
io_mod.py
|
yufengliang/mbxaspy
|
d92a15c5b0feca11e9260bfdbc0ca7fb07d426ff
|
[
"Apache-2.0"
] | 3
|
2018-03-23T01:56:29.000Z
|
2020-10-04T05:13:35.000Z
|
""" a module for input and output """
from __future__ import print_function
from struct import pack, unpack
import re
import sys
import inspect
import heapq
from constants import *
from utils import *
_quote = {'"', "'"}
_delimiter = {';', ',', ' ', '\t', '\n'}
def input_from_binary(fhandle, data_type, ndata, offset):
""" input data from a binary file
Args:
fhandle: file handle. The file needs to be opened first.
data_type: 'float', 'double', 'complex'.
ndata: length of the data measured in data_type
offset: start to read at the offset measured in data_type.
count from the head of fhandle.
Returns:
a list of data of specified data_type
"""
    if data_type not in data_set:
raise TypeError(' data_type must be in ' + str(set(data_set)) + '.' )
# current file position
pos = fhandle.tell()
# seek and read
fhandle.seek(offset * data_set[data_type][0])
#data = fhandle.read(ndata * data_set[data_type][0])
data = fhandle.read(ndata * data_set[data_type][0])
# convert
data_len = data_set[data_type][0] * ndata
reslist = list(unpack(data_set[data_type][1] * ndata, data[0 : data_len]))
if data_type == 'complex':
# Add odd and even-index elements into complexes
reslist = [re + 1j * im for re, im in zip(reslist[::2], reslist[1::2])]
# return to previous position
fhandle.seek(pos)
return reslist
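# Illustrative usage sketch (not part of the original module). It assumes the
# 'double' entry of data_set maps to 8-byte native doubles packed with 'd',
# which is what the struct-based unpacking above implies.
def _input_from_binary_example():
    """Write three doubles to a scratch file and read the last two back,
    skipping the first one via offset=1 (measured in doubles)."""
    import tempfile
    with tempfile.TemporaryFile() as fh:
        fh.write(pack('d' * 3, 1.0, 2.0, 3.0))
        return input_from_binary(fh, 'double', ndata=2, offset=1)  # [2.0, 3.0]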
def input_arguments(lines, lower = False):
""" input arguments from a user-defined file
Given lines =
"
nbnd_f = 300 # number of final-state orbitals
# is_gamma =
nbnd_i=400; job_done = .true., is_absorption =FALSE
ecut = 30.0
"
Should return a dictionary like:
{'nbnd_f' : '300', 'nbnd_i' : '400', 'job_done' : '.true.', 'is_absorption' : 'FALSE', 'ecut' : '30.0'}
Args:
fname: file name
Returns:
a dictionary as above
"""
var_dict = {}
if len(lines) == 0: return var_dict
if lines[-1] != '\n': lines += '\n'
lines = re.sub('#.*\n', '#', lines) # convert all comments into _delimiters
for block in lines.split('#'):
name = None
# look for assignment
for item in block.split('='):
value = None
new_name = item
# if value is string
for s in _quote:
item_str = item.split(s)
if len(item_str) > 2: # found quotation marks
value = item_str[1] # the string in the first _quote
new_name = item_str[-1].strip() # last term
# value not a string
if value is None:
value = item
for s in _delimiter:
try:
value = list(filter(None, value.split(s)))[0] # always take the first meaningful string
except IndexError:
value = ''
break
for s in _delimiter:
try:
new_name = list(filter(None, new_name.split(s)))[-1] # always take the last meaningful string
except IndexError:
new_name = ''
break
if is_valid_variable_name(name) and value is not None:
if lower: name = name.lower()
var_dict.update({name : value})
name = new_name
return var_dict
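# Illustrative usage sketch (not part of the original module): reproduce the
# sample block from the input_arguments docstring. All key/value pairs below
# come from that docstring; values are returned as plain strings.
def _input_arguments_example():
    """Parse a small namelist-like block and return the variable dictionary,
    e.g. {'nbnd_f': '300', 'nbnd_i': '400', 'job_done': '.true.',
    'is_absorption': 'FALSE', 'ecut': '30.0'}."""
    sample = ("nbnd_f = 300 # number of final-state orbitals\n"
              "nbnd_i=400; job_done = .true., is_absorption =FALSE\n"
              "ecut = 30.0\n")
    return input_arguments(sample)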
def convert_val(val_str, val):
""" Given a string, convert into the correct data type """
if val is bool:
if 'true' in val_str.lower(): val_str = 'true'
else: val_str = '' # otherwise set to false
val_type = val
try:
return val_type(val_str)
except ValueError:
# Can it be a float ?
return val_type(float(val_str))
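# Illustrative usage sketch (not part of the original module): convert_val falls
# back to float parsing for integer targets and treats any string without
# 'true' in it as False.
def _convert_val_example():
    """Return (30, False): '30.0' parsed as int via float, 'FALSE' as bool."""
    return convert_val('30.0', int), convert_val('FALSE', bool)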
def list2str_1d(nums, mid = -1):
"""
    Given a list of nums, output the head, the middle, and the tail of it
with nice format. Return the formatted string.
Args:
mid: define the middle point you are interested in
"""
nvis = 3 # numbers printed out in each part
l = len(nums)
mid = mid if mid > 0 else l / 2
fmtstr = '{0:.4f} '
resstr = ''
for i in range(min(nvis, l)):
resstr += fmtstr.format(nums[i])
irange = range(max(nvis + 1, int(mid - nvis / 2)), min(l, int(mid + nvis / 2 + 1)))
if len(irange) > 0: resstr += ' ... '
for i in irange:
resstr += fmtstr.format(nums[i])
irange = range(max(int(mid + nvis / 2 + 1), l - nvis), l)
if len(irange) > 0: resstr += ' ... '
for i in irange:
resstr += fmtstr.format(nums[i])
return resstr
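# Illustrative usage sketch (not part of the original module): preview the head,
# the neighbourhood of index 10 and the tail of a 20-element list.
def _list2str_1d_example():
    return list2str_1d([float(i) for i in range(20)], mid=10)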
def eigvec2str(eigvec, m, n, nctr, nvis = 6, npc = 6, iws = ' '):
"""
Output some prominent matrix elements for an eigenvector matrix
eigvec is given as a 1D array:
[ <B_1|1k>, <B_1|2k>, <B_2|1k>, <B_2|2k>]
which corresponds to such a matrix (m rows x n cols):
<B_1|1k> <B_1|2k>
<B_2|1k> <B_2|2k>
nctr: list states around the center nctr
nvis: number of printed out states
npc: number of principal components
iws: initial white spaces for indentation
"""
resstr = iws + '{0:<10}{1}\n'.format('norm', 'principal components')
# guarantee len(eigvec) = n * m
for j in range(max(0, nctr - int(nvis / 2) + 1), min(n, nctr + int(nvis / 2) + 1)):
eabs = [ abs(eigvec[i * n + j]) ** 2 for i in range(m) ]
norm = sum(eabs)
# print out the norm
resstr += iws + '{0:<10.5f}'.format(norm)
# the wavefunction to print: |nk> = ...
resstr += '|n={0:>4},k> = '.format(j)
# Find the npc elements with the largest norm
indices = heapq.nlargest(npc, range(len(eabs)), key = lambda i : eabs[i])
for i in indices:
resstr += '({0:11.3f})|B_{1}> + '.format(eigvec[i * n + j], i)
# resstr = resstr[:-3] # delete the last +
resstr += '... \n'
return resstr
def atomic_species_to_list(asp_str):
"""
    Convert an atomic_species block (as in Quantum ESPRESSO) into a list like:
[['Br', 'Br.pbe-van_mit.UPF'], ['C', 'C.pbe-van_bm.UPF'], ... ]
"""
res = []
for l in asp_str.split('\n'):
words = l.split()
if len(words) == 3 and len(words[0]) < elem_maxl and words[2].split('.')[-1] == 'UPF':
# *** There should be more robust sanity checks: check elements
res.append([words[0], words[2]])
return res
def atomic_positions_to_list(apos_str):
"""
    Convert an atomic_positions block (as in Quantum ESPRESSO) into a list like:
[['Pb', '0.0', '0.0', '0.0'], ['Br', '0.0', '0.0', '0.5'], ...]
Most interested in the atoms' names rather than their positions
"""
res = []
for l in apos_str.split('\n'):
words = l.split()
if len(words) >= 4 and len(words[0]) < elem_maxl:
# *** There should be more robust sanity checks: check elements
res.append(words)
return res
def read_qij_from_upf(upf_fname):
"""
Given a PAW/ultrasoft pseudopotential in UPF format, find the projectors' angular momenta
and the corresponding Q_int matrices in the file.
"""
l = [] # angular momentum number
qij = [] # Q_int matrix
i, j = 0, 0
errmsg = ''
fh = []
try:
fh = open(upf_fname, 'r')
except IOError:
errmsg = 'cannot open UPF file: ' + str(upf_fname)
for line in fh:
words = line.split()
if len(words) >= 4 and words[2 : 4] == ['Beta', 'L']:
l.append(int(words[1]))
if len(words) >= 2 and words[1] == 'Q_int':
if (i, j) == (0, 0):
# if first time, initialize the qij matrix
qij = [[0.0] * len(l) for _ in l]
try:
qij[i][j] = float(words[0])
except IndexError:
errmsg = 'too many Q_int for given l = ' + str(len(l))
break
qij[j][i] = qij[i][j]
j += 1
if j > len(l) - 1: i, j = i + 1, i + 1
if fh: fh.close()
return l, qij, errmsg
def get_index(s):
"""
get the index in the string like 'a[4]'.
should return 4
"""
return int(s[s.find("[")+1:s.find("]")])
def import_from_pos(fh):
"""
import < beta_l | r_i | h > from the given file handle
The pos file used should have the following format (take I 4d9 as an example):
position
6 1 ! nwfc1, nwfc2
0 0 1 1 2 2 ! lwfc1(1:nwfc1)
2 ! lwfc2(1:nwfc2)
22 ! nonzero elements (i,j,ixyz,cR,cI)
3 1 3 0.2194993793964386E+00 0.0000000000000000E+00
4 1 1 0.1097496896982193E+00 0.0000000000000000E+00
5 1 2 0.1097496896982193E+00 0.0000000000000000E+00
6 1 3 0.2587353587150574E+00 0.0000000000000000E+00
7 1 1 0.1293676793575287E+00 0.0000000000000000E+00
8 1 2 0.1293676793575287E+00 0.0000000000000000E+00
3 2 1 -0.1900920420885086E+00 0.0000000000000000E+00
...
3 3 2 -0.1900920420885086E+00 0.0000000000000000E+00
...
4 4 1 -0.1900920420885086E+00 0.0000000000000000E+00
...
4 5 2 -0.1900920420885086E+00 0.0000000000000000E+00
...
"""
elem = None
while True:
l = fh.readline()
if not l: break
if 'nwfc1' in l and 'nwfc2' in l:
w = l.split()
nwfc1, nwfc2 = int(w[0]), int(w[1])
# nwfc2 is assumed to be one - only one l value
if 'lwfc1' in l:
w = l.split('!')[0].split()
lwfc1 = [int(_) for _ in w]
if 'lwfc2' in l:
lwfc2 = int(l.split()[0])
if 'nonzero elements' in l:
n = int(l.split()[0])
elem = []
l = fh.readline()
c = 0
while l and c < n:
w = l.split()
if len(w) in {5, 10}: # 5-col is for old pos format and 10-col is the enriched format by yfliang
# (l,m) in lwfc1, m in lwfc2 (only one), i = (x=1,y=2,z=3)
# m ranges from -l to l
                    # elem = < h_c | r_i | beta_lm > (core-level wavefunctions always come first)
elem.append([int(_) for _ in w[ : 3]] + [float(w[3]) + 1j * float(w[4])])
l = fh.readline()
c += 1
return lwfc1, lwfc2, elem
# export function only
__all__ = [s for s in dir() if not s.startswith('_') and inspect.isfunction(getattr(sys.modules[__name__],s))]
if __name__ == '__main__':
print(__file__ + ": the i/o module for mbxaspy")
# debug
var_dict = input_arguments(sys.stdin.read())
print(var_dict)
| 32.167164
| 117
| 0.542873
|
94178406ff4769fb635472ba9bb94977c793b152
| 1,357
|
py
|
Python
|
make_gtrack.py
|
tharvesh/preprocess_script
|
a52d56442c4038a1af567c83773972f10078294e
|
[
"MIT"
] | null | null | null |
make_gtrack.py
|
tharvesh/preprocess_script
|
a52d56442c4038a1af567c83773972f10078294e
|
[
"MIT"
] | null | null | null |
make_gtrack.py
|
tharvesh/preprocess_script
|
a52d56442c4038a1af567c83773972f10078294e
|
[
"MIT"
] | null | null | null |
import sys
import re
sig_fname=str(sys.argv[1])
domain_fname=str(sys.argv[2])
with open(sig_fname) as f:
sig_file=f.readlines()
with open(domain_fname) as dom:
domain_file=dom.readlines()
sig_file=[x.strip() for x in sig_file]
domain_file=[x.strip() for x in domain_file]
sig_list = []
sig_dict = {}
for line in sig_file:
chr_1,start1,end1,chr_2,start2,end2,pval = line.split()
key=chr_1 + ":" + start1 + "-" + end1
val=chr_2 + ":" + start2 + "-" + end2
sig_dict.setdefault(key,[]).append(val)
combined_dict = {}
for line in domain_file:
char,start,end,cat = line.split()
domain=char + ":" + start + "-" + end
if domain in sig_dict:
if domain in sig_dict.keys():
#print domain, sig_dict[domain]
combined_dict[domain] = sig_dict[domain]
for key, value in sig_dict.iteritems():
if domain in value:
#print domain, key
if domain in combined_dict.keys():
combined_dict[domain].append(key)
else:
#print domain + "\t."
combined_dict[domain] = "."
#print "##gtrack version: 1.0"
#print "##track type: linked segments"
#print "###seqid start end id radius edges"
for key, value in combined_dict.iteritems():
temp_bed = re.split('[:-]',key)
temp_bed = "\t".join(temp_bed)
temp_edge = ";".join(value)
print temp_bed + "\t" + key + "\t" + "1\t"+ temp_edge
| 24.672727
| 57
| 0.642594
|
80dbf143ca1084bed271e2928b5021da41be058e
| 4,626
|
py
|
Python
|
Allura/allura/tests/test_decorators.py
|
rohankumardubey/allura
|
9c490a051ca912d28b81ce656441d6fed100cb24
|
[
"Apache-2.0"
] | 113
|
2015-03-25T10:33:37.000Z
|
2022-02-16T20:55:06.000Z
|
Allura/allura/tests/test_decorators.py
|
rohankumardubey/allura
|
9c490a051ca912d28b81ce656441d6fed100cb24
|
[
"Apache-2.0"
] | 4
|
2017-08-04T16:19:07.000Z
|
2020-06-08T19:01:33.000Z
|
Allura/allura/tests/test_decorators.py
|
rohankumardubey/allura
|
9c490a051ca912d28b81ce656441d6fed100cb24
|
[
"Apache-2.0"
] | 36
|
2015-08-14T16:27:39.000Z
|
2022-02-16T20:54:35.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import inspect
from unittest import TestCase
from mock import patch
import random
import gc
from alluratest.tools import assert_equal, assert_not_equal
from allura.lib.decorators import task, memoize
class TestTask(TestCase):
def test_no_params(self):
@task
def func():
pass
self.assertTrue(hasattr(func, 'post'))
def test_with_params(self):
@task(disable_notifications=True)
def func():
pass
self.assertTrue(hasattr(func, 'post'))
@patch('allura.lib.decorators.c')
@patch('allura.model.MonQTask')
    def test_post(self, MonQTask, c):  # patch decorators apply bottom-up, so the MonQTask mock comes first
@task(disable_notifications=True)
def func(s, foo=None, **kw):
pass
def mock_post(f, args, kw, delay=None):
self.assertTrue(c.project.notifications_disabled)
self.assertFalse('delay' in kw)
self.assertEqual(delay, 1)
self.assertEqual(kw, dict(foo=2))
self.assertEqual(args, ('test',))
self.assertEqual(f, func)
c.project.notifications_disabled = False
MonQTask.post.side_effect = mock_post
func.post('test', foo=2, delay=1)
class TestMemoize(object):
def test_function(self):
@memoize
def remember_randomy(do_random, foo=None):
if do_random:
return random.random()
else:
return "constant"
rand1 = remember_randomy(True)
rand2 = remember_randomy(True)
const1 = remember_randomy(False)
rand_kwargs1 = remember_randomy(True, foo='asdf')
rand_kwargs2 = remember_randomy(True, foo='xyzzy')
assert_equal(rand1, rand2)
assert_equal(const1, "constant")
assert_not_equal(rand1, rand_kwargs1)
assert_not_equal(rand_kwargs1, rand_kwargs2)
def test_methods(self):
class Randomy(object):
@memoize
def randomy(self, do_random):
if do_random:
return random.random()
else:
return "constant"
@memoize
def other(self, do_random):
if do_random:
return random.random()
else:
return "constant"
r = Randomy()
rand1 = r.randomy(True)
rand2 = r.randomy(True)
const1 = r.randomy(False)
other1 = r.other(True)
other2 = r.other(True)
assert_equal(rand1, rand2)
assert_equal(const1, "constant")
assert_not_equal(rand1, other1)
assert_equal(other1, other2)
r2 = Randomy()
r2rand1 = r2.randomy(True)
r2rand2 = r2.randomy(True)
r2const1 = r2.randomy(False)
r2other1 = r2.other(True)
r2other2 = r2.other(True)
assert_not_equal(r2rand1, rand1)
assert_equal(r2rand1, r2rand2)
assert_not_equal(r2other1, other1)
assert_equal(r2other1, r2other2)
def test_methods_garbage_collection(self):
class Randomy(object):
@memoize
def randomy(self, do_random):
if do_random:
return random.random()
else:
return "constant"
r = Randomy()
rand1 = r.randomy(True)
for gc_ref in gc.get_referrers(r):
if inspect.isframe(gc_ref):
continue
else:
raise AssertionError('Unexpected reference to `r` instance: {!r}\n'
'@memoize probably made a reference to it and has created a circular reference loop'.format(gc_ref))
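# Minimal sketch (not part of the original test suite): repeated calls with the
# same arguments return the cached value, mirroring TestMemoize.test_function.
def _memoize_sketch():
    @memoize
    def answer(x):
        return random.random() + x

    return answer(1) == answer(1)  # True: the second call hits the cache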
| 32.125
| 137
| 0.602032
|
ee1b858d85530195070e55b7ce9cde14ee249ce8
| 11,592
|
py
|
Python
|
cryspy/C_item_loop_classes/cl_2_section.py
|
ikibalin/rhochi
|
1ca03f18dc72006322a101ed877cdbba33ed61e7
|
[
"MIT"
] | null | null | null |
cryspy/C_item_loop_classes/cl_2_section.py
|
ikibalin/rhochi
|
1ca03f18dc72006322a101ed877cdbba33ed61e7
|
[
"MIT"
] | null | null | null |
cryspy/C_item_loop_classes/cl_2_section.py
|
ikibalin/rhochi
|
1ca03f18dc72006322a101ed877cdbba33ed61e7
|
[
"MIT"
] | null | null | null |
"""Description of classes Section, SectionL."""
from typing import NoReturn
import numpy
from cryspy.A_functions_base.function_2_crystallography_base import \
calc_atoms_in_unit_cell
from cryspy.A_functions_base.function_1_objects import \
form_items_by_dictionary
from cryspy.B_parent_classes.cl_1_item import ItemN
from cryspy.B_parent_classes.cl_2_loop import LoopN
from cryspy.C_item_loop_classes.cl_1_cell import Cell
from cryspy.C_item_loop_classes.cl_1_atom_site import AtomSiteL
from cryspy.C_item_loop_classes.cl_1_space_group_symop import \
SpaceGroupSymop, SpaceGroupSymopL
class Section(ItemN):
"""Section description.
Describe information concerning the density point.
"""
ATTR_MANDATORY_NAMES = ("id", "size_x", "size_y", "atom_center",
"atom_along_axis_x", "atom_x_operation_xyz",
"atom_along_axis_y", "atom_y_operation_xyz",
"points_x", "points_y", "url_out")
ATTR_MANDATORY_TYPES = (str, float, float, str, str, str, str, str, int,
int, str)
ATTR_MANDATORY_CIF = ("id", "size_x", "size_y", "atom_center",
"atom_along_axis_x", "atom_x_operation_xyz",
"atom_along_axis_y", "atom_y_operation_xyz",
"points_x", "points_y", "url_out")
ATTR_OPTIONAL_NAMES = ("field_x", "field_y", "field_z")
ATTR_OPTIONAL_TYPES = (float, float, float)
ATTR_OPTIONAL_CIF = ("field_x", "field_y", "field_z")
ATTR_NAMES = ATTR_MANDATORY_NAMES + ATTR_OPTIONAL_NAMES
ATTR_TYPES = ATTR_MANDATORY_TYPES + ATTR_OPTIONAL_TYPES
ATTR_CIF = ATTR_MANDATORY_CIF + ATTR_OPTIONAL_CIF
ATTR_INT_NAMES = ()
ATTR_INT_PROTECTED_NAMES = ()
# parameters considered are refined parameters
ATTR_REF = ()
ATTR_SIGMA = tuple([f"{_h:}_sigma" for _h in ATTR_REF])
ATTR_CONSTR_FLAG = tuple([f"{_h:}_constraint" for _h in ATTR_REF])
ATTR_REF_FLAG = tuple([f"{_h:}_refinement" for _h in ATTR_REF])
ATTR_CONSTR_MARK = tuple([f"{_h:}_mark" for _h in ATTR_REF])
# formats if cif format
D_FORMATS = {"size_x": "{:.2f}", "size_y": "{:.2f}"}
# constraints on the parameters
D_CONSTRAINTS = {}
# default values for the parameters
D_DEFAULT = {}
for key in ATTR_SIGMA:
D_DEFAULT[key] = 0.
for key in (ATTR_CONSTR_FLAG + ATTR_REF_FLAG):
D_DEFAULT[key] = False
for key in ATTR_CONSTR_MARK:
D_DEFAULT[key] = ""
PREFIX = "section"
def __init__(self, **kwargs) -> NoReturn:
super(Section, self).__init__()
# defined for any integer and float parameters
D_MIN = {}
        # defined for any integer and float parameters
D_MAX = {}
self.__dict__["D_MIN"] = D_MIN
self.__dict__["D_MAX"] = D_MAX
for key, attr in self.D_DEFAULT.items():
setattr(self, key, attr)
for key, attr in kwargs.items():
setattr(self, key, attr)
def calc_axes_x_y_z(self, cell: Cell, atom_site: AtomSiteL):
"""
Calculate three vectors of axes: pos_x, pos_y, pos_z.
Arguments
---------
- cell
- atom_site (loop)
"""
atom_o_label = self.atom_center
_item_a_s_o = atom_site[atom_o_label]
fract_a_o_xyz = numpy.array([_item_a_s_o.fract_x, _item_a_s_o.fract_y,
_item_a_s_o.fract_z], dtype=float)
pos_a_o_xyz = cell.calc_position_by_coordinate(
fract_a_o_xyz[0], fract_a_o_xyz[1], fract_a_o_xyz[2])
pos_a_o_xyz = numpy.array(pos_a_o_xyz, dtype=float)
atom_x_label = self.atom_along_axis_x
atom_x_operation_xyz = self.atom_x_operation_xyz
s_g_s_a_x = SpaceGroupSymop(operation_xyz=atom_x_operation_xyz)
_item_a_s_x = atom_site[atom_x_label]
fract_a_x_xyz = numpy.array([_item_a_s_x.fract_x, _item_a_s_x.fract_y,
_item_a_s_x.fract_z], dtype=float)
fract_a_x_xyz = numpy.dot(s_g_s_a_x.r, fract_a_x_xyz) + s_g_s_a_x.b
pos_a_x_xyz = cell.calc_position_by_coordinate(
fract_a_x_xyz[0], fract_a_x_xyz[1], fract_a_x_xyz[2])
pos_a_x_xyz = numpy.array(pos_a_x_xyz, dtype=float)
atom_y_label = self.atom_along_axis_y
atom_y_operation_xyz = self.atom_y_operation_xyz
s_g_s_a_y = SpaceGroupSymop(operation_xyz=atom_y_operation_xyz)
_item_a_s_y = atom_site[atom_y_label]
fract_a_y_xyz = numpy.array([_item_a_s_y.fract_x, _item_a_s_y.fract_y,
_item_a_s_y.fract_z], dtype=float)
fract_a_y_xyz = numpy.dot(s_g_s_a_y.r, fract_a_y_xyz) + s_g_s_a_y.b
pos_a_y_xyz = cell.calc_position_by_coordinate(
fract_a_y_xyz[0], fract_a_y_xyz[1], fract_a_y_xyz[2])
pos_a_y_xyz = numpy.array(pos_a_y_xyz, dtype=float)
v_pos_x = (pos_a_x_xyz-pos_a_o_xyz)/((
numpy.square(pos_a_x_xyz-pos_a_o_xyz)).sum())**0.5
v_pos_a_y = (pos_a_y_xyz-pos_a_o_xyz)/((
numpy.square(pos_a_y_xyz-pos_a_o_xyz)).sum())**0.5
v_pos_y_not_norm = v_pos_a_y - (v_pos_a_y * v_pos_x).sum() * v_pos_x
v_pos_y = v_pos_y_not_norm / (
numpy.square(v_pos_y_not_norm).sum())**0.5
v_pos_z = numpy.cross(v_pos_x, v_pos_y)
return v_pos_x, v_pos_y, v_pos_z
def calc_fractions(self, cell: Cell, atom_site: AtomSiteL) -> \
numpy.ndarray:
"""Give a numpy.nd_array of fractions: fractions_xyz.
Arguments
---------
- cell
- atom_site
"""
size_x, size_y = self.size_x, self.size_y
points_x, points_y = self.points_x, self.points_y
atom_o_label = self.atom_center
_item_a_s_o = atom_site[atom_o_label]
fract_a_o_xyz = numpy.array([_item_a_s_o.fract_x, _item_a_s_o.fract_y,
_item_a_s_o.fract_z], dtype=float)
pos_a_o_xyz = cell.calc_position_by_coordinate(
fract_a_o_xyz[0], fract_a_o_xyz[1], fract_a_o_xyz[2])
pos_a_o_xyz = numpy.array(pos_a_o_xyz, dtype=float)
v_pos_x, v_pos_y, v_pos_z = self.calc_axes_x_y_z(cell, atom_site)
v_delta_pos_x = v_pos_x * size_x / float(points_x)
v_delta_pos_y = v_pos_y * size_y / float(points_y)
np_x_2d, np_y_2d = numpy.meshgrid(range(-points_x//2, points_x//2),
range(-points_y//2, points_y//2),
indexing="ij")
np_x_1d, np_y_1d = np_x_2d.flatten(), np_y_2d.flatten()
pos_xyz = np_x_1d[numpy.newaxis, :]*v_delta_pos_x[:, numpy.newaxis] + \
np_y_1d[numpy.newaxis, :] * v_delta_pos_y[:, numpy.newaxis] + \
pos_a_o_xyz[:, numpy.newaxis]
fract_x, fract_y, fract_z = cell.calc_coordinate_by_position(
pos_xyz[0, :], pos_xyz[1, :], pos_xyz[2, :])
return fract_x, fract_y, fract_z
def calc_atoms(self, cell: Cell, atom_site: AtomSiteL,
space_group_symop: SpaceGroupSymopL, distance_min=0.3):
"""Calculate position of atoms in the section plane.
Argument
--------
- cell
- atom_site
- space_group_symop
        - distance_min is the minimal distance used to search for atoms, in angstroms
(default value is 0.3)
Output
------
- atom_x
- atom_y
- atom_label
"""
r_11 = numpy.array(space_group_symop.r_11, dtype=int)
r_12 = numpy.array(space_group_symop.r_12, dtype=int)
r_13 = numpy.array(space_group_symop.r_13, dtype=int)
r_21 = numpy.array(space_group_symop.r_21, dtype=int)
r_22 = numpy.array(space_group_symop.r_22, dtype=int)
r_23 = numpy.array(space_group_symop.r_23, dtype=int)
r_31 = numpy.array(space_group_symop.r_31, dtype=int)
r_32 = numpy.array(space_group_symop.r_32, dtype=int)
r_33 = numpy.array(space_group_symop.r_33, dtype=int)
b_1 = numpy.array(space_group_symop.b_1, dtype=float)
b_2 = numpy.array(space_group_symop.b_2, dtype=float)
b_3 = numpy.array(space_group_symop.b_3, dtype=float)
r_ij = (r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33)
b_i = (b_1, b_2, b_3)
fract_atom_auc_x = numpy.array(atom_site.fract_x, dtype=float)
fract_atom_auc_y = numpy.array(atom_site.fract_y, dtype=float)
fract_atom_auc_z = numpy.array(atom_site.fract_z, dtype=float)
fract_atom_auc_xyz = (fract_atom_auc_x, fract_atom_auc_y,
fract_atom_auc_z)
label_atom_auc = numpy.array(atom_site.label, dtype=str)
fract_atom_uc_x, fract_atom_uc_y, fract_atom_uc_z, label_atom_uc = \
calc_atoms_in_unit_cell(r_ij, b_i, fract_atom_auc_xyz,
label_atom_auc)
size_x = self.size_x
size_y = self.size_y
atom_center = self.atom_center
atom_site_center = atom_site[atom_center]
center_fract_x = atom_site_center.fract_x
center_fract_y = atom_site_center.fract_y
center_fract_z = atom_site_center.fract_z
center_pos_x, center_pos_y, center_pos_z = \
cell.calc_position_by_coordinate(center_fract_x, center_fract_y,
center_fract_z)
v_pos_x, v_pos_y, v_pos_z = self.calc_axes_x_y_z(cell, atom_site)
pos_atom_uc_x, pos_atom_uc_y, pos_atom_uc_z = \
cell.calc_position_by_coordinate(fract_atom_uc_x, fract_atom_uc_y,
fract_atom_uc_z)
pos_atom_loc_x = v_pos_x[0]*(pos_atom_uc_x - center_pos_x) + \
v_pos_x[1]*(pos_atom_uc_y - center_pos_y) + \
v_pos_x[2]*(pos_atom_uc_z - center_pos_z)
pos_atom_loc_y = v_pos_y[0]*(pos_atom_uc_x - center_pos_x) + \
v_pos_y[1]*(pos_atom_uc_y - center_pos_y) + \
v_pos_y[2]*(pos_atom_uc_z - center_pos_z)
pos_atom_loc_z = v_pos_z[0]*(pos_atom_uc_x - center_pos_x) + \
v_pos_z[1]*(pos_atom_uc_y - center_pos_y) + \
v_pos_z[2]*(pos_atom_uc_z - center_pos_z)
flag_x = numpy.abs(pos_atom_loc_x) < 0.5*size_x
flag_y = numpy.abs(pos_atom_loc_y) < 0.5*size_y
flag_z = numpy.abs(pos_atom_loc_z) < distance_min
flag_xyz = numpy.logical_and(flag_x, numpy.logical_and(flag_y, flag_z))
atom_x = pos_atom_loc_x[flag_xyz]
atom_y = pos_atom_loc_y[flag_xyz]
atom_label = label_atom_uc[flag_xyz]
return atom_x, atom_y, atom_label
class SectionL(LoopN):
"""Description of sections.
Describe information concerning the density point.
"""
ITEM_CLASS = Section
ATTR_INDEX = "id"
def __init__(self, loop_name: str = None, **kwargs) -> NoReturn:
super(SectionL, self).__init__()
self.__dict__["items"] = form_items_by_dictionary(self.ITEM_CLASS, kwargs)
self.__dict__["loop_name"] = loop_name
# s_cont = """
# loop_
# _section_id
# _section_size_x
# _section_size_y
# _section_atom_center
# _section_atom_along_axis_x
# _section_atom_along_axis_x_symop
# _section_atom_along_axis_y
# _section_atom_along_axis_y_symop
# _section_points_x
# _section_points_y
# _section_url_out
# core 5 5 Ho1 O1 x,y,z O2 x,y,z 100 100 s_ho1.dat
# """
# obj = SectionL.from_cif(s_cont)
# print(obj, end="\n\n")
# print(obj["core"], end="\n\n")
| 39.162162
| 82
| 0.63863
|
992733f8ad51ad2f636a0cab79e9db11252baf21
| 1,349
|
py
|
Python
|
setup.py
|
khimsh/is_isbn
|
7b657887d3634c29d03856f97c0f0aa4c20e8dce
|
[
"MIT"
] | 2
|
2021-07-06T01:08:14.000Z
|
2021-07-06T01:08:16.000Z
|
setup.py
|
khimsh/is_isbn
|
7b657887d3634c29d03856f97c0f0aa4c20e8dce
|
[
"MIT"
] | 8
|
2020-01-22T17:58:31.000Z
|
2022-01-31T02:25:44.000Z
|
setup.py
|
khimsh/is_isbn
|
7b657887d3634c29d03856f97c0f0aa4c20e8dce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [ ]
setup_requirements = [ ]
test_requirements = [ ]
setup(
author="Ia Khimshiashvili",
author_email='i.khimsh@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Checks if the provided string is valid ISBN.",
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='is_isbn',
name='is_isbn',
packages=find_packages(include=['is_isbn']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/khimsh/is_isbn',
version='0.1.0',
zip_safe=False,
)
| 27.530612
| 63
| 0.644181
|
d65e9d5ae1c848921d0aec15727b0ac655dda249
| 552
|
py
|
Python
|
fedot/core/composer/constraint.py
|
rozlana-g/FEDOT
|
a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c
|
[
"BSD-3-Clause"
] | 1
|
2021-11-09T10:24:38.000Z
|
2021-11-09T10:24:38.000Z
|
fedot/core/composer/constraint.py
|
rozlana-g/FEDOT
|
a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c
|
[
"BSD-3-Clause"
] | null | null | null |
fedot/core/composer/constraint.py
|
rozlana-g/FEDOT
|
a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c
|
[
"BSD-3-Clause"
] | null | null | null |
from copy import deepcopy
from typing import Optional
from fedot.core.optimisers.graph import OptGraph
from fedot.core.pipelines.validation import validate
def constraint_function(graph: OptGraph,
params: Optional['GraphGenerationParams'] = None):
try:
rules = params.rules_for_constraint if params else None
object_for_validation = params.adapter.restore(deepcopy(graph))
validate(object_for_validation, rules, params.advisor.task)
return True
except ValueError:
return False
| 32.470588
| 74
| 0.724638
|
f2b742f5082255a0f58057cfd7babad77e026d27
| 112
|
py
|
Python
|
tests/inputs/if-branching/84-joining_two_lists_deleting_a_lot.py
|
helq/pytropos
|
497ed5902e6e4912249ca0a46b477f9bfa6ae80a
|
[
"MIT"
] | 4
|
2019-10-06T18:01:24.000Z
|
2020-07-03T05:27:35.000Z
|
tests/inputs/if-branching/84-joining_two_lists_deleting_a_lot.py
|
helq/pytropos
|
497ed5902e6e4912249ca0a46b477f9bfa6ae80a
|
[
"MIT"
] | 5
|
2021-06-07T15:50:04.000Z
|
2021-06-07T15:50:06.000Z
|
tests/inputs/if-branching/84-joining_two_lists_deleting_a_lot.py
|
helq/pytropos
|
497ed5902e6e4912249ca0a46b477f9bfa6ae80a
|
[
"MIT"
] | null | null | null |
if _:
a = [3.0, []]
a[1].append(a[0])
else:
a = [[]]
a.append(a[0])
a[1].append(a[1])
# show_store()
| 11.2
| 19
| 0.446429
|
1cf863b5b6bdc74a763967fdfaa763c6283a09f6
| 9,843
|
py
|
Python
|
torba/torba/server/env.py
|
Nykseli/lbry-sdk
|
07afc0aa0a1e6c0ef6aa284fb47513af940440c1
|
[
"MIT"
] | null | null | null |
torba/torba/server/env.py
|
Nykseli/lbry-sdk
|
07afc0aa0a1e6c0ef6aa284fb47513af940440c1
|
[
"MIT"
] | null | null | null |
torba/torba/server/env.py
|
Nykseli/lbry-sdk
|
07afc0aa0a1e6c0ef6aa284fb47513af940440c1
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
import re
import resource
from os import environ
from collections import namedtuple
from ipaddress import ip_address
from torba.server.util import class_logger
from torba.server.coins import Coin
import torba.server.util as lib_util
NetIdentity = namedtuple('NetIdentity', 'host tcp_port ssl_port nick_suffix')
class Env:
# Peer discovery
PD_OFF, PD_SELF, PD_ON = range(3)
class Error(Exception):
pass
def __init__(self, coin=None):
self.logger = class_logger(__name__, self.__class__.__name__)
self.allow_root = self.boolean('ALLOW_ROOT', False)
self.host = self.default('HOST', 'localhost')
self.rpc_host = self.default('RPC_HOST', 'localhost')
self.loop_policy = self.set_event_loop_policy()
self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
self.db_dir = self.required('DB_DIRECTORY')
self.db_engine = self.default('DB_ENGINE', 'leveldb')
self.daemon_url = self.required('DAEMON_URL')
if coin is not None:
assert issubclass(coin, Coin)
self.coin = coin
else:
coin_name = self.required('COIN').strip()
network = self.default('NET', 'mainnet').strip()
self.coin = Coin.lookup_coin_class(coin_name, network)
self.cache_MB = self.integer('CACHE_MB', 1200)
self.host = self.default('HOST', 'localhost')
self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
# Server stuff
self.tcp_port = self.integer('TCP_PORT', None)
self.ssl_port = self.integer('SSL_PORT', None)
if self.ssl_port:
self.ssl_certfile = self.required('SSL_CERTFILE')
self.ssl_keyfile = self.required('SSL_KEYFILE')
self.rpc_port = self.integer('RPC_PORT', 8000)
self.max_subscriptions = self.integer('MAX_SUBSCRIPTIONS', 10000)
self.banner_file = self.default('BANNER_FILE', None)
self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
self.anon_logs = self.boolean('ANON_LOGS', False)
self.log_sessions = self.integer('LOG_SESSIONS', 3600)
# Peer discovery
self.peer_discovery = self.peer_discovery_enum()
self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
self.force_proxy = self.boolean('FORCE_PROXY', False)
self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
# The electrum client takes the empty string as unspecified
self.donation_address = self.default('DONATION_ADDRESS', '')
# Server limits to help prevent DoS
self.max_send = self.integer('MAX_SEND', 1000000)
self.max_subs = self.integer('MAX_SUBS', 250000)
self.max_sessions = self.sane_max_sessions()
self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
self.bandwidth_limit = self.integer('BANDWIDTH_LIMIT', 2000000)
self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
self.drop_client = self.custom("DROP_CLIENT", None, re.compile)
# Identities
clearnet_identity = self.clearnet_identity()
tor_identity = self.tor_identity(clearnet_identity)
self.identities = [identity
for identity in (clearnet_identity, tor_identity)
if identity is not None]
@classmethod
def default(cls, envvar, default):
return environ.get(envvar, default)
@classmethod
def boolean(cls, envvar, default):
default = 'Yes' if default else ''
return bool(cls.default(envvar, default).strip())
@classmethod
def required(cls, envvar):
value = environ.get(envvar)
if value is None:
raise cls.Error('required envvar {} not set'.format(envvar))
return value
@classmethod
def integer(cls, envvar, default):
value = environ.get(envvar)
if value is None:
return default
try:
return int(value)
except Exception:
raise cls.Error('cannot convert envvar {} value {} to an integer'
.format(envvar, value))
@classmethod
def custom(cls, envvar, default, parse):
value = environ.get(envvar)
if value is None:
return default
try:
return parse(value)
except Exception as e:
raise cls.Error('cannot parse envvar {} value {}'
.format(envvar, value)) from e
@classmethod
def obsolete(cls, envvars):
bad = [envvar for envvar in envvars if environ.get(envvar)]
if bad:
raise cls.Error('remove obsolete environment variables {}'
.format(bad))
def set_event_loop_policy(self):
policy_name = self.default('EVENT_LOOP_POLICY', None)
if not policy_name:
import asyncio
return asyncio.get_event_loop_policy()
elif policy_name == 'uvloop':
import uvloop
import asyncio
loop_policy = uvloop.EventLoopPolicy()
asyncio.set_event_loop_policy(loop_policy)
return loop_policy
raise self.Error('unknown event loop policy "{}"'.format(policy_name))
def cs_host(self, *, for_rpc):
"""Returns the 'host' argument to pass to asyncio's create_server
call. The result can be a single host name string, a list of
host name strings, or an empty string to bind to all interfaces.
If rpc is True the host to use for the RPC server is returned.
Otherwise the host to use for SSL/TCP servers is returned.
"""
host = self.rpc_host if for_rpc else self.host
result = [part.strip() for part in host.split(',')]
if len(result) == 1:
result = result[0]
# An empty result indicates all interfaces, which we do not
        # permit for an RPC server.
if for_rpc and not result:
result = 'localhost'
if result == 'localhost':
# 'localhost' resolves to ::1 (ipv6) on many systems, which fails on default setup of
# docker, using 127.0.0.1 instead forces ipv4
result = '127.0.0.1'
return result
def sane_max_sessions(self):
"""Return the maximum number of sessions to permit. Normally this
        is MAX_SESSIONS. However, to prevent open file exhaustion, adjust
downwards if running with a small open file rlimit."""
env_value = self.integer('MAX_SESSIONS', 1000)
nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# We give the DB 250 files; allow ElectrumX 100 for itself
value = max(0, min(env_value, nofile_limit - 350))
if value < env_value:
self.logger.warning('lowered maximum sessions from {:,d} to {:,d} '
'because your open file limit is {:,d}'
.format(env_value, value, nofile_limit))
return value
def clearnet_identity(self):
host = self.default('REPORT_HOST', None)
if host is None:
return None
try:
ip = ip_address(host)
except ValueError:
bad = (not lib_util.is_valid_hostname(host)
or host.lower() == 'localhost')
else:
bad = (ip.is_multicast or ip.is_unspecified
or (ip.is_private and self.peer_announce))
if bad:
raise self.Error('"{}" is not a valid REPORT_HOST'.format(host))
tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port) or None
ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port) or None
if tcp_port == ssl_port:
raise self.Error('REPORT_TCP_PORT and REPORT_SSL_PORT '
'both resolve to {}'.format(tcp_port))
return NetIdentity(
host,
tcp_port,
ssl_port,
''
)
def tor_identity(self, clearnet):
host = self.default('REPORT_HOST_TOR', None)
if host is None:
return None
if not host.endswith('.onion'):
raise self.Error('tor host "{}" must end with ".onion"'
.format(host))
def port(port_kind):
"""Returns the clearnet identity port, if any and not zero,
otherwise the listening port."""
result = 0
if clearnet:
result = getattr(clearnet, port_kind)
return result or getattr(self, port_kind)
tcp_port = self.integer('REPORT_TCP_PORT_TOR',
port('tcp_port')) or None
ssl_port = self.integer('REPORT_SSL_PORT_TOR',
port('ssl_port')) or None
if tcp_port == ssl_port:
raise self.Error('REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR '
'both resolve to {}'.format(tcp_port))
return NetIdentity(
host,
tcp_port,
ssl_port,
'_tor',
)
def hosts_dict(self):
return {identity.host: {'tcp_port': identity.tcp_port,
'ssl_port': identity.ssl_port}
for identity in self.identities}
def peer_discovery_enum(self):
pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
if pd in ('off', ''):
return self.PD_OFF
elif pd == 'self':
return self.PD_SELF
else:
return self.PD_ON
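# Minimal sketch (not part of the original module): demonstrate the classmethod
# parsers on made-up variable names. EXAMPLE_FLAG and EXAMPLE_PORT are
# placeholders, not settings the server actually reads.
def _env_parsing_example():
    environ['EXAMPLE_FLAG'] = 'Yes'
    environ['EXAMPLE_PORT'] = '50001'
    flag = Env.boolean('EXAMPLE_FLAG', False)   # -> True
    port = Env.integer('EXAMPLE_PORT', None)    # -> 50001
    return flag, port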
| 39.215139
| 97
| 0.601951
|
953b9c3b3d6e8f19fec391442c2a6d1f5b8b6d52
| 901
|
bzl
|
Python
|
google/cloud/secretmanager/google_cloud_cpp_secretmanager_mocks.bzl
|
joezqren/google-cloud-cpp
|
325d312b0a21569f3c57515aec7d91f3540d3b48
|
[
"Apache-2.0"
] | 299
|
2019-01-31T12:17:56.000Z
|
2022-03-30T15:46:15.000Z
|
google/cloud/secretmanager/google_cloud_cpp_secretmanager_mocks.bzl
|
joezqren/google-cloud-cpp
|
325d312b0a21569f3c57515aec7d91f3540d3b48
|
[
"Apache-2.0"
] | 6,560
|
2019-01-29T03:15:15.000Z
|
2022-03-31T23:58:48.000Z
|
google/cloud/secretmanager/google_cloud_cpp_secretmanager_mocks.bzl
|
joezqren/google-cloud-cpp
|
325d312b0a21569f3c57515aec7d91f3540d3b48
|
[
"Apache-2.0"
] | 253
|
2019-02-07T01:18:13.000Z
|
2022-03-30T17:21:10.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
"""Automatically generated source lists for google_cloud_cpp_secretmanager_mocks - DO NOT EDIT."""
google_cloud_cpp_secretmanager_mocks_hdrs = [
"mocks/mock_secret_manager_connection.h",
]
google_cloud_cpp_secretmanager_mocks_srcs = [
]
| 36.04
| 98
| 0.773585
|
7860a19c40ae1247b273aec344c6810f65fa4c45
| 305,605
|
py
|
Python
|
PW_from_gps_figures.py
|
mfkiwl/PW_from_GPS
|
fa0b0b9e1325a055ce884f79c14d24148348886b
|
[
"MIT"
] | 4
|
2019-09-09T19:47:49.000Z
|
2021-12-29T18:12:26.000Z
|
PW_from_gps_figures.py
|
mfkiwl/PW_from_GPS
|
fa0b0b9e1325a055ce884f79c14d24148348886b
|
[
"MIT"
] | null | null | null |
PW_from_gps_figures.py
|
mfkiwl/PW_from_GPS
|
fa0b0b9e1325a055ce884f79c14d24148348886b
|
[
"MIT"
] | 6
|
2019-12-06T02:27:05.000Z
|
2021-12-27T16:32:54.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 17:28:04 2020
@author: shlomi
"""
from PW_paths import work_yuval
from matplotlib import rcParams
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
from PW_paths import savefig_path
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from PW_stations import produce_geo_gnss_solved_stations
tela_results_path = work_yuval / 'GNSS_stations/tela/rinex/30hr/results'
tela_solutions = work_yuval / 'GNSS_stations/tela/gipsyx_solutions'
sound_path = work_yuval / 'sounding'
phys_soundings = sound_path / 'bet_dagan_phys_sounding_2007-2019.nc'
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
dem_path = work_yuval / 'AW3D30'
era5_path = work_yuval / 'ERA5'
hydro_path = work_yuval / 'hydro'
ceil_path = work_yuval / 'ceilometers'
aero_path = work_yuval / 'AERONET'
climate_path = work_yuval / 'climate'
df_gnss = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
st_order_climate = [x for x in df_gnss.dropna().sort_values(
['groups_climate', 'lat', 'lon'], ascending=[1, 0, 0]).index]
rc = {
'font.family': 'serif',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large'}
for key, val in rc.items():
rcParams[key] = val
# sns.set(rc=rc, style='white')
seasonal_colors = {'DJF': 'tab:blue',
'SON': 'tab:red',
'JJA': 'tab:green',
'MAM': 'tab:orange',
'Annual': 'tab:purple'}
def get_twin(ax, axis):
assert axis in ("x", "y")
siblings = getattr(ax, f"get_shared_{axis}_axes")().get_siblings(ax)
for sibling in siblings:
if sibling.bbox.bounds == ax.bbox.bounds and sibling is not ax:
return sibling
return None
def sci_notation(num, decimal_digits=1, precision=None, exponent=None):
"""
Returns a string representation of the scientific
notation of the given number formatted for use with
LaTeX or Mathtext, with specified number of significant
decimal digits and precision (number of decimal digits
to show). The exponent to be used can also be specified
explicitly.
"""
from math import floor, log10
if exponent is None:
exponent = int(floor(log10(abs(num))))
coeff = round(num / float(10**exponent), decimal_digits)
if precision is None:
precision = decimal_digits
return r"${0:.{2}f}\cdot10^{{{1:d}}}$".format(coeff, exponent, precision)
def utm_from_lon(lon):
"""
utm_from_lon - UTM zone for a longitude
    Not right for some polar regions (Norway, Svalbard, Antarctica)
:param float lon: longitude
:return: UTM zone number
:rtype: int
"""
from math import floor
return floor((lon + 180) / 6) + 1
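# Quick sketch (not in the original script): longitude 35.0 degrees E, an example
# value roughly over Israel, falls in UTM zone 36 by the formula above.
def _utm_from_lon_example():
    return utm_from_lon(35.0)  # -> 36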
def scale_bar(ax, proj, length, location=(0.5, 0.05), linewidth=3,
units='km', m_per_unit=1000, bounds=None):
"""
http://stackoverflow.com/a/35705477/1072212
ax is the axes to draw the scalebar on.
proj is the projection the axes are in
    location is the center of the scalebar in axis coordinates, i.e. 0.5 is the middle of the plot
length is the length of the scalebar in km.
linewidth is the thickness of the scalebar.
units is the name of the unit
m_per_unit is the number of meters in a unit
"""
import cartopy.crs as ccrs
from matplotlib import patheffects
# find lat/lon center to find best UTM zone
try:
x0, x1, y0, y1 = ax.get_extent(proj.as_geodetic())
except AttributeError:
if bounds is not None:
x0, x1, y0, y1 = bounds
# Projection in metres
utm = ccrs.UTM(utm_from_lon((x0+x1)/2))
# Get the extent of the plotted area in coordinates in metres
x0, x1, y0, y1 = ax.get_extent(utm)
# Turn the specified scalebar location into coordinates in metres
sbcx, sbcy = x0 + (x1 - x0) * location[0], y0 + (y1 - y0) * location[1]
# Generate the x coordinate for the ends of the scalebar
bar_xs = [sbcx - length * m_per_unit/2, sbcx + length * m_per_unit/2]
# buffer for scalebar
buffer = [patheffects.withStroke(linewidth=5, foreground="w")]
# Plot the scalebar with buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, path_effects=buffer)
# buffer for text
buffer = [patheffects.withStroke(linewidth=3, foreground="w")]
# Plot the scalebar label
t0 = ax.text(sbcx, sbcy, str(length) + ' ' + units, transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
left = x0+(x1-x0)*0.05
# Plot the N arrow
t1 = ax.text(left, sbcy, u'\u25B2\nN', transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
# Plot the scalebar without buffer, in case covered by text buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, zorder=3)
return
@ticker.FuncFormatter
def lon_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$W'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$E'.format(abs(x))
elif x == 0:
return r'0$\degree$'
@ticker.FuncFormatter
def lat_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$S'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$N'.format(abs(x))
elif x == 0:
return r'0$\degree$'
def align_yaxis_np(ax1, ax2):
"""Align zeros of the two axes, zooming them out by same ratio"""
import numpy as np
axes = np.array([ax1, ax2])
extrema = np.array([ax.get_ylim() for ax in axes])
tops = extrema[:,1] / (extrema[:,1] - extrema[:,0])
# Ensure that plots (intervals) are ordered bottom to top:
if tops[0] > tops[1]:
axes, extrema, tops = [a[::-1] for a in (axes, extrema, tops)]
# How much would the plot overflow if we kept current zoom levels?
tot_span = tops[1] + 1 - tops[0]
extrema[0,1] = extrema[0,0] + tot_span * (extrema[0,1] - extrema[0,0])
extrema[1,0] = extrema[1,1] + tot_span * (extrema[1,0] - extrema[1,1])
[axes[i].set_ylim(*extrema[i]) for i in range(2)]
# def align_yaxis(ax1, v1, ax2, v2):
# """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
# _, y1 = ax1.transData.transform((0, v1))
# _, y2 = ax2.transData.transform((0, v2))
# inv = ax2.transData.inverted()
# _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
# miny, maxy = ax2.get_ylim()
# ax2.set_ylim(miny+dy, maxy+dy)
def get_legend_labels_handles_title_seaborn_histplot(ax):
old_legend = ax.legend_
handles = old_legend.legendHandles
labels = [t.get_text() for t in old_legend.get_texts()]
title = old_legend.get_title().get_text()
return handles, labels, title
def alignYaxes(axes, align_values=None):
'''Align the ticks of multiple y axes
Args:
axes (list): list of axes objects whose yaxis ticks are to be aligned.
Keyword Args:
align_values (None or list/tuple): if not None, should be a list/tuple
of floats with same length as <axes>. Values in <align_values>
define where the corresponding axes should be aligned up. E.g.
[0, 100, -22.5] means the 0 in axes[0], 100 in axes[1] and -22.5
in axes[2] would be aligned up. If None, align (approximately)
the lowest ticks in all axes.
Returns:
new_ticks (list): a list of new ticks for each axis in <axes>.
A new sets of ticks are computed for each axis in <axes> but with equal
length.
'''
from matplotlib.pyplot import MaxNLocator
import numpy as np
nax = len(axes)
ticks = [aii.get_yticks() for aii in axes]
if align_values is None:
aligns = [ticks[ii][0] for ii in range(nax)]
else:
if len(align_values) != nax:
raise Exception(
"Length of <axes> doesn't equal that of <align_values>.")
aligns = align_values
bounds = [aii.get_ylim() for aii in axes]
# align at some points
ticks_align = [ticks[ii]-aligns[ii] for ii in range(nax)]
# scale the range to 1-100
ranges = [tii[-1]-tii[0] for tii in ticks]
lgs = [-np.log10(rii)+2. for rii in ranges]
igs = [np.floor(ii) for ii in lgs]
log_ticks = [ticks_align[ii]*(10.**igs[ii]) for ii in range(nax)]
# put all axes ticks into a single array, then compute new ticks for all
comb_ticks = np.concatenate(log_ticks)
comb_ticks.sort()
locator = MaxNLocator(nbins='auto', steps=[1, 2, 2.5, 3, 4, 5, 8, 10])
new_ticks = locator.tick_values(comb_ticks[0], comb_ticks[-1])
new_ticks = [new_ticks/10.**igs[ii] for ii in range(nax)]
new_ticks = [new_ticks[ii]+aligns[ii] for ii in range(nax)]
# find the lower bound
idx_l = 0
for i in range(len(new_ticks[0])):
if any([new_ticks[jj][i] > bounds[jj][0] for jj in range(nax)]):
idx_l = i-1
break
# find the upper bound
idx_r = 0
for i in range(len(new_ticks[0])):
if all([new_ticks[jj][i] > bounds[jj][1] for jj in range(nax)]):
idx_r = i
break
# trim tick lists by bounds
new_ticks = [tii[idx_l:idx_r+1] for tii in new_ticks]
# set ticks for each axis
for axii, tii in zip(axes, new_ticks):
axii.set_yticks(tii)
return new_ticks
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
adjust_yaxis(ax2, (y1 - y2) / 2, v2)
adjust_yaxis(ax1, (y2 - y1) / 2, v1)
def adjust_yaxis(ax, ydif, v):
"""shift axis ax by ydiff, maintaining point v at the same location"""
inv = ax.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, ydif))
miny, maxy = ax.get_ylim()
miny, maxy = miny - v, maxy - v
if -miny > maxy or (-miny == maxy and dy > 0):
nminy = miny
nmaxy = miny * (maxy + dy) / (miny + dy)
else:
nmaxy = maxy
nminy = maxy * (miny + dy) / (maxy + dy)
ax.set_ylim(nminy + v, nmaxy + v)
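# Minimal sketch (not in the original script): align the zero lines of a primary
# axis and its twin. The plotted values are arbitrary example data.
def _align_yaxis_example():
    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()
    ax1.plot([0, 1, 2], [-1.0, 0.5, 2.0], color='tab:blue')
    ax2.plot([0, 1, 2], [10.0, -3.0, 7.0], color='tab:red')
    align_yaxis(ax1, 0, ax2, 0)
    return fig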
def qualitative_cmap(n=2):
import matplotlib.colors as mcolors
if n == 2:
colorsList = [mcolors.BASE_COLORS['r'], mcolors.BASE_COLORS['g']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 4:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 5:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m'],
mcolors.BASE_COLORS['b']]
cmap = mcolors.ListedColormap(colorsList)
return cmap
def caption(text, color='blue', **kwargs):
from termcolor import colored
print(colored('Caption:', color, attrs=['bold'], **kwargs))
print(colored(text, color, attrs=['bold'], **kwargs))
return
def adjust_lightness(color, amount=0.5):
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
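# Minimal sketch (not in the original script): darken matplotlib's named colour
# 'tab:blue' by halving its HLS lightness; an (r, g, b) tuple comes back.
def _adjust_lightness_example():
    return adjust_lightness('tab:blue', amount=0.5)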
def produce_colors_for_pwv_station(scope='annual', zebra=False,
as_dict=False, as_cat_dict=False):
import pandas as pd
stns = group_sites_to_xarray(scope=scope)
cdict = {'coastal': 'tab:blue',
'highland': 'tab:green',
'eastern': 'tab:orange'}
if as_cat_dict:
return cdict
# for grp, color in cdict.copy().items():
# cdict[grp] = to_rgba(get_named_colors_mapping()[
# color], alpha=1)
ds = stns.to_dataset('group')
colors = []
for group in ds:
sts = ds[group].dropna('GNSS').values
for i, st in enumerate(sts):
color = cdict.get(group)
if zebra:
if i % 2 != 0:
# rgba = np.array(rgba)
# rgba[-1] = 0.5
color = adjust_lightness(color, 0.5)
colors.append(color)
# colors = [item for sublist in colors for item in sublist]
stns = stns.T.values.ravel()
stns = stns[~pd.isnull(stns)]
if as_dict:
colors = dict(zip(stns, colors))
return colors
def fix_time_axis_ticks(ax, limits=None, margin=15):
import pandas as pd
import matplotlib.dates as mdates
if limits is not None:
ax.set_xlim(*pd.to_datetime(limits))
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(mdates.YearLocator())
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(mdates.MonthLocator())
# locator = mdates.AutoDateLocator(minticks=3, maxticks=7)
# formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
return ax
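# Hedged usage sketch: shows how fix_time_axis_ticks above is meant to be
# applied to an axis that already holds a daily time series. The random series
# and the date range are placeholders, not data used elsewhere in this module.
def _example_fix_time_axis_ticks_usage():
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    times = pd.date_range('2015-01-01', '2019-12-31', freq='D')
    ts = pd.Series(np.random.randn(len(times)).cumsum(), index=times)
    fig, ax = plt.subplots()
    ax.plot(ts.index, ts.values)
    # major ticks on years, minor ticks on months, x-limits clipped to 2016-2019:
    ax = fix_time_axis_ticks(ax, limits=['2016-01-01', '2019-12-31'])
    return ax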
def plot_qflux_climatotlogy_israel(path=era5_path, save=True, reduce='mean',
plot_type='uv'):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
ds = xr.load_dataset(path / 'ERA5_UVQ_mm_israel_1979-2020.nc')
ds = ds.sel(expver=1).reset_coords(drop=True)
if plot_type == 'uv':
f1 = ds['q'] * ds['u']
f2 = ds['q'] * ds['v']
elif plot_type == 'md':
qu = ds['q'] * ds['u']
qv = ds['q'] * ds['v']
f1 = np.sqrt(qu**2 + qv**2)
f2 = np.rad2deg(np.arctan2(qv, qu))
if reduce == 'mean':
f1_clim = f1.groupby('time.month').mean().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').mean().mean(
'longitude').mean('latitude')
center = 0
cmap = 'bwr'
elif reduce == 'std':
f1_clim = f1.groupby('time.month').std().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').std().mean(
'longitude').mean('latitude')
center = None
cmap = 'viridis'
ds_clim = xr.concat([f1_clim, f2_clim], 'direction')
ds_clim['direction'] = ['zonal', 'meridional']
if plot_type == 'md':
fg, axes = plt.subplots(1, 2, figsize=(14, 7))
f1_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(levels=41,
yincrease=False,
cmap=cmap,
center=center, ax=axes[0])
f2_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(levels=41,
yincrease=False,
cmap=cmap,
center=center, ax=axes[1])
else:
fg = ds_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(
levels=41,
yincrease=False,
cmap=cmap,
center=center,
col='direction',
figsize=(
15,
6))
fg.fig.suptitle('Moisture flux climatology over Israel')
# fig, axes = plt.subplots(1, 2, figsize=(15, 5))
# qu_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[0], cmap='bwr', center=0)
# qv_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[1], cmap='bwr', center=0)
fg.fig.subplots_adjust(top=0.923,
bottom=0.102,
left=0.058,
right=0.818,
hspace=0.2,
wspace=0.045)
if save:
filename = 'moisture_clim_from_ERA5_over_israel.png'
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return fg
def plot_mean_std_count(da_ts, time_reduce='hour', reduce='mean',
count_factor=1):
    """plot mean, std and count of Xarray dataarray time-series"""
    import xarray as xr
    import seaborn as sns
cmap = sns.color_palette("colorblind", 2)
time_dim = list(set(da_ts.dims))[0]
grp = '{}.{}'.format(time_dim, time_reduce)
if reduce == 'mean':
mean = da_ts.groupby(grp).mean()
elif reduce == 'median':
mean = da_ts.groupby(grp).median()
std = da_ts.groupby(grp).std()
mean_plus_std = mean + std
mean_minus_std = mean - std
count = da_ts.groupby(grp).count()
if isinstance(da_ts, xr.Dataset):
dvars = [x for x in da_ts.data_vars.keys()]
assert len(dvars) == 2
secondary_y = dvars[1]
else:
secondary_y = None
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 15))
mean_df = mean.to_dataframe()
if secondary_y is not None:
axes[0] = mean_df[dvars[0]].plot(
ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
ax2mean = mean_df[secondary_y].plot(
ax=axes[0],
linewidth=2.0,
marker='s',
color=cmap[1],
secondary_y=True)
h1, l1 = axes[0].get_legend_handles_labels()
h2, l2 = axes[0].right_ax.get_legend_handles_labels()
handles = h1 + h2
labels = l1 + l2
axes[0].legend(handles, labels)
axes[0].fill_between(mean_df.index.values,
mean_minus_std[dvars[0]].values,
mean_plus_std[dvars[0]].values,
color=cmap[0],
alpha=0.5)
ax2mean.fill_between(
mean_df.index.values,
mean_minus_std[secondary_y].values,
mean_plus_std[secondary_y].values,
color=cmap[1],
alpha=0.5)
ax2mean.tick_params(axis='y', colors=cmap[1])
else:
mean_df.plot(ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
axes[0].fill_between(
mean_df.index.values,
mean_minus_std.values,
mean_plus_std.values,
color=cmap[0],
alpha=0.5)
axes[0].grid()
count_df = count.to_dataframe() / count_factor
count_df.plot.bar(ax=axes[1], rot=0)
axes[0].xaxis.set_tick_params(labelbottom=True)
axes[0].tick_params(axis='y', colors=cmap[0])
fig.tight_layout()
if secondary_y is not None:
return axes, ax2mean
else:
return axes
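# Hedged usage sketch for plot_mean_std_count above: a synthetic hourly xarray
# DataArray (named 'pwv' for illustration only) is grouped by hour of day,
# which is the typical use of this helper in the module.
def _example_plot_mean_std_count():
    import numpy as np
    import pandas as pd
    import xarray as xr
    time = pd.date_range('2018-01-01', periods=24 * 365, freq='H')
    vals = 20.0 + 3.0 * np.sin(2 * np.pi * time.hour.values / 24) + np.random.randn(time.size)
    da = xr.DataArray(vals, dims=['time'], coords={'time': time}, name='pwv')
    return plot_mean_std_count(da, time_reduce='hour', reduce='mean')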
def plot_seasonal_histogram(da, dim='sound_time', xlim=None, xlabel=None,
suptitle=''):
fig_hist, axs = plt.subplots(2, 2, sharex=False, sharey=True,
figsize=(10, 8))
seasons = ['DJF', 'MAM', 'JJA', 'SON']
cmap = sns.color_palette("colorblind", 4)
for i, ax in enumerate(axs.flatten()):
da_season = da.sel(
{dim: da['{}.season'.format(dim)] == seasons[i]}).dropna(dim)
ax = sns.distplot(da_season, ax=ax, norm_hist=False,
color=cmap[i], hist_kws={'edgecolor': 'k'},
axlabel=xlabel,
label=seasons[i])
ax.set_xlim(xlim)
ax.legend()
# axes.set_xlabel('MLH [m]')
ax.set_ylabel('Frequency')
fig_hist.suptitle(suptitle)
fig_hist.tight_layout()
return axs
def plot_two_histograms_comparison(x, y, bins=None, labels=['x', 'y'],
ax=None, colors=['b', 'r']):
import numpy as np
import matplotlib.pyplot as plt
x_w = np.empty(x.shape)
x_w.fill(1/x.shape[0])
y_w = np.empty(y.shape)
y_w.fill(1/y.shape[0])
if ax is None:
fig, ax = plt.subplots()
ax.hist([x, y], bins=bins, weights=[x_w, y_w], color=colors,
label=labels)
ax.legend()
return ax
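# Hedged usage sketch: plot_two_histograms_comparison above weights each sample
# by 1/N, so two samples of different lengths are drawn on a comparable
# relative-frequency scale. The arrays below are synthetic placeholders.
def _example_two_histograms_usage():
    import numpy as np
    x = np.random.normal(20.0, 4.0, size=5000)   # e.g., a long record
    y = np.random.normal(22.0, 5.0, size=800)    # e.g., a shorter record
    bins = np.linspace(0.0, 45.0, 46)
    return plot_two_histograms_comparison(x, y, bins=bins,
                                          labels=['sample x', 'sample y'])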
def plot_diurnal_wind_hodograph(path=ims_path, station='TEL-AVIV-COAST',
season=None, cmax=None, ax=None):
import xarray as xr
from metpy.plots import Hodograph
# import matplotlib
import numpy as np
colorbar = False
# from_list = matplotlib.colors.LinearSegmentedColormap.from_list
cmap = plt.cm.get_cmap('hsv', 24)
# cmap = from_list(None, plt.cm.jet(range(0,24)), 24)
U = xr.open_dataset(path / 'IMS_U_israeli_10mins.nc')
V = xr.open_dataset(path / 'IMS_V_israeli_10mins.nc')
u_sta = U[station]
v_sta = V[station]
u_sta.load()
v_sta.load()
if season is not None:
print('{} season selected'.format(season))
u_sta = u_sta.sel(time=u_sta['time.season'] == season)
v_sta = v_sta.sel(time=v_sta['time.season'] == season)
u = u_sta.groupby('time.hour').mean()
v = v_sta.groupby('time.hour').mean()
if ax is None:
colorbar = True
fig, ax = plt.subplots()
    if cmax is None:
        max_uv = max(max(u.values), max(v.values)) + 1
    else:
        max_uv = cmax
h = Hodograph(component_range=max_uv, ax=ax)
h.add_grid(increment=0.5)
# hours = np.arange(0, 25)
lc = h.plot_colormapped(u, v, u.hour, cmap=cmap,
linestyle='-', linewidth=2)
#ticks = np.arange(np.min(hours), np.max(hours))
# cb = fig.colorbar(lc, ticks=range(0,24), label='Time of Day [UTC]')
if colorbar:
cb = ax.figure.colorbar(lc, ticks=range(
0, 24), label='Time of Day [UTC]')
# cb.ax.tick_params(length=0)
if season is None:
ax.figure.suptitle('{} diurnal wind Hodograph'.format(station))
else:
ax.figure.suptitle(
'{} diurnal wind Hodograph {}'.format(station, season))
ax.set_xlabel('North')
ax.set_ylabel('East')
ax.set_title('South')
ax2 = ax.twinx()
ax2.tick_params(axis='y', right=False, labelright=False)
ax2.set_ylabel('West')
# axcb = fig.colorbar(lc)
return ax
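# Hedged sketch of the metpy Hodograph usage in plot_diurnal_wind_hodograph
# above, with synthetic hourly u/v winds standing in for the IMS station files.
# Nothing here touches the real data paths; the helper name is illustrative.
def _example_hodograph_sketch():
    import numpy as np
    import matplotlib.pyplot as plt
    from metpy.plots import Hodograph
    hours = np.arange(24)
    u = 2.0 * np.cos(2 * np.pi * hours / 24)
    v = 2.0 * np.sin(2 * np.pi * hours / 24)
    fig, ax = plt.subplots()
    h = Hodograph(component_range=3.0, ax=ax)
    h.add_grid(increment=0.5)
    lc = h.plot_colormapped(u, v, hours, cmap=plt.cm.get_cmap('hsv', 24))
    fig.colorbar(lc, ticks=range(0, 24), label='Time of Day [UTC]')
    return ax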
def plot_MLR_GNSS_PW_harmonics_facetgrid(path=work_yuval, season='JJA',
n_max=2, ylim=None, scope='diurnal',
save=True, era5=False, leg_size=15):
"""
Parameters
----------
path : TYPE, optional
DESCRIPTION. The default is work_yuval.
season : TYPE, optional
DESCRIPTION. The default is 'JJA'.
n_max : TYPE, optional
DESCRIPTION. The default is 2.
ylim : TYPE, optional
the ylimits of each panel use [-6,8] for annual. The default is None.
scope : TYPE, optional
DESCRIPTION. The default is 'diurnal'.
save : TYPE, optional
DESCRIPTION. The default is True.
era5 : TYPE, optional
DESCRIPTION. The default is False.
leg_size : TYPE, optional
DESCRIPTION. The default is 15.
Returns
-------
None.
"""
import xarray as xr
from aux_gps import run_MLR_harmonics
from matplotlib.ticker import AutoMinorLocator
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
sns.set_style('whitegrid')
sns.set_style('ticks')
geo = produce_geo_gnss_solved_stations(add_distance_to_coast=True, plot=False)
if scope == 'diurnal':
cunits = 'cpd'
ticks = np.arange(0, 23, 3)
xlabel = 'Hour of day [UTC]'
elif scope == 'annual':
cunits = 'cpy'
ticks = np.arange(1, 13, 1)
xlabel = 'month'
print('producing {} harmonics plot.'.format(scope))
if era5:
harmonics = xr.load_dataset(path / 'GNSS_PW_era5_harmonics_{}.nc'.format(scope))
else:
harmonics = xr.load_dataset(path / 'GNSS_PW_harmonics_{}.nc'.format(scope))
# sites = sorted(list(set([x.split('_')[0] for x in harmonics])))
# da = xr.DataArray([x for x in range(len(sites))], dims='GNSS')
# da['GNSS'] = sites
sites = group_sites_to_xarray(upper=False, scope=scope)
sites_flat = [x for x in sites.values.flatten()]
da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
da['GNSS'] = [x for x in range(len(da))]
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
site = sites.values[i, j]
ax = fg.axes[i, j]
try:
harm_site = harmonics[[x for x in harmonics if site in x]]
if site in ['nrif']:
leg_loc = 'upper center'
elif site in ['yrcm', 'ramo']:
leg_loc = 'lower center'
# elif site in ['katz']:
# leg_loc = 'upper right'
else:
leg_loc = None
if scope == 'annual':
leg_loc = 'upper left'
ax, handles, labels = run_MLR_harmonics(harm_site, season=season,
cunits=cunits,
n_max=n_max, plot=True, ax=ax,
legend_loc=leg_loc, ncol=1,
legsize=leg_size, lw=2.5,
legend_S_only=True)
ax.set_xlabel(xlabel, fontsize=16)
if ylim is not None:
ax.set_ylim(*ylim)
ax.tick_params(axis='x', which='major', labelsize=18)
# if scope == 'diurnal':
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(axis='y', which='major', labelsize=18)
ax.yaxis.tick_left()
ax.xaxis.set_ticks(ticks)
ax.grid()
ax.set_title('')
ax.set_ylabel('')
ax.grid(axis='y', which='minor', linestyle='--')
# get this for upper legend:
# handles, labels = ax.get_legend_handles_labels()
if scope == 'annual':
site_label = '{} ({:.0f})'.format(
site.upper(), geo.loc[site].alt)
label_coord = [0.52, 0.87]
fs = 18
elif scope == 'diurnal':
site_label = site.upper()
label_coord = [0.1, 0.85]
fs = 20
ax.text(*label_coord, site_label,
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes, fontsize=fs)
if j == 0:
ax.set_ylabel('PWV anomalies [mm]', fontsize=16)
# if j == 0:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# elif j == 1:
# if i>5:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
except TypeError:
print('{}, {} axis off'.format(i, j))
ax.set_axis_off()
# for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
# harm_site = harmonics[[x for x in harmonics if sites[i] in x]]
# if site in ['elat', 'nrif']:
# loc = 'upper center'
# text = 0.1
# elif site in ['elro', 'yrcm', 'ramo', 'slom', 'jslm']:
# loc = 'upper right'
# text = 0.1
# else:
# loc = None
# text = 0.1
# ax = run_MLR_diurnal_harmonics(harm_site, season=season, n_max=n_max, plot=True, ax=ax, legend_loc=loc)
# ax.set_title('')
# ax.set_ylabel('PW anomalies [mm]')
# if ylim is not None:
# ax.set_ylim(ylim[0], ylim[1])
# ax.text(text, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
# for i, ax in enumerate(fg.axes.flatten()):
# if i > (da.GNSS.telasize-1):
# ax.set_axis_off()
# pass
# add upper legend for all factes:
S_labels = labels[:-2]
S_labels = [x.split(' ')[0] for x in S_labels]
last_label = 'Mean PWV anomalies'
sum_label = labels[-2].split("'")[1]
S_labels.append(sum_label)
S_labels.append(last_label)
fg.fig.legend(handles=handles, labels=S_labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.032,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
if save:
if era5:
filename = 'pw_era5_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
else:
filename = 'pw_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_gustiness(path=work_yuval, ims_path=ims_path, site='tela',
ims_site='HAIFA-TECHNION', season='JJA', month=None, pts=7,
ax=None):
import xarray as xr
import numpy as np
g = xr.open_dataset(
ims_path / 'IMS_G{}_israeli_10mins_daily_anoms.nc'.format(pts))[ims_site]
g.load()
    if season is not None and month is not None:
        raise ValueError('Please pick either season or month, not both.')
    if season is not None:
        g = g.sel(time=g['time.season'] == season)
        label = 'Gustiness {} IMS station in {} season'.format(
            site, season)
    elif month is not None:
        g = g.sel(time=g['time.month'] == month)
        label = 'Gustiness {} IMS station in {} month'.format(
            site, month)
# date = groupby_date_xr(g)
# # g_anoms = g.groupby('time.month') - g.groupby('time.month').mean('time')
# g_anoms = g.groupby(date) - g.groupby(date).mean('time')
# g_anoms = g_anoms.reset_coords(drop=True)
G = g.groupby('time.hour').mean('time') * 100.0
if ax is None:
fig, ax = plt.subplots(figsize=(16, 8))
Gline = G.plot(ax=ax, color='b', marker='o', label='Gustiness')
ax.set_title(label)
ax.axhline(0, color='b', linestyle='--')
ax.set_ylabel('Gustiness anomalies [dimensionless]', color='b')
ax.set_xlabel('Time of day [UTC]')
# ax.set_xticks(np.arange(0, 24, step=1))
ax.yaxis.label.set_color('b')
ax.tick_params(axis='y', colors='b')
ax.xaxis.set_ticks(np.arange(0, 23, 3))
ax.grid()
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_hourly_anoms_thresh_50_homogenized.nc')[site]
    pw = pw.load().dropna('time')
if season is not None:
pw = pw.sel(time=pw['time.season'] == season)
elif month is not None:
pw = pw.sel(time=pw['time.month'] == month)
# date = groupby_date_xr(pw)
# pw = pw.groupby(date) - pw.groupby(date).mean('time')
# pw = pw.reset_coords(drop=True)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
PWline = pw.plot.line(ax=axpw, color='tab:green',
marker='s', label='PW ({})'.format(season))
axpw.axhline(0, color='k', linestyle='--')
lns = Gline + PWline
axpw.set_ylabel('PW anomalies [mm]')
align_yaxis(ax, 0, axpw, 0)
return lns
def plot_gustiness_facetgrid(path=work_yuval, ims_path=ims_path,
season='JJA', month=None, save=True):
import xarray as xr
gnss_ims_dict = {
'alon': 'ASHQELON-PORT', 'bshm': 'HAIFA-TECHNION', 'csar': 'HADERA-PORT',
'tela': 'TEL-AVIV-COAST', 'slom': 'BESOR-FARM', 'kabr': 'SHAVE-ZIYYON',
'nzrt': 'DEIR-HANNA', 'katz': 'GAMLA', 'elro': 'MEROM-GOLAN-PICMAN',
'mrav': 'MAALE-GILBOA', 'yosh': 'ARIEL', 'jslm': 'JERUSALEM-GIVAT-RAM',
'drag': 'METZOKE-DRAGOT', 'dsea': 'SEDOM', 'ramo': 'MIZPE-RAMON-20120927',
'nrif': 'NEOT-SMADAR', 'elat': 'ELAT', 'klhv': 'SHANI',
'yrcm': 'ZOMET-HANEGEV', 'spir': 'PARAN-20060124'}
da = xr.DataArray([x for x in gnss_ims_dict.values()], dims=['GNSS'])
da['GNSS'] = [x for x in gnss_ims_dict.keys()]
to_remove = ['kabr', 'nzrt', 'katz', 'elro', 'klhv', 'yrcm', 'slom']
sites = [x for x in da['GNSS'].values if x not in to_remove]
da = da.sel(GNSS=sites)
gnss_order = ['bshm', 'mrav', 'drag', 'csar', 'yosh', 'dsea', 'tela', 'jslm',
'nrif', 'alon', 'ramo', 'elat']
df = da.to_dataframe('gnss')
da = df.reindex(gnss_order).to_xarray()['gnss']
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
lns = plot_gustiness(path=path, ims_path=ims_path,
ims_site=gnss_ims_dict[site],
site=site, season=season, month=month, ax=ax)
labs = [l.get_label() for l in lns]
if site in ['tela', 'alon', 'dsea', 'csar', 'elat', 'nrif']:
ax.legend(lns, labs, loc='upper center', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
elif site in ['drag']:
ax.legend(lns, labs, loc='upper right', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
else:
ax.legend(lns, labs, loc='best', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
ax.set_title('')
ax.set_ylabel(r'G anomalies $\times$$10^{2}$')
# ax.text(.8, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
for i, ax in enumerate(fg.axes.flatten()):
if i > (da.GNSS.size-1):
ax.set_axis_off()
pass
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.974,
bottom=0.053,
left=0.041,
right=0.955,
hspace=0.15,
wspace=0.3)
filename = 'gustiness_israeli_gnss_pw_diurnal_{}.png'.format(season)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_fft_diurnal(path=work_yuval, save=True):
import xarray as xr
import numpy as np
import matplotlib.ticker as tck
sns.set_style("whitegrid",
{'axes.grid': True,
'xtick.bottom': True,
'font.family': 'serif',
'ytick.left': True})
sns.set_context('paper')
power = xr.load_dataset(path / 'GNSS_PW_power_spectrum_diurnal.nc')
power = power.to_array('site')
sites = [x for x in power.site.values]
fg = power.plot.line(col='site', col_wrap=4,
sharex=False, figsize=(20, 18))
fg.set_xlabels('Frequency [cpd]')
fg.set_ylabels('PW PSD [dB]')
ticklabels = np.arange(0, 7)
for ax, site in zip(fg.axes.flatten(), sites):
sns.despine()
ax.set_title('')
ax.set_xticklabels(ticklabels)
# ax.tick_params(axis='y', which='minor')
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
ax.set_xlim(0, 6.5)
ax.set_ylim(70, 125)
ax.grid(True)
ax.grid(which='minor', axis='y')
ax.text(.8, .85, site.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
fg.fig.tight_layout()
filename = 'power_pw_diurnal.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_rinex_availability_with_map(path=work_yuval, gis_path=gis_path,
scope='diurnal', ims=True,
dem_path=dem_path, fontsize=18, save=True):
# TODO: add box around merged stations and removed stations
# TODO: add color map labels to stations removed and merged
from aux_gps import gantt_chart
import xarray as xr
import pandas as pd
import geopandas as gpd
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
from matplotlib.colors import ListedColormap
from aux_gps import path_glob
sns.set_style('whitegrid')
sns.set_style('ticks')
print('{} scope selected.'.format(scope))
fig = plt.figure(figsize=(20, 15))
# grid = plt.GridSpec(1, 2, width_ratios=[
# 5, 2], wspace=0.1)
grid = plt.GridSpec(1, 2, width_ratios=[
5, 3], wspace=0.05)
ax_gantt = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_map = fig.add_subplot(grid[0, 1]) # plt.subplot(122)
# fig, ax = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(20, 6))
# RINEX gantt chart:
if scope == 'diurnal':
file = path_glob(path, 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')[-1]
elif scope == 'annual':
file = path / 'GNSS_PW_monthly_thresh_50.nc'
ds = xr.open_dataset(file)
just_pw = [x for x in ds if 'error' not in x]
ds = ds[just_pw]
da = ds.to_array('station').sel(time=slice(None,'2019'))
da['station'] = [x.upper() for x in da.station.values]
ds = da.to_dataset('station')
# reorder for annual, coastal, highland and eastern:
stns = group_sites_to_xarray(scope='annual', upper=True).T.values.ravel()
stns = stns[~pd.isnull(stns)]
ds = ds[stns]
# colors:
colors = produce_colors_for_pwv_station(scope=scope, zebra=False)
title = 'Daily RINEX files availability for the Israeli GNSS stations'
ax_gantt = gantt_chart(
ds,
ax=ax_gantt,
fw='bold', grid=True,
title='', colors=colors,
pe_dict=None, fontsize=fontsize, linewidth=24, antialiased=False)
years_fmt = mdates.DateFormatter('%Y')
# ax_gantt.xaxis.set_major_locator(mdates.YearLocator())
ax_gantt.xaxis.set_major_locator(mdates.YearLocator(4))
ax_gantt.xaxis.set_minor_locator(mdates.YearLocator(1))
ax_gantt.xaxis.set_major_formatter(years_fmt)
# ax_gantt.xaxis.set_minor_formatter(years_fmt)
ax_gantt.tick_params(axis='x', labelrotation=0)
# Israel gps ims map:
ax_map = plot_israel_map(
gis_path=gis_path, ax=ax_map, ticklabelsize=fontsize)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
# scale_bar(ax_map, 50)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level',
size=fontsize, weight='normal')
cb.ax.tick_params(labelsize=fontsize)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
# removed = ['hrmn', 'nizn', 'spir']
# removed = ['hrmn']
if scope == 'diurnal':
removed = ['hrmn', 'gilb', 'lhav']
elif scope == 'annual':
removed = ['hrmn', 'gilb', 'lhav']
print('removing {} stations from map.'.format(removed))
# merged = ['klhv', 'lhav', 'mrav', 'gilb']
merged = []
gps_list = [x for x in gps.index if x not in merged and x not in removed]
gps.loc[gps_list, :].plot(ax=ax_map, edgecolor='black', marker='s',
alpha=1.0, markersize=35, facecolor="None", linewidth=2, zorder=3)
# gps.loc[removed, :].plot(ax=ax_map, color='black', edgecolor='black', marker='s',
# alpha=1.0, markersize=25, facecolor='white')
# gps.loc[merged, :].plot(ax=ax_map, color='black', edgecolor='r', marker='s',
# alpha=0.7, markersize=25)
gps_stations = gps_list # [x for x in gps.index]
# to_plot_offset = ['mrav', 'klhv', 'nzrt', 'katz', 'elro']
to_plot_offset = []
for x, y, label in zip(gps.loc[gps_stations, :].lon, gps.loc[gps_stations,
:].lat, gps.loc[gps_stations, :].index.str.upper()):
if label.lower() in to_plot_offset:
ax_map.annotate(label, xy=(x, y), xytext=(4, -6),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
else:
ax_map.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
# geo_annotate(ax_map, gps_normal_anno.lon, gps_normal_anno.lat,
# gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# geo_annotate(ax_map, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax_map, color='black', edgecolor='black',
marker='x', linewidth=2, zorder=2)
geo_annotate(ax_map, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=fontsize - 2, colorupdown=False)
# plt.legend(['GNSS \nreceiver sites',
# 'removed \nGNSS sites',
# 'merged \nGNSS sites',
# 'radiosonde\nstation'],
# loc='upper left', framealpha=0.7, fancybox=True,
# handletextpad=0.2, handlelength=1.5)
if ims:
print('getting IMS temperature stations metadata...')
ims = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims.plot(ax=ax_map, marker='o', edgecolor='tab:orange', alpha=1.0,
markersize=35, facecolor="tab:orange", zorder=1)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
print('getting solved GNSS israeli stations metadata...')
plt.legend(['GNSS \nstations',
'radiosonde\nstation', 'IMS stations'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
else:
plt.legend(['GNSS \nstations',
'radiosonde\nstation'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
fig.subplots_adjust(top=0.95,
bottom=0.11,
left=0.05,
right=0.95,
hspace=0.2,
wspace=0.2)
# plt.legend(['IMS stations', 'GNSS stations'], loc='upper left')
filename = 'rinex_israeli_gnss_map_{}.png'.format(scope)
# caption('Daily RINEX files availability for the Israeli GNSS station network at the SOPAC/GARNER website')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_means_box_plots(path=work_yuval, thresh=50, kind='box',
x='month', col_wrap=5, ylimits=None, twin=None,
twin_attrs=None,
xlimits=None, anoms=True, bins=None,
season=None, attrs_plot=True, save=True, ds_input=None):
import xarray as xr
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
attrs = [x.attrs for x in pw.data_vars.values()]
if x == 'month':
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}_homogenized.nc'.format(thresh))
# pw = pw.resample(time='MS').mean('time')
elif x == 'hour':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
if twin is not None:
twin = twin.groupby('time.month') - \
twin.groupby('time.month').mean('time')
twin = twin.reset_coords(drop=True)
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
elif x == 'day':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_daily_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
            pw = pw.groupby('time.dayofyear') - \
                pw.groupby('time.dayofyear').mean('time')
if season is not None:
if season != 'all':
print('{} season is selected'.format(season))
pw = pw.sel(time=pw['time.season'] == season)
all_seas = False
if twin is not None:
twin = twin.sel(time=twin['time.season'] == season)
else:
print('all seasons selected')
all_seas = True
else:
all_seas = False
for i, da in enumerate(pw.data_vars):
pw[da].attrs = attrs[i]
if not attrs_plot:
attrs = None
if ds_input is not None:
        # be careful!:
pw = ds_input
fg = plot_multi_box_xr(pw, kind=kind, x=x, col_wrap=col_wrap,
ylimits=ylimits, xlimits=xlimits, attrs=attrs,
bins=bins, all_seasons=all_seas, twin=twin,
twin_attrs=twin_attrs)
attrs = [x.attrs for x in pw.data_vars.values()]
for i, ax in enumerate(fg.axes.flatten()):
try:
mean_years = float(attrs[i]['mean_years'])
# print(i)
# print(mean_years)
except IndexError:
ax.set_axis_off()
pass
if kind != 'hist':
[fg.axes[x, 0].set_ylabel('PW [mm]')
for x in range(len(fg.axes[:, 0]))]
# [fg.axes[-1, x].set_xlabel('month') for x in range(len(fg.axes[-1, :]))]
fg.fig.subplots_adjust(top=0.98,
bottom=0.05,
left=0.025,
right=0.985,
hspace=0.27,
wspace=0.215)
if season is not None:
filename = 'pw_{}ly_means_{}_seas_{}.png'.format(x, kind, season)
else:
filename = 'pw_{}ly_means_{}.png'.format(x, kind)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_interannual_MLR_results(path=climate_path, fontsize=16, save=True):
import matplotlib.pyplot as plt
from climate_works import run_best_MLR
# rds = xr.load_dataset(path / 'best_MLR_interannual_gnss_pwv.nc')
model_lci, rdf_lci = run_best_MLR(plot=False, heatmap=False, keep='lci',
add_trend=True)
rds_lci = model_lci.results_
model_eofi, rdf_eofi = run_best_MLR(plot=False, heatmap=False, keep='eofi',
add_trend=False)
rds_eofi = model_eofi.results_
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 7))
origln = rds_lci['original'].plot.line('k-.', ax=axes[0], linewidth=1.5)
predln_lci = rds_lci['predict'].plot.line('b-', ax=axes[0], linewidth=1.5)
predln_eofi = rds_eofi['predict'].plot.line(
'g-', ax=axes[0], linewidth=1.5)
r2_lci = rds_lci['r2_adj'].item()
r2_eofi = rds_eofi['r2_adj'].item()
axes[0].legend(origln+predln_lci+predln_eofi, ['mean PWV (12m-mean)', 'MLR with LCI (Adj R$^2$:{:.2f})'.format(
r2_lci), 'MLR with EOFs (Adj R$^2$:{:.2f})'.format(r2_eofi)], fontsize=fontsize-2)
axes[0].grid()
axes[0].set_xlabel('')
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[0].grid(which='minor', color='k', linestyle='--')
residln_lci = rds_lci['resid'].plot.line('b-', ax=axes[1])
residln_eofi = rds_eofi['resid'].plot.line('g-', ax=axes[1])
axes[1].legend(residln_lci+residln_eofi, ['MLR with LCI',
'MLR with EOFs'], fontsize=fontsize-2)
axes[1].grid()
axes[1].set_ylabel('Residuals [mm]', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('')
years_fmt = mdates.DateFormatter('%Y')
# ax.figure.autofmt_xdate()
axes[1].xaxis.set_major_locator(mdates.YearLocator(2))
axes[1].xaxis.set_minor_locator(mdates.YearLocator(1))
axes[1].xaxis.set_major_formatter(years_fmt)
axes[1].grid(which='minor', color='k', linestyle='--')
# ax.xaxis.set_minor_locator(mdates.MonthLocator())
axes[1].figure.autofmt_xdate()
fig.tight_layout()
fig.subplots_adjust()
if save:
filename = 'pw_interannual_MLR_comparison.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_annual_pw(path=work_yuval, fontsize=20, labelsize=18, compare='uerra',
ylim=[7.5, 40], save=True, kind='violin', bins=None, ds=None,
add_temperature=False):
"""kind can be violin or hist, for violin choose ylim=7.5,40 and for hist
choose ylim=0,0.3"""
import xarray as xr
import pandas as pd
import numpy as np
from synoptic_procedures import slice_xr_with_synoptic_class
gnss_filename = 'GNSS_PW_monthly_thresh_50.nc'
# gnss_filename = 'first_climatol_try.nc'
pw = xr.load_dataset(path / gnss_filename)
df_annual = pw.to_dataframe()
hue = None
if compare is not None:
df_annual = prepare_reanalysis_monthly_pwv_to_dataframe(
path, re=compare, ds=ds)
hue = 'source'
if not add_temperature:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind=kind,
fg=None,
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, hue=hue,
save=False, bins=bins)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.029,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
filename = 'pw_annual_means_{}.png'.format(kind)
else:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind='mean_month',
fg=None, ticklabelcolor='tab:blue',
ylim=[10, 31], color='tab:blue',
fontsize=fontsize,
labelsize=labelsize, hue=None,
save=False, bins=None)
# tmm = xr.load_dataset(path / 'GNSS_TD_monthly_1996_2020.nc')
tmm = xr.load_dataset(path / 'IMS_T/GNSS_TD_daily.nc')
tmm = tmm.groupby('time.month').mean()
dftm = tmm.to_dataframe()
# dftm.columns = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
sites = group_sites_to_xarray(scope='annual')
sites_flat = sites.values.ravel()
# sites = sites[~pd.isnull(sites)]
for i, ax in enumerate(fg.axes.flat):
if pd.isnull(sites_flat[i]):
continue
twinax = ax.twinx()
twinax.plot(dftm.index.values, dftm[sites_flat[i]].values, color='tab:red',
markersize=10, marker='s', lw=1, markerfacecolor="None",
label='Temperature')
# dftm[sites[i]].plot(ax=twinax, color='r', markersize=10,
# marker='s', lw=1, markerfacecolor="None")
twinax.set_ylim(5, 37)
twinax.set_yticks(np.arange(5, 40, 10))
twinax.tick_params(axis='y', which='major', labelcolor='tab:red',
labelsize=labelsize)
if sites_flat[i] in sites.sel(group='eastern'):
twinax.set_ylabel(r'Temperature [$\degree$ C]',
fontsize=labelsize)
# fg.fig.canvas.draw()
# twinax.xaxis.set_ticks(np.arange(1, 13))
# twinax.tick_params(axis='x', which='major', labelsize=labelsize-2)
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = twinax.get_legend_handles_labels()
labels = ['PWV', 'Surface Temperature']
fg.fig.legend(handles=lines+lines2, labels=labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.97,
bottom=0.029,
left=0.049,
right=0.96,
hspace=0.15,
wspace=0.17)
filename = 'pw_annual_means_temperature.png'
if save:
if compare is not None:
filename = 'pw_annual_means_{}_with_{}.png'.format(kind, compare)
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
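# Hedged usage sketch for plot_annual_pw above, following its docstring:
# ylim=[7.5, 40] suits the violin variant and ylim=[0, 0.3] the histogram
# variant. The monthly PWV netCDF files under work_yuval are assumed to exist,
# exactly as in the rest of this module; compare=None keeps the calls minimal
# and no new data are introduced here.
def _example_plot_annual_pw_calls():
    fg_violin = plot_annual_pw(kind='violin', ylim=[7.5, 40], compare=None,
                               save=False)
    fg_hist = plot_annual_pw(kind='hist', ylim=[0, 0.3], bins=30, compare=None,
                             save=False)
    return fg_violin, fg_hist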
def plot_multi_box_xr(pw, kind='violin', x='month', sharex=False, sharey=False,
col_wrap=5, ylimits=None, xlimits=None, attrs=None,
bins=None, all_seasons=False, twin=None, twin_attrs=None):
import xarray as xr
pw = pw.to_array('station')
if twin is not None:
twin = twin.to_array('station')
fg = xr.plot.FacetGrid(pw, col='station', col_wrap=col_wrap, sharex=sharex,
sharey=sharey)
for i, (sta, ax) in enumerate(zip(pw['station'].values, fg.axes.flatten())):
pw_sta = pw.sel(station=sta).reset_coords(drop=True)
if all_seasons:
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'DJF')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='o')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'MAM')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='^')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'JJA')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='s')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'SON')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='x')
df = pw_sta.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='d')
if sta == 'nrif' or sta == 'elat':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper center', framealpha=0.5, fancybox=True)
elif sta == 'yrcm' or sta == 'ramo':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper right', framealpha=0.5, fancybox=True)
else:
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='best', framealpha=0.5, fancybox=True)
else:
# if x == 'hour':
# # remove seasonal signal:
# pw_sta = pw_sta.groupby('time.dayofyear') - pw_sta.groupby('time.dayofyear').mean('time')
# elif x == 'month':
# # remove daily signal:
# pw_sta = pw_sta.groupby('time.hour') - pw_sta.groupby('time.hour').mean('time')
df = pw_sta.to_dataframe(sta)
if twin is not None:
twin_sta = twin.sel(station=sta).reset_coords(drop=True)
twin_df = twin_sta.to_dataframe(sta)
else:
twin_df = None
if attrs is not None:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i],
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
else:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None,
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
return fg
def plot_box_df(df, x='month', title='TELA', marker='o',
ylabel=r'IWV [kg$\cdot$m$^{-2}$]', ax=None, kind='violin',
ylimits=(5, 40), xlimits=None, attrs=None, bins=None, twin_df=None,
twin_attrs=None):
# x=hour is experimental
import seaborn as sns
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import kurtosis
from scipy.stats import skew
# df = da_ts.to_dataframe()
if x == 'month':
df[x] = df.index.month
pal = sns.color_palette("Paired", 12)
elif x == 'hour':
df[x] = df.index.hour
if twin_df is not None:
twin_df[x] = twin_df.index.hour
# df[x] = df.index
pal = sns.color_palette("Paired", 12)
y = df.columns[0]
if ax is None:
fig, ax = plt.subplots()
if kind is None:
df = df.groupby(x).mean()
df.plot(ax=ax, legend=False, marker=marker)
if twin_df is not None:
twin_df = twin_df.groupby(x).mean()
twinx = ax.twinx()
twin_df.plot.line(ax=twinx, color='r', marker='s')
ax.axhline(0, color='k', linestyle='--')
if twin_attrs is not None:
twinx.set_ylabel(twin_attrs['ylabel'])
align_yaxis(ax, 0, twinx, 0)
ax.set_xlabel('Time of day [UTC]')
elif kind == 'violin':
sns.violinplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
gridsize=250, inner='quartile', scale='area')
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'box':
kwargs = dict(markerfacecolor='r', marker='o')
sns.boxplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
whis=1.0, flierprops=kwargs, showfliers=False)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'hist':
if bins is None:
bins = 15
a = df[y].dropna()
sns.distplot(ax=ax, a=a, norm_hist=True, bins=bins, axlabel='PW [mm]')
xmean = df[y].mean()
xmedian = df[y].median()
std = df[y].std()
sk = skew(df[y].dropna().values)
kurt = kurtosis(df[y].dropna().values)
# xmode = df[y].mode().median()
data_x, data_y = ax.lines[0].get_data()
ymean = np.interp(xmean, data_x, data_y)
ymed = np.interp(xmedian, data_x, data_y)
# ymode = np.interp(xmode, data_x, data_y)
ax.vlines(x=xmean, ymin=0, ymax=ymean, color='r', linestyle='--')
ax.vlines(x=xmedian, ymin=0, ymax=ymed, color='g', linestyle='-')
# ax.vlines(x=xmode, ymin=0, ymax=ymode, color='k', linestyle='-')
# ax.legend(['Mean:{:.1f}'.format(xmean),'Median:{:.1f}'.format(xmedian),'Mode:{:.1f}'.format(xmode)])
ax.legend(['Mean: {:.1f}'.format(xmean),
'Median: {:.1f}'.format(xmedian)])
ax.text(0.55, 0.45, "Std-Dev: {:.1f}\nSkewness: {:.1f}\nKurtosis: {:.1f}".format(
std, sk, kurt), transform=ax.transAxes)
ax.yaxis.set_minor_locator(MultipleLocator(5))
ax.yaxis.grid(True, which='minor', linestyle='--', linewidth=1, alpha=0.7)
ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
title = ax.get_title().split('=')[-1].strip(' ')
if attrs is not None:
mean_years = float(attrs['mean_years'])
ax.set_title('')
ax.text(.2, .85, y.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
if kind is not None:
if kind != 'hist':
ax.text(.22, .72, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
ax.yaxis.tick_left()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
if ylimits is not None:
ax.set_ylim(*ylimits)
if twin_attrs is not None:
twinx.set_ylim(*twin_attrs['ylimits'])
align_yaxis(ax, 0, twinx, 0)
if xlimits is not None:
ax.set_xlim(*xlimits)
return ax
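# Hedged sketch of the trick used in the 'hist' branch of plot_box_df above:
# the height of the plotted density at the sample mean is read off the first
# line artist with np.interp, so the vertical marker stops exactly at the
# curve. The gamma-distributed sample is synthetic, for illustration only.
def _example_density_height_at_mean():
    import numpy as np
    import seaborn as sns
    import matplotlib.pyplot as plt
    a = np.random.gamma(shape=4.0, scale=5.0, size=2000)
    fig, ax = plt.subplots()
    sns.distplot(a=a, ax=ax, norm_hist=True, bins=30)
    data_x, data_y = ax.lines[0].get_data()
    xmean = a.mean()
    ymean = np.interp(xmean, data_x, data_y)
    ax.vlines(x=xmean, ymin=0, ymax=ymean, color='r', linestyle='--')
    return ax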
def plot_means_pw(load_path=work_yuval, ims_path=ims_path, thresh=50,
col_wrap=5, means='hour', save=True):
import xarray as xr
import numpy as np
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
if means == 'hour':
# remove long term monthly means:
pw_clim = pw.groupby('time.month') - \
pw.groupby('time.month').mean('time')
pw_clim = pw_clim.groupby('time.{}'.format(means)).mean('time')
else:
pw_clim = pw.groupby('time.{}'.format(means)).mean('time')
# T = xr.load_dataset(
# ims_path /
# 'GNSS_5mins_TD_ALL_1996_2020.nc')
# T_clim = T.groupby('time.month').mean('time')
attrs = [x.attrs for x in pw.data_vars.values()]
fg = pw_clim.to_array('station').plot(col='station', col_wrap=col_wrap,
color='b', marker='o', alpha=0.7,
sharex=False, sharey=True)
col_arr = np.arange(0, len(pw_clim))
right_side = col_arr[col_wrap-1::col_wrap]
for i, ax in enumerate(fg.axes.flatten()):
title = ax.get_title().split('=')[-1].strip(' ')
try:
mean_years = float(attrs[i]['mean_years'])
ax.set_title('')
ax.text(.2, .85, title.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
ax.text(.2, .73, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
# ax_t = ax.twinx()
# T_clim['{}'.format(title)].plot(
# color='r', linestyle='dashed', marker='s', alpha=0.7,
# ax=ax_t)
# ax_t.set_ylim(0, 30)
fg.fig.canvas.draw()
# labels = [item.get_text() for item in ax_t.get_yticklabels()]
# ax_t.yaxis.set_ticklabels([])
# ax_t.tick_params(axis='y', color='r')
# ax_t.set_ylabel('')
# if i in right_side:
# ax_t.set_ylabel(r'Surface temperature [$\degree$C]', fontsize=10)
# ax_t.yaxis.set_ticklabels(labels)
# ax_t.tick_params(axis='y', labelcolor='r', color='r')
# show months ticks and grid lines for pw:
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.yaxis.grid()
# ax.legend([ax.lines[0], ax_t.lines[0]], ['PW', 'T'],
# loc='upper right', fontsize=10, prop={'size': 8})
# ax.legend([ax.lines[0]], ['PW'],
# loc='upper right', fontsize=10, prop={'size': 8})
except IndexError:
pass
# change bottom xticks to 1-12 and show them:
# fg.axes[-1, 0].xaxis.set_ticks(np.arange(1, 13))
[fg.axes[x, 0].set_ylabel('PW [mm]') for x in range(len(fg.axes[:, 0]))]
# adjust subplots:
fg.fig.subplots_adjust(top=0.977,
bottom=0.039,
left=0.036,
right=0.959,
hspace=0.185,
wspace=0.125)
filename = 'PW_{}_climatology.png'.format(means)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_gnss_radiosonde_monthly_means(sound_path=sound_path, path=work_yuval,
times=['2014', '2019'], sample='MS',
gps_station='tela', east_height=5000):
import xarray as xr
from aux_gps import path_glob
import pandas as pd
file = path_glob(sound_path, 'bet_dagan_phys_PW_Tm_Ts_*.nc')
phys = xr.load_dataset(file[0])['PW']
if east_height is not None:
file = path_glob(sound_path, 'bet_dagan_edt_sounding*.nc')
east = xr.load_dataset(file[0])['east_distance']
east = east.resample(sound_time=sample).mean().sel(
Height=east_height, method='nearest')
east_df = east.reset_coords(drop=True).to_dataframe()
if times is not None:
phys = phys.sel(sound_time=slice(*times))
ds = phys.resample(sound_time=sample).mean(
).to_dataset(name='Bet-dagan-radiosonde')
ds = ds.rename({'sound_time': 'time'})
gps = xr.load_dataset(
path / 'GNSS_PW_thresh_50_homogenized.nc')[gps_station]
if times is not None:
gps = gps.sel(time=slice(*times))
ds[gps_station] = gps.resample(time=sample).mean()
df = ds.to_dataframe()
# now plot:
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
# [x.set_xlim([pd.to_datetime(times[0]), pd.to_datetime(times[1])])
# for x in axes]
df.columns = ['Bet dagan soundings', '{} GNSS station'.format(gps_station)]
sns.lineplot(data=df, markers=['o', 's'], linewidth=2.0, ax=axes[0])
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
    df_r = df.iloc[:, 1] - df.iloc[:, 0]
    df_r = df_r.rename('Residual distribution')
sns.lineplot(data=df_r, color='k', marker='o', linewidth=1.5, ax=axes[1])
if east_height is not None:
ax_east = axes[1].twinx()
sns.lineplot(data=east_df, color='red',
marker='x', linewidth=1.5, ax=ax_east)
ax_east.set_ylabel(
'East drift at {} km altitude [km]'.format(east_height / 1000.0))
axes[1].axhline(y=0, color='r')
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
axes[0].set_ylabel('Precipitable Water [mm]')
axes[1].set_ylabel('Residuals [mm]')
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
return ds
def plot_wetz_example(path=tela_results_path, plot='WetZ', fontsize=16,
save=True):
from aux_gps import path_glob
import matplotlib.pyplot as plt
from gipsyx_post_proc import process_one_day_gipsyx_output
filepath = path_glob(path, 'tela*_smoothFinal.tdp')[3]
if plot is None:
df, meta = process_one_day_gipsyx_output(filepath, True)
return df, meta
else:
df, meta = process_one_day_gipsyx_output(filepath, False)
if not isinstance(plot, str):
            raise ValueError('Please pick only one field to plot, e.g., WetZ')
error_plot = '{}_error'.format(plot)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
desc = meta['desc'][plot]
unit = meta['units'][plot]
df[plot].plot(ax=ax, legend=False, color='k')
ax.fill_between(df.index, df[plot] - df[error_plot],
df[plot] + df[error_plot], alpha=0.5)
ax.grid()
# ax.set_title('{} from station TELA in {}'.format(
# desc, df.index[100].strftime('%Y-%m-%d')))
ax.set_ylabel('WetZ [{}]'.format(unit), fontsize=fontsize)
ax.set_xlabel('Time [UTC]', fontsize=fontsize)
ax.tick_params(which='both', labelsize=fontsize)
ax.grid('on')
fig.tight_layout()
filename = 'wetz_tela_daily.png'
    caption('{} from station TELA in {}. Note the error estimation from the GipsyX software (filled).'.format(
        desc, df.index[100].strftime('%Y-%m-%d')))
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_figure_3(path=tela_solutions, year=2004, field='WetZ',
middle_date='11-25', zooms=[10, 3, 0.5], save=True):
from gipsyx_post_proc import analyse_results_ds_one_station
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
dss = xr.open_dataset(path / 'TELA_ppp_raw_{}.nc'.format(year))
nums = sorted(list(set([int(x.split('-')[1])
for x in dss if x.split('-')[0] == field])))
ds = dss[['{}-{}'.format(field, i) for i in nums]]
da = analyse_results_ds_one_station(dss, field=field, plot=False)
fig, axes = plt.subplots(ncols=1, nrows=3, sharex=False, figsize=(16, 10))
for j, ax in enumerate(axes):
start = pd.to_datetime('{}-{}'.format(year, middle_date)
) - pd.Timedelta(zooms[j], unit='D')
end = pd.to_datetime('{}-{}'.format(year, middle_date)
) + pd.Timedelta(zooms[j], unit='D')
daa = da.sel(time=slice(start, end))
        for ppp in ds:
            ds[ppp].plot(ax=ax, linewidth=3.0)
daa.plot.line(marker='.', linewidth=0., ax=ax, color='k')
axes[j].set_xlim(start, end)
axes[j].set_ylim(daa.min() - 0.5, daa.max() + 0.5)
try:
axes[j - 1].axvline(x=start, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
axes[j - 1].axvline(x=end, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
except IndexError:
pass
units = ds.attrs['{}>units'.format(field)]
sta = da.attrs['station']
desc = da.attrs['{}>desc'.format(field)]
ax.set_ylabel('{} [{}]'.format(field, units))
ax.set_xlabel('')
ax.grid()
# fig.suptitle(
# '30 hours stitched {} for GNSS station {}'.format(
# desc, sta), fontweight='bold')
fig.tight_layout()
caption('20, 6 and 1 days of zenith wet delay in 2004 from the TELA GNSS station for the top, middle and bottom figures respectively. The colored segments represent daily solutions while the black dots represent smoothed mean solutions.')
filename = 'zwd_tela_discon_panel.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
# fig.subplots_adjust(top=0.95)
return axes
def plot_figure_3_1(path=work_yuval, data='zwd'):
import xarray as xr
from aux_gps import plot_tmseries_xarray
from PW_stations import load_gipsyx_results
if data == 'zwd':
tela = load_gipsyx_results('tela', sample_rate='1H', plot_fields=None)
label = 'ZWD [cm]'
title = 'Zenith wet delay derived from GPS station TELA'
ax = plot_tmseries_xarray(tela, 'WetZ')
elif data == 'pw':
ds = xr.open_dataset(path / 'GNSS_hourly_PW.nc')
tela = ds['tela']
label = 'PW [mm]'
title = 'Precipitable water derived from GPS station TELA'
ax = plot_tmseries_xarray(tela)
ax.set_ylabel(label)
ax.set_xlim('1996-02', '2019-07')
ax.set_title(title)
ax.set_xlabel('')
ax.figure.tight_layout()
return ax
def plot_ts_tm(path=sound_path, model='TSEN',
times=['2007', '2019'], fontsize=14, save=True):
"""plot ts-tm relashonship"""
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
from PW_stations import ML_Switcher
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from sounding_procedures import get_field_from_radiosonde
models_dict = {'LR': 'Linear Regression',
'TSEN': 'Theil–Sen Regression'}
# sns.set_style('whitegrid')
pds = xr.Dataset()
Ts = get_field_from_radiosonde(path=sound_path, field='Ts',
data_type='phys', reduce=None, times=times,
plot=False)
Tm = get_field_from_radiosonde(path=sound_path, field='Tm',
data_type='phys', reduce='min', times=times,
plot=False)
pds['Tm'] = Tm
pds['Ts'] = Ts
pds = pds.dropna('sound_time')
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
pds.plot.scatter(
x='Ts',
y='Tm',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
ax.grid()
ml = ML_Switcher()
fit_model = ml.pick_model(model)
X = pds.Ts.values.reshape(-1, 1)
y = pds.Tm.values
fit_model.fit(X, y)
predict = fit_model.predict(X)
coef = fit_model.coef_[0]
inter = fit_model.intercept_
ax.plot(X, predict, c='r')
bevis_tm = pds.Ts.values * 0.72 + 70.0
ax.plot(pds.Ts.values, bevis_tm, c='purple')
    ax.legend(['{} ({:.2f}, {:.2f})'.format(models_dict.get(model),
                           coef, inter), 'Bevis et al. 1992 (0.72, 70.0)'], fontsize=fontsize-4)
# ax.set_xlabel('Surface Temperature [K]')
# ax.set_ylabel('Water Vapor Mean Atmospheric Temperature [K]')
ax.set_xlabel('Ts [K]', fontsize=fontsize)
ax.set_ylabel('Tm [K]', fontsize=fontsize)
ax.set_ylim(265, 320)
ax.tick_params(labelsize=fontsize)
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = predict - y
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", 'edgecolor': 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(y, predict))
print(rmean, rmse)
r2 = r2_score(y, predict)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[K]')
textstr = '\n'.join(['n={}'.format(pds.Ts.size),
'RMSE: ', '{:.2f} K'.format(rmse)]) # ,
# r'R$^2$: {:.2f}'.format(r2)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
# axin1.text(0.2, 0.9, 'n={}'.format(pds.Ts.size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.78, 0.9, 'RMSE: {:.2f} K'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
axin1.set_xlim(-15, 15)
fig.tight_layout()
filename = 'Bet_dagan_ts_tm_fit_{}-{}.png'.format(times[0], times[1])
    caption('Water vapor mean temperature (Tm) vs. surface temperature (Ts) of the Bet-Dagan radiosonde station. An ordinary least squares linear fit (red) yields the residual distribution with an RMSE of 4 K. The Bevis (1992) model is plotted (purple) for comparison.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
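# Hedged sketch (synthetic data) of the Ts-Tm regression performed in
# plot_ts_tm above: a linear model Tm = a*Ts + b is fitted and compared with
# the Bevis et al. (1992) coefficients (0.72, 70.0) used there. ML_Switcher
# and the Bet-Dagan radiosonde files are not needed for this illustration.
def _example_ts_tm_fit_sketch():
    import numpy as np
    from sklearn.linear_model import LinearRegression
    ts = np.random.uniform(275.0, 310.0, size=500)                 # surface temperature [K]
    tm = 0.72 * ts + 70.0 + np.random.normal(0.0, 2.0, size=500)   # noisy Bevis-like Tm [K]
    model = LinearRegression().fit(ts.reshape(-1, 1), tm)
    coef, inter = model.coef_[0], model.intercept_
    resid = model.predict(ts.reshape(-1, 1)) - tm
    rmse = np.sqrt(np.mean(resid ** 2))
    return coef, inter, rmse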
def plot_pw_tela_bet_dagan_scatterplot(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
cats=None,
times=['2007', '2019'], wv_name='pw',
r2=False, fontsize=14,
save=True):
"""plot the PW of Bet-Dagan vs. PW of gps station"""
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# sns.set_style('white')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path, sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
tpw = 'tpw_bet_dagan'
ds = ds[[tpw, 'tela_pw']].dropna('time')
ds = ds.sel(time=slice(*times))
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
ds.plot.scatter(x=tpw,
y='tela_pw',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
ax.plot(ds[tpw], ds[tpw], c='r')
ax.legend(['y = x'], loc='upper right', fontsize=fontsize)
if wv_name == 'pw':
ax.set_xlabel('PWV from Bet-Dagan [mm]', fontsize=fontsize)
ax.set_ylabel('PWV from TELA GPS station [mm]', fontsize=fontsize)
elif wv_name == 'iwv':
ax.set_xlabel(
r'IWV from Bet-Dagan station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.set_ylabel(
r'IWV from TELA GPS station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.grid()
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = ds.tela_pw.values - ds[tpw].values
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", "edgecolor": 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(ds[tpw].values, ds.tela_pw.values))
r2s = r2_score(ds[tpw].values, ds.tela_pw.values)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[mm]')
ax.tick_params(labelsize=fontsize)
if wv_name == 'pw':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse)])
elif wv_name == 'iwv':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(rmse)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
#
# axin1.text(0.2, 0.95, 'n={}'.format(ds[tpw].size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.3, 0.85, 'bias: {:.2f} mm'.format(rmean),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.35, 0.75, 'RMSE: {:.2f} mm'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# fig.suptitle('Precipitable Water comparison for the years {} to {}'.format(*times))
fig.tight_layout()
caption(
        'PW from TELA GNSS station vs. PW from Bet-Dagan radiosonde station in {}-{}. A 45 degree line is plotted (red) for comparison. Note the skew in the residual distribution with an RMSE of 4.37 mm.'.format(times[0], times[1]))
# fig.subplots_adjust(top=0.95)
filename = 'Bet_dagan_tela_pw_compare_{}-{}.png'.format(times[0], times[1])
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ds
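# Hedged sketch of the comparison statistics reported in the inset of
# plot_pw_tela_bet_dagan_scatterplot above: bias, RMSE and R^2 between a GNSS
# PWV series and a reference (radiosonde) series. The default arrays are
# synthetic placeholders; pass real, time-matched 1-D arrays to use it.
def _example_pwv_comparison_stats(gnss=None, reference=None):
    import numpy as np
    from sklearn.metrics import mean_squared_error, r2_score
    if gnss is None or reference is None:
        reference = np.random.gamma(4.0, 5.0, size=1000)
        gnss = reference + np.random.normal(0.5, 2.0, size=1000)
    resid = gnss - reference
    bias = np.mean(resid)
    rmse = np.sqrt(mean_squared_error(reference, gnss))
    r2 = r2_score(reference, gnss)
    return bias, rmse, r2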
def plot_tela_bet_dagan_comparison(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
times=['2007', '2020'], cats=None,
compare='pwv',
save=True):
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib.dates as mdates
# sns.set_style('whitegrid')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path,
sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
ds = ds.dropna('time')
ds = ds.sel(time=slice(*times))
if compare == 'zwd':
df = ds[['zwd_bet_dagan', 'tela']].to_dataframe()
elif compare == 'pwv':
df = ds[['tpw_bet_dagan', 'tela_pw']].to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
df.columns = ['Bet-Dagan soundings', 'TELA GNSS station']
sns.scatterplot(
data=df,
s=20,
ax=axes[0],
style='x',
linewidth=0,
alpha=0.8)
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
    df_r = df.iloc[:, 0] - df.iloc[:, 1]
    df_r = df_r.rename('Residual distribution')
sns.scatterplot(
data=df_r,
color='k',
s=20,
ax=axes[1],
linewidth=0,
alpha=0.5)
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
if compare == 'zwd':
axes[0].set_ylabel('Zenith Wet Delay [cm]')
axes[1].set_ylabel('Residuals [cm]')
elif compare == 'pwv':
axes[0].set_ylabel('Precipitable Water Vapor [mm]')
axes[1].set_ylabel('Residuals [mm]')
# axes[0].set_title('Zenith wet delay from Bet-Dagan radiosonde station and TELA GNSS satation')
sonde_change_x = pd.to_datetime('2013-08-20')
axes[1].axvline(sonde_change_x, color='red')
axes[1].annotate(
'changed sonde type from VIZ MK-II to PTU GPS',
(mdates.date2num(sonde_change_x),
10),
xytext=(
15,
15),
textcoords='offset points',
arrowprops=dict(
arrowstyle='fancy',
color='red'),
color='red')
# axes[1].set_aspect(3)
[x.set_xlim(*[pd.to_datetime(times[0]), pd.to_datetime(times[1])])
for x in axes]
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
filename = 'Bet_dagan_tela_{}_compare.png'.format(compare)
    caption('Top: zenith wet delay from Bet-Dagan radiosonde station (blue circles) and from TELA GNSS station (orange x) in 2007-2019. Bottom: residuals. Note that the residuals become more constrained after 08-2013, probably due to an equipment change.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def plot_israel_map(gis_path=gis_path, rc=rc, ticklabelsize=12, ax=None):
"""general nice map for israel, need that to plot stations,
and temperature field on top of it"""
import geopandas as gpd
import contextily as ctx
import seaborn as sns
import cartopy.crs as ccrs
sns.set_style("ticks", rc=rc)
isr_with_yosh = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')
isr_with_yosh.crs = {'init': 'epsg:4326'}
# isr_with_yosh = isr_with_yosh.to_crs(epsg=3857)
crs_epsg = ccrs.epsg('3857')
# crs_epsg = ccrs.epsg('2039')
if ax is None:
# fig, ax = plt.subplots(subplot_kw={'projection': crs_epsg},
# figsize=(6, 15))
bounds = isr_with_yosh.geometry.total_bounds
extent = [bounds[0], bounds[2], bounds[1], bounds[3]]
# ax.set_extent([bounds[0], bounds[2], bounds[1], bounds[3]], crs=crs_epsg)
# ax.add_geometries(isr_with_yosh.geometry, crs=crs_epsg)
ax = isr_with_yosh.plot(alpha=0.0, figsize=(6, 15))
else:
isr_with_yosh.plot(alpha=0.0, ax=ax)
ctx.add_basemap(
ax,
url=ctx.sources.ST_TERRAIN_BACKGROUND,
crs='epsg:4326')
ax.xaxis.set_major_locator(ticker.MaxNLocator(2))
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
ax.yaxis.set_major_formatter(lat_formatter)
ax.xaxis.set_major_formatter(lon_formatter)
ax.tick_params(top=True, bottom=True, left=True, right=True,
direction='out', labelsize=ticklabelsize)
# scale_bar(ax, ccrs.Mercator(), 50, bounds=bounds)
return ax
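# Minimal usage sketch for plot_israel_map: it assumes the module-level gis_path
# and its shapefile are available, and overlays an arbitrary GeoDataFrame of
# points ('points_gdf' is a hypothetical input) on the returned axes.
def _sketch_overlay_points_on_israel_map(points_gdf):
    ax = plot_israel_map(gis_path=gis_path)
    points_gdf.plot(ax=ax, color='red', edgecolor='black', markersize=30)
    return ax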
def plot_israel_with_stations(gis_path=gis_path, dem_path=dem_path, ims=True,
gps=True, radio=True, terrain=True, alt=False,
ims_names=False, gps_final=False, save=True):
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
import matplotlib.pyplot as plt
import xarray as xr
import pandas as pd
import geopandas as gpd
ax = plot_israel_map(gis_path)
station_names = []
legend = []
if ims:
print('getting IMS temperature stations metadata...')
ims_t = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims_t.plot(ax=ax, color='red', edgecolor='black', alpha=0.5)
station_names.append('ims')
legend.append('IMS stations')
if ims_names:
geo_annotate(ax, ims_t.lon, ims_t.lat,
ims_t['name_english'], xytext=(3, 3), fmt=None,
c='k', fw='normal', fs=7, colorupdown=False)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
if gps:
print('getting solved GNSS israeli stations metadata...')
gps_df = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
if gps_final:
to_drop = ['gilb', 'lhav', 'hrmn', 'nizn', 'spir']
gps_final_stations = [x for x in gps_df.index if x not in to_drop]
gps = gps_df.loc[gps_final_stations, :]
gps.plot(ax=ax, color='k', edgecolor='black', marker='s')
gps_stations = [x for x in gps.index]
to_plot_offset = ['gilb', 'lhav']
# [gps_stations.remove(x) for x in to_plot_offset]
gps_normal_anno = gps.loc[gps_stations, :]
# gps_offset_anno = gps.loc[to_plot_offset, :]
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
if alt:
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.alt, xytext=(4, -6), fmt='{:.0f}',
c='k', fw='bold', fs=9, colorupdown=False)
# geo_annotate(ax, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('gps')
legend.append('GNSS stations')
if terrain:
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level', size=8, weight='normal')
cb.ax.tick_params(labelsize=8)
ax.set_xlabel('')
ax.set_ylabel('')
if radio: # plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax, color='black', edgecolor='black',
marker='+')
geo_annotate(ax, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('radio')
legend.append('radiosonde')
if legend:
plt.legend(legend, loc='upper left')
plt.tight_layout()
plt.subplots_adjust(bottom=0.05)
if station_names:
station_names = '_'.join(station_names)
else:
station_names = 'no_stations'
filename = 'israel_map_{}.png'.format(station_names)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_zwd_lapse_rate(path=work_yuval, fontsize=18, model='TSEN', save=True):
    import matplotlib.pyplot as plt
    from PW_stations import calculate_zwd_altitude_fit
    df, zwd_lapse_rate = calculate_zwd_altitude_fit(path=path, model=model,
                                                    plot=True, fontsize=fontsize)
    if save:
        filename = 'zwd_lapse_rate.png'
        plt.savefig(savefig_path / filename, bbox_inches='tight')
    return
def plot_ims_T_lapse_rate(ims_path=ims_path, dt='2013-10-19T22:00:00',
fontsize=16, save=True):
from aux_gps import path_glob
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
    import pandas as pd
    import seaborn as sns
# from matplotlib import rc
def choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate):
ts = tdf.loc[dt, :]
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
# ts.name = dt_col
# Tloc_df = Tloc_df.join(ts, how='right')
# Tloc_df = Tloc_df.dropna(axis=0)
ts_vs_alt = pd.Series(ts.values, index=T_alts)
ts_vs_alt_for_fit = ts_vs_alt.dropna()
[a, b] = np.polyfit(ts_vs_alt_for_fit.index.values,
ts_vs_alt_for_fit.values, 1)
if lapse_rate == 'auto':
lapse_rate = np.abs(a) * 1000
if lapse_rate < 5.0:
lapse_rate = 5.0
elif lapse_rate > 10.0:
lapse_rate = 10.0
return ts_vs_alt, lapse_rate
# rc('text', usetex=False)
# rc('text',latex.unicode=False)
glob_str = 'IMS_TD_israeli_10mins*.nc'
file = path_glob(ims_path, glob_str=glob_str)[0]
ds = xr.open_dataset(file)
time_dim = list(set(ds.dims))[0]
# slice to a starting year(1996?):
ds = ds.sel({time_dim: slice('1996', None)})
# years = sorted(list(set(ds[time_dim].dt.year.values)))
# get coords and alts of IMS stations:
T_alts = np.array([ds[x].attrs['station_alt'] for x in ds])
# T_lats = np.array([ds[x].attrs['station_lat'] for x in ds])
# T_lons = np.array([ds[x].attrs['station_lon'] for x in ds])
print('loading IMS_TD of israeli stations 10mins freq..')
# transform to dataframe and add coords data to df:
tdf = ds.to_dataframe()
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
dt = pd.to_datetime(dt)
# prepare the ims coords and temp df(Tloc_df) and the lapse rate:
ts_vs_alt, lapse_rate = choose_dt_and_lapse_rate(tdf, dt, T_alts, 'auto')
fig, ax_lapse = plt.subplots(figsize=(10, 6))
sns.regplot(x=ts_vs_alt.index, y=ts_vs_alt.values, color='r',
scatter_kws={'color': 'k'}, ax=ax_lapse)
# suptitle = dt.strftime('%Y-%m-%d %H:%M')
ax_lapse.set_xlabel('Altitude [m]', fontsize=fontsize)
ax_lapse.set_ylabel(r'Temperature [$\degree$C]', fontsize=fontsize)
ax_lapse.text(0.5, 0.95, r'Lapse rate: {:.2f} $\degree$C/km'.format(lapse_rate),
horizontalalignment='center', verticalalignment='center',
fontsize=fontsize,
transform=ax_lapse.transAxes, color='k')
ax_lapse.grid()
ax_lapse.tick_params(labelsize=fontsize)
# ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
fig.tight_layout()
filename = 'ims_lapse_rate_example.png'
    caption('Temperature vs. altitude at 10 PM on 2013-10-19 for all automated 10-min IMS stations. The lapse rate is calculated using an ordinary least squares linear fit.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax_lapse
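# A hedged sketch of how a lapse rate such as the one fitted above can be used
# to reduce a surface temperature to another altitude; T_surface [deg C] at
# alt_surface [m] is a hypothetical measurement and lapse_rate is in deg C/km
# (6.5 is only an illustrative, standard-atmosphere default):
def _sketch_apply_lapse_rate(T_surface, alt_surface, alt_target, lapse_rate=6.5):
    # temperature decreases by ~lapse_rate deg C for every km of added altitude:
    return T_surface - lapse_rate * (alt_target - alt_surface) / 1000.0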
def plot_figure_9(hydro_path=hydro_path, gis_path=gis_path, pw_anom=False,
max_flow_thresh=None, wv_name='pw', save=True):
from hydro_procedures import get_hydro_near_GNSS
from hydro_procedures import loop_over_gnss_hydro_and_aggregate
import matplotlib.pyplot as plt
df = get_hydro_near_GNSS(
radius=5,
hydro_path=hydro_path,
gis_path=gis_path,
plot=False)
ds = loop_over_gnss_hydro_and_aggregate(df, pw_anom=pw_anom,
max_flow_thresh=max_flow_thresh,
hydro_path=hydro_path,
work_yuval=work_yuval, ndays=3,
plot=False, plot_all=False)
names = [x for x in ds.data_vars]
fig, ax = plt.subplots(figsize=(10, 6))
for name in names:
ds.mean('station').mean('tide_start')[name].plot.line(
marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days before tide event')
ax.grid()
hstations = [ds[x].attrs['hydro_stations'] for x in ds.data_vars]
events = [ds[x].attrs['total_events'] for x in ds.data_vars]
fmt = list(zip(names, hstations, events))
ax.legend(['{} with {} stations ({} total events)'.format(x, y, z)
for x, y, z in fmt])
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_xticklabels()]
xlabels = [x.replace('−', '') for x in labels]
ax.set_xticklabels(xlabels)
fig.canvas.draw()
if wv_name == 'pw':
if pw_anom:
ax.set_ylabel('PW anomalies [mm]')
else:
ax.set_ylabel('PW [mm]')
elif wv_name == 'iwv':
if pw_anom:
ax.set_ylabel(r'IWV anomalies [kg$\cdot$m$^{-2}$]')
else:
ax.set_ylabel(r'IWV [kg$\cdot$m$^{-2}$]')
fig.tight_layout()
# if pw_anom:
# title = 'Mean PW anomalies for tide stations near all GNSS stations'
# else:
# title = 'Mean PW for tide stations near all GNSS stations'
# if max_flow_thresh is not None:
# title += ' (max_flow > {} m^3/sec)'.format(max_flow_thresh)
# ax.set_title(title)
if pw_anom:
filename = 'hydro_tide_lag_pw_anom.png'
if max_flow_thresh:
filename = 'hydro_tide_lag_pw_anom_max{}.png'.format(
max_flow_thresh)
else:
filename = 'hydro_tide_lag_pw.png'
if max_flow_thresh:
filename = 'hydro_tide_lag_pw_anom_max{}.png'.format(
max_flow_thresh)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def produce_table_1(removed=['hrmn', 'nizn', 'spir'], merged={'klhv': ['klhv', 'lhav'],
'mrav': ['gilb', 'mrav']}, add_location=False,
scope='annual', remove_distance=True):
"""for scope='diurnal' use removed=['hrmn'], add_location=True
and remove_distance=False"""
from PW_stations import produce_geo_gnss_solved_stations
import pandas as pd
sites = group_sites_to_xarray(upper=False, scope=scope)
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
new = sites.T.values.ravel()
if scope == 'annual':
new = [x for x in new.astype(str) if x != 'nan']
df_gnss = df_gnss.reindex(new)
df_gnss['ID'] = df_gnss.index.str.upper()
pd.options.display.float_format = '{:.2f}'.format
df = df_gnss[['name', 'ID', 'lat', 'lon', 'alt', 'distance']]
df['alt'] = df['alt'].map('{:,.0f}'.format)
df['distance'] = df['distance'].astype(int)
cols = ['GNSS Station name', 'Station ID', 'Latitude [N]',
'Longitude [E]', 'Altitude [m a.s.l]', 'Distance from shore [km]']
df.columns = cols
if scope != 'annual':
df.loc['spir', 'GNSS Station name'] = 'Sapir'
if remove_distance:
df = df.iloc[:, 0:-1]
if add_location:
groups = group_sites_to_xarray(upper=False, scope=scope)
coastal = groups.sel(group='coastal').values
coastal = coastal[~pd.isnull(coastal)]
highland = groups.sel(group='highland').values
highland = highland[~pd.isnull(highland)]
eastern = groups.sel(group='eastern').values
eastern = eastern[~pd.isnull(eastern)]
df.loc[coastal, 'Location'] = 'Coastal'
df.loc[highland, 'Location'] = 'Highland'
df.loc[eastern, 'Location'] = 'Eastern'
    if removed is not None:
        df = df.loc[[x for x in df.index if x not in removed], :]
    if merged is not None:
        # note: station merging is not applied here; when a merge mapping is
        # given, the dataframe is returned without printing the LaTeX table
        return df
    print(df.to_latex(index=False))
    return df
def produce_table_stats(thresh=50, add_location=True, add_height=True):
"""add plot sd to height with se_sd errorbars"""
from PW_stations import produce_pw_statistics
from PW_stations import produce_geo_gnss_solved_stations
import pandas as pd
import xarray as xr
    sites = group_sites_to_xarray(upper=False, scope='annual')
    new = sites.T.values.ravel()
    new = [x for x in new.astype(str) if x != 'nan']
pw_mm = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
pw_mm = pw_mm[new]
df = produce_pw_statistics(
thresh=thresh, resample_to_mm=False, pw_input=pw_mm)
if add_location:
cols = [x for x in df.columns]
cols.insert(1, 'Location')
gr_df = sites.to_dataframe('sites')
location = [gr_df[gr_df == x].dropna().index.values.item()[
1].title() for x in new]
df['Location'] = location
df = df[cols]
if add_height:
cols = [x for x in df.columns]
if add_location:
cols.insert(2, 'Height [m a.s.l]')
else:
cols.insert(1, 'Height [m a.s.l]')
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=False)
# pd.options.display.float_format = '{:.2f}'.format
df['Height [m a.s.l]'] = df_gnss['alt'].map('{:.0f}'.format)
df = df[cols]
print(df.to_latex(index=False))
return df
def plot_pwv_longterm_trend(path=work_yuval, model_name='LR', save=True,
fontsize=16, add_era5=True):
import matplotlib.pyplot as plt
from aux_gps import linear_fit_using_scipy_da_ts
# from PW_stations import ML_Switcher
import xarray as xr
from aux_gps import anomalize_xr
"""TSEN and LR for linear fit"""
# load GNSS Israel:
# pw = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50_homogenized.nc')
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_mean = pw_anoms.to_array('station').mean('station')
pw_std = pw_anoms.to_array('station').std('station')
pw_weights = 1 / pw_anoms.to_array('station').count('station')
# add ERA5:
era5 = xr.load_dataset(work_yuval / 'GNSS_era5_monthly_PW.nc')
era5_anoms = anomalize_xr(era5, 'MS', verbose=False)
era5_anoms = era5_anoms.sel(time=slice(
pw_mean.time.min(), pw_mean.time.max()))
era5_mean = era5_anoms.to_array('station').mean('station')
era5_std = era5_anoms.to_array('station').std('station')
# init linear models
# ml = ML_Switcher()
# model = ml.pick_model(model_name)
if add_era5:
fig, ax = plt.subplots(2, 1, figsize=(15, 7.5))
trend, trend_hi, trend_lo, slope, slope_hi, slope_lo = linear_fit_using_scipy_da_ts(pw_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None, method='curve_fit', weights=pw_weights)
pwln = pw_mean.plot(ax=ax[0], color='k', marker='o', linewidth=1.5)
trendln = trend.plot(ax=ax[0], color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax[0], linewidth=1.5)
trend_lo.plot.line('r--', ax=ax[0], linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope, slope_lo, slope_hi)
handles = pwln+trendln
labels = ['PWV-mean']
labels.append(trend_label)
ax[0].legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax[0].grid()
ax[0].set_xlabel('')
ax[0].set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax[0].tick_params(labelsize=fontsize)
trend1, trend_hi1, trend_lo1, slope1, slope_hi1, slope_lo1 = linear_fit_using_scipy_da_ts(era5_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None, method='curve_fit', weights=era5_std)
era5ln = era5_mean.plot(ax=ax[1], color='k', marker='o', linewidth=1.5)
trendln1 = trend1.plot(ax=ax[1], color='r', linewidth=2)
trend_hi1.plot.line('r--', ax=ax[1], linewidth=1.5)
trend_lo1.plot.line('r--', ax=ax[1], linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope1, slope_lo1, slope_hi1)
handles = era5ln+trendln1
labels = ['ERA5-mean']
labels.append(trend_label)
ax[1].legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax[1].grid()
ax[1].set_xlabel('')
ax[1].set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax[1].tick_params(labelsize=fontsize)
else:
fig, ax = plt.subplots(1, 1, figsize=(15, 5.5))
trend, trend_hi, trend_lo, slope, slope_hi, slope_lo = linear_fit_using_scipy_da_ts(pw_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None)
pwln = pw_mean.plot(ax=ax, color='k', marker='o', linewidth=1.5)
trendln = trend.plot(ax=ax, color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax, linewidth=1.5)
trend_lo.plot.line('r--', ax=ax, linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope, slope_lo, slope_hi)
handles = pwln+trendln
labels = ['PWV-mean']
labels.append(trend_label)
ax.legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax.grid()
ax.set_xlabel('')
ax.set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
fig.suptitle('PWV mean anomalies and linear trend',
fontweight='bold', fontsize=fontsize)
fig.tight_layout()
if save:
filename = 'pwv_mean_trend_{}.png'.format(model_name)
plt.savefig(savefig_path / filename, orientation='portrait')
return ax
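# A minimal, hedged trend-fit sketch (not the linear_fit_using_scipy_da_ts
# routine used above): a Theil-Sen slope on a PWV anomaly DataArray using
# scipy, with the per-day slope converted to mm per decade (~3652.5 days):
def _sketch_theilsen_slope_mm_per_decade(da):
    import numpy as np
    from scipy.stats import theilslopes
    da = da.dropna('time')
    # x in days since the first sample, y the PWV anomalies [mm]:
    x = (da['time'] - da['time'][0]) / np.timedelta64(1, 'D')
    slope, intercept, lo, hi = theilslopes(da.values, x.values)
    return slope * 3652.5, lo * 3652.5, hi * 3652.5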
def plot_trend_filled_pwv_and_era5_barh_plot(path=work_yuval):
import xarray as xr
from aux_gps import path_glob
from PW_stations import process_mkt_from_dataset
import pandas as pd
import seaborn as sns
file = sorted(
path_glob(path, 'GNSS_PW_monthly_homogenized_filled_*.nc'))[0]
gnss = xr.load_dataset(path / file)
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5 = era5.sel(time=slice(gnss.time.min(), gnss.time.max()))
era5 = era5[[x for x in era5 if x in gnss]]
df_gnss = process_mkt_from_dataset(
gnss,
alpha=0.95,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_gnss = add_location_to_GNSS_stations_dataframe(df_gnss)
df_gnss['sig'] = df_gnss['p'].astype(float) <= 0.05
df_era5 = process_mkt_from_dataset(
era5,
alpha=0.95,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_era5 = add_location_to_GNSS_stations_dataframe(df_era5)
df_era5['sig'] = df_era5['p'].astype(float) <= 0.05
df = pd.concat([df_gnss, df_era5], keys=['GNSS', 'ERA5'])
df1 = df.unstack(level=0)
df = df1.stack().reset_index()
df.columns = ['station', '', 'p', 'Tau', 'slope', 'intercept', 'CI_5_low',
'CI_5_high', 'Location', 'sig']
sns.barplot(x="slope", y='station', hue='', data=df[df['sig']])
# df['slope'].unstack(level=0).plot(kind='barh', subplots=False, xerr=1)
return df
def produce_filled_pwv_and_era5_mann_kendall_table(path=work_yuval):
import xarray as xr
from aux_gps import path_glob
file = sorted(
path_glob(path, 'GNSS_PW_monthly_homogenized_filled_*.nc'))[0]
gnss = xr.load_dataset(path / file)
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5 = era5.sel(time=slice(gnss.time.min(), gnss.time.max()))
df = add_comparison_to_mann_kendall_table(gnss, era5, 'GNSS', 'ERA5')
print(df.to_latex(header=False, index=False))
return df
def add_comparison_to_mann_kendall_table(ds1, ds2, name1='GNSS', name2='ERA5',
alpha=0.05):
df1 = produce_table_mann_kendall(ds1, alpha=alpha)
df2 = produce_table_mann_kendall(ds2, alpha=alpha)
df = df1['Site ID'].to_frame()
df[name1+'1'] = df1["Kendall's Tau"]
df[name2+'1'] = df2["Kendall's Tau"]
df[name1+'2'] = df1['P-value']
df[name2+'2'] = df2['P-value']
df[name1+'3'] = df1["Sen's slope"]
df[name2+'3'] = df2["Sen's slope"]
df[name1+'4'] = df1["Percent change"]
df[name2+'4'] = df2["Percent change"]
return df
def produce_table_mann_kendall(pwv_ds, alpha=0.05,
sort_by=['groups_annual', 'lat']):
from PW_stations import process_mkt_from_dataset
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import reduce_tail_xr
import xarray as xr
def table_process_df(df, means):
df_sites = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df_sites.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 0]).index
# calculate percent changes from last decade means:
df['CI95'] = '(' + df['CI_95_low'].map('{:.2f}'.format).astype(
str) + ', ' + df['CI_95_high'].map('{:.2f}'.format).astype(str) + ')'
df['means'] = means
df['Pct_change'] = 100 * df['slope'] / df['means']
Pct_high = 100 * df['CI_95_high'] / df['means']
Pct_low = 100 * df['CI_95_low'] / df['means']
df['Pct_change_CI95'] = '(' + Pct_low.map('{:.2f}'.format).astype(
str) + ', ' + Pct_high.map('{:.2f}'.format).astype(str) + ')'
# df['Temperature change'] = df['Percent change'] / 7.0
df.drop(['means', 'CI_95_low', 'CI_95_high'], axis=1, inplace=True)
# station id is big:
df['id'] = df.index.str.upper()
# , 'Temperature change']]
df = df[['id', 'Tau', 'p', 'slope', 'CI95',
'Pct_change', 'Pct_change_CI95']]
# filter for non significant trends:
# df['slope'] = df['slope'][df['p'] < 0.05]
# df['Pct_change'] = df['Pct_change'][df['p'] < 0.05]
# df['CI95'] = df['CI95'][df['p'] < 0.05]
# df['Pct_change_CI95'] = df['Pct_change_CI95'][df['p'] < 0.05]
# higher and better results:
        # avoid chained assignment; format p-values and slopes via .loc:
        df.loc[df['p'] < 0.001, 'p'] = '<0.001'
        mask = df['p'] != '<0.001'
        df.loc[mask, 'p'] = df.loc[mask, 'p'].astype(
            float).map('{:,.3f}'.format)
        df['Tau'] = df['Tau'].map('{:,.3f}'.format)
        df['slope'] = df['slope'].map('{:,.2f}'.format)
        df.loc[df['slope'] == 'nan', 'slope'] = '-'
df.columns = [
'Site ID',
"Kendall's Tau",
'P-value',
"Sen's slope", "Sen's slope CI 95%",
'Percent change', 'Percent change CI 95%'] # , 'Temperature change']
df['Percent change'] = df['Percent change'].map('{:,.1f}'.format)
df['Percent change'] = df[df["Sen's slope"] != '-']['Percent change']
df['Percent change'] = df['Percent change'].fillna('-')
df["Sen's slope CI 95%"] = df["Sen's slope CI 95%"].fillna(' ')
df['Percent change CI 95%'] = df['Percent change CI 95%'].fillna(' ')
df["Sen's slope"] = df["Sen's slope"].astype(
str) + ' ' + df["Sen's slope CI 95%"].astype(str)
df['Percent change'] = df['Percent change'].astype(
str) + ' ' + df['Percent change CI 95%'].astype(str)
df.drop(['Percent change CI 95%', "Sen's slope CI 95%"],
axis=1, inplace=True)
# df['Temperature change'] = df['Temperature change'].map('{:,.1f}'.format)
# df['Temperature change'] = df[df["Sen's slope"] != '-']['Temperature change']
# df['Temperature change'] = df['Temperature change'].fillna('-')
# last, reindex according to geography:
# gr = group_sites_to_xarray(scope='annual')
# new = [x for x in gr.T.values.ravel() if isinstance(x, str)]
new = [x for x in sites if x in df.index]
df = df.reindex(new)
return df
# if load_data == 'pwv-homo':
# print('loading homogenized (RH) pwv dataset.')
# data = xr.load_dataset(work_yuval /
# 'GNSS_PW_monthly_thresh_{:.0f}_homogenized.nc'.format(thresh))
# elif load_data == 'pwv-orig':
# print('loading original pwv dataset.')
# data = xr.load_dataset(work_yuval /
# 'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
# elif load_data == 'pwv-era5':
# print('loading era5 pwv dataset.')
# data = xr.load_dataset(work_yuval / 'GNSS_era5_monthly_PW.nc')
# if pwv_ds is not None:
# print('loading user-input pwv dataset.')
# data = pwv_ds
df = process_mkt_from_dataset(
pwv_ds,
alpha=alpha,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_mean = reduce_tail_xr(pwv_ds, reduce='mean', records=120,
return_df=True)
table = table_process_df(df, df_mean)
# print(table.to_latex(index=False))
return table
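# A hedged arithmetic sketch of the 'Percent change' column assembled above:
# assuming the Sen's slope has already been scaled to mm per decade (factor=120
# months) and the denominator is the last-decade mean PWV, the percent change
# per decade is simply their ratio, e.g. 0.5 mm/decade over a 20 mm mean -> 2.5 %:
def _sketch_percent_change_per_decade(sen_slope_mm_per_decade, last_decade_mean_mm):
    return 100.0 * sen_slope_mm_per_decade / last_decade_mean_mm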
def plot_filled_and_unfilled_pwv_monthly_anomalies(pw_da, anomalize=True,
max_gap=6,
method='cubic',
ax=None):
from aux_gps import anomalize_xr
import matplotlib.pyplot as plt
import numpy as np
if anomalize:
pw_da = anomalize_xr(pw_da, 'MS')
max_gap_td = np.timedelta64(max_gap, 'M')
filled = pw_da.interpolate_na('time', method=method, max_gap=max_gap_td)
if ax is None:
fig, ax = plt.subplots(figsize=(15, 5))
filledln = filled.plot.line('b-', ax=ax)
origln = pw_da.plot.line('r-', ax=ax)
ax.legend(origln + filledln,
['original time series',
'filled using {} interpolation with max gap of {} months'.format(method,
max_gap)])
ax.grid()
ax.set_xlabel('')
ax.set_ylabel('PWV [mm]')
ax.set_title('PWV station {}'.format(pw_da.name.upper()))
return ax
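# A small, self-contained sketch of the gap-filling step above: interpolate_na
# with a max_gap fills only short gaps. The synthetic monthly series below has
# a 2-month gap (filled) and an 8-month gap (left as NaN), assuming a ~6-month
# max_gap expressed as a pandas Timedelta:
def _sketch_max_gap_interpolation():
    import numpy as np
    import pandas as pd
    import xarray as xr
    time = pd.date_range('2000-01-01', periods=36, freq='MS')
    da = xr.DataArray(np.sin(np.arange(36) * 2 * np.pi / 12), dims='time',
                      coords={'time': time})
    da[5:7] = np.nan    # short gap -> filled
    da[20:28] = np.nan  # long gap -> kept as NaN
    filled = da.interpolate_na('time', method='cubic',
                               max_gap=pd.Timedelta(days=6 * 31))
    return filled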
def plot_pwv_statistic_vs_height(pwv_ds, stat='mean', x='alt', season=None,
ax=None, color='b'):
from PW_stations import produce_geo_gnss_solved_stations
import matplotlib.pyplot as plt
from aux_gps import calculate_std_error
import pandas as pd
if season is not None:
print('{} season selected'.format(season))
pwv_ds = pwv_ds.sel(time=pwv_ds['time.season'] == season)
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
if stat == 'mean':
pw_stat = pwv_ds.mean()
pw_stat_error = pwv_ds.map(calculate_std_error, statistic=stat)
elif stat == 'std':
pw_stat = pwv_ds.std()
pw_stat_error = pwv_ds.map(calculate_std_error, statistic=stat)
df[stat] = pd.Series(
pw_stat.to_array(
dim='gnss'),
index=pw_stat.to_array('gnss')['gnss'])
df['{}_error'.format(stat)] = pd.Series(pw_stat_error.to_array(
dim='gnss'), index=pw_stat_error.to_array('gnss')['gnss'])
if ax is None:
fig, ax = plt.subplots()
if x == 'alt':
ax.set_xlabel('Altitude [m a.s.l]')
elif x == 'distance':
ax.set_xlabel('Distance to sea shore [km]')
ax.set_ylabel('{} [mm]'.format(stat))
ax.errorbar(df[x],
df[stat],
df['{}_error'.format(stat)],
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color=color)
if season is not None:
ax.set_title('{} season'.format(season))
ax.grid()
return ax
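# A hedged sketch of the standard errors that the assumed calculate_std_error
# helper could return for the error bars above: the standard error of the mean
# is std/sqrt(n), and for roughly normal data the standard error of the std is
# approximately std/sqrt(2*(n-1)):
def _sketch_std_errors(values):
    import numpy as np
    v = np.asarray(values, dtype=float)
    v = v[~np.isnan(v)]
    n = v.size
    se_mean = v.std(ddof=1) / np.sqrt(n)
    se_std = v.std(ddof=1) / np.sqrt(2 * (n - 1))
    return se_mean, se_std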
def add_location_to_GNSS_stations_dataframe(df, scope='annual'):
import pandas as pd
# load location data:
gr = group_sites_to_xarray(scope=scope)
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
# remove nans form mixed nans and str numpy:
new = new[~pd.isnull(new)]
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
df['Location'] = geo
return df
def plot_peak_amplitude_altitude_long_term_pwv(path=work_yuval, era5=False,
add_a1a2=True, save=True, fontsize=16):
import xarray as xr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from fitting_routines import fit_poly_model_xr
from aux_gps import remove_suffix_from_ds
    from PW_stations import produce_geo_gnss_solved_stations
    import seaborn as sns
# load alt data, distance etc.,
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
df_geo = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
if era5:
dss = xr.load_dataset(path / 'GNSS_PW_ERA5_harmonics_annual.nc')
else:
dss = xr.load_dataset(path / 'GNSS_PW_harmonics_annual.nc')
dss = dss[[x for x in dss if '_params' in x]]
dss = remove_suffix_from_ds(dss)
df = dss.sel(cpy=1, params='ampl').reset_coords(drop=True).to_dataframe().T
df.columns = ['A1', 'A1std']
df = df.join(dss.sel(cpy=2, params='ampl').reset_coords(drop=True).to_dataframe().T)
# abs bc sometimes the fit get a sine amp negative:
df = np.abs(df)
    df.columns = ['A1', 'A1std', 'A2', 'A2std']
df['A2A1'] = df['A2'] / df['A1']
a2a1std = np.sqrt((df['A2std']/df['A1'])**2 + (df['A2']*df['A1std']/df['A1']**2)**2)
df['A2A1std'] = a2a1std
# load location data:
gr = group_sites_to_xarray(scope='annual')
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
# remove nans form mixed nans and str numpy:
new = new[~pd.isnull(new)]
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
df['Location'] = geo
df['alt'] = df_geo['alt']
df = df.set_index('alt')
df = df.sort_index()
cdict = produce_colors_for_pwv_station(scope='annual', as_cat_dict=True)
cdict = dict(zip([x.capitalize() for x in cdict.keys()], cdict.values()))
if add_a1a2:
        fig, axes = plt.subplots(2, 1, sharex=False, figsize=(8, 12))
ax = axes[0]
else:
ax = None
# colors=produce_colors_for_pwv_station(scope='annual')
ax = sns.scatterplot(data=df, y='A1', x='alt', hue='Location',
palette=cdict, ax=ax, s=100, zorder=20)
# ax.legend(prop={'size': fontsize})
x_coords = []
y_coords = []
colors = []
for point_pair in ax.collections:
colors.append(point_pair.get_facecolor())
for x, y in point_pair.get_offsets():
x_coords.append(x)
y_coords.append(y)
ax.errorbar(x_coords, y_coords,
yerr=df['A1std'].values, ecolor=colors[0][:,0:-1],
ls='', capsize=None, fmt=" ")#, zorder=-1)
# linear fit:
x = df.index.values
y = df['A1'].values
p = fit_poly_model_xr(x, y, 1, plot=None, ax=None, return_just_p=True)
fit_label = r'Fitted line, slope: {:.2f} mm$\cdot$km$^{{-1}}$'.format(p[0] * -1000)
fit_poly_model_xr(x,y,1,plot='manual', ax=ax, fit_label=fit_label)
ax.set_ylabel('PWV annual amplitude [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_yticks(np.arange(1, 6, 1))
if add_a1a2:
ax.set_xlabel('')
else:
ax.set_xlabel('GNSS station height [m a.s.l]')
ax.grid(True)
ax.legend(prop={'size': fontsize-3})
if add_a1a2:
# convert to percent:
df['A2A1'] = df['A2A1'].mul(100)
df['A2A1std'] = df['A2A1std'].mul(100)
ax = sns.scatterplot(data=df, y='A2A1', x='alt',
hue='Location', ax=axes[1],
legend=True, palette=cdict,
s=100, zorder=20)
x_coords = []
y_coords = []
colors = []
# ax.legend(prop={'size':fontsize+4}, fontsize=fontsize)
for point_pair in ax.collections:
colors.append(point_pair.get_facecolor())
for x, y in point_pair.get_offsets():
x_coords.append(x)
y_coords.append(y)
ax.errorbar(x_coords, y_coords,
yerr=df['A2A1std'].values, ecolor=colors[0][:,0:-1],
ls='', capsize=None, fmt=" ")#, zorder=-1)
df_upper = df.iloc[9:]
y = df_upper['A2A1'].values
x = df_upper.index.values
p = fit_poly_model_xr(x, y, 1, return_just_p=True)
fit_label = r'Fitted line, slope: {:.1f} %$\cdot$km$^{{-1}}$'.format(p[0] * 1000)
p = fit_poly_model_xr(x, y, 1, plot='manual', ax=ax,
return_just_p=False, color='r',
fit_label=fit_label)
df_lower = df.iloc[:11]
mean = df_lower['A2A1'].mean()
std = df_lower['A2A1'].std()
stderr = std / np.sqrt(len(df_lower))
ci = 1.96 * stderr
ax.hlines(xmin=df_lower.index.min(), xmax=df_lower.index.max(), y=mean,
color='k', label='Mean ratio: {:.1f} %'.format(mean))
ax.fill_between(df_lower.index.values, mean + ci, mean - ci, color="#b9cfe7", edgecolor=None, alpha=0.6)
# y = df_lower['A2A1'].values
# x = df_lower.index.values
# p = fit_poly_model_xr(x, y, 1, return_just_p=True)
# fit_label = 'Linear Fit intercept: {:.2f} %'.format(p[1])
# p = fit_poly_model_xr(x, y, 1, plot='manual', ax=ax,
# return_just_p=False, color='k',
# fit_label=fit_label)
# arrange the legend a bit:
handles, labels = ax.get_legend_handles_labels()
h_stns = handles[1:4]
l_stns = labels[1:4]
h_fits = [handles[0] , handles[-1]]
l_fits = [labels[0], labels[-1]]
ax.legend(handles=h_fits+h_stns, labels=l_fits+l_stns, loc='upper left', prop={'size':fontsize-3})
ax.set_ylabel('PWV semi-annual to annual amplitude ratio [%]', fontsize=fontsize)
ax.set_xlabel('GNSS station height [m a.s.l]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.grid(True)
ax.set_yticks(np.arange(0, 100, 20))
fig.tight_layout()
if save:
filename = 'pwv_peak_amplitude_altitude.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
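# A short sketch of the error propagation used above for the semi-annual to
# annual amplitude ratio A2/A1: for independent uncertainties A1std and A2std,
# sigma(A2/A1) = sqrt((A2std/A1)**2 + (A2*A1std/A1**2)**2), matching the
# expression applied to the harmonic-fit results:
def _sketch_amplitude_ratio_error(A1, A2, A1std, A2std):
    import numpy as np
    ratio = A2 / A1
    ratio_std = np.sqrt((A2std / A1) ** 2 + (A2 * A1std / A1 ** 2) ** 2)
    return ratio, ratio_std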
def plot_peak_hour_distance(path=work_yuval, season='JJA',
remove_station='dsea', fontsize=22, save=True):
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import groupby_half_hour_xr
from aux_gps import xr_reindex_with_date_range
import xarray as xr
import pandas as pd
import seaborn as sns
import numpy as np
from sklearn.metrics import r2_score
pw = xr.open_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw[[x for x in pw if '_error' not in x]]
pw.load()
pw = pw.sel(time=pw['time.season'] == season)
pw = pw.map(xr_reindex_with_date_range)
df = groupby_half_hour_xr(pw)
halfs = [df.isel(half_hour=x)['half_hour'] for x in df.argmax().values()]
names = [x for x in df]
dfh = pd.DataFrame(halfs, index=names)
geo = produce_geo_gnss_solved_stations(
add_distance_to_coast=True, plot=False)
geo['phase'] = dfh
geo = geo.dropna()
groups = group_sites_to_xarray(upper=False, scope='diurnal')
geo.loc[groups.sel(group='coastal').values, 'group'] = 'coastal'
geo.loc[groups.sel(group='highland').values, 'group'] = 'highland'
geo.loc[groups.sel(group='eastern').values, 'group'] = 'eastern'
fig, ax = plt.subplots(figsize=(14, 10))
ax.grid()
if remove_station is not None:
removed = geo.loc[remove_station].to_frame().T
geo = geo.drop(remove_station, axis=0)
# lnall = sns.scatterplot(data=geo.loc[only], x='distance', y='phase', ax=ax, hue='group', s=100)
# geo['phase'] = pd.to_timedelta(geo['phase'], unit='H')
coast = geo[geo['group'] == 'coastal']
yerr = 1.0
lncoast = ax.errorbar(x=coast.loc[:,
'distance'],
y=coast.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='b')
# lncoast = ax.scatter(coast.loc[:, 'distance'], coast.loc[:, 'phase'], color='b', s=50)
highland = geo[geo['group'] == 'highland']
# lnhighland = ax.scatter(highland.loc[:, 'distance'], highland.loc[:, 'phase'], color='brown', s=50)
lnhighland = ax.errorbar(x=highland.loc[:,
'distance'],
y=highland.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='brown')
eastern = geo[geo['group'] == 'eastern']
# lneastern = ax.scatter(eastern.loc[:, 'distance'], eastern.loc[:, 'phase'], color='green', s=50)
lneastern = ax.errorbar(x=eastern.loc[:,
'distance'],
y=eastern.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='green')
lnremove = ax.scatter(
removed.loc[:, 'distance'], removed.loc[:, 'phase'], marker='x', color='k', s=50)
ax.legend([lncoast,
lnhighland,
lneastern,
lnremove],
['Coastal stations',
'Highland stations',
'Eastern stations',
'DSEA station'],
fontsize=fontsize)
params = np.polyfit(geo['distance'].values, geo.phase.values, 1)
params2 = np.polyfit(geo['distance'].values, geo.phase.values, 2)
x = np.linspace(0, 210, 100)
y = np.polyval(params, x)
y2 = np.polyval(params2, x)
r2 = r2_score(geo.phase.values, np.polyval(params, geo['distance'].values))
ax.plot(x, y, color='k')
textstr = '\n'.join([r'R$^2$: {:.2f}'.format(r2)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.5, 0.95, textstr, transform=ax.transAxes, fontsize=fontsize,
verticalalignment='top', bbox=props)
# ax.plot(x,y2, color='green')
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel('Distance from shore [km]', fontsize=fontsize)
ax.set_ylabel('Peak hour [UTC]', fontsize=fontsize)
# add sunrise UTC hour
ax.axhline(16.66, color='tab:orange', linewidth=2)
# change yticks to hours minuets:
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_yticklabels()]
labels = [pd.to_timedelta(float(x), unit='H') for x in labels]
labels = ['{}:{}'.format(x.components[1], x.components[2])
if x.components[2] != 0 else '{}:00'.format(x.components[1]) for x in labels]
ax.set_yticklabels(labels)
fig.canvas.draw()
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if save:
filename = 'pw_peak_distance_shore.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_monthly_variability_heatmap_from_pwv_anomalies(load_path=work_yuval,
thresh=50, save=True,
fontsize=16,
sort_by=['groups_annual', 'alt']):
"""sort_by=['group_annual', 'lat'], ascending=[1,0]"""
import xarray as xr
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from calendar import month_abbr
from PW_stations import produce_geo_gnss_solved_stations
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 1]).index
# anoms = xr.load_dataset(
# load_path /
# 'GNSS_PW_monthly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
anoms = xr.load_dataset(
load_path /
'GNSS_PW_monthly_anoms_thresh_{:.0f}.nc'.format(thresh))
df = anoms.groupby('time.month').std().to_dataframe()
# sites = group_sites_to_xarray(upper=True, scope='annual').T
# sites_flat = [x.lower() for x in sites.values.flatten() if isinstance(x, str)]
# df = df[sites_flat]
# cols = [x for x in sites if x in df.columns]
df = df[sites]
df.columns = [x.upper() for x in df.columns]
fig = plt.figure(figsize=(14, 10))
grid = plt.GridSpec(
2, 1, height_ratios=[
2, 1], hspace=0)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
cbar_ax = fig.add_axes([0.91, 0.37, 0.02, 0.62]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
cmap='Reds',
vmin=df.min().min(),
vmax=df.max().max(),
annot=True,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': 'PWV anomalies STD [mm]'},
annot_kws={'fontsize': fontsize}, xticklabels=False)
cbar_ax.set_ylabel('PWV anomalies STD [mm]', fontsize=fontsize)
cbar_ax.tick_params(labelsize=fontsize)
# activate top ticks and tickslabales:
ax_heat.xaxis.set_tick_params(
bottom='off',
labelbottom='off',
labelsize=fontsize)
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=fontsize)
df_mean = df.T.mean()
df_mean = df_mean.to_frame()
df_mean[1] = [month_abbr[x] for x in range(1, 13)]
df_mean.columns = ['std', 'month']
g = sns.barplot(data=df_mean, x='month', y='std', ax=ax_group, palette='Reds',
hue='std', dodge=False, linewidth=2.5)
g.legend_.remove()
ax_group.set_ylabel('PWV anomalies STD [mm]', fontsize=fontsize)
ax_group.grid(color='k', linestyle='--',
linewidth=1.5, alpha=0.5, axis='y')
ax_group.xaxis.set_tick_params(labelsize=fontsize)
ax_group.yaxis.set_tick_params(labelsize=fontsize)
ax_group.set_xlabel('', fontsize=fontsize)
# df.T.mean().plot(ax=ax_group, kind='bar', color='k', fontsize=fontsize, rot=0)
fig.tight_layout()
fig.subplots_adjust(right=0.906)
if save:
filename = 'pw_anoms_monthly_variability_heatmap.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_monthly_means_anomalies_with_station_mean(load_path=work_yuval,
thresh=50, save=True,
anoms=None, agg='mean',
fontsize=16, units=None,
remove_stations=['nizn', 'spir'],
sort_by=['groups_annual', 'lat']):
import xarray as xr
import seaborn as sns
from palettable.scientific import diverging as divsci
import numpy as np
import matplotlib.dates as mdates
import pandas as pd
from aux_gps import anomalize_xr
from PW_stations import produce_geo_gnss_solved_stations
sns.set_style('whitegrid')
sns.set_style('ticks')
div_cmap = divsci.Vik_20.mpl_colormap
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 0]).index
if anoms is None:
# anoms = xr.load_dataset(
# load_path /
# 'GNSS_PW_monthly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
anoms = xr.load_dataset(
load_path /
'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
anoms = anomalize_xr(anoms, 'MS', units=units)
if remove_stations is not None:
anoms = anoms[[x for x in anoms if x not in remove_stations]]
df = anoms.to_dataframe()[:'2019']
# sites = group_sites_to_xarray(upper=True, scope='annual').T
# sites_flat = [x.lower() for x in sites.values.flatten() if isinstance(x, str)]
# df = df[sites_flat]
cols = [x for x in sites if x in df.columns]
df = df[cols]
df.columns = [x.upper() for x in df.columns]
weights = df.count(axis=1).shift(periods=-1, freq='15D').astype(int)
fig = plt.figure(figsize=(20, 10))
grid = plt.GridSpec(
2, 1, height_ratios=[
2, 1], hspace=0.0225)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
cbar_ax = fig.add_axes([0.95, 0.43, 0.0125, 0.45]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
center=0.0,
cmap=div_cmap,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': 'PWV anomalies [mm]'}, xticklabels=False)
cbar_ax.set_ylabel('PWV anomalies [mm]', fontsize=fontsize-4)
cbar_ax.tick_params(labelsize=fontsize)
# activate top ticks and tickslabales:
ax_heat.xaxis.set_tick_params(
bottom='off', labelbottom='off', labelsize=fontsize)
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=fontsize)
ax_heat.set_xlabel('')
if agg == 'mean':
ts = df.T.mean().shift(periods=-1, freq='15D')
elif agg == 'median':
ts = df.T.median().shift(periods=-1, freq='15D')
ts.index.name = ''
# dt_as_int = [x for x in range(len(ts.index))]
# xticks_labels = ts.index.strftime('%Y-%m').values[::6]
# xticks = dt_as_int[::6]
# xticks = ts.index
# ts.index = dt_as_int
ts.plot(ax=ax_group, color='k', fontsize=fontsize, lw=2)
barax = ax_group.twinx()
barax.bar(ts.index, weights.values, width=35, color='k', alpha=0.2)
barax.yaxis.set_major_locator(ticker.MaxNLocator(6))
barax.set_ylabel('Stations [#]', fontsize=fontsize-4)
barax.tick_params(labelsize=fontsize)
ax_group.set_xlim(ts.index.min(), ts.index.max() +
pd.Timedelta(15, unit='D'))
ax_group.set_ylabel('PWV {} anomalies [mm]'.format(agg), fontsize=fontsize-4)
# set ticks and align with heatmap axis (move by 0.5):
# ax_group.set_xticks(dt_as_int)
# offset = 1
# ax_group.xaxis.set(ticks=np.arange(offset / 2.,
# max(dt_as_int) + 1 - min(dt_as_int),
# offset),
# ticklabels=dt_as_int)
# move the lines also by 0.5 to align with heatmap:
# lines = ax_group.lines # get the lines
# [x.set_xdata(x.get_xdata() - min(dt_as_int) + 0.5) for x in lines]
# ax_group.xaxis.set(ticks=xticks, ticklabels=xticks_labels)
# ax_group.xaxis.set(ticks=xticks)
years_fmt = mdates.DateFormatter('%Y')
ax_group.xaxis.set_major_locator(mdates.YearLocator())
ax_group.xaxis.set_major_formatter(years_fmt)
ax_group.xaxis.set_minor_locator(mdates.MonthLocator())
# ax_group.xaxis.tick_top()
# ax_group.xaxis.set_ticks_position('both')
# ax_group.tick_params(axis='x', labeltop='off', top='on',
# bottom='on', labelbottom='on')
ax_group.grid()
# ax_group.axvline('2015-09-15')
# ax_group.axhline(2.5)
# plt.setp(ax_group.xaxis.get_majorticklabels(), rotation=45 )
fig.tight_layout()
fig.subplots_adjust(right=0.946)
if save:
filename = 'pw_monthly_means_anomaly_heatmap.png'
plt.savefig(savefig_path / filename, bbox_inches='tight', pad_inches=0.1)
return ts
def plot_grp_anomlay_heatmap(load_path=work_yuval, gis_path=gis_path,
thresh=50, grp='hour', remove_grp=None, season=None,
n_clusters=4, save=True, title=False):
import xarray as xr
import seaborn as sns
import numpy as np
from PW_stations import group_anoms_and_cluster
from aux_gps import geo_annotate
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.colors import ListedColormap
from palettable.scientific import diverging as divsci
from PW_stations import produce_geo_gnss_solved_stations
div_cmap = divsci.Vik_20.mpl_colormap
dem_path = load_path / 'AW3D30'
def weighted_average(grp_df, weights_col='weights'):
return grp_df._get_numeric_data().multiply(
grp_df[weights_col], axis=0).sum() / grp_df[weights_col].sum()
df, labels_sorted, weights = group_anoms_and_cluster(
load_path=load_path, thresh=thresh, grp=grp, season=season,
n_clusters=n_clusters, remove_grp=remove_grp)
# create figure and subplots axes:
fig = plt.figure(figsize=(15, 10))
if title:
if season is not None:
fig.suptitle(
'Precipitable water {}ly anomalies analysis for {} season'.format(grp, season))
else:
fig.suptitle('Precipitable water {}ly anomalies analysis (Weighted KMeans {} clusters)'.format(
grp, n_clusters))
grid = plt.GridSpec(
2, 2, width_ratios=[
3, 2], height_ratios=[
4, 1], wspace=0.1, hspace=0)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
ax_map = fig.add_subplot(grid[0:, 1]) # plt.subplot(122)
    # get the cmap, zip it to the groups and produce a color dictionary:
    # cmap = plt.get_cmap("Accent")
    cmap = qualitative_cmap(n_clusters)
# cmap = plt.get_cmap("Set2_r")
# cmap = ListedColormap(cmap.colors[::-1])
groups = list(set(labels_sorted.values()))
palette = dict(zip(groups, [cmap(x) for x in range(len(groups))]))
label_cmap_dict = dict(zip(labels_sorted.keys(),
[palette[x] for x in labels_sorted.values()]))
cm = ListedColormap([x for x in palette.values()])
# plot heatmap and colorbar:
cbar_ax = fig.add_axes([0.57, 0.24, 0.01, 0.69]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
center=0.0,
cmap=div_cmap,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': '[mm]'})
# activate top ticks and tickslabales:
ax_heat.xaxis.set_tick_params(top='on', labeltop='on')
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=10)
# paint ytick labels with categorical cmap:
boxes = [dict(facecolor=x, boxstyle="square,pad=0.7", alpha=0.6)
for x in label_cmap_dict.values()]
ylabels = [x for x in ax_heat.yaxis.get_ticklabels()]
for label, box in zip(ylabels, boxes):
label.set_bbox(box)
# rotate xtick_labels:
# ax_heat.set_xticklabels(ax_heat.get_xticklabels(), rotation=0,
# fontsize=10)
# plot summed groups (with weights):
df_groups = df.T
df_groups['groups'] = pd.Series(labels_sorted)
df_groups['weights'] = weights
df_groups = df_groups.groupby('groups').apply(weighted_average)
df_groups.drop(['groups', 'weights'], axis=1, inplace=True)
df_groups.T.plot(ax=ax_group, linewidth=2.0, legend=False, cmap=cm)
if grp == 'hour':
ax_group.set_xlabel('hour (UTC)')
ax_group.grid()
group_limit = ax_heat.get_xlim()
ax_group.set_xlim(group_limit)
ax_group.set_ylabel('[mm]')
# set ticks and align with heatmap axis (move by 0.5):
ax_group.set_xticks(df.index.values)
offset = 1
ax_group.xaxis.set(ticks=np.arange(offset / 2.,
max(df.index.values) + 1 -
min(df.index.values),
offset),
ticklabels=df.index.values)
# move the lines also by 0.5 to align with heatmap:
lines = ax_group.lines # get the lines
[x.set_xdata(x.get_xdata() - min(df.index.values) + 0.5) for x in lines]
# plot israel map:
ax_map = plot_israel_map(gis_path=gis_path, ax=ax_map)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
im = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = fig.colorbar(im, ax=ax_map, **cbar_kwargs)
# cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level', size=8, weight='normal')
cb.ax.tick_params(labelsize=8)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
print('getting solved GNSS israeli stations metadata...')
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
gps.index = gps.index.str.upper()
gps = gps.loc[[x for x in df.columns], :]
gps['group'] = pd.Series(labels_sorted)
gps.plot(ax=ax_map, column='group', categorical=True, marker='o',
edgecolor='black', cmap=cm, s=100, legend=True, alpha=1.0,
legend_kwds={'prop': {'size': 10}, 'fontsize': 14,
'loc': 'upper left', 'title': 'clusters'})
# ax_map.set_title('Groupings of {}ly anomalies'.format(grp))
# annotate station names in map:
geo_annotate(ax_map, gps.lon, gps.lat,
gps.index, xytext=(6, 6), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
# plt.legend(['IMS stations', 'GNSS stations'],
# prop={'size': 10}, bbox_to_anchor=(-0.15, 1.0),
# title='Stations')
# plt.legend(prop={'size': 10}, loc='upper left')
# plt.tight_layout()
plt.subplots_adjust(top=0.92,
bottom=0.065,
left=0.065,
right=0.915,
hspace=0.19,
wspace=0.215)
filename = 'pw_{}ly_anoms_{}_clusters_with_map.png'.format(grp, n_clusters)
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return df
def plot_lomb_scargle(path=work_yuval, save=True):
from aux_gps import lomb_scargle_xr
import xarray as xr
pw_mm = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50_homogenized.nc')
pw_mm_median = pw_mm.to_array('station').median('station')
da = lomb_scargle_xr(
pw_mm_median.dropna('time'),
user_freq='MS',
kwargs={
'nyquist_factor': 1,
'samples_per_peak': 100})
plt.ylabel('')
plt.title('Lomb–Scargle periodogram')
plt.xlim([0, 4])
plt.grid()
filename = 'Lomb_scargle_monthly_means.png'
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return da
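# A minimal Lomb-Scargle sketch (not the lomb_scargle_xr wrapper used above),
# assuming astropy is installed: it takes a monthly DataArray, converts time to
# approximate years and returns frequency [1/year] and normalized power:
def _sketch_lomb_scargle(da):
    import numpy as np
    from astropy.timeseries import LombScargle
    da = da.dropna('time')
    t = (da['time'] - da['time'][0]) / np.timedelta64(365, 'D')  # ~years
    freq, power = LombScargle(t.values, da.values).autopower(
        nyquist_factor=1, samples_per_peak=100)
    return freq, power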
def plot_vertical_climatology_months(path=sound_path, field='Rho_wv',
center_month=7):
from aux_gps import path_glob
import xarray as xr
ds = xr.open_dataset(
path /
'bet_dagan_phys_sounding_height_2007-2019.nc')[field]
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
day = ds.sel(sound_time=ds['sound_time.hour'] == 12).groupby(
'sound_time.month').mean('sound_time')
night = ds.sel(sound_time=ds['sound_time.hour'] == 00).groupby(
'sound_time.month').mean('sound_time')
next_month = center_month + 1
last_month = center_month - 1
day = day.sel(month=[last_month, center_month, next_month])
night = night.sel(month=[last_month, center_month, next_month])
for month in day.month:
h = day.sel(month=month)['H-Msl'].values
rh = day.sel(month=month).values
ax[0].semilogy(rh, h)
ax[0].set_title('noon')
ax[0].set_ylabel('height [m]')
ax[0].set_xlabel('{}, [{}]'.format(field, day.attrs['units']))
plt.legend([x for x in ax.lines], [x for x in day.month.values])
for month in night.month:
h = night.sel(month=month)['H-Msl'].values
rh = night.sel(month=month).values
ax[1].semilogy(rh, h)
ax[1].set_title('midnight')
ax[1].set_ylabel('height [m]')
ax[1].set_xlabel('{}, [{}]'.format(field, night.attrs['units']))
plt.legend([x for x in ax.lines], [x for x in night.month.values])
return day, night
def plot_global_warming_with_pwv_annual(climate_path=climate_path, work_path=work_yuval, fontsize=16):
import pandas as pd
import xarray as xr
import numpy as np
from aux_gps import anomalize_xr
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
sns.set_style('ticks')
df = pd.read_csv(climate_path/'GLB.Ts+dSST_2007.csv',
header=1, na_values='*******')
df = df.iloc[:19, :13]
df = df.melt(id_vars='Year')
df['time'] = pd.to_datetime(df['Year'].astype(
str)+'-'+df['variable'].astype(str))
df = df.set_index('time')
df = df.drop(['Year', 'variable'], axis=1)
df.columns = ['T']
df['T'] = pd.to_numeric(df['T'])
df = df.sort_index()
df.columns = ['AIRS-ST-Global']
# df = df.loc['2003':'2019']
# df = df.resample('AS').mean()
dss = xr.open_dataset(climate_path/'AIRS.2002-2021.L3.RetStd_IR031.v7.0.3.0.nc')
dss = dss.sel(time=slice('2003','2019'), Longitude=slice(34,36), Latitude=slice(34,29))
ds = xr.concat([dss['SurfAirTemp_A'], dss['SurfAirTemp_D']], 'dn')
ds['dn'] = ['day', 'night']
ds = ds.mean('dn')
ds -= ds.sel(time=slice('2007','2016')).mean('time')
anoms = anomalize_xr(ds, 'MS')
anoms = anoms.mean('Latitude').mean('Longitude')
df['AIRS-ST-Regional'] = anoms.to_dataframe('AIRS-ST-Regional')
# else:
# df = pd.read_csv(climate_path/'GLB.Ts+dSST.csv',
# header=1, na_values='***')
# df = df.iloc[:, :13]
# df = df.melt(id_vars='Year')
# df['time'] = pd.to_datetime(df['Year'].astype(
# str)+'-'+df['variable'].astype(str))
# df = df.set_index('time')
# df = df.drop(['Year', 'variable'], axis=1)
# df.columns = ['T']
# # df = df.resample('AS').mean()
# df = df.sort_index()
pw = xr.load_dataset(work_path/'GNSS_PW_monthly_anoms_thresh_50.nc')
# pw_2007_2016_mean = pw.sel(time=slice('2007','2016')).mean()
# pw -= pw_2007_2016_mean
pw = pw.to_array('s').mean('s')
pw_df = pw.to_dataframe('PWV')
# df['pwv'] = pw_df.resample('AS').mean()
df['PWV'] = pw_df
df = df.loc['2003': '2019']
df = df.resample('AS').mean()
fig, ax = plt.subplots(figsize=(15, 6))
ax = df.plot(kind='bar', secondary_y='PWV',
color=['tab:red', 'tab:orange', 'tab:blue'],
ax=ax, legend=False, rot=45)
twin = get_twin(ax, 'x')
align_yaxis_np(ax, twin)
# twin.set_yticks([-0.5, 0, 0.5, 1.0, 1.5])
# locator = ticker.MaxNLocator(6)
# ax.yaxis.set_major_locator(locator)
twin.yaxis.set_major_locator(ticker.MaxNLocator(6))
twin.set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
ax.set_ylabel(r'Surface Temperature anomalies [$\degree$C]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
twin.tick_params(labelsize=fontsize)
ax.set_xticklabels(np.arange(2003, 2020))
ax.grid(True)
# add legend:
handles, labels = [], []
for h, l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
for h, l in zip(*twin.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
ax.legend(handles, labels, prop={'size': fontsize-2}, loc='upper left')
ax.set_xlabel('')
fig.tight_layout()
return df
def plot_SST_med(sst_path=work_yuval/'SST', fontsize=16, loop=True):
import xarray as xr
import seaborn as sns
from aux_gps import lat_mean
import numpy as np
def clim_mean(med_sst):
sst = med_sst - 273.15
mean_sst = sst.mean('lon')
mean_sst = lat_mean(mean_sst)
mean_sst = mean_sst.groupby('time.dayofyear').mean()
return mean_sst
sns.set_style('whitegrid')
sns.set_style('ticks')
ds = xr.open_dataset(
sst_path/'med1-1981_2020-NCEI-L4_GHRSST-SSTblend-AVHRR_OI-GLOB-v02.0-fv02.0.nc')
sst = ds['analysed_sst'].sel(time=slice('1997', '2019')).load()
whole_med_lon = [-5, 37]
whole_med_lat = [30, 40]
sst_w = sst.copy().sel(lat=slice(*whole_med_lat), lon=slice(*whole_med_lon))
sst_clim_w = clim_mean(sst_w)
df = sst_clim_w.to_dataframe('SST_whole_Med')
# now for emed:
for i, min_lon in enumerate(np.arange(23, 34, 1)):
e_med_lon = [min_lon, 37]
e_med_lat = [30, 40]
sst_e = sst.copy().sel(lat=slice(*e_med_lat), lon=slice(*e_med_lon))
sst_clim_e = clim_mean(sst_e)
df['SST_EMed_{}'.format(min_lon)] = sst_clim_e.to_dataframe()
# df['SST_EMed'] = sst_clim_e.to_dataframe()
if loop:
ax = df.idxmax().plot(kind='barh')
ax.set_xticks(np.linspace(0, 365, 13)[:-1])
ax.set_xticklabels(np.arange(1, 13))
ax.grid(True)
ax.set_xlabel('month')
else:
ax = df.plot(lw=2, legend=True)
ax.set_xticks(np.linspace(0, 365, 13)[:-1])
ax.set_xticklabels(np.arange(1, 13))
ax.grid(True)
ax.tick_params(labelsize=fontsize)
ax.set_ylabel(r'Temperature [$^{\circ}$C]', fontsize=fontsize)
ax.set_xlabel('month')
return df
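# A hedged sketch of what the assumed lat_mean helper does in clim_mean above:
# an area-weighted latitude average using cos(latitude) weights, here via
# xarray's built-in weighted reduction:
def _sketch_cos_lat_mean(da, lat_dim='lat'):
    import numpy as np
    weights = np.cos(np.deg2rad(da[lat_dim]))
    return da.weighted(weights).mean(lat_dim)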
def plot_SST_med_with_PWV_S1_panel(path=work_yuval,
sst_path=work_yuval/'SST',
ims_path=ims_path,
stations=['tela', 'jslm'], fontsize=16, save=True):
from ims_procedures import gnss_ims_dict
import matplotlib.pyplot as plt
ims_stations = [gnss_ims_dict.get(x) for x in stations]
fig, axes = plt.subplots(1, len(stations), figsize=(15, 6))
for i, (pwv, ims) in enumerate(zip(stations, ims_stations)):
        plot_SST_med_with_PWV_first_annual_harmonic(path=path,
                                                    sst_path=sst_path,
                                                    ims_path=ims_path,
                                                    station=pwv, ims_station=ims,
                                                    fontsize=fontsize, ax=axes[i],
                                                    save=False)
twin = get_twin(axes[i], 'x')
twin.set_ylim(-4.5, 4.5)
axes[i].set_ylim(8, 30)
fig.tight_layout()
if save:
filename = 'Med_SST_surface_temp_PWV_harmonic_annual_{}_{}.png'.format(
*stations)
plt.savefig(savefig_path / filename, orientation='portrait')
return
def plot_SST_med_with_PWV_first_annual_harmonic(path=work_yuval,
sst_path=work_yuval/'SST',
ims_path=ims_path,
station='tela', ims_station='TEL-AVIV-COAST',
fontsize=16, ax=None,
save=True):
import xarray as xr
from aux_gps import month_to_doy_dict
import pandas as pd
import numpy as np
from aux_gps import lat_mean
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
sns.set_style('ticks')
# load harmonics:
ds = xr.load_dataset(path/'GNSS_PW_harmonics_annual.nc')
# stns = group_sites_to_xarray(scope='annual').sel(group='coastal').values
# harms = []
# for stn in stns:
# da = ds['{}_mean'.format(stn)].sel(cpy=1)
# harms.append(da)
# harm_da = xr.concat(harms, 'station')
# harm_da['station'] = stns
harm_da = ds['{}_mean'.format(station)].sel(cpy=1).reset_coords(drop=True)
# harm_da = harm_da.reset_coords(drop=True)
harm_da['month'] = [month_to_doy_dict.get(
x) for x in harm_da['month'].values]
harm_da = harm_da.rename({'month': 'dayofyear'})
# df = harm_da.to_dataset('station').to_dataframe()
df = harm_da.to_dataframe(station)
# load surface temperature data:
# da = xr.open_dataset(ims_path/'GNSS_5mins_TD_ALL_1996_2020.nc')[station]
da = xr.open_dataset(ims_path / 'IMS_TD_israeli_10mins.nc')[ims_station]
da.load()
print(da.groupby('time.year').count())
# da += 273.15
da_mean = da.groupby('time.dayofyear').mean()
df['{}_ST'.format(station)] = da_mean.to_dataframe()
# add 366 dayofyear for visualization:
df366 = pd.DataFrame(df.iloc[0].values+0.01).T
df366.index = [366]
df366.columns = df.columns
df = df.append(df366)
ind = np.arange(1, 367)
df = df.reindex(ind)
df = df.interpolate('cubic')
# now load sst for MED
ds = xr.open_dataset(
sst_path/'med1-1981_2020-NCEI-L4_GHRSST-SSTblend-AVHRR_OI-GLOB-v02.0-fv02.0.nc')
sst = ds['analysed_sst'].sel(time=slice('1997', '2019')).load()
# sst_mean = sst.sel(lon=slice(25,35)).mean('lon')
sst -= 273.15
sst_mean = sst.mean('lon')
sst_mean = lat_mean(sst_mean)
sst_clim = sst_mean.groupby('time.dayofyear').mean()
df['Med-SST'] = sst_clim.to_dataframe()
pwv_name = '{} PWV-S1'.format(station.upper())
ims_name = '{} IMS-ST'.format(station.upper())
df.columns = [pwv_name, ims_name, 'Med-SST']
if ax is None:
fig, ax = plt.subplots(figsize=(8, 6))
# first plot temp:
df[[ims_name, 'Med-SST']].plot(ax=ax, color=['tab:red', 'tab:blue'],
style=['-', '-'], lw=2, legend=False)
ax.set_xticks(np.linspace(0, 365, 13)[:-1])
ax.set_xticklabels(np.arange(1, 13))
ax.grid(True)
ax.tick_params(labelsize=fontsize)
ax.set_ylabel(r'Temperature [$^{\circ}$C]', fontsize=fontsize)
vl = df[[ims_name, 'Med-SST']].idxmax().to_frame('x')
vl['colors'] = ['tab:red', 'tab:blue']
vl['ymin'] = df[[ims_name, 'Med-SST']].min()
vl['ymax'] = df[[ims_name, 'Med-SST']].max()
print(vl)
ax.vlines(x=vl['x'], ymin=vl['ymin'], ymax=vl['ymax'],
colors=vl['colors'], zorder=0)
ax.plot(vl.iloc[0]['x'], vl.iloc[0]['ymax'], color=vl.iloc[0]['colors'],
linewidth=0, marker='o', zorder=15)
ax.plot(vl.iloc[1]['x'], vl.iloc[1]['ymax'], color=vl.iloc[1]['colors'],
linewidth=0, marker='o', zorder=15)
# ax.annotate(text='', xy=(213,15), xytext=(235,15), arrowprops=dict(arrowstyle='<->'), color='k')
# ax.arrow(213, 15, dx=21, dy=0, shape='full', color='k', width=0.25)
#p1 = patches.FancyArrowPatch((213, 15), (235, 15), arrowstyle='<->', mutation_scale=20)
# ax.arrow(217, 15, 16, 0, head_width=0.14, head_length=2,
# linewidth=2, color='k', length_includes_head=True)
# ax.arrow(231, 15, -16, 0, head_width=0.14, head_length=2,
# linewidth=2, color='k', length_includes_head=True)
start = vl.iloc[0]['x'] + 4
end = vl.iloc[1]['x'] - 4
mid = vl['x'].mean()
dy = vl.iloc[1]['x'] - vl.iloc[0]['x'] - 8
days = dy + 8
ax.arrow(start, 15, dy, 0, head_width=0.14, head_length=2,
linewidth=1.5, color='k', length_includes_head=True, zorder=20)
ax.arrow(end, 15, -dy, 0, head_width=0.14, head_length=2,
linewidth=1.5, color='k', length_includes_head=True, zorder=20)
t = ax.text(
mid, 15.8, "{} days".format(days), ha="center", va="center", rotation=0, size=12,
bbox=dict(boxstyle="round4,pad=0.15", fc="white", ec="k", lw=1), zorder=21)
twin = ax.twinx()
df[pwv_name].plot(ax=twin, color='tab:cyan', style='--', lw=2, zorder=0)
twin.set_ylabel('PWV annual anomalies [mm]', fontsize=fontsize)
ax.set_xlabel('month', fontsize=fontsize)
locator = ticker.MaxNLocator(7)
ax.yaxis.set_major_locator(locator)
twin.yaxis.set_major_locator(ticker.MaxNLocator(7))
# add legend:
handles, labels = [], []
for h, l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
for h, l in zip(*twin.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
ax.legend(handles, labels, prop={'size': fontsize-2}, loc='upper left')
# ax.right_ax.set_yticks(np.linspace(ax.right_ax.get_yticks()[0], ax.right_ax.get_yticks()[-1], 7))
twin.vlines(x=df[pwv_name].idxmax(), ymin=df[pwv_name].min(),
ymax=df[pwv_name].max(), colors=['tab:cyan'], ls=['--'], zorder=0)
twin.tick_params(labelsize=fontsize)
# plot points:
twin.plot(df[pwv_name].idxmax(), df[pwv_name].max(),
color='tab:cyan', linewidth=0, marker='o')
# fig.tight_layout()
if save:
filename = 'Med_SST_surface_temp_PWV_harmonic_annual_{}.png'.format(
station)
plt.savefig(savefig_path / filename, orientation='portrait')
return df
def plot_pw_lapse_rate_fit(path=work_yuval, model='TSEN', plot=True):
from PW_stations import produce_geo_gnss_solved_stations
import xarray as xr
from PW_stations import ML_Switcher
import pandas as pd
import matplotlib.pyplot as plt
pw = xr.load_dataset(path / 'GNSS_PW_thresh_50.nc')
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
df_gnss = produce_geo_gnss_solved_stations(plot=False)
df_gnss = df_gnss.loc[[x for x in pw.data_vars], :]
alt = df_gnss['alt'].values
    # resample to monthly means:
pw_new = pw.resample(time='MS').mean()
pw_mean = pw_new.mean('time')
# compute std:
# pw_std = pw_new.std('time')
pw_std = (pw_new.groupby('time.month') -
pw_new.groupby('time.month').mean('time')).std('time')
pw_vals = pw_mean.to_array().to_dataframe(name='pw')
pw_vals = pd.Series(pw_vals.squeeze()).values
pw_std_vals = pw_std.to_array().to_dataframe(name='pw')
pw_std_vals = pd.Series(pw_std_vals.squeeze()).values
ml = ML_Switcher()
fit_model = ml.pick_model(model)
y = pw_vals
X = alt.reshape(-1, 1)
fit_model.fit(X, y)
predict = fit_model.predict(X)
coef = fit_model.coef_[0]
inter = fit_model.intercept_
pw_lapse_rate = abs(coef)*1000
if plot:
fig, ax = plt.subplots(1, 1, figsize=(16, 4))
ax.errorbar(x=alt, y=pw_vals, yerr=pw_std_vals,
marker='.', ls='', capsize=1.5, elinewidth=1.5,
markeredgewidth=1.5, color='k')
ax.grid()
ax.plot(X, predict, c='r')
ax.set_xlabel('meters a.s.l')
ax.set_ylabel('Precipitable Water [mm]')
ax.legend(['{} ({:.2f} [mm/km], {:.2f} [mm])'.format(model,
pw_lapse_rate, inter)])
return df_gnss['alt'], pw_lapse_rate
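# The fit above goes through the module's ML_Switcher wrapper; for reference,
# a minimal sketch of the same lapse-rate estimate using scipy's Theil-Sen
# estimator directly (inputs are plain arrays, not the GNSS files used above):
def _pw_lapse_rate_theilsen_sketch(alt_m, pw_mm):
    """Illustrative sketch: return (lapse rate in mm/km, intercept in mm)."""
    from scipy.stats import theilslopes
    slope, intercept, lo_slope, hi_slope = theilslopes(pw_mm, alt_m)
    return abs(slope) * 1000.0, intercept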
def plot_time_series_as_barplot(ts, anoms=False, ts_ontop=None):
# plt.style.use('fast')
time_dim = list(set(ts.dims))[0]
fig, ax = plt.subplots(figsize=(20, 6), dpi=150)
import matplotlib.dates as mdates
import matplotlib.ticker
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
import pandas as pd
if not anoms:
# sns.barplot(x=ts[time_dim].values, y=ts.values, ax=ax, linewidth=5)
ax.bar(ts[time_dim].values, ts.values, linewidth=5, width=0.0,
facecolor='black', edgecolor='black')
# Series.plot.bar(ax=ax, linewidth=0, width=1)
else:
warm = 'tab:orange'
cold = 'tab:blue'
positive = ts.where(ts > 0).dropna(time_dim)
negative = ts.where(ts < 0).dropna(time_dim)
ax.bar(
positive[time_dim].values,
positive.values,
linewidth=3.0,
width=1.0,
facecolor=warm, edgecolor=warm, alpha=1.0)
ax.bar(
negative[time_dim].values,
negative.values,
width=1.0,
linewidth=3.0,
facecolor=cold, edgecolor=cold, alpha=1.0)
if ts_ontop is not None:
ax_twin = ax.twinx()
color = 'red'
ts_ontop.plot.line(color=color, linewidth=2.0, ax=ax_twin)
        # the x-label is already handled by the main axis
ax_twin.set_ylabel('PW [mm]', color=color)
ax_twin.tick_params(axis='y', labelcolor=color)
ax_twin.legend(['3-month running mean of PW anomalies'])
title_add = ' and the median Precipitable Water anomalies from Israeli GNSS sites'
l2 = ax_twin.get_ylim()
ax.set_ylim(l2)
else:
title_add = ''
ax.grid(None)
ax.set_xlim([pd.to_datetime('1996'), pd.to_datetime('2020')])
ax.set_title('Multivariate ENSO Index Version 2 {}'.format(title_add))
ax.set_ylabel('MEI.v2')
# ax.xaxis.set_major_locator(MultipleLocator(20))
# Change minor ticks to show every 5. (20/4 = 5)
# ax.xaxis.set_minor_locator(AutoMinorLocator(4))
years_fmt = mdates.DateFormatter('%Y')
# ax.figure.autofmt_xdate()
ax.xaxis.set_major_locator(mdates.YearLocator(2))
ax.xaxis.set_minor_locator(mdates.YearLocator(1))
ax.xaxis.set_major_formatter(years_fmt)
# ax.xaxis.set_minor_locator(mdates.MonthLocator())
ax.figure.autofmt_xdate()
# plt.tick_params(
# axis='x', # changes apply to the x-axis
# which='both', # both major and minor ticks are affected
# bottom=True, # ticks along the bottom edge are off
# top=False, # ticks along the top edge are off
# labelbottom=True)
# fig.tight_layout()
plt.show()
return
def plot_tide_pw_lags(path=hydro_path, pw_anom=False, rolling='1H', save=True):
from aux_gps import path_glob
import xarray as xr
import numpy as np
file = path_glob(path, 'PW_tide_sites_*.nc')[-1]
if pw_anom:
file = path_glob(path, 'PW_tide_sites_anom_*.nc')[-1]
ds = xr.load_dataset(file)
names = [x for x in ds.data_vars]
fig, ax = plt.subplots(figsize=(8, 6))
for name in names:
da = ds.mean('station').mean('tide_start')[name]
ser = da.to_series()
if rolling is not None:
ser = ser.rolling(rolling).mean()
time = (ser.index / np.timedelta64(1, 'D')).astype(float)
# ser = ser.loc[pd.Timedelta(-2.2,unit='D'):pd.Timedelta(1, unit='D')]
ser.index = time
ser.plot(marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days around tide event')
if pw_anom:
ax.set_ylabel('PWV anomalies [mm]')
else:
ax.set_ylabel('PWV [mm]')
hstations = [ds[x].attrs['hydro_stations'] for x in ds.data_vars]
events = [ds[x].attrs['total_events'] for x in ds.data_vars]
fmt = list(zip(names, hstations, events))
ax.legend(['{} with {} stations ({} total events)'.format(x.upper(), y, z)
for x, y, z in fmt])
ax.set_xlim([-3, 1])
ax.axvline(0, color='k', linestyle='--')
ax.grid()
filename = 'pw_tide_sites.png'
if pw_anom:
filename = 'pw_tide_sites_anom.png'
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
# ax.xaxis.set_major_locator(mdates.HourLocator(interval=24)) # tick every two hours
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%H'))
# locator = mdates.AutoDateLocator(minticks=3, maxticks=7)
# formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
# title = 'Mean PW for tide stations near all GNSS stations'
# ax.set_title(title)
return
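# The conversion above (``ser.index / np.timedelta64(1, 'D')``) expresses a
# pandas TimedeltaIndex as fractional days around the tide event; a tiny
# self-contained example of the same idiom:
def _timedelta_index_to_days_sketch():
    """Illustrative sketch: TimedeltaIndex -> float days."""
    import numpy as np
    import pandas as pd
    tdi = pd.timedelta_range(start='-2D', end='1D', freq='6H')
    return (tdi / np.timedelta64(1, 'D')).astype(float)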
def plot_profiler(path=work_yuval, ceil_path=ceil_path, title=False,
field='maxsnr', save=True):
import xarray as xr
from ceilometers import read_coastal_BL_levi_2011
from aux_gps import groupby_half_hour_xr
from calendar import month_abbr
df = read_coastal_BL_levi_2011(path=ceil_path)
ds = df.to_xarray()
pw = xr.open_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw['csar']
pw.load()
pw = pw.sel(time=pw['time.month'] == 7).dropna('time')
pw_size = pw.dropna('time').size
pwyears = [pw.time.dt.year.min().item(), pw.time.dt.year.max().item()]
pw_std = groupby_half_hour_xr(pw, reduce='std')['csar']
pw_hour = groupby_half_hour_xr(pw, reduce='mean')['csar']
pw_hour_plus = (pw_hour + pw_std).values
pw_hour_minus = (pw_hour - pw_std).values
if field == 'maxsnr':
mlh_hour = ds['maxsnr']
mlh_std = ds['std_maxsnr']
label = 'Max SNR'
elif field == 'tv_inversion':
mlh_hour = ds['tv_inversion']
mlh_std = ds['std_tv200']
label = 'Tv inversion'
mlh_hour_minus = (mlh_hour - mlh_std).values
mlh_hour_plus = (mlh_hour + mlh_std).values
half_hours = pw_hour.half_hour.values
fig, ax = plt.subplots(figsize=(10, 8))
red = 'tab:red'
blue = 'tab:blue'
pwln = pw_hour.plot(color=blue, marker='s', ax=ax)
ax.fill_between(half_hours, pw_hour_minus,
pw_hour_plus, color=blue, alpha=0.5)
twin = ax.twinx()
mlhln = mlh_hour.plot(color=red, marker='o', ax=twin)
twin.fill_between(half_hours, mlh_hour_minus,
mlh_hour_plus, color=red, alpha=0.5)
pw_label = 'PW: {}-{}, {} ({} pts)'.format(
pwyears[0], pwyears[1], month_abbr[7], pw_size)
mlh_label = 'MLH: {}-{}, {} ({} pts)'.format(1997, 1999, month_abbr[7], 90)
# if month is not None:
# pwmln = pw_m_hour.plot(color='tab:orange', marker='^', ax=ax)
# pwm_label = 'PW: {}-{}, {} ({} pts)'.format(pw_years[0], pw_years[1], month_abbr[month], pw_month.dropna('time').size)
# ax.legend(pwln + mlhln + pwmln, [pw_label, mlh_label, pwm_label], loc=leg_loc)
# else:
ax.legend([pwln[0], mlhln[0]], [pw_label, mlh_label], loc='best')
# plt.legend([pw_label, mlh_label])
ax.tick_params(axis='y', colors=blue)
twin.tick_params(axis='y', colors=red)
ax.set_ylabel('PW [mm]', color=blue)
twin.set_ylabel('MLH [m]', color=red)
twin.set_ylim(400, 1250)
ax.set_xticks([x for x in range(24)])
ax.set_xlabel('Hour of day [UTC]')
ax.grid()
mlh_name = 'Hadera'
textstr = '{}, {}'.format(mlh_name, pw.name.upper())
props = dict(boxstyle='round', facecolor='white', alpha=0.5)
ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=10,
verticalalignment='top', bbox=props)
if title:
ax.set_title('The diurnal cycle of {} Mixing Layer Height ({}) and {} GNSS site PW'.format(
mlh_name, label, pw.name.upper()))
fig.tight_layout()
if save:
filename = 'PW_diurnal_with_MLH_csar_{}.png'.format(field)
plt.savefig(savefig_path / filename, orientation='landscape')
return ax
def plot_ceilometers(path=work_yuval, ceil_path=ceil_path, interpolate='6H',
fontsize=14, save=True):
import xarray as xr
from ceilometers import twin_hourly_mean_plot
from ceilometers import read_all_ceilometer_stations
import numpy as np
pw = xr.open_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw[['tela', 'jslm', 'yrcm', 'nzrt', 'klhv', 'csar']]
pw.load()
ds = read_all_ceilometer_stations(path=ceil_path)
if interpolate is not None:
attrs = [x.attrs for x in ds.data_vars.values()]
ds = ds.interpolate_na('time', max_gap=interpolate, method='cubic')
for i, da in enumerate(ds):
ds[da].attrs.update(attrs[i])
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(15, 6))
couples = [['tela', 'TLV'], ['jslm', 'JR']]
twins = []
for i, ax in enumerate(axes.flatten()):
ax, twin = twin_hourly_mean_plot(pw[couples[i][0]],
ds[couples[i][1]],
month=None,
ax=ax,
title=False,
leg_loc='best', fontsize=fontsize)
twins.append(twin)
ax.xaxis.set_ticks(np.arange(0, 23, 3))
ax.grid()
twin_ylim_min = min(min([x.get_ylim() for x in twins]))
twin_ylim_max = max(max([x.get_ylim() for x in twins]))
for twin in twins:
twin.set_ylim(twin_ylim_min, twin_ylim_max)
fig.tight_layout()
filename = 'PW_diurnal_with_MLH_tela_jslm.png'
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return fig
def plot_field_with_fill_between(da, dim='hour', mean_dim=None, ax=None,
color='b', marker='s'):
if dim not in da.dims:
raise KeyError('{} not in {}'.format(dim, da.name))
if mean_dim is None:
mean_dim = [x for x in da.dims if dim not in x][0]
da_mean = da.mean(mean_dim)
da_std = da.std(mean_dim)
da_minus = da_mean - da_std
da_plus = da_mean + da_std
if ax is None:
fig, ax = plt.subplots(figsize=(8, 6))
line = da_mean.plot(color=color, marker=marker, ax=ax)
ax.fill_between(da_mean[dim], da_minus, da_plus, color=color, alpha=0.5)
return line
def plot_mean_with_fill_between_std(da, grp='hour', mean_dim='time', ax=None,
color='b', marker='s', alpha=0.5):
da_mean = da.groupby('{}.{}'.format(mean_dim, grp)
).mean('{}'.format(mean_dim))
da_std = da.groupby('{}.{}'.format(mean_dim, grp)
).std('{}'.format(mean_dim))
da_minus = da_mean - da_std
da_plus = da_mean + da_std
if ax is None:
fig, ax = plt.subplots(figsize=(8, 6))
line = da_mean.plot(color=color, marker=marker, ax=ax)
ax.fill_between(da_mean[grp], da_minus, da_plus, color=color, alpha=alpha)
return line
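# A hedged usage example for the fill-between helpers above, built on a small
# synthetic hourly series so it does not depend on any local data files; it is
# wrapped in a function so nothing runs at import time.
def _example_mean_with_fill_between_std():
    """Illustrative sketch: diurnal mean +/- std of a synthetic hourly series."""
    import numpy as np
    import pandas as pd
    import xarray as xr
    time = pd.date_range('2019-01-01', periods=24 * 60, freq='H')
    values = 20 + 3 * np.sin(2 * np.pi * time.hour / 24) + np.random.randn(time.size)
    da = xr.DataArray(values, coords={'time': time}, dims='time', name='pwv')
    return plot_mean_with_fill_between_std(da, grp='hour', mean_dim='time')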
def plot_hist_with_seasons(da_ts):
import seaborn as sns
fig, ax = plt.subplots(figsize=(10, 7))
sns.kdeplot(da_ts.dropna('time'), ax=ax, color='k')
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'DJF').dropna('time'),
legend=False,
ax=ax,
shade=True)
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'MAM').dropna('time'),
legend=False,
ax=ax,
shade=True)
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'JJA').dropna('time'),
legend=False,
ax=ax,
shade=True)
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'SON').dropna('time'),
legend=False,
ax=ax,
shade=True)
    # legend order must match the order the KDEs were drawn in:
    plt.legend(['ALL', 'DJF', 'MAM', 'JJA', 'SON'])
return
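# The four seasonal KDE calls above can also be generated with a single
# groupby; a compact sketch (assumes a DataArray with a 'time' dimension, as
# in the function above):
def _seasonal_groups_sketch(da_ts):
    """Illustrative sketch: map season label -> seasonal subset of the series."""
    return {season: grp.dropna('time')
            for season, grp in da_ts.groupby('time.season')}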
def plot_diurnal_pw_all_seasons(path=work_yuval, season='ALL', synoptic=None,
fontsize=20, labelsize=18,
ylim=[-2.7, 3.3], save=True, dss=None):
import xarray as xr
from synoptic_procedures import slice_xr_with_synoptic_class
if dss is None:
gnss_filename = 'GNSS_PW_thresh_50_for_diurnal_analysis_removed_daily.nc'
pw = xr.load_dataset(path / gnss_filename)
else:
pw = dss
df_annual = pw.groupby('time.hour').mean().to_dataframe()
if season is None and synoptic is None:
# plot annual diurnal cycle only:
fg = plot_pw_geographical_segments(df_annual, fg=None, marker='o', color='b',
ylim=ylim)
legend = ['Annual']
elif season == 'ALL' and synoptic is None:
df_jja = pw.sel(time=pw['time.season'] == 'JJA').groupby(
'time.hour').mean().to_dataframe()
df_son = pw.sel(time=pw['time.season'] == 'SON').groupby(
'time.hour').mean().to_dataframe()
df_djf = pw.sel(time=pw['time.season'] == 'DJF').groupby(
'time.hour').mean().to_dataframe()
df_mam = pw.sel(time=pw['time.season'] == 'MAM').groupby(
'time.hour').mean().to_dataframe()
fg = plot_pw_geographical_segments(
df_jja,
fg=None,
marker='s',
color='tab:green',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=0, label='JJA')
fg = plot_pw_geographical_segments(
df_son,
fg=fg,
marker='^',
color='tab:red',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=1, label='SON')
        fg = plot_pw_geographical_segments(
            df_djf,
            fg=fg,
            marker='x',
            color='tab:blue',
            ylim=ylim,
            fontsize=fontsize,
            labelsize=labelsize, zorder=2, label='DJF')
fg = plot_pw_geographical_segments(
df_mam,
fg=fg,
marker='+',
color='tab:orange',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=4, label='MAM')
fg = plot_pw_geographical_segments(df_annual, fg=fg, marker='d',
color='tab:purple', ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=3,
label='Annual')
elif season is None and synoptic == 'ALL':
df_pt = slice_xr_with_synoptic_class(
pw, path=path, syn_class='PT').groupby('time.hour').mean().to_dataframe()
df_rst = slice_xr_with_synoptic_class(
pw, path=path, syn_class='RST').groupby('time.hour').mean().to_dataframe()
df_cl = slice_xr_with_synoptic_class(
pw, path=path, syn_class='CL').groupby('time.hour').mean().to_dataframe()
df_h = slice_xr_with_synoptic_class(
pw, path=path, syn_class='H').groupby('time.hour').mean().to_dataframe()
fg = plot_pw_geographical_segments(
df_pt,
fg=None,
marker='s',
color='tab:green',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=0, label='PT')
fg = plot_pw_geographical_segments(
df_rst,
fg=fg,
marker='^',
color='tab:red',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=1, label='RST')
        fg = plot_pw_geographical_segments(
            df_cl,
            fg=fg,
            marker='x',
            color='tab:blue',
            ylim=ylim,
            fontsize=fontsize,
            labelsize=labelsize, zorder=2, label='CL')
fg = plot_pw_geographical_segments(
df_h,
fg=fg,
marker='+',
color='tab:orange',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=4, label='H')
fg = plot_pw_geographical_segments(df_annual, fg=fg, marker='d',
color='tab:purple', ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=3,
label='Annual')
sites = group_sites_to_xarray(False, scope='diurnal')
for i, (ax, site) in enumerate(zip(fg.axes.flatten(), sites.values.flatten())):
lns = ax.get_lines()
if site in ['yrcm']:
leg_loc = 'upper right'
elif site in ['nrif', 'elat']:
leg_loc = 'upper center'
elif site in ['ramo']:
leg_loc = 'lower center'
else:
leg_loc = None
# do legend for each panel:
# ax.legend(
# lns,
# legend,
# prop={
# 'size': 12},
# framealpha=0.5,
# fancybox=True,
# ncol=2,
# loc=leg_loc, fontsize=12)
lines_labels = [ax.get_legend_handles_labels() for ax in fg.fig.axes][0]
# lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
fg.fig.legend(lines_labels[0], lines_labels[1], prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.029,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
if save:
filename = 'pw_diurnal_geo_{}.png'.format(season)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_climate_classification(path=climate_path, gis_path=gis_path,
fontsize=16):
import xarray as xr
from climate_works import read_climate_classification_legend
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
from matplotlib import colors
ras = xr.open_rasterio(path / 'Beck_KG_V1_present_0p0083.tif')
ds = ras.isel(band=0)
minx = 34.0
miny = 29.0
maxx = 36.5
maxy = 34.0
ds = ds.sortby('y')
ds = ds.sel(x=slice(minx, maxx), y=slice(miny, maxy))
ds = ds.astype(int)
ds = ds.reset_coords(drop=True)
ax_map = plot_israel_map(
gis_path=gis_path,
ax=None,
ticklabelsize=fontsize)
df = read_climate_classification_legend(path)
# get color pixels to dict:
d = df['color'].to_dict()
sort_idx = np.argsort([x for x in d.keys()])
idx = np.searchsorted([x for x in d.keys()], ds.values, sorter=sort_idx)
out = np.asarray([x for x in d.values()])[sort_idx][idx]
ds_as_color = xr.DataArray(out, dims=['y', 'x', 'c'])
ds_as_color['y'] = ds['y']
ds_as_color['x'] = ds['x']
ds_as_color['c'] = ['R', 'G', 'B']
# overlay with dem data:
# cmap = plt.get_cmap('terrain', 41)
# df_gnss = produce_geo_gnss_solved_stations(plot=False)
# c_colors = df.set_index('class_code').loc[df_gnss['code'].unique()]['color'].values
c_colors = df['color'].values
c_li = [c for c in c_colors]
c_colors = np.asarray(c_li)
c_colors = np.unique(ds_as_color.stack(coor=['x', 'y']).T.values, axis=0)
# remove black:
# c_colors = c_colors[:-1]
int_code = np.unique(ds.stack(coor=['x', 'y']).T.values, axis=0)
ticks = [df.loc[x]['class_code'] for x in int_code[1:]]
cc = [df.set_index('class_code').loc[x]['color'] for x in ticks]
cc_as_hex = [colors.rgb2hex(x) for x in cc]
tickd = dict(zip(cc_as_hex, ticks))
# ticks.append('Water')
# ticks.reverse()
bounds = [x for x in range(len(c_colors) + 1)]
chex = [colors.rgb2hex(x) for x in c_colors]
ticks = [tickd.get(x, 'Water') for x in chex]
cmap = colors.ListedColormap(chex)
norm = colors.BoundaryNorm(bounds, cmap.N)
# vmin = ds_as_color.min().item()
# vmax = ds_as_color.max().item()
im = ds_as_color.plot.imshow(
ax=ax_map,
alpha=.7,
add_colorbar=False,
cmap=cmap,
interpolation='antialiased',
origin='lower',
norm=norm)
# colours = im.cmap(im.norm(np.unique(ds_as_color)))
# chex = [colors.rgb2hex(x) for x in colours]
# cmap = colors.ListedColormap(chex)
# bounds=[x for x in range(len(colours))]
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(
im,
boundaries=bounds,
ticks=None,
ax=ax_map,
**cbar_kwargs)
cb.set_label(
label='climate classification',
size=fontsize,
weight='normal')
n = len(c_colors)
    # one tick centered on each color band:
    tick_locs = np.arange(n) + 0.5
cb.set_ticks(tick_locs)
# set tick labels (as before)
cb.set_ticklabels(ticks)
cb.ax.tick_params(labelsize=fontsize)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
# now for the gps stations:
gps = produce_geo_gnss_solved_stations(plot=False)
    # removed = ['hrmn', 'gilb', 'lhav', 'nizn', 'spir']  # optionally drop these sites
    removed = []
print('removing {} stations from map.'.format(removed))
# merged = ['klhv', 'lhav', 'mrav', 'gilb']
merged = []
gps_list = [x for x in gps.index if x not in merged and x not in removed]
gps.loc[gps_list, :].plot(ax=ax_map, edgecolor='black', marker='s',
alpha=1.0, markersize=35, facecolor="None", linewidth=2, zorder=3)
gps_stations = gps_list
to_plot_offset = []
for x, y, label in zip(gps.loc[gps_stations, :].lon, gps.loc[gps_stations,
:].lat, gps.loc[gps_stations, :].index.str.upper()):
if label.lower() in to_plot_offset:
ax_map.annotate(label, xy=(x, y), xytext=(4, -6),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
else:
ax_map.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
return
def group_sites_to_xarray(upper=False, scope='diurnal'):
import xarray as xr
import numpy as np
if scope == 'diurnal':
group1 = ['KABR', 'BSHM', 'CSAR', 'TELA', 'ALON', 'SLOM', 'NIZN']
group2 = ['NZRT', 'MRAV', 'YOSH', 'JSLM', 'KLHV', 'YRCM', 'RAMO']
group3 = ['ELRO', 'KATZ', 'DRAG', 'DSEA', 'SPIR', 'NRIF', 'ELAT']
elif scope == 'annual':
group1 = ['KABR', 'BSHM', 'CSAR', 'TELA', 'ALON', 'SLOM', 'NIZN']
group2 = ['NZRT', 'MRAV', 'YOSH', 'JSLM', 'KLHV', 'YRCM', 'RAMO']
group3 = ['ELRO', 'KATZ', 'DRAG', 'DSEA', 'SPIR', 'NRIF', 'ELAT']
if not upper:
group1 = [x.lower() for x in group1]
group2 = [x.lower() for x in group2]
group3 = [x.lower() for x in group3]
gr1 = xr.DataArray(group1, dims='GNSS')
gr2 = xr.DataArray(group2, dims='GNSS')
gr3 = xr.DataArray(group3, dims='GNSS')
gr1['GNSS'] = np.arange(0, len(gr1))
gr2['GNSS'] = np.arange(0, len(gr2))
gr3['GNSS'] = np.arange(0, len(gr3))
sites = xr.concat([gr1, gr2, gr3], 'group').T
sites['group'] = ['coastal', 'highland', 'eastern']
return sites
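# ``group_sites_to_xarray`` returns a 7 x 3 DataArray (GNSS x group) with the
# columns 'coastal', 'highland' and 'eastern'; several plotting routines below
# flatten it row-wise and keep only the string entries. A minimal sketch of
# that flattening step:
def _flatten_site_groups_sketch(scope='diurnal'):
    """Illustrative sketch: flat, ordered list of lower-case site codes."""
    sites = group_sites_to_xarray(upper=False, scope=scope)
    return [x for x in sites.values.flatten() if isinstance(x, str)]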
# def plot_diurnal_pw_geographical_segments(df, fg=None, marker='o', color='b',
# ylim=[-2, 3]):
# import xarray as xr
# import numpy as np
# from matplotlib.ticker import MultipleLocator
# from PW_stations import produce_geo_gnss_solved_stations
# geo = produce_geo_gnss_solved_stations(plot=False)
# sites = group_sites_to_xarray(upper=False, scope='diurnal')
# sites_flat = [x for x in sites.values.flatten() if isinstance(x, str)]
# da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
# da['GNSS'] = [x for x in range(len(da))]
# if fg is None:
# fg = xr.plot.FacetGrid(
# da,
# col='GNSS',
# col_wrap=3,
# sharex=False,
# sharey=False, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# try:
# site = sites.values[i, j]
# ax = fg.axes[i, j]
# df.loc[:, site].plot(ax=ax, marker=marker, color=color)
# ax.set_xlabel('Hour of day [UTC]')
# ax.yaxis.tick_left()
# ax.grid()
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.spines["bottom"].set_visible(False)
# ax.xaxis.set_ticks(np.arange(0, 23, 3))
# if j == 0:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# elif j == 1:
# if i>5:
## ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# site_label = '{} ({:.0f})'.format(site.upper(), geo.loc[site].alt)
# ax.text(.12, .85, site_label,
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
# ax.yaxis.set_minor_locator(MultipleLocator(3))
# ax.yaxis.grid(
# True,
# which='minor',
# linestyle='--',
# linewidth=1,
# alpha=0.7)
## ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
# if ylim is not None:
# ax.set_ylim(*ylim)
# except KeyError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 0]):
# try:
## df[gr1].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 1]):
# try:
## df[gr2].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 2]):
# try:
## df[gr3].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
#
# fg.fig.tight_layout()
# fg.fig.subplots_adjust()
# return fg
def prepare_reanalysis_monthly_pwv_to_dataframe(path=work_yuval, re='era5',
ds=None):
import xarray as xr
import pandas as pd
    if re == 'era5':
        reanalysis = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
        re_name = 'ERA5'
    elif re == 'uerra':
        reanalysis = xr.load_dataset(path / 'GNSS_uerra_monthly_PW.nc')
        re_name = 'UERRA-HARMONIE'
    elif re is not None and ds is not None:
        reanalysis = ds
        re_name = re
    else:
        raise ValueError("re must be 'era5', 'uerra', or supplied together with ds")
    df_re = reanalysis.to_dataframe()
    df_re['month'] = df_re.index.month
    pw_mm = xr.load_dataset(
        path / 'GNSS_PW_monthly_thresh_50_homogenized.nc')
df = pw_mm.to_dataframe()
df['month'] = df.index.month
# concat:
dff = pd.concat([df, df_re], keys=['GNSS', re_name])
dff['source'] = dff.index.get_level_values(0)
dff = dff.reset_index()
return dff
def plot_long_term_era5_comparison(path=work_yuval, era5_path=era5_path,
fontsize=16,
remove_stations=['nizn', 'spir'], save=True):
import xarray as xr
from aux_gps import anomalize_xr
# from aeronet_analysis import prepare_station_to_pw_comparison
# from PW_stations import ML_Switcher
# from aux_gps import get_julian_dates_from_da
# from scipy.stats.mstats import theilslopes
# TODO: add merra2, 3 panel plot and trend
# load GNSS Israel:
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
if remove_stations is not None:
pw = pw[[x for x in pw if x not in remove_stations]]
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_percent = anomalize_xr(pw, 'MS', verbose=False, units='%')
pw_percent = pw_percent.to_array('station').mean('station')
pw_mean = pw_anoms.to_array('station').mean('station')
pw_mean = pw_mean.sel(time=slice('1998', '2019'))
# load ERA5:
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5_anoms = anomalize_xr(era5, 'MS', verbose=False)
era5_mean = era5_anoms.to_array('station').mean('station')
df = pw_mean.to_dataframe(name='GNSS')
# load MERRA2:
# merra2 = xr.load_dataset(
# path / 'MERRA2/MERRA2_TQV_israel_area_1995-2019.nc')['TQV']
# merra2_mm = merra2.resample(time='MS').mean()
# merra2_anoms = anomalize_xr(
# merra2_mm, time_dim='time', freq='MS', verbose=False)
# merra2_mean = merra2_anoms.mean('lat').mean('lon')
# load AERONET:
# if aero_path is not None:
# aero = prepare_station_to_pw_comparison(path=aero_path, gis_path=gis_path,
# station='boker', mm_anoms=True)
# df['AERONET'] = aero.to_dataframe()
    # offset the ERA5 curve by -5 mm so it does not overlap the GNSS curve:
    era5_to_plot = era5_mean - 5
# merra2_to_plot = merra2_mean - 10
df['ERA5'] = era5_mean.to_dataframe(name='ERA5')
# df['MERRA2'] = merra2_mean.to_dataframe('MERRA2')
fig, ax = plt.subplots(1, 1, figsize=(16, 5))
# df['GNSS'].plot(ax=ax, color='k')
# df['ERA5'].plot(ax=ax, color='r')
# df['AERONET'].plot(ax=ax, color='b')
pwln = pw_mean.plot.line('k-', marker='o', ax=ax,
linewidth=2, markersize=3.5)
era5ln = era5_to_plot.plot.line(
'k--', marker='s', ax=ax, linewidth=2, markersize=3.5)
# merra2ln = merra2_to_plot.plot.line(
# 'g-', marker='d', ax=ax, linewidth=2, markersize=2.5)
era5corr = df.corr().loc['GNSS', 'ERA5']
# merra2corr = df.corr().loc['GNSS', 'MERRA2']
handles = pwln + era5ln # + merra2ln
# labels = ['GNSS', 'ERA5, r={:.2f}'.format(
# era5corr), 'MERRA2, r={:.2f}'.format(merra2corr)]
labels = ['GNSS station average', 'ERA5 regional mean, r={:.2f}'.format(
era5corr)]
ax.legend(handles=handles, labels=labels, loc='upper left',
prop={'size': fontsize-2})
# if aero_path is not None:
# aeroln = aero.plot.line('b-.', ax=ax, alpha=0.8)
# aerocorr = df.corr().loc['GNSS', 'AERONET']
# aero_label = 'AERONET, r={:.2f}'.format(aerocorr)
# handles += aeroln
ax.set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_xlabel('')
ax.grid()
ax = fix_time_axis_ticks(ax, limits=['1998-01', '2020-01'])
fig.tight_layout()
if save:
filename = 'pwv_long_term_anomalies_era5_comparison.png'
plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_long_term_anomalies_with_trends(path=work_yuval,
model_name='TSEN',
fontsize=16,
remove_stations=['nizn', 'spir'],
save=True,
add_percent=False): # ,aero_path=aero_path):
import xarray as xr
from aux_gps import anomalize_xr
from PW_stations import mann_kendall_trend_analysis
from aux_gps import linear_fit_using_scipy_da_ts
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
if remove_stations is not None:
pw = pw[[x for x in pw if x not in remove_stations]]
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_percent = anomalize_xr(pw, 'MS', verbose=False, units='%')
pw_percent = pw_percent.to_array('station').mean('station')
pw_mean = pw_anoms.to_array('station').mean('station')
pw_mean = pw_mean.sel(time=slice('1998', '2019'))
if add_percent:
fig, axes = plt.subplots(2, 1, figsize=(16, 10))
else:
fig, ax = plt.subplots(1, 1, figsize=(16, 5))
axes = [ax, ax]
pwln = pw_mean.plot.line('k-', marker='o', ax=axes[0],
linewidth=2, markersize=5.5)
handles = pwln
labels = ['GNSS station average']
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_mean, model=model_name, slope_factor=3652.5, plot=False)
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
mann_pval = mann_kendall_trend_analysis(pw_mean).loc['p']
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) mm$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trendln = trend.plot(ax=axes[0], color='b', linewidth=2, alpha=1)
handles += trendln
trend_hi.plot.line('b--', ax=axes[0], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('b--', ax=axes[0], linewidth=1.5, alpha=0.8)
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_mean.sel(time=slice('2010', '2019')), model=model_name, slope_factor=3652.5, plot=False)
mann_pval = mann_kendall_trend_analysis(pw_mean.sel(time=slice('2010','2019'))).loc['p']
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
trendln = trend.plot(ax=axes[0], color='r', linewidth=2, alpha=1)
handles += trendln
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) mm$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trend_hi.plot.line('r--', ax=axes[0], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('r--', ax=axes[0], linewidth=1.5, alpha=0.8)
# ax.grid()
# ax.set_xlabel('')
# ax.set_ylabel('PWV mean anomalies [mm]')
# ax.legend(labels=[],handles=[trendln[0]])
# fig.tight_layout()
axes[0].legend(handles=handles, labels=labels, loc='upper left',
prop={'size': fontsize-2})
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[0].set_xlabel('')
axes[0].grid(True)
axes[0] = fix_time_axis_ticks(axes[0], limits=['1998-01', '2020-01'])
if add_percent:
pwln = pw_percent.plot.line('k-', marker='o', ax=axes[1],
linewidth=2, markersize=5.5)
handles = pwln
labels = ['GNSS station average']
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_percent, model=model_name, slope_factor=3652.5, plot=False)
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
mann_pval = mann_kendall_trend_analysis(pw_percent).loc['p']
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) %$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trendln = trend.plot(ax=axes[1], color='b', linewidth=2, alpha=1)
handles += trendln
trend_hi.plot.line('b--', ax=axes[1], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('b--', ax=axes[1], linewidth=1.5, alpha=0.8)
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_percent.sel(time=slice('2010', '2019')), model=model_name, slope_factor=3652.5, plot=False)
mann_pval = mann_kendall_trend_analysis(pw_percent.sel(time=slice('2010','2019'))).loc['p']
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
trendln = trend.plot(ax=axes[1], color='r', linewidth=2, alpha=1)
handles += trendln
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) %$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trend_hi.plot.line('r--', ax=axes[1], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('r--', ax=axes[1], linewidth=1.5, alpha=0.8)
# ax.grid()
# ax.set_xlabel('')
# ax.set_ylabel('PWV mean anomalies [mm]')
# ax.legend(labels=[],handles=[trendln[0]])
# fig.tight_layout()
axes[1].legend(handles=handles, labels=labels, loc='upper left',
prop={'size': fontsize-2})
axes[1].set_ylabel('PWV anomalies [%]', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('')
axes[1].grid()
axes[1] = fix_time_axis_ticks(axes[1], limits=['1998-01', '2020-01'])
fig.tight_layout()
if save:
filename = 'pwv_station_averaged_trends.png'
plt.savefig(savefig_path / filename, orientation='portrait')
return fig
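# ``slope_factor=3652.5`` above converts a per-day slope into a per-decade
# slope (365.25 days/year * 10 years). A self-contained sketch of that
# conversion with a plain Theil-Sen fit, independent of the module's
# linear_fit_using_scipy_da_ts helper:
def _slope_per_decade_sketch(da_monthly):
    """Illustrative sketch: Theil-Sen slope of a monthly DataArray in units per decade."""
    import numpy as np
    from scipy.stats import theilslopes
    # time axis in days since the first sample:
    tdays = (da_monthly['time'] - da_monthly['time'][0]) / np.timedelta64(1, 'D')
    slope_per_day, intercept, lo_slope, hi_slope = theilslopes(
        da_monthly.values, tdays.values)
    return slope_per_day * 3652.5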
def plot_day_night_pwv_monthly_mean_std_heatmap(
path=work_yuval, day_time=['09:00', '15:00'], night_time=['17:00', '21:00'], compare=['day', 'std']):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
pw = xr.load_dataset(work_yuval / 'GNSS_PW_thresh_50_homogenized.nc')
pw = pw[[x for x in pw if 'error' not in x]]
df = pw.to_dataframe()
sites = group_sites_to_xarray(upper=False, scope='annual')
coast = [x for x in sites.sel(group='coastal').dropna('GNSS').values]
high = [x for x in sites.sel(group='highland').dropna('GNSS').values]
east = [x for x in sites.sel(group='eastern').dropna('GNSS').values]
box_coast = dict(facecolor='cyan', pad=0.05, alpha=0.4)
box_high = dict(facecolor='green', pad=0.05, alpha=0.4)
box_east = dict(facecolor='yellow', pad=0.05, alpha=0.4)
color_dict = [{x: box_coast} for x in coast]
color_dict += [{x: box_high} for x in high]
color_dict += [{x: box_east} for x in east]
color_dict = dict((key, d[key]) for d in color_dict for key in d)
sites = sites.T.values.ravel()
sites_flat = [x for x in sites if isinstance(x, str)]
df = df[sites_flat]
df_mm = df.resample('MS').mean()
df_mm_mean = df_mm.groupby(df_mm.index.month).mean()
df_mm_std = df_mm.groupby(df_mm.index.month).std()
df_day = df.between_time(*day_time)
df_night = df.between_time(*night_time)
df_day_mm = df_day.resample('MS').mean()
df_night_mm = df_night.resample('MS').mean()
day_std = df_day_mm.groupby(df_day_mm.index.month).std()
night_std = df_night_mm.groupby(df_night_mm.index.month).std()
day_mean = df_day_mm.groupby(df_day_mm.index.month).mean()
night_mean = df_night_mm.groupby(df_night_mm.index.month).mean()
per_day_std = 100 * (day_std - df_mm_std) / df_mm_std
per_day_mean = 100 * (day_mean - df_mm_mean) / df_mm_mean
per_night_std = 100 * (night_std - df_mm_std) / df_mm_std
per_night_mean = 100 * (night_mean - df_mm_mean) / df_mm_mean
day_night = compare[0]
mean_std = compare[1]
fig, axes = plt.subplots(
1, 2, sharex=False, sharey=False, figsize=(17, 10))
cbar_ax = fig.add_axes([.91, .3, .03, .4])
if compare[1] == 'std':
all_heat = df_mm_std.T
day_heat = day_std.T
title = 'STD'
elif compare[1] == 'mean':
all_heat = df_mm_mean.T
day_heat = day_mean.T
title = 'MEAN'
vmax = max(day_heat.max().max(), all_heat.max().max())
vmin = min(day_heat.min().min(), all_heat.min().min())
sns.heatmap(all_heat, ax=axes[0], cbar=False, vmin=vmin, vmax=vmax,
annot=True, cbar_ax=None, cmap='Reds')
sns.heatmap(day_heat, ax=axes[1], cbar=True, vmin=vmin, vmax=vmax,
annot=True, cbar_ax=cbar_ax, cmap='Reds')
labels_1 = [x for x in axes[0].yaxis.get_ticklabels()]
[label.set_bbox(color_dict[label.get_text()]) for label in labels_1]
labels_2 = [x for x in axes[1].yaxis.get_ticklabels()]
[label.set_bbox(color_dict[label.get_text()]) for label in labels_2]
axes[0].set_title('All {} in mm'.format(title))
axes[1].set_title('Day only ({}-{}) {} in mm'.format(*day_time, title))
[ax.set_xlabel('month') for ax in axes]
fig.tight_layout(rect=[0, 0, .9, 1])
# fig, axes = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(17, 10))
# ax_mean = sns.heatmap(df_mm_mean.T, annot=True, ax=axes[0])
# ax_mean.set_title('All mean in mm')
# ax_std = sns.heatmap(df_mm_std.T, annot=True, ax=axes[1])
# ax_std.set_title('All std in mm')
# labels_mean = [x for x in ax_mean.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_mean]
# labels_std = [x for x in ax_std.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_std]
# [ax.set_xlabel('month') for ax in axes]
# fig.tight_layout()
# fig_day, axes_day = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(17, 10))
# ax_mean = sns.heatmap(per_day_mean.T, annot=True, cmap='bwr', center=0, ax=axes_day[0])
# ax_mean.set_title('Day mean - All mean in % from All mean')
# ax_std = sns.heatmap(per_day_std.T, annot=True, cmap='bwr', center=0, ax=axes_day[1])
# ax_std.set_title('Day std - All std in % from All std')
# labels_mean = [x for x in ax_mean.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_mean]
# labels_std = [x for x in ax_std.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_std]
# [ax.set_xlabel('month') for ax in axes_day]
# fig_day.tight_layout()
# fig_night, axes_night = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(17, 10))
# ax_mean = sns.heatmap(per_night_mean.T, annot=True, cmap='bwr', center=0, ax=axes_night[0])
# ax_mean.set_title('Night mean - All mean in % from All mean')
# ax_std = sns.heatmap(per_night_std.T, annot=True, cmap='bwr', center=0, ax=axes_night[1])
# ax_std.set_title('Night std - All std in % from All std')
# labels_mean = [x for x in ax_mean.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_mean]
# labels_std = [x for x in ax_std.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_std]
# [ax.set_xlabel('month') for ax in axes_night]
# fig_night.tight_layout()
return fig
def plot_pw_geographical_segments(df, scope='diurnal', kind=None, fg=None,
marker='o', color='b', ylim=[-2, 3],
hue=None, fontsize=14, labelsize=10,
ticklabelcolor=None,
zorder=0, label=None, save=False, bins=None):
import xarray as xr
import numpy as np
from scipy.stats import kurtosis
from scipy.stats import skew
from matplotlib.ticker import MultipleLocator
from PW_stations import produce_geo_gnss_solved_stations
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import FormatStrFormatter
import seaborn as sns
scope_dict = {'diurnal': {'xticks': np.arange(0, 23, 3),
'xlabel': 'Hour of day [UTC]',
'ylabel': 'PWV anomalies [mm]',
'colwrap': 3},
'annual': {'xticks': np.arange(1, 13),
'xlabel': 'month',
'ylabel': 'PWV [mm]',
'colwrap': 3}
}
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
color_dict = produce_colors_for_pwv_station(scope=scope, zebra=False, as_dict=True)
geo = produce_geo_gnss_solved_stations(plot=False)
sites = group_sites_to_xarray(upper=False, scope=scope)
# if scope == 'annual':
# sites = sites.T
sites_flat = [x for x in sites.values.flatten() if isinstance(x, str)]
da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
da['GNSS'] = [x for x in range(len(da))]
if fg is None:
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=scope_dict[scope]['colwrap'],
sharex=False,
sharey=False, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
site = sites.values[i, j]
ax = fg.axes[i, j]
if not isinstance(site, str):
ax.set_axis_off()
continue
else:
if kind is None:
df[site].plot(ax=ax, marker=marker, color=color,
zorder=zorder, label=label)
ax.xaxis.set_ticks(scope_dict[scope]['xticks'])
ax.grid(True, which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'violin':
if not 'month' in df.columns:
df['month'] = df.index.month
pal = sns.color_palette("Paired", 12)
sns.violinplot(ax=ax, data=df, x='month', y=df[site],
hue=hue,
fliersize=4, gridsize=250, inner='quartile',
scale='area')
ax.set_ylabel('')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.grid(True, axis='y', which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'violin+swarm':
if not 'month' in df.columns:
df['month'] = df.index.month
pal = sns.color_palette("Paired", 12)
pal = sns.color_palette("tab20")
sns.violinplot(ax=ax, data=df, x='month', y=df[site],
hue=None, color=color_dict.get(site), fliersize=4, gridsize=250, inner=None,
scale='width')
sns.swarmplot(ax=ax, data=df, x='month', y=df[site],
color="k", edgecolor="gray",
size=2.8)
ax.set_ylabel('')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.grid(True, axis='y', which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'mean_month':
if not 'month' in df.columns:
df['month'] = df.index.month
df_mean = df.groupby('month').mean()
df_mean[site].plot(ax=ax, color=color, marker='o', markersize=10, markerfacecolor="None")
ax.set_ylabel('')
ax.xaxis.set_ticks(scope_dict[scope]['xticks'])
ax.set_xlabel('')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.grid(True, axis='y', which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'hist':
if bins is None:
bins = 15
sns.histplot(ax=ax, data=df[site].dropna(),
line_kws={'linewidth': 3}, stat='density', kde=True, bins=bins)
ax.set_xlabel('PWV [mm]', fontsize=fontsize)
ax.grid(True)
ax.set_ylabel('')
xmean = df[site].mean()
xmedian = df[site].median()
std = df[site].std()
sk = skew(df[site].dropna().values)
kurt = kurtosis(df[site].dropna().values)
# xmode = df[y].mode().median()
data_x, data_y = ax.lines[0].get_data()
ymean = np.interp(xmean, data_x, data_y)
ymed = np.interp(xmedian, data_x, data_y)
# ymode = np.interp(xmode, data_x, data_y)
ax.vlines(x=xmean, ymin=0, ymax=ymean,
color='r', linestyle='--', linewidth=3)
ax.vlines(x=xmedian, ymin=0, ymax=ymed,
color='g', linestyle='-', linewidth=3)
# ax.vlines(x=xmode, ymin=0, ymax=ymode, color='k', linestyle='-')
ax.legend(['Mean: {:.1f}'.format(
xmean), 'Median: {:.1f}'.format(xmedian)], fontsize=fontsize)
# ax.text(0.55, 0.45, "Std-Dev: {:.1f}\nSkewness: {:.1f}\nKurtosis: {:.1f}".format(std, sk, kurt),transform=ax.transAxes, fontsize=fontsize)
ax.tick_params(axis='x', which='major', labelsize=labelsize)
if kind != 'hist':
ax.set_xlabel(scope_dict[scope]['xlabel'], fontsize=16)
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(axis='y', which='major', labelsize=labelsize)
# set minor y tick labels:
# ax.yaxis.set_minor_formatter(FormatStrFormatter("%.2f"))
# ax.tick_params(axis='y', which='minor', labelsize=labelsize-8)
ax.yaxis.tick_left()
if j == 0:
if kind != 'hist':
ax.set_ylabel(scope_dict[scope]['ylabel'], fontsize=16)
else:
ax.set_ylabel('Frequency', fontsize=16)
# elif j == 1:
# if i>5:
# ax.set_ylabel(scope_dict[scope]['ylabel'], fontsize=12)
site_label = '{} ({:.0f})'.format(
site.upper(), geo.loc[site].alt)
ax.text(.17, .87, site_label, fontsize=fontsize,
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
if ticklabelcolor is not None:
ax.tick_params(axis='y', labelcolor=ticklabelcolor)
# ax.yaxis.grid(
# True,
# which='minor',
# linestyle='--',
# linewidth=1,
# alpha=0.7)
# ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
if ylim is not None:
ax.set_ylim(*ylim)
# except KeyError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 0]):
# try:
# df[gr1].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 1]):
# try:
# df[gr2].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 2]):
# try:
# df[gr3].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
fg.fig.tight_layout()
fg.fig.subplots_adjust()
if save:
filename = 'pw_{}_means_{}.png'.format(scope, kind)
plt.savefig(savefig_path / filename, orientation='portrait')
# plt.savefig(savefig_path / filename, orientation='landscape')
return fg
def plot_PWV_comparison_GNSS_radiosonde(path=work_yuval, sound_path=sound_path,
save=True, fontsize=16):
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.patches as mpatches
import matplotlib
matplotlib.rcParams['lines.markeredgewidth'] = 1
sns.set_style('whitegrid')
sns.set_style('ticks')
pal = sns.color_palette("tab10", 2)
# load radiosonde:
radio = xr.load_dataarray(sound_path / 'bet_dagan_2s_sounding_PWV_2014-2019.nc')
radio = radio.rename({'sound_time': 'time'})
radio = radio.resample(time='MS').mean()
radio.name = 'radio'
dfr = radio.to_dataframe()
dfr['month'] = dfr.index.month
# load tela:
tela = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50.nc')['tela']
dfm = tela.to_dataframe(name='tela-pwv')
dfm = dfm.loc[dfr.index]
dfm['month'] = dfm.index.month
dff = pd.concat([dfm, dfr], keys=['GNSS-TELA', 'Radiosonde'])
dff['source'] = dff.index.get_level_values(0)
# dff['month'] = dfm.index.month
dff = dff.reset_index()
dff['pwv'] = dff['tela-pwv'].fillna(0)+dff['radio'].fillna(0)
dff = dff[dff['pwv'] != 0]
fig = plt.figure(figsize=(20, 6))
sns.set_style("whitegrid")
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
grid = plt.GridSpec(
1, 2, width_ratios=[
2, 1], wspace=0.1, hspace=0)
ax_ts = fig.add_subplot(grid[0]) # plt.subplot(221)
ax_v = fig.add_subplot(grid[1])
# fig, axes = plt.subplots(1, 2, figsize=(20, 6))
ax_v = sns.violinplot(data=dff, x='month', y='pwv',
fliersize=10, gridsize=250, ax=ax_v,
inner=None, scale='width', palette=pal,
hue='source', split=True, zorder=20)
[x.set_alpha(0.5) for x in ax_v.collections]
    # extra kwargs are forwarded to plt.plot, so only Line2D properties
    # (e.g. markeredgecolor) are valid here:
    ax_v = sns.pointplot(x='month', y='pwv', data=dff, estimator=np.mean,
                         dodge=True, ax=ax_v, hue='source', color=None,
                         linestyles='-', markers=['s', 'o'], scale=0.7,
                         ci=None, alpha=0.5, zorder=0,
                         markeredgecolor='k', markeredgewidth=0.4)
ax_v.get_legend().set_title('')
p1 = (mpatches.Patch(facecolor=pal[0], edgecolor='k', alpha=0.5))
p2 = (mpatches.Patch(facecolor=pal[1], edgecolor='k', alpha=0.5))
handles = [p1, p2]
ax_v.legend(handles=handles, labels=['GNSS-TELA', 'Radiosonde'],
loc='upper left', prop={'size': fontsize-2})
# ax_v.legend(loc='upper left', prop={'size': fontsize-2})
ax_v.tick_params(labelsize=fontsize)
ax_v.set_ylabel('')
ax_v.grid(True, axis='both')
ax_v.set_xlabel('month', fontsize=fontsize)
df = dfm['tela-pwv'].to_frame()
df.columns = ['GNSS-TELA']
df['Radiosonde'] = dfr['radio']
cmap = sns.color_palette("tab10", as_cmap=True)
df.plot(ax=ax_ts, style=['s-', 'o-'], cmap=cmap)
# df['GNSS-TELA'].plot(ax=ax_ts, style='s-', cmap=cmap)
# df['Radiosonde'].plot(ax=ax_ts, style='o-', cmap=cmap)
ax_ts.grid(True, axis='both')
ylim = ax_v.get_ylim()
ax_ts.set_ylim(*ylim)
ax_ts.set_ylabel('PWV [mm]', fontsize=fontsize)
ax_ts.set_xlabel('')
ax_ts.legend(loc='upper left', prop={'size': fontsize-2})
ax_ts.tick_params(labelsize=fontsize)
fig.tight_layout()
if save:
filename = 'pwv_radio_comparison_violin+ts.png'
plt.savefig(savefig_path / filename, orientation='landscape',bbox_inches='tight')
return fig
def prepare_diurnal_variability_table(path=work_yuval, rename_cols=True):
from PW_stations import calculate_diurnal_variability
df = calculate_diurnal_variability()
gr = group_sites_to_xarray(scope='diurnal')
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
if rename_cols:
df.columns = ['Annual [%]', 'JJA [%]', 'SON [%]', 'DJF [%]', 'MAM [%]']
cols = [x for x in df.columns]
df['Location'] = geo
cols = ['Location'] + cols
df = df[cols]
df.index = df.index.str.upper()
print(df.to_latex())
print('')
print(df.groupby('Location').mean().to_latex())
return df
def prepare_harmonics_table(path=work_yuval, season='ALL',
scope='diurnal', era5=False, add_third=False):
import xarray as xr
from aux_gps import run_MLR_harmonics
import pandas as pd
import numpy as np
from calendar import month_abbr
if scope == 'diurnal':
cunits = 'cpd'
grp = 'hour'
grp_slice = [0, 12]
tunits = 'UTC'
elif scope == 'annual':
cunits = 'cpy'
grp = 'month'
grp_slice = [7, 12]
tunits = 'month'
if era5:
ds = xr.load_dataset(work_yuval / 'GNSS_PW_ERA5_harmonics_annual.nc')
else:
ds = xr.load_dataset(work_yuval / 'GNSS_PW_harmonics_{}.nc'.format(scope))
stations = list(set([x.split('_')[0] for x in ds]))
records = []
for station in stations:
        if 'season' in ds.dims:
            diu_ph = ds[station + '_mean'].sel({'season': season, cunits: 1}).idxmax()
            diu_amp = ds[station + '_mean'].sel({'season': season, cunits: 1}).max()
            semidiu_ph = ds[station +
                            '_mean'].sel({'season': season, cunits: 2, grp: slice(*grp_slice)}).idxmax()
            semidiu_amp = ds[station +
                             '_mean'].sel({'season': season, cunits: 2, grp: slice(*grp_slice)}).max()
else:
diu_ph = ds[station + '_mean'].sel({cunits: 1}).idxmax()
diu_amp = ds[station + '_mean'].sel({cunits: 1}).max()
semidiu_ph = ds[station +
'_mean'].sel({cunits: 2, grp: slice(*grp_slice)}).idxmax()
semidiu_amp = ds[station +
'_mean'].sel({cunits: 2, grp: slice(*grp_slice)}).max()
if add_third:
third_ph = ds[station +
'_mean'].sel({cunits: 3, grp: slice(*grp_slice)}).idxmax()
third_amp = ds[station +
'_mean'].sel({cunits: 3, grp: slice(*grp_slice)}).max()
ds_for_MLR = ds[['{}'.format(station), '{}_mean'.format(station)]]
if add_third:
harm_di = run_MLR_harmonics(
ds_for_MLR, season=season, cunits=cunits, plot=False)
record = [station, diu_amp.item(), diu_ph.item(), harm_di[1],
semidiu_amp.item(), semidiu_ph.item(), harm_di[2],
third_amp.item(), third_ph.item(), harm_di[3],
harm_di[1] + harm_di[2] + harm_di[3]]
else:
harm_di = run_MLR_harmonics(
ds_for_MLR, season=season, cunits=cunits, plot=False)
record = [station, diu_amp.item(), diu_ph.item(), harm_di[1],
semidiu_amp.item(), semidiu_ph.item(), harm_di[2],
harm_di[1] + harm_di[2]]
records.append(record)
df = pd.DataFrame(records)
if add_third:
df.columns = ['Station', 'A1 [mm]', 'P1 [{}]'.format(tunits), 'V1 [%]', 'A2 [mm]',
'P2 [{}]'.format(tunits), 'V2 [%]', 'A3 [mm]', 'P3 [{}]'.format(tunits), 'V3 [%]', 'VT [%]']
else:
df.columns = ['Station', 'A1 [mm]', 'P1 [{}]'.format(tunits), 'V1 [%]', 'A2 [mm]',
'P2 [{}]'.format(tunits), 'V2 [%]', 'VT [%]']
df = df.set_index('Station')
gr = group_sites_to_xarray(scope=scope)
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
# remove nans form mixed nans and str numpy:
new = new[~pd.isnull(new)]
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
df['Location'] = geo
df.index = df.index.str.upper()
pd.options.display.float_format = '{:.1f}'.format
if scope == 'annual':
        # the phase columns were created with the lower-case tunits ('month'):
        p1, p2, p3 = ['P{} [{}]'.format(i, tunits) for i in range(1, 4)]
        df[p1] = df[p1].astype(int).apply(lambda x: month_abbr[x])
        df[p2] = df[p2].astype(int).apply(lambda x: month_abbr[x])
        if add_third:
            df[p3] = df[p3].astype(int).apply(lambda x: month_abbr[x])
if add_third:
df = df[['Location', 'A1 [mm]', 'A2 [mm]', 'A3 [mm]', 'P1 [{}]'.format(tunits),
'P2 [{}]'.format(tunits),'P3 [{}]'.format(tunits), 'V1 [%]', 'V2 [%]', 'V3 [%]', 'VT [%]']]
else:
df = df[['Location', 'A1 [mm]', 'A2 [mm]', 'P1 [{}]'.format(tunits),
'P2 [{}]'.format(tunits), 'V1 [%]', 'V2 [%]', 'VT [%]']]
print(df.to_latex())
return df
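# The A_k / P_k columns above give the amplitude and phase of each harmonic;
# assuming a cosine convention (which may differ in detail from the fit inside
# run_MLR_harmonics), the k-th annual harmonic could be reconstructed from the
# table as in the sketch below.
def _reconstruct_annual_harmonic_sketch(amp, phase_month, k=1, period=12):
    """Illustrative sketch: amp * cos(2*pi*k*(month - phase_month) / period)
    evaluated at months 1..12."""
    import numpy as np
    months = np.arange(1, 13)
    return amp * np.cos(2 * np.pi * k * (months - phase_month) / period)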
def plot_station_mean_violin_plot(path=work_yuval,
remove_stations=['nizn','spir'],
fontsize=16, save=True):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
pw = xr.load_dataset(path / 'GNSS_PW_monthly_anoms_thresh_50.nc')
if remove_stations is not None:
pw = pw[[x for x in pw if x not in remove_stations]]
pw_mean = pw.to_array('s').mean('s')
df = pw_mean.to_dataframe(name='pwv')
df['month'] = df.index.month
df['last_decade'] = df.index.year >= 2010
    df['years'] = '1997-2009'
    # set via .loc on the frame to avoid chained-assignment pitfalls:
    df.loc[df['last_decade'], 'years'] = '2010-2019'
fig, axes = plt.subplots(2, 1, figsize=(12, 10))
# sns.histplot(pw_mean, bins=25, ax=axes[0], kde=True, stat='count')
# axes[0].set_xlabel('PWV anomalies [mm]')
# df = pw_mean.groupby('time.month').std().to_dataframe(name='PWV-SD')
# df.plot.bar(ax=axes[1], rot=0)
# axes[1].set_ylabel('PWV anomalies SD [mm]')
axes[0]= sns.violinplot(ax=axes[0], data=df, x='month', y='pwv', color='tab:purple',
fliersize=10, gridsize=250, inner=None, scale='width',
hue=None)
[x.set_alpha(0.8) for x in axes[0].collections]
sns.swarmplot(ax=axes[0], x="month", y='pwv', data=df,
color="k", edgecolor="gray",
hue=None, dodge=False)
colors = ["tab:blue", "tab:red"] # Set your custom color palette
    # sns.set_palette returns None; keep the palette object itself for 'palette=':
    blue_red = sns.color_palette(colors)
    sns.set_palette(blue_red)
axes[1] = sns.violinplot(ax=axes[1], data=df, x='month', y='pwv',
palette=blue_red, fliersize=10, gridsize=250,
inner=None, scale='width',
hue='years', split=True)
sns.swarmplot(ax=axes[1], x="month", y='pwv', data=df,
size=4.5, color='k', edgecolor="gray", palette=None,
hue='years', dodge=True)
[x.set_alpha(0.8) for x in axes[1].collections]
# remove legend, reorder and re-plot:
axes[1].get_legend().remove()
handles, labels = axes[1].get_legend_handles_labels()
axes[1].legend(handles=handles[0:2], labels=labels[0:2],
loc='upper left', prop={'size': 16})
# upper legend:
color = axes[0].collections[0].get_facecolor()[0]
handle = (mpatches.Patch(facecolor=color, edgecolor='k', alpha=0.8))
axes[0].legend(handles=[handle], labels=['1997-2019'],
loc='upper left', prop={'size': 16})
axes[0].grid()
axes[1].grid()
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[1].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('month', fontsize=fontsize)
# draw 0 line:
axes[0].axhline(0, color='k', lw=2, zorder=0)
axes[1].axhline(0, color='k', lw=2, zorder=0)
# annotate extreme events :
axes[0].annotate('2015', xy=(9, 5.58), xycoords='data',
xytext=(8, 7), textcoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='center',
fontsize=fontsize, fontweight='bold')
axes[0].annotate('2013', xy=(9, -5.8), xycoords='data',
xytext=(8, -7), textcoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='center',
fontsize=fontsize, fontweight='bold')
axes[0].set_ylim(-10, 10)
axes[1].set_ylim(-10, 10)
fig.tight_layout()
fig.subplots_adjust(top=0.984,
bottom=0.078,
left=0.099,
right=0.988,
hspace=0.092,
wspace=0.175)
if save:
filename = 'pwv_inter-annual_violin+swarm.png'
plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_october_2015(path=work_yuval):
import xarray as xr
pw_daily = xr.load_dataset(work_yuval /
'GNSS_PW_daily_thresh_50_homogenized.nc')
pw = xr.load_dataset(work_yuval / 'GNSS_PW_thresh_50_homogenized.nc')
pw = pw[[x for x in pw if '_error' not in x]]
pw_daily = pw_daily[[x for x in pw if '_error' not in x]]
fig, ax = plt.subplots(figsize=(20, 12))
ln1 = pw['tela'].sel(time=slice('2015-07', '2015-12')
).plot(linewidth=0.5, ax=ax)
ln2 = pw['jslm'].sel(time=slice('2015-07', '2015-12')
).plot(linewidth=0.5, ax=ax)
ln3 = pw_daily['tela'].sel(time=slice(
'2015-07', '2015-12')).plot(color=ln1[0].get_color(), linewidth=2.0, ax=ax)
ln4 = pw_daily['jslm'].sel(time=slice(
'2015-07', '2015-12')).plot(color=ln2[0].get_color(), linewidth=2.0, ax=ax)
ax.grid()
ax.legend(ln1+ln2+ln3+ln4, ['TELA-5mins',
'JSLM-5mins', 'TELA-daily', 'JSLM-daily'])
fig, ax = plt.subplots(figsize=(20, 12))
ln1 = pw['tela'].sel(time='2015-10').plot(ax=ax)
ln2 = pw['jslm'].sel(time='2015-10').plot(ax=ax)
ax.grid()
ax.legend(ln1+ln2, ['TELA-5mins', 'JSLM-5mins'])
fig, ax = plt.subplots(figsize=(20, 12))
ln1 = pw['tela'].sel(time=slice('2015-10-22', '2015-10-27')).plot(ax=ax)
ln2 = pw['jslm'].sel(time=slice('2015-10-22', '2015-10-27')).plot(ax=ax)
ax.grid()
ax.legend(ln1+ln2, ['TELA-5mins', 'JSLM-5mins'])
return ax
def plot_correlation_pwv_mean_anoms_and_qflux_anoms(era5_path=era5_path,
work_path=work_yuval,
anoms=None, pwv_mm=None,
all_months=False, mf='qf',
add_hline=None, title=None,
save=True,
remove_stations=['nizn', 'spir']):
import xarray as xr
from aux_gps import anomalize_xr
import matplotlib.pyplot as plt
from aux_gps import get_season_for_pandas_dtindex
from aux_gps import calculate_pressure_integral
import seaborn as sns
# first load pw and produce mean anomalies:
pw = xr.load_dataset(work_path/'GNSS_PW_monthly_thresh_50.nc')
if remove_stations is not None:
pw = pw[[x for x in pw if x not in remove_stations]]
if anoms is None:
pw_anoms = anomalize_xr(pw, 'MS')
pw_anoms_mean = pw_anoms.to_array('s').mean('s')
else:
pw_anoms_mean = pw[anoms]
if pwv_mm is not None:
pw_anoms_mean = pwv_mm
    # now load the moisture flux (qflux) and resample to monthly means:
if anoms is None:
ds = xr.load_dataset(
era5_path/'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc')
else:
ds = xr.load_dataset(work_path / 'GNSS_ERA5_qf_1996-2019.nc')
mf = anoms
qf_mm = ds[mf].resample(time='MS').mean()
# add pressure integral:
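    # dividing the pressure integral by g (~9.79 m s^-2) converts it to a
    # vertically integrated moisture flux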
iqf = calculate_pressure_integral(qf_mm)/9.79
iqf = iqf.expand_dims('level')
iqf['level'] = ['integrated']
qf_mm = xr.concat([qf_mm.sortby('level'), iqf], 'level')
# now produce corr for each level:
dsl = [xr.corr(qf_mm.sel(level=x), pw_anoms_mean) for x in ds['level']][::-1]
dsl.append(xr.corr(qf_mm.sel(level='integrated'), pw_anoms_mean))
dsl = xr.concat(dsl, 'level')
# corr = xr.concat(dsl + [iqf], 'level')
corr_annual = xr.concat(dsl, 'level')
df = pw_anoms_mean.to_dataframe('pwv')
df = df.join(qf_mm.to_dataset('level').to_dataframe())
season = get_season_for_pandas_dtindex(df)
# corr = df.groupby(df.index.month).corr()['pwv'].unstack()
corr = df.groupby(season).corr()['pwv'].unstack()
corr = corr.drop('pwv', axis=1).T
corr = corr[['DJF','MAM','JJA','SON']]
corr['Annual'] = corr_annual.to_dataframe('Annual')
if all_months:
corr.index.name = 'season'
fig, ax = plt.subplots(figsize=(6, 9))
sns.heatmap(corr, annot=True, center=0, cmap='coolwarm', ax=ax, cbar_kws={
'label': 'Pearson correlation coefficient ', 'aspect': 40})
ax.set_ylabel('pressure level [hPa]')
ax.set_xlabel('')
# add line to separate integrated from level
ax.hlines([37], *ax.get_xlim(), color='k')
# add boxes around maximal values:
ax.hlines([26], [1], [5], color='w', lw=0.5)
ax.hlines([27], [1], [5], color='w', lw=0.5)
ax.vlines([1, 2, 3, 4], 26, 27, color='w', lw=0.5)
ax.hlines([28], [0], [1], color='w', lw=0.5)
ax.hlines([29], [0], [1], color='w', lw=0.5)
ax.vlines([0, 1], 28, 29, color='w', lw=0.5)
fig.tight_layout()
filename = 'pwv_qflux_levels_correlations_months.png'
else:
# fig = plt.figure(figsize=(20, 6))
# gridax = plt.GridSpec(1, 2, width_ratios=[
# 10, 2], wspace=0.05)
# ax_level = fig.add_subplot(gridax[0, 1]) # plt.subplot(221)
# ax_ts = fig.add_subplot(gridax[0, 0]) # plt.subplot(122)
fig, ax = plt.subplots(figsize=(8, 6))
corr_annual = corr_annual.to_dataframe('Annual')
corr_annual.plot(ax=ax, lw=2, label='Annual', color=seasonal_colors['Annual'])
colors = [seasonal_colors[x] for x in corr.columns]
corr.iloc[0:37].plot(ax=ax, lw=2, color=colors)
# ax_level.yaxis.set_ticks_position("right")
# ax_level.yaxis.set_label_position("right")
ax.grid()
ax.set_ylabel('pearson correlation coefficient')
ax.set_xlabel('pressure level [hPa]')
if add_hline is not None:
ax.axvline(add_hline, color='k', lw=2)
int_corr = df[['pwv','integrated']].corr()['integrated']['pwv']
# ax.axhline(int_corr, color='r', linestyle='--', lw=2)
# df[['pwv', add_hline]].loc['1997':'2019'].plot(ax=ax_ts, secondary_y=add_hline)
filename = 'pwv_qflux_levels_correlations.png'
if title is not None:
fig.suptitle(title)
if save:
plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_pwv_anomalies_histogram(path=work_yuval):
import xarray as xr
import numpy as np
import seaborn as sns
from scipy.stats import norm
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_anoms_thresh_50_homogenized.nc')
arr = pw.to_array('station').to_dataframe('pw').values.ravel()
arr_no_nans = arr[~np.isnan(arr)]
mu, std = norm.fit(arr_no_nans)
ax = sns.histplot(
arr_no_nans,
stat='density',
color='tab:orange',
alpha=0.5)
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
ln = ax.plot(x, p, 'k', linewidth=2)
# x_std = x[(x>=-std) & (x<=std)]
# y_std = norm.pdf(x_std, mu, std)
# x_std2 = x[(x>=-2*std) & (x<=-std) | (x>=std) & (x<=2*std)]
# y_std2 = norm.pdf(x_std2, mu, std)
# ax.fill_between(x_std,y_std,0, alpha=0.7, color='b')
# ax.fill_between(x_std2,y_std2,0, alpha=0.7, color='r')
y_std = [norm.pdf(std, mu, std), norm.pdf(-std, mu, std)]
y_std2 = [norm.pdf(std * 2, mu, std), norm.pdf(-std * 2, mu, std)]
ln_std = ax.vlines([-std, std], ymin=[0, 0], ymax=y_std,
color='tab:blue', linewidth=2)
ln_std2 = ax.vlines([-std * 2, std * 2], ymin=[0, 0],
ymax=y_std2, color='tab:red', linewidth=2)
leg_labels = ['Normal distribution fit',
'1-Sigma: {:.2f} mm'.format(std),
'2-Sigma: {:.2f} mm'.format(2 * std)]
ax.legend([ln[0], ln_std, ln_std2], leg_labels)
ax.set_xlabel('PWV anomalies [mm]')
    return ax
# def plot_quiver_panels(u, v, tcwv,
# times=['2015-10', '2013-10'], level=750):
# import matplotlib.pyplot as plt
# from matplotlib.colors import Normalize
# from mpl_toolkits.axes_grid1 import AxesGrid
# import matplotlib.cm as cm
# import pandas as pd
# from palettable.colorbrewer import sequential as seq_cmap
# from palettable.colorbrewer import diverging as div_cmap
# from aux_gps import anomalize_xr
# cmap_yl = seq_cmap.YlOrRd_9.mpl_colormap
# cmap_rb = div_cmap.PuOr_11.mpl_colormap
# cmap = cmap_rb
# times = pd.to_datetime(times)
# tcwv = slice_time_level_geo_field(tcwv, level=None, time=times,
# anoms=True,
# lats=[17, 47], lons=[17, 47])
# qu = slice_time_level_geo_field(u, level=750, time=times,
# anoms=True,
# lats=[17, 47], lons=[17, 47])
# qv = slice_time_level_geo_field(v, level=750, time=times,
# anoms=True,
# lats=[17, 47], lons=[17, 47])
# fig = plt.figure(figsize=(15, 5))
# # fig, axes = plt.subplots(1, 2, figsize=(15, 5))
# grid = AxesGrid(fig, 111, # as in plt.subplot(111)
# nrows_ncols=(1, 2),
# axes_pad=0.15,
# share_all=True,
# cbar_location="right",
# cbar_mode="single",
# cbar_size="7%",
# cbar_pad=0.15,
# )
# # normalizer=Normalize(-6,6)
# vmax= abs(max(abs(tcwv.min().values), abs(tcwv.max().values)))
# vmin = -vmax
# print(vmin, vmax)
# # vmax = tcwv.max().item()
# cs1 = plot_contourf_field_with_map_overlay(tcwv.sel(time=times[0]), ax=grid[0],
# vmin=vmin, vmax=vmax, cmap=cmap,
# colorbar=False, title='2015-10',
# cbar_label='', extend=None,
# alpha=0.5, levels=21)
# cs2 = plot_contourf_field_with_map_overlay(tcwv.sel(time=times[1]), ax=grid[1],
# vmin=vmin, vmax=vmax, cmap=cmap,
# colorbar=False, title='2013-10',
# cbar_label='', extend=None,
# alpha=0.5, levels=21)
# cbar = grid[0].cax.colorbar(cs2)
# # cbar = grid.cbar_axes[0].colorbar(cs2)
# label = 'PWV anomalies [mm]'
# cbar.set_label_text(label)
# # for cax in grid.cbar_axes:
# # cax.toggle_label(False)
# # im=cm.ScalarMappable(norm=normalizer)
# return fig
# TODO: calculate long term monthly mean from slice and incorporate it easily:
def plot_quiver_panels(u, v, sf,
times=['2013-10', '2015-10'], level=750,
anoms=False, suptitle='', labelsize=12):
import matplotlib.pyplot as plt
import pandas as pd
# from palettable.colorbrewer import sequential as seq_cmap
from palettable.colorbrewer import sequential as colorbrewer_seq
from palettable.scientific import sequential as scientific_seq
from palettable.cmocean import sequential as cmocean_seq
from palettable.cartocolors import sequential as seq_cmap
from palettable.cartocolors import diverging as div_cmap
import cartopy.crs as ccrs
import xarray as xr
cmap_seq = seq_cmap.BluYl_7.mpl_colormap
cmap_seq = colorbrewer_seq.Blues_9.mpl_colormap
cmap_div = div_cmap.Tropic_7.mpl_colormap
cmap_quiver = seq_cmap.SunsetDark_7.mpl_colormap
# cmap_quiver = colorbrewer_seq.YlOrRd_9.mpl_colormap
# cmap_quiver = scientific_seq.LaJolla_20.mpl_colormap
# cmap_quiver = cmocean_seq.Solar_20.mpl_colormap
cmap = cmap_seq
if anoms:
cmap = cmap_div
times_dt = pd.to_datetime(times)
cb_label = 'PWV [mm]'
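    # slice the scalar field (PWV) and the wind components to the Eastern
    # Mediterranean box; clim_month=10 appends the long-term October mean as
    # an extra "time" panel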
tcwv = slice_time_level_geo_field(sf, level=None, time=times_dt,
anoms=anoms, clim_month=10,
lats=[17, 47], lons=[17, 47])
    qu = slice_time_level_geo_field(u, level=level, time=times_dt,
                                    anoms=anoms, clim_month=10,
                                    lats=[17, 47], lons=[17, 47])
    qv = slice_time_level_geo_field(v, level=level, time=times_dt,
                                    anoms=anoms, clim_month=10,
                                    lats=[17, 47], lons=[17, 47])
fg = plot_scaler_field_ontop_map_cartopy(tcwv, col='time', levels=21,
cmap=cmap, alpha=0.8, cbar_label=cb_label,
labelsize=labelsize, figsize=(18, 6))
fg = plot_vector_arrows_ontop_map_cartopy(qu, qv, lon_dim='longitude',
lat_dim='latitude', fg=fg,
qp=5, col='time', qkey=True,
cmap=cmap_quiver, zorder=20)
gdf = box_lat_lon_polygon_as_gpd(lat_bounds=[29, 34], lon_bounds=[34, 36])
for i, ax in enumerate(fg.axes.flat):
# add the box over Israel:
ax.add_geometries(gdf['geometry'].values, crs=ccrs.PlateCarree(),
edgecolor='k', linestyle='--', alpha=1, linewidth=2)
# add gridlines:
gl = ax.gridlines(alpha=0.5, color='k', linestyle='--', draw_labels=True,
dms=True, x_inline=False, y_inline=False, linewidth=1)
gl.top_labels = False
# gl.left_labels = False
gl.xlabel_style = {'size': labelsize, 'color': 'k'}
gl.ylabel_style = {'size': labelsize, 'color': 'k'}
if i == 0:
gl.right_labels = False
elif i == 1:
gl.right_labels = False
gl.left_labels = False
elif i == 2:
gl.right_labels = False
gl.left_labels = False
if i <= 1:
ax.set_title(times_dt[i].strftime('%b %Y'))
else:
ax.set_title('Mean Oct')
fg.fig.suptitle(suptitle)
fg.fig.subplots_adjust(top=0.899,
bottom=0.111,
left=0.03,
right=0.94,
hspace=0.17,
wspace=0.0)
return fg
def slice_time_level_geo_field(field, level=750, lat_dim='latitude',
lon_dim='longitude', time='2012-10',
level_dim='level', time_dim='time',
lats=[None, None], lons=[None, None],
anoms=False, clim_month=None):
from aux_gps import anomalize_xr
import pandas as pd
import xarray as xr
if level is not None:
field = field.sel({level_dim: level})
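    # ERA5 latitude coordinates usually run north-to-south; reverse the slice
    # bounds if the coordinate is descending so the spatial selection works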
if field[lat_dim].diff(lat_dim).median() < 0:
lats = lats[::-1]
field = field.sel({lat_dim: slice(*lats), lon_dim: slice(*lons)}).load()
if time is not None and anoms and clim_month is None:
field = field.load()
field = anomalize_xr(field, freq='MS', time_dim=time_dim)
if time is not None and clim_month is None:
field = field.sel({time_dim: time})
elif time is None and clim_month is not None:
field = field.load()
field = field.groupby('{}.month'.format(
time_dim)).mean().sel(month=clim_month)
elif time is not None and clim_month is not None:
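        # both explicit times and a climatological month were requested:
        # compute the monthly climatology, stamp it with a dummy date and
        # append it to the selected times so it plots as an extra facet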
clim = field.groupby('{}.month'.format(time_dim)
).mean().sel(month=clim_month)
clim = clim.rename({'month': time_dim})
clim[time_dim] = pd.to_datetime(
'2200-{}'.format(clim_month), format='%Y-%m')
field = field.sel({time_dim: time})
field = xr.concat([field, clim], time_dim)
field = field.sortby(lat_dim).squeeze()
return field
# def plot_contourf_field_with_map_overlay(field, lat_dim='latitude',
# lon_dim='longitude', ax=None,
# vmin=None, vmax=None, cmap='viridis',
# colorbar=False, title=None,
# cbar_label='', extend=None,
# alpha=0.5, levels=11):
# import salem
# import matplotlib.pyplot as plt
# field = field.transpose(lon_dim, lat_dim, ...)
# if ax is None:
# f, ax = plt.subplots(figsize=(10, 8))
# # plot the salem map background, make countries in grey
# smap = field.salem.get_map(countries=False)
# smap.set_shapefile(countries=False, oceans=True, lakes=True, color='grey')
# smap.plot(ax=ax)
# # transform the coordinates to the map reference system and contour the data
# xx, yy = smap.grid.transform(field[lat_dim].values, field[lon_dim].values,
# crs=field.salem.grid.proj)
# cs = ax.contourf(xx, yy, field, cmap=cmap, levels=levels,
# alpha=alpha, vmin=vmin, vmax=vmax, extend=extend)
# if colorbar:
# f.colorbar(cs, ax=ax, aspect=40, label=cbar_label)
# if title is not None:
# ax.set_title(title)
# return cs
# def plot_quiver_ontop_map(u, v, ax=None, lat_dim='latitude',
# lon_dim='longitude', plot_map=False,
# qp=5, qkey=True):
# import salem
# import matplotlib.pyplot as plt
# import numpy as np
# u = u.transpose(lon_dim, lat_dim, ...)
# v = v.transpose(lon_dim, lat_dim, ...)
# if ax is None:
# f, ax = plt.subplots(figsize=(10, 8))
# # plot the salem map background, make countries in grey
# smap = u.salem.get_map(countries=False)
# smap.set_shapefile(countries=False, oceans=True, lakes=True, color='grey')
# if plot_map:
# smap.plot(ax=ax)
# # transform the coordinates to the map reference system and contour the data
# xx, yy = smap.grid.transform(u[lat_dim].values, u[lon_dim].values,
# crs=u.salem.grid.proj)
# # Quiver only every 7th grid point
# u = u[4::qp, 4::qp]
# v = v[4::qp, 4::qp]
# # transform their coordinates to the map reference system and plot the arrows
# xx, yy = smap.grid.transform(u[lat_dim].values, u[lon_dim].values,
# crs=u.salem.grid.proj)
# xx, yy = np.meshgrid(xx, yy)
# qu = ax.quiver(xx, yy, u.values, v.values)
# if qkey:
# qk = ax.quiverkey(qu, 0.7, 1.05, 2, '2 msec',
# labelpos='E', coordinates='axes')
# return ax
def plot_scaler_field_ontop_map_cartopy(field, col='time', levels=21,
cmap='bwr', alpha=1,
labelsize=14, figsize=(15, 6),
cbar_label=''):
import cartopy.crs as ccrs
import cartopy.feature as cfeature
fg = field.plot.contourf(levels=levels, col=col, transform=ccrs.PlateCarree(),
cmap=cmap, alpha=alpha, figsize=figsize, add_colorbar=False,
subplot_kws={"projection": ccrs.PlateCarree()})
# add an axes, lower left corner in [0.83, 0.1] measured in figure coordinate with axes width 0.02 and height 0.8
cbar_ax = fg.fig.add_axes([0.94, 0.1, 0.01, 0.8])
fg.add_colorbar(cax=cbar_ax, label=cbar_label)
for ax in fg.axes.flat:
# land_50m = cfeature.NaturalEarthFeature('physical', 'lakes', '10m',
# edgecolor='face',
# facecolor='b', alpha=0.3)
# ax.add_feature(land_50m, zorder=30)
# ax.add_feature(cfeature.LAKES.with_scale('110m'), facecolor='b')
# ax.add_image(tiler, 6)
ax.coastlines('50m')
# ax.background_img(extent=[17, 47, 17, 47])
ax.tick_params(axis="y", direction="out", length=8)
return fg
def plot_vector_arrows_ontop_map_cartopy(u, v, lon_dim='longitude',
lat_dim='latitude', fg=None,
qp=5, col='time', qkey=True,
cmap=None, zorder=None):
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import cartopy.feature as cfeature
import numpy as np
scale = np.sqrt(u**2+v**2).max().item()
if fg is None:
fg = plt.figure(figsize=(8, 10))
ax = plt.subplot(1, 1, 1, projection=ccrs.PlateCarree())
ax.add_feature(cfeature.LAND.with_scale('110m'))
# ax.add_image(tiler, 6)
ax.coastlines('50m')
gl = ax.gridlines(alpha=0.5, color='k', linestyle='--', draw_labels=True,
dms=True, x_inline=False, y_inline=False, linewidth=1)
        # quiver only every qp-th grid point (qp=5 by default) to avoid clutter
u = u[4::qp, 4::qp]
v = v[4::qp, 4::qp]
x = u[lon_dim].values
y = u[lat_dim].values
# set displayed arrow length for longest arrow
displayed_arrow_length = 2
scale_factor = scale / displayed_arrow_length
ax.quiver(x, y, u, v, units='xy',
width=0.1, zorder=zorder,
scale=scale_factor, scale_units='xy',
transform=ccrs.PlateCarree())
return fg
for i, ax in enumerate(fg.axes.flat):
# set displayed arrow length for longest arrow
displayed_arrow_length = 2
scale_factor = scale / displayed_arrow_length
u1 = u.isel({col: i})
v1 = v.isel({col: i})
# colors1 = colors.isel({col: i})
        # quiver only every qp-th grid point
u1 = u1[4::qp, 4::qp]
v1 = v1[4::qp, 4::qp]
colors = np.sqrt(u1**2 + v1**2) / scale
x = u1[lon_dim].values
y = u1[lat_dim].values
if cmap is not None:
q = ax.quiver(x, y, u1, v1, colors, units='xy',
width=0.1, cmap=cmap,
scale=scale_factor, scale_units='xy',
transform=ccrs.PlateCarree(),
zorder=zorder)
else:
q = ax.quiver(x, y, u1, v1, units='xy',
width=0.1, zorder=zorder,
scale=scale_factor, scale_units='xy',
transform=ccrs.PlateCarree())
if qkey:
qk = ax.quiverkey(q, 0.7, 1.05, 0.03, r'0.03 m$\cdot$sec$^{-1}$',
labelpos='E', coordinates='axes')
return fg
def box_lat_lon_polygon_as_gpd(lat_bounds=[29, 34], lon_bounds=[34, 36.5]):
import geopandas as gpd
from shapely.geometry import Point, LineString
point1 = [lon_bounds[0], lat_bounds[0]]
point2 = [lon_bounds[0], lat_bounds[1]]
point3 = [lon_bounds[1], lat_bounds[1]]
point4 = [lon_bounds[1], lat_bounds[0]]
line1 = LineString([Point(*point1), Point(*point2)])
line2 = LineString([Point(*point2), Point(*point3)])
line3 = LineString([Point(*point3), Point(*point4)])
line4 = LineString([Point(*point4), Point(*point1)])
geo_df = gpd.GeoDataFrame(geometry=[line1, line2, line3, line4])
return geo_df
def plot_relative_wind_direction_frequency(station='tela', ims_path=ims_path,
clim=True):
import xarray as xr
import pandas as pd
wd_daily = xr.load_dataset(ims_path / 'GNSS_WD_daily.nc')[station]
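    # bin the daily wind directions into eight 45-degree sectors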
    bins = [0, 45, 90, 135, 180, 225, 270, 315, 360]
bin_labels = ['N-NE', 'NE-E', 'E-SE',
'SE-S', 'S-SW', 'SW-W', 'W-NW', 'NW-N']
wd_daily = wd_daily.dropna('time')
cats = pd.cut(wd_daily.values, bins=bins, labels=bin_labels)
df = wd_daily.dropna('time').to_dataframe(name='WD')
df['month'] = df.index.month
df['year'] = df.index.year
df['months'] = df['year'].astype(str) + '-' + df['month'].astype(str)
cats = pd.Series(cats, index=df.index)
df['direction'] = cats
ndf = df.groupby([df['months'], df['direction']]).size().to_frame()
ndf = ndf.unstack()
ndf.columns = ndf.columns.droplevel()
ndf.index.name = 'time'
ndf.index = pd.to_datetime(ndf.index)
da = ndf.to_xarray()
return da
def plot_multiparams_daily_pwv_single_time(station='tela', ims_path=ims_path,
climate_path=climate_path,
ts1='2013-09-15', days=47,
ts2='2015-09-15',
pwv_lim=[10, 45], dtr_lim=[6, 14.5],
wd_lim=[50, 320],
add_synoptics=['CL', 'RST', 'PT'],
save=True, fontsize=16):
import matplotlib.pyplot as plt
import pandas as pd
import xarray as xr
import numpy as np
from calendar import month_abbr
from aux_gps import replace_time_series_with_its_group
from synoptic_procedures import read_synoptic_classification
sns.set_style('whitegrid')
sns.set_style('ticks')
dt1 = pd.date_range(ts1, periods=days)
# dt2 = pd.date_range(ts2, periods=days)
months = list(set(dt1.month))
year = list(set(dt1.year))[0] # just one year
dt1_str = ', '.join([month_abbr[x] for x in months]) + ' {}'.format(year)
# months = list(set(dt2.month))
# year = list(set(dt2.year))[0] # just one year
# dt2_str = ', '.join([month_abbr[x] for x in months]) + ' {}'.format(year)
pw_daily_all = xr.open_dataset(
work_yuval/'GNSS_PW_daily_thresh_50.nc')[station].load()
# pw_daily2 = pw_daily_all.sel(time=dt2)
pw_daily = pw_daily_all.sel(time=dt1)
dtr_daily_all = xr.load_dataset(ims_path /'GNSS_IMS_DTR_mm_israel_1996-2020.nc')[station]
dtr_daily = dtr_daily_all.sel(time=dt1)
# dtr_daily2 = dtr_daily_all.sel(time=dt2)
wd_daily_all = xr.load_dataset(ims_path /'GNSS_WD_daily.nc')[station]
wd_daily = wd_daily_all.sel(time=dt1)
# wd_daily2 = wd_daily_all.sel(time=dt2)
# wind directions:
# 0 north
# 45 northeast
# 90 east
# 135 southeast
# 180 south
# 225 southwest
# 270 west
# 315 northwest
# 360 north
fig, axes = plt.subplots(3, 1, figsize=(20, 10))
# twins = [ax.twiny() for ax in axes]
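    # monthly-mean reference curve: each daily value is replaced by the mean
    # of its month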
pwv_mm = replace_time_series_with_its_group(pw_daily, 'month')
# pwv_mm2 = replace_time_series_with_its_group(pw_daily2, 'month')
blue = 'k'
red = 'tab:red'
pwv1 = dt1_str + ' PWV'
# pwv2 = dt2_str + ' PWV'
pwv1_mm = pwv1 + ' monthly mean'
# pwv2_mm = pwv2 + ' monthly mean'
pw_daily.plot.line('-', color=blue, lw=2, ax=axes[0], label=pwv1)
# pw_daily2.plot.line('-', lw=2, color=red, ax=twins[0], label=pwv2)
pwv_mm.plot.line('--', lw=2, color=blue, ax=axes[0], label=pwv1_mm)
# pwv_mm2.plot.line('--', lw=2, color=red, ax=twins[0], label=pwv2_mm)
axes[0].set_ylabel('PWV [mm]', fontsize=fontsize)
hand, labl = axes[0].get_legend_handles_labels()
# hand2, labl2 = twins[0].get_legend_handles_labels()
# axes[0].legend(handles=hand+hand2, labels=labl+labl2)
axes[0].set_ylim(*pwv_lim)
wd_daily.plot.line('-', lw=2, color=blue, ax=axes[1])
# wd_daily2.plot.line('-', lw=2,color=red, ax=twins[1])
axes[1].set_ylabel(r'Wind Direction [$^{\circ}$]', fontsize=fontsize)
axes[1].set_ylabel('Wind Direction', fontsize=fontsize)
# axes[1].set_ylim(*wd_lim)
dtr_daily.plot.line('-', lw=2, color=blue, ax=axes[2])
# dtr_daily2.plot.line('-', lw=2, color=red, ax=twins[2])
axes[2].set_ylabel('Diurnal Temperature Range [K]', fontsize=fontsize)
axes[2].set_ylim(*dtr_lim)
[ax.xaxis.set_major_locator(mdates.DayLocator(interval=1)) for ax in axes]
# set formatter
[ax.xaxis.set_major_formatter(mdates.DateFormatter('%d')) for ax in axes]
[ax.grid(True) for ax in axes]
[ax.set_xlabel('') for ax in axes]
[ax.tick_params(labelsize=fontsize) for ax in axes]
xlim = [dt1[0]- pd.Timedelta(1, unit='d'), dt1[-1]+ pd.Timedelta(1, unit='d')]
[ax.set_xlim(*xlim) for ax in axes]
[ax.set_xticks(ax.get_xticks()[1:-1]) for ax in axes]
# for ax, twin in zip(axes, twins):
# ylims_low = min(min(ax.get_ylim()), min(twin.get_ylim()))
# ylims_high = max(max(ax.get_ylim()), max(twin.get_ylim()))
# ax.set_ylim(ylims_low, ylims_high)
wd_ticks = np.arange(45, 360, 45)
wind_labels = ['NE', 'E', 'SE', 'S', 'SW', 'W', 'NW']
    # build tick labels such as 'NE 45' (kept for reference; the plain
    # direction names below are what is actually displayed)
    lbl = ['{} {}'.format(label, tick)
           for tick, label in zip(wd_ticks, wind_labels)]
# wind_label = [y + ' ' + str(x) for x,y in zip(wd_ticks, wind_labels)]
axes[1].set_yticks(wd_ticks)
axes[1].set_yticklabels(wind_labels, ha='left')
fig.canvas.draw()
yax = axes[1].get_yaxis()
# find the maximum width of the label on the major ticks
pad = max(T.label.get_window_extent().width for T in yax.majorTicks)
yax.set_tick_params(pad=pad-10)
if add_synoptics is not None:
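        # shade the days that belong to the selected synoptic classes as
        # colored vertical bands on all three panels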
df = read_synoptic_classification(climate_path, report=False)
ind = pw_daily.to_dataframe().index
df = df.loc[ind]
grp_dict = df.groupby('upper_class').groups
[grp_dict.pop(x) for x in grp_dict.copy().keys()
if x not in add_synoptics]
        # add ARST (active Red Sea Trough) event dates manually:
grp_dict['ARST'] = pd.DatetimeIndex(['2013-10-30', '2015-10-05',
'2015-10-19', '2015-10-20',
'2015-10-25', '2015-10-29'])
grp_dict['RST'] = grp_dict['RST'].difference(grp_dict['ARST'])
color_dict = {'CL': 'tab:green', 'ARST': 'tab:orange',
'RST': 'tab:orange', 'PT': 'tab:purple'}
alpha_dict = {'CL': 0.3, 'ARST': 0.6,
'RST': 0.3, 'PT': 0.3}
ylim0 = axes[0].get_ylim()
ylim1 = axes[1].get_ylim()
ylim2 = axes[2].get_ylim()
for key_class, key_ind in grp_dict.items():
color = color_dict[key_class]
alpha = alpha_dict[key_class]
# ecolor='k'
# edge_color = edge_dict[key_class]
# abbr = add_class_abbr(key_class)
# # abbr_count = month_counts.sel(syn_cls=key_class).sum().item()
# abbr_count = df[df['class'] == key_class].count().values[0]
# abbr_label = r'${{{}}}$: {}'.format(abbr, int(abbr_count))
# # for ind, row in df.iterrows():
# da_ts[da_ts['syn_class'] == key_class].plot.line(
# 'k-', lw=0, ax=ax, marker='o', markersize=20,
# markerfacecolor=color, markeredgewidth=2,
# markeredgecolor=edge_color, label=abbr_label)
axes[0].vlines(key_ind, ylim0[0], ylim0[1],
color=color, alpha=alpha, lw=20,
label=key_class)
axes[1].vlines(key_ind, ylim1[0], ylim1[1],
color=color, alpha=alpha, lw=20,
label=key_class)
axes[2].vlines(key_ind, ylim2[0], ylim2[1],
color=color, alpha=alpha, lw=20,
label=key_class)
handles, labels = axes[2].get_legend_handles_labels()
fig.legend(handles=handles, labels=labels, prop={'size': 16}, edgecolor='k',
framealpha=0.5, fancybox=False, facecolor='white',
ncol=4, fontsize=fontsize, loc='upper left', bbox_to_anchor=(0.05, 1.005),
bbox_transform=plt.gcf().transFigure)
# [twin.tick_params(axis='x',which='both', top=False, # ticks along the top edge are off
# labeltop=False) for twin in twins]
# [twin.set_xlabel('') for twin in twins]
# months = list(set(times_dt.month))
# year = list(set(times_dt.year))[0] # just one year
dt_str = ', '.join([month_abbr[x] for x in months]) + ' {}'.format(year)
# axes[2].set_xlabel(dt_str)
fig.suptitle('{} {}'.format(station.upper(),dt_str), fontsize=fontsize)
fig.tight_layout()
if save:
filename = '{}_multiparam_{}-{}.png'.format(station, '-'.join([str(x) for x in months]), year)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return fig
def plot_synoptic_daily_on_pwv_daily_with_colors(climate_path=climate_path,
station='tela',ims_path=ims_path,
times=['2013-09-15',
'2015-09-15'],
days=47, add_era5=True,
add_dtr=True,
twin_ylims=None):
from synoptic_procedures import visualize_synoptic_class_on_time_series
import matplotlib.pyplot as plt
import xarray as xr
import pandas as pd
import matplotlib.dates as mdates
from calendar import month_abbr
# TODO: add option of plotting 3 stations and/without ERA5
times_dt = [pd.date_range(x, periods=days) for x in times]
if isinstance(station, list):
pw_daily = [xr.open_dataset(
work_yuval/'GNSS_PW_daily_thresh_50_homogenized.nc')[x].load() for x in station]
pw_daily = xr.merge(pw_daily)
add_mm = False
label = ', '.join([x.upper() for x in station])
ncol = 6
else:
pw_daily = xr.open_dataset(
work_yuval/'GNSS_PW_daily_thresh_50.nc')[station].load()
add_mm = True
label = station.upper()
ncol = 4
era5_hourly = xr.open_dataset(work_yuval/'GNSS_era5_hourly_PW.nc')[station]
era5_daily = era5_hourly.resample(time='D').mean().load()
    # the ERA5-based DTR loaded below is immediately overridden by the
    # IMS-based DTR, so it is kept only for reference:
    # dtr_daily = xr.load_dataset(work_yuval/'GNSS_ERA5_DTR_daily_1996-2020.nc')[station]
    dtr_daily = xr.load_dataset(ims_path / 'GNSS_IMS_DTR_mm_israel_1996-2020.nc')[station]
fig, axes = plt.subplots(len(times), 1, figsize=(20, 10))
leg_locs = ['upper right', 'lower right']
for i, ax in enumerate(axes.flat):
if add_era5:
second_da_ts = era5_daily.sel(time=times_dt[i])
elif add_dtr:
second_da_ts = dtr_daily.sel(time=times_dt[i])
else:
second_da_ts = None
visualize_synoptic_class_on_time_series(pw_daily.sel(time=times_dt[i]),
path=climate_path, ax=ax,
second_da_ts=second_da_ts,
leg_ncol=ncol,
leg_loc=leg_locs[i],
add_mm=add_mm,
twin=twin_ylims)
ax.set_ylabel('PWV [mm]')
ax.xaxis.set_major_locator(mdates.DayLocator(interval=1))
# set formatter
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d'))
# set font and rotation for date tick labels
months = list(set(times_dt[i].month))
year = list(set(times_dt[i].year))[0] # just one year
dt_str = ', '.join([month_abbr[x] for x in months]) + ' {}'.format(year)
ax.set_title(dt_str, fontweight='bold', fontsize=14)
ax.set_xlabel('')
# set ylims :
ylims_low = [ax.get_ylim()[0] for ax in axes]
ylims_high = [ax.get_ylim()[1] for ax in axes]
[ax.set_ylim(min(ylims_low), max(ylims_high)) for ax in axes]
# set ylims in right_axes:
# ylims_low = [ax.right_ax.get_ylim()[0] for ax in axes]
# ylims_high = [ax.right_ax.get_ylim()[1] for ax in axes]
# [ax.right_ax.set_ylim(min(ylims_low), max(ylims_high)) for ax in axes]
# axes[0].right_ax.set_ylim(0,100)
if add_era5:
fig.suptitle(
'Daily PWV and synoptic class for {} station using GNSS(solid - monthly means in dot-dashed) and ERA5(dashed)'.format(label))
elif add_dtr:
fig.suptitle(
'Daily PWV and synoptic class for {} station using GNSS(solid - monthly means in dot-dashed) and DTR(dashed)'.format(label))
else:
fig.suptitle(
'Daily PWV and synoptic class for {} station using GNSS(solid)'.format(label))
fig.tight_layout()
return axes
def create_enhanced_qualitative_color_map(plot=True, alevels=[1, 0.75, 0.5, 0.25]):
import matplotlib.colors as cm
import seaborn as sns
colors = sns.color_palette('colorblind')
colors_with_alpha = [cm.to_rgba(colors[x]) for x in range(len(colors))]
new = []
for color in colors_with_alpha:
r = color[0]
g = color[1]
b = color[2]
for alev in alevels:
alpha = alev
new.append(tuple([r, g, b, alpha]))
if plot:
sns.palplot(new)
return new
def plot_IMS_wind_speed_direction_violins(ims_path=ims_path,
station='tela', save=True,
fontsize=16):
from ims_procedures import gnss_ims_dict
import seaborn as sns
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
sns.set_style('ticks')
pal = sns.color_palette(n_colors=4)
green = pal[2]
red = pal[3]
ims_station = gnss_ims_dict.get(station)
WS = xr.open_dataset(ims_path / 'IMS_WS_israeli_10mins.nc')[ims_station]
WD = xr.open_dataset(ims_path / 'IMS_WD_israeli_10mins.nc')[ims_station]
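    # monthly means of the 10-minute IMS wind speed/direction records,
    # restricted to 2014-2019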
ws_mm = WS.resample(time='MS').mean().sel(time=slice('2014', '2019'))
wd_mm = WD.resample(time='MS').mean().sel(time=slice('2014', '2019'))
df = ws_mm.to_dataframe(name='Wind Speed')
df['Wind Direction'] = wd_mm.to_dataframe(name='Wind Direction')
df['month'] = df.index.month
fig, axes = plt.subplots(1, 2, figsize=(20, 7))
axes[0] = sns.violinplot(data=df, x='month', y='Wind Speed',
fliersize=10, gridsize=250, ax=axes[0],
inner=None, scale='width', color=green,
hue=None, split=False, zorder=20)
axes[1] = sns.violinplot(data=df, x='month', y='Wind Direction',
fliersize=10, gridsize=250, ax=axes[1],
inner=None, scale='width', color=red,
hue=None, split=False, zorder=20)
[x.set_alpha(0.5) for x in axes[0].collections]
[x.set_alpha(0.5) for x in axes[1].collections]
axes[0] = sns.pointplot(x='month', y='Wind Speed', data=df,
estimator=np.mean,
dodge=False, ax=axes[0], hue=None, color=green,
linestyles="None", markers=['s'], scale=0.7,
ci=None, alpha=0.5, zorder=0, style=None)
axes[1] = sns.pointplot(x='month', y='Wind Direction', data=df,
estimator=np.mean,
dodge=False, ax=axes[1], hue=None, color=red,
linestyles="None", markers=['o'], scale=0.7,
ci=None, alpha=0.5, zorder=0, style=None)
[ax.grid(True) for ax in axes]
wind_labels = ['SE', 'S', 'SW', 'W', 'NW']
wd_ticks = np.arange(135, 360, 45)
axes[1].set_yticks(wd_ticks)
axes[1].set_yticklabels(wind_labels, ha='left')
fig.canvas.draw()
yax = axes[1].get_yaxis()
# find the maximum width of the label on the major ticks
pad = max(T.label.get_window_extent().width for T in yax.majorTicks)
yax.set_tick_params(pad=pad-10)
axes[0].set_ylabel(r'Wind Speed [m$\cdot$sec$^{-1}$]')
    fig.tight_layout()
    if save:
        # the signature accepts save=True but the original did not implement
        # it; the filename pattern below follows the other helpers in this module
        filename = 'IMS_WS_WD_violins_{}.png'.format(station)
        plt.savefig(savefig_path / filename, bbox_inches='tight')
    return fig
def plot_ERA5_wind_speed_direction_profiles_at_bet_dagan(era5_path=era5_path,
save=True, fontsize=16):
import seaborn as sns
import xarray as xr
from aux_gps import convert_wind_direction
import numpy as np
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
sns.set_style('ticks')
pal = sns.color_palette(n_colors=4)
bd_lat = 32.01
bd_lon = 34.81
v = xr.open_dataset(era5_path/'ERA5_V_mm_EM_area_1979-2020.nc')
u = xr.open_dataset(era5_path/'ERA5_U_mm_EM_area_1979-2020.nc')
u = u.sel(expver=1)
v = v.sel(expver=1)
u1 = u.sel(latitude=bd_lat, longitude=bd_lon, method='nearest')
v1 = v.sel(latitude=bd_lat, longitude=bd_lon, method='nearest')
    u1 = u1.load().dropna('time')
    v1 = v1.load().dropna('time')
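    # convert the monthly-mean U/V components at the Bet-Dagan grid point to
    # wind speed and meteorological wind direction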
ws1, wd1 = convert_wind_direction(u=u1['u'], v=v1['v'])
ws1 = ws1.reset_coords(drop=True)
wd1 = wd1.reset_coords(drop=True)
levels = [1000, 900, 800, 700]
df_ws = ws1.sel(level=levels).to_dataframe('ws')
df_ws['level'] = df_ws.index.get_level_values(1)
df_ws['month'] = df_ws.index.get_level_values(0).month
df_wd = wd1.sel(level=levels).to_dataframe('wd')
df_wd['level'] = df_wd.index.get_level_values(1)
df_wd['month'] = df_wd.index.get_level_values(0).month
fig, axes = plt.subplots(2, 1, figsize=(8, 15))
axes[0] = sns.lineplot(data=df_ws, x='month', y='ws',
hue='level', markers=True,
style='level', markersize=10,
ax=axes[0], palette=pal)
axes[1] = sns.lineplot(data=df_wd, x='month', y='wd',
hue='level', markers=True,
style='level', markersize=10,
ax=axes[1], palette=pal)
axes[0].legend(title='pressure level [hPa]', prop={'size': fontsize-2}, loc='upper center')
axes[1].legend(title='pressure level [hPa]', prop={'size': fontsize-2}, loc='lower center')
[ax.grid(True) for ax in axes]
wind_labels = ['SE', 'S', 'SW', 'W', 'NW']
wd_ticks = np.arange(135, 360, 45)
axes[1].set_yticks(wd_ticks)
axes[1].set_yticklabels(wind_labels, ha='left')
fig.canvas.draw()
yax = axes[1].get_yaxis()
# find the maximum width of the label on the major ticks
pad = max(T.label.get_window_extent().width for T in yax.majorTicks)
yax.set_tick_params(pad=pad)
axes[0].set_ylabel(r'Wind Speed [m$\cdot$sec$^{-1}$]', fontsize=fontsize)
axes[1].set_ylabel('Wind Direction', fontsize=fontsize)
axes[1].set_xlabel('month', fontsize=fontsize)
mticks = np.arange(1, 13)
[ax.set_xticks(mticks) for ax in axes]
[ax.tick_params(labelsize=fontsize) for ax in axes]
fig.tight_layout()
fig.subplots_adjust(hspace=0.051)
if save:
filename = 'ERA5_wind_speed_dir_bet-dagan_profiles.png'
        plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_PWV_anomalies_groups_maps_with_mean(work_path=work_yuval, station='drag',
fontsize=16, save=True):
import xarray as xr
import seaborn as sns
import numpy as np
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
    from scipy.ndimage import gaussian_filter
from PW_stations import produce_geo_gnss_solved_stations
sns.set_theme(style='ticks', font_scale=1.5)
cmap = 'jet' # sns.color_palette('terrain', as_cmap=True)
df = produce_geo_gnss_solved_stations(plot=False)
file = work_path/'GNSS_PW_thresh_0_hour_dayofyear_rest.nc'
pw = xr.open_dataset(file)
if isinstance(station, str):
st_mean = pw[station].mean('rest').expand_dims('station')
st_mean['station'] = [station.upper()]
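        # smooth the hour-by-day-of-year climatology with a Gaussian filter
        # (sigma of 5 grid cells) before contouring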
data = gaussian_filter(st_mean, 5)
st_mean = st_mean.copy(data=data)
elif isinstance(station, list):
pws = [pw[x].mean('rest') for x in pw if x in station]
pws = [x.copy(data=gaussian_filter(x, 5)) for x in pws]
st_mean = xr.merge(pws)
st_mean = st_mean[station].to_array('station')
st_mean['station'] = [x.upper() for x in st_mean['station'].values]
alts = df.loc[station,'alt'].values
# drag = pw['drag'].mean('rest')
# elat = pw['elat'].mean('rest')
# dsea = pw['dsea'].mean('rest')
# da = xr.concat([drag, dsea, elat], 'station')
# da['station'] = ['DRAG', 'DSEA', 'ELAT']
n = st_mean['station'].size
st_mean = st_mean.transpose('dayofyear', 'hour', 'station')
norm = mcolors.Normalize(vmin=st_mean.min().item(), vmax=st_mean.max().item(),
clip=True)
fig = plt.figure(constrained_layout=False, figsize=(7, 13))
ratio = 1.0 / len(station)
bots = [1.0 - ratio*(x+1) for x in range(len(station))]
tops = [1 - x - 0.05 for x in reversed(bots)]
bots[-1] = 0.05
# custom tops and bots for 3 figures:
tops = [0.95, 0.6333333333339999, 0.3166666666673999]
bots = [0.6833333333339999, 0.3666666666673999, 0.05]
for i, st in enumerate(station):
gs = fig.add_gridspec(nrows=2, ncols=1, hspace=0, height_ratios=[3,1],
bottom=bots[i], top=tops[i], right=0.7)
ax_heat = fig.add_subplot(gs[0])
ax_bottom = fig.add_subplot(gs[1])
cf = st_mean.sel(station=st.upper()).plot.contourf(levels=41,
add_colorbar=False,
cmap=cmap, ax=ax_heat,
norm=norm)
st_mean.sel(station=st.upper()).mean('dayofyear').plot(ax=ax_bottom,
color='k', linewidth=2)
bottom_limit = ax_heat.get_xlim()
ax_bottom.set_xlim(bottom_limit)
ax_bottom.set_title('')
ax_bottom.yaxis.set_major_locator(tck.MaxNLocator(3))
ax_bottom.set_xlabel('')
ax_bottom.grid(True)
ax_heat.set_xlabel('')
ax_heat.tick_params(labelbottom=False)
ax_bottom.tick_params(top='on', labelsize=fontsize)
ax_bottom.set_ylabel('PWV [mm]', fontsize=fontsize)
ax_heat.set_yticks(np.arange(50, 400, 50))
title = ax_heat.get_title()
title = title + ' ({:.0f} m a.s.l)'.format(alts[i])
ax_heat.set_title(title)
ax_bottom.set_xlabel('Hour of Day [UTC]')
cbar_ax = fig.add_axes([0.80, 0.049, 0.05, 0.900])
cb = fig.colorbar(cf, cax=cbar_ax)
cb.set_ticks(np.arange(7, 31+2, 2))
# cb.ax.set_yticklabels(['{:.0f}'.format(x) for x in np.arange(9, 31+1, 1)], fontsize=16, weight='bold')
# cb.ax.tick_params()# labelsize=fontsize-2)
cb.set_label('PWV [mm]')#, size=fontsize-2)
if save:
filename = 'PWV_climatology_{}_stacked_groups_with_mean.png'.format('_'.join(station))
        plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_PWV_anomalies_groups_maps(work_path=work_yuval, station='drag',
fontsize=16, save=True):
import xarray as xr
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
    from scipy.ndimage import gaussian_filter
from PW_stations import produce_geo_gnss_solved_stations
sns.set_style('whitegrid')
sns.set_style('ticks')
cmap = 'jet' # sns.color_palette('terrain', as_cmap=True)
df = produce_geo_gnss_solved_stations(plot=False)
file = work_path/'GNSS_PW_thresh_0_hour_dayofyear_rest.nc'
pw = xr.open_dataset(file)
if isinstance(station, str):
st_mean = pw[station].mean('rest').expand_dims('station')
st_mean['station'] = [station.upper()]
data = gaussian_filter(st_mean, 5)
st_mean = st_mean.copy(data=data)
elif isinstance(station, list):
pws = [pw[x].mean('rest') for x in pw if x in station]
pws = [x.copy(data=gaussian_filter(x, 5)) for x in pws]
st_mean = xr.merge(pws)
st_mean = st_mean[station].to_array('station')
st_mean['station'] = [x.upper() for x in st_mean['station'].values]
alts = df.loc[station,'alt'].values
# drag = pw['drag'].mean('rest')
# elat = pw['elat'].mean('rest')
# dsea = pw['dsea'].mean('rest')
# da = xr.concat([drag, dsea, elat], 'station')
# da['station'] = ['DRAG', 'DSEA', 'ELAT']
n = st_mean['station'].size
st_mean = st_mean.transpose('dayofyear', 'hour', 'station')
fg = st_mean.plot.contourf(levels=41, row='station', add_colorbar=False,
figsize=(7, 13), cmap=cmap)
for i, ax in enumerate(fg.fig.axes):
ax.set_yticks(np.arange(50, 400, 50))
ax.tick_params(labelsize=fontsize)
ax.set_ylabel('Day of Year', fontsize=fontsize)
title = ax.get_title()
title = title + ' ({:.0f} m a.s.l)'.format(alts[i])
ax.set_title(title, fontsize=fontsize)
fg.fig.axes[-1].set_xlabel('Hour of day [UTC]', fontsize=fontsize)
cbar_ax = fg.fig.add_axes([0.85, 0.074, 0.025, 0.905])
fg.add_colorbar(cax=cbar_ax)
cb = fg.cbar
cb.ax.tick_params(labelsize=fontsize-2)
cb.set_label('PWV [mm]', size=fontsize-2)
fg.fig.subplots_adjust(top=0.967,
bottom=0.075,
left=0.13,
right=0.83,
hspace=0.135,
wspace=0.195)
if save:
filename = 'PWV_climatology_{}_stacked_groups.png'.format('_'.join(station))
        plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_hydro_pwv_before_event_motivation(work_path=work_yuval,
hydro_path=hydro_path,
days_prior=3, fontsize=16,
save=True, smoothed=False):
import xarray as xr
from hydro_procedures import hydro_pw_dict
from hydro_procedures import produce_pwv_days_before_tide_events
from hydro_procedures import read_station_from_tide_database
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
def smooth_df(df):
import numpy as np
dfs = df.copy()
dfs.index = pd.to_timedelta(dfs.index, unit='d')
dfs = dfs.resample('15S').interpolate(method='cubic')
dfs = dfs.resample('5T').mean()
dfs = dfs.reset_index(drop=True)
dfs.index = np.linspace(df.index[0], df.index[-1], dfs.index.size)
return dfs
sns.set_style('whitegrid')
sns.set_style('ticks')
pw = xr.open_dataset(work_path / 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc')
pws = [pw[x].load() for x in hydro_pw_dict.keys()]
dfs = [read_station_from_tide_database(hydro_pw_dict.get(x), hydro_path=hydro_path) for x in hydro_pw_dict.keys()]
df_list = []
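    # for each GNSS station, collect the PWV anomalies in the days_prior
    # window before every recorded flood (tide) event, one column per event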
for pw_da, df_da in zip(pws, dfs):
df, _, _ = produce_pwv_days_before_tide_events(pw_da, df_da,
plot=False,
days_prior=days_prior,
drop_thresh=0.5,
max_gap='12H')
df_list.append(df)
n_events = [len(x.columns) for x in df_list]
if smoothed:
df_list = [smooth_df(x) for x in df_list]
df_mean = pd.concat([x.T.mean().to_frame(x.columns[0].split('_')[0]) for x in df_list], axis=1)
fig, ax = plt.subplots(figsize=(8, 10))
labels = ['{}: mean from {} events'.format(x.upper(), y) for x,y in zip(df_mean.columns, n_events)]
for i, station in enumerate(df_mean.columns):
sns.lineplot(data=df_mean, y=station, x=df.index, ax=ax, label=labels[i], lw=4)
ax.grid(True)
ax.axvline(0, color='k', linestyle='--')
ax.set_xlabel('Days before/after tide event', fontsize=fontsize)
ax.set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.legend(prop={'size': fontsize-2})
fig.tight_layout()
if save:
filename = 'PWV_anoms_dsea_drag_elat_{}_prior_tides.png'.format(days_prior)
        plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_typical_tide_event_with_PWV(work_path=work_yuval,
hydro_path=hydro_path,
station='yrcm',
days_prior=3, days_after=1, fontsize=16,
date='2018-04-27',
save=True, smoothed=True):
# possible dates: 2014-11-16T13:50, 2018-04-26T18:55
# best to show 2018-04-24-27,
# TODO: x-axis time hours, ylabels in color
# TODO: change flow to bars instead of lineplot
import xarray as xr
import pandas as pd
from hydro_procedures import hydro_pw_dict
from matplotlib.ticker import FormatStrFormatter
import numpy as np
def smooth_df(df):
dfs = df.copy()
# dfs.index = pd.to_timedelta(dfs.index, unit='d')
dfs = dfs.resample('15S').interpolate(method='cubic')
dfs = dfs.resample('5T').mean()
# dfs = dfs.reset_index(drop=True)
# dfs.index = np.linspace(df.index[0], df.index[-1], dfs.index.size)
return dfs
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
colors = sns.color_palette('tab10', n_colors=2)
# sns.set_style('whitegrid')
sns.set_theme(style='ticks', font_scale=1.8)
# load hydro graphs:
hgs = xr.open_dataset(hydro_path/'hydro_graphs.nc')
# select times:
dt_start = pd.to_datetime(date) - pd.Timedelta(days_prior, unit='d')
dt_end = pd.to_datetime(date) + pd.Timedelta(days_after, unit='d')
hs_id = hydro_pw_dict.get(station)
hg_da = hgs['HS_{}_flow'.format(hs_id)].sel(time=slice(dt_start, dt_end)).dropna('time')
# hg_da = hg_da.resample(time='15T').mean().interpolate_na('time', method='spline', max_gap='12H')
hg_da = hg_da.resample(time='15T').mean()
# load pwv:
pw = xr.open_dataset(work_path / 'GNSS_PW_thresh_0_for_hydro_analysis.nc')[station]
pw = pw.sel(time=slice(dt_start, dt_end))
df = pw.to_dataframe(name='pwv')
df['flow'] = hg_da.to_dataframe()
df['flow'] = df['flow'].fillna(0)
if smoothed:
df = smooth_df(df)
fig, ax = plt.subplots(figsize=(15, 4))
flow_label = r'Flow [m$^3\cdot$sec$^{-1}$]'
# df['time'] = df.index
# sns.lineplot(data=df, y='flow', x=df.index, ax=ax, label=48125, lw=2, color=colors[0])
# twin = ax.twinx()
# sns.lineplot(data=df, y='pwv', x=df.index, ax=twin, label='DRAG', lw=2, color=colors[1])
    df.index.name = ''
    sns.lineplot(data=df, y='pwv', x=df.index, lw=2, color=colors[1],
                 marker=None, sort=False, ax=ax)
    twin = ax.twinx()
    twin.bar(x=df.index, height=df['flow'].values, width=0.05, linewidth=0,
             color=colors[0], alpha=0.5)
# ax = df['flow'].plot(color=colors[0], ax=ax, lw=2)
# df['pwv'].plot(color=colors[1], ax=twin, lw=2)
twin.set_ylim(0, 100)
twin.set_ylabel(flow_label, color=colors[0])
ax.set_ylabel('PWV [mm]', color=colors[1])
twin.tick_params(axis='y', labelcolor=colors[0])
# ax.tick_params(axis='x', labelsize=fontsize, bottom=True, which='both')
ax.tick_params(axis='y', labelcolor=colors[1])
ax.yaxis.set_ticks(np.arange(10, 35, 5))
# twin.yaxis.set_major_locator(ticker.FixedLocator(locs=np.arange(0,35,5)))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
# align_yaxis_np(ax, twin)
# alignYaxes([ax, twin], [0, 10])
# lim = ax.get_ylim()
# l2 = twin.get_ylim()
# fun = lambda x: l2[0]+(x-lim[0])/(lim[1]-lim[0])*(l2[1]-l2[0])
# ticks = fun(ax.get_yticks())
sns.set(rc={"xtick.bottom" : True, "ytick.left" : True})
xticks=df.resample('12H').mean().index
ax.xaxis.set_ticks(xticks)
strDates = [x.strftime('%d-%H') for x in xticks]
ax.set_xticklabels(strDates)
xticks=df.resample('4H').mean().index
ax.xaxis.set_ticks(xticks, minor=True)
# locator = mdates.AutoDateLocator(minticks = 15,
# maxticks = 20)
# # formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
# loc = mdates.AutoDateLocator()
# ax.xaxis.set_major_locator(loc)
# ax.xaxis.set_major_formatter(mdates.ConciseDateFormatter(loc))
# ax.xaxis.set_minor_locator(mdates.HourLocator(interval=3))
# ax.xaxis.set_major_locator(mdates.DayLocator())
# minorLocator = ticker.AutoMinorLocator()
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%H'))
# ax.xaxis.set_major_locator(mdates.DayLocator())
# ax.xaxis.set_minor_locator(minorLocator)
# ax.xaxis.set_minor_locator(mdates.HourLocator(interval=3))
ax.grid(True, which='major', axis='y',color='k', ls='--')
# ax.set_xticklabels([x.strftime("%d-%H") for x in df.index], rotation=45)
# ax.grid(True, which='minor', axis='x')
ax.grid(True, which='major', axis='x',color='k', ls='--')
# twin.yaxis.set_major_locator(ticker.FixedLocator(ticks))
# twin.grid(True, axis='y',color='k', ls='--')
# twin.xaxis.set_major_locator(mdates.DayLocator())
# twin.xaxis.set_minor_locator(mdates.HourLocator())
# Fmt = mdates.AutoDateFormatter(mdates.DayLocator())
# twin.xaxis.set_major_formatter(Fmt)
# ax.set_ylim(0, 20)
fig.autofmt_xdate()
fig.tight_layout()
if save:
filename = 'typical_tide_event_with_pwv'
        plt.savefig(savefig_path / filename, orientation='portrait')
return df
def plot_hydro_pressure_anomalies(hydro_path=hydro_path, std=False,
fontsize=16, save=True):
import xarray as xr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
sns.set_style('ticks')
if std:
feats = xr.load_dataset(hydro_path/'hydro_tides_hourly_features_with_positives_std.nc')
else:
feats = xr.load_dataset(hydro_path/'hydro_tides_hourly_features_with_positives.nc')
dts = pd.DatetimeIndex(feats['X_pos']['positive_sample'].values)
bd = feats['bet-dagan']
dts_ranges = []
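    # collect the Bet-Dagan pressure series from 3 days before to 1 day after
    # each flood event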
for dt in dts:
prior = dt - pd.Timedelta(3, unit='D')
after = dt + pd.Timedelta(1, unit='D')
dt_range = pd.date_range(start=prior, end=after, freq='H')
bd_range = bd.sel(time=dt_range)
dts_ranges.append(bd_range.values)
df = pd.DataFrame(dts_ranges).T
df.index = np.linspace(-3, 1, len(df))
fig, ax = plt.subplots(figsize=(8, 6))
ts = df.T.mean() #.shift(periods=-1, freq='15D')
ts_std = df.T.std()
ts.index.name = ''
ts.plot(ax=ax, color='k', fontsize=fontsize, lw=2)
ax.fill_between(x=ts.index, y1=ts-ts_std, y2=ts+ts_std, color='k', alpha=0.4)
ax.set_xlim(ts.index.min(), ts.index.max()) #+
# pd.Timedelta(15, unit='D'))
if std:
        label = 'Pressure mean standardized anomalies'
else:
label = 'Pressure mean anomalies [hPa]'
ax.set_ylabel(label, fontsize=fontsize-2)
ax.set_xlabel('Days before/after a flood event', fontsize=fontsize-2)
ax.axvline(0, color='k', ls='--')
ax.grid(True)
fig.tight_layout()
fig.subplots_adjust(right=0.946)
if save:
filename = 'Pressure_anoms_3_prior_flood.png'
plt.savefig(savefig_path / filename, bbox_inches='tight', pad_inches=0.1)
return df
def plot_hydro_pwv_anomalies_with_station_mean(work_path=work_yuval,
hydro_path=hydro_path,
days_prior=3, fontsize=14,
save=True, smoothed=False,
wv_label='PWV'):
import xarray as xr
from hydro_procedures import hydro_pw_dict
from hydro_procedures import produce_pwv_days_before_tide_events
from hydro_procedures import read_station_from_tide_database
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def smooth_df(df):
import numpy as np
dfs = df.copy()
dfs.index = pd.to_timedelta(dfs.index, unit='d')
dfs = dfs.resample('15S').interpolate(method='cubic')
dfs = dfs.resample('5T').mean()
dfs = dfs.reset_index(drop=True)
dfs.index = np.linspace(df.index[0], df.index[-1], dfs.index.size)
return dfs
sns.set_style('whitegrid')
sns.set_style('ticks')
cmap = 'jet' #sns.color_palette('gist_rainbow_r', as_cmap=True)
if wv_label == 'PWV':
units = 'mm'
elif wv_label == 'IWV':
        units = r'kg$\cdot$m$^{-2}$'
pw = xr.open_dataset(work_path / 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc')
pws = [pw[x].load() for x in hydro_pw_dict.keys()]
dfs = [read_station_from_tide_database(hydro_pw_dict.get(x), hydro_path=hydro_path) for x in hydro_pw_dict.keys()]
df_list = []
for pw_da, df_da in zip(pws, dfs):
df, _, _ = produce_pwv_days_before_tide_events(pw_da, df_da,
plot=False,
days_prior=days_prior,
drop_thresh=0.75,
max_gap='6H')
df_list.append(df)
n_events = [len(x.columns) for x in df_list]
if smoothed:
df_list = [smooth_df(x) for x in df_list]
df_mean = pd.concat([x.T.mean().to_frame(x.columns[0].split('_')[0]) for x in df_list], axis=1)
df_mean.columns = [x.upper() for x in df_mean.columns]
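    # put the station composites on a regular 30-minute grid spanning 3 days
    # before to 1 day after the event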
df_mean.index = pd.to_timedelta(df_mean.index, unit='D')
df_mean = df_mean.resample('30T').mean()
df_mean.index = np.linspace(-3, 1, len(df_mean.index))
# weights = df.count(axis=1).shift(periods=-1, freq='15D').astype(int)
fig = plt.figure(figsize=(5, 8))
grid = plt.GridSpec(
2, 1, height_ratios=[
1, 1], hspace=0.0225)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
cbar_ax = fig.add_axes([0.95, 0.50, 0.02, 0.38]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df_mean.T,
cmap=cmap,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': r'{} anomalies [{}]'.format(wv_label, units)}, xticklabels=False)
cbar_ax.set_ylabel(r'{} anomalies [{}]'.format(wv_label, units), fontsize=fontsize-2)
cbar_ax.tick_params(labelsize=fontsize)
zero_in_heat = df_mean.index.get_loc(0, method='nearest') + 1
ax_heat.vlines([zero_in_heat], *ax_heat.get_ylim(), color='k',
linestyle='--', linewidth=1.5, zorder=20)
# activate top ticks and tickslabales:
ax_heat.xaxis.set_tick_params(
bottom='off', labelbottom='off', labelsize=fontsize)
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
# labels = ['{} ({})'.format(x.get_text(), y) for x, y in zip(ax_heat.get_ymajorticklabels(), n_events)]
# ax_heat.set_yticklabels(labels,
# fontweight='bold', fontsize=fontsize,
# rotation='horizontal')
ax_heat.set_xlabel('')
ts = df_mean.T.mean() #.shift(periods=-1, freq='15D')
ts_std = df_mean.T.std()
# ts.index= pd.to_timedelta(ts.index, unit='D')
ts.index.name = ''
ts.plot(ax=ax_group, color='k', fontsize=fontsize, lw=2)
ax_group.fill_between(x=ts.index, y1=ts-ts_std, y2=ts+ts_std, color='k', alpha=0.4)
# barax = ax_group.twinx()
# barax.bar(ts.index, weights.values, width=35, color='k', alpha=0.2)
# barax.yaxis.set_major_locator(ticker.MaxNLocator(6))
# barax.set_ylabel('Stations [#]', fontsize=fontsize-4)
# barax.tick_params(labelsize=fontsize)
ax_group.set_xlim(ts.index.min(), ts.index.max()) #+
# pd.Timedelta(15, unit='D'))
ax_group.set_ylabel(r'{} mean anomalies [{}]'.format(wv_label, units), fontsize=fontsize-2)
ax_group.set_xlabel('Days before/after a flood event', fontsize=fontsize-2)
# set ticks and align with heatmap axis (move by 0.5):
# ax_group.xaxis.set_major_locator(ticker.MultipleLocator(0.25))
# ax_group.set_xticks(np.arange(-3, 1, 0.25))
# offset = 1
# ax_group.xaxis.set(ticks=np.arange(offset / 2.,
# max(ts.index) + 1 - min(ts.index),
# offset),
# ticklabels=ts.index)
# # move the lines also by 0.5 to align with heatmap:
# lines = ax_group.lines # get the lines
# [x.set_xdata(x.get_xdata() - min(ts.index) + 0.5) for x in lines]
# ax_group.xaxis.set(ticks=xticks, ticklabels=xticks_labels)
# ax_group.xaxis.set(ticks=xticks)
# mytime = mdates.DateFormatter('%D-%H')
# ax_group.xaxis.set_major_formatter(mytime)
# ax_group.xaxis.set_major_locator(mdates.DayLocator(interval=0.5))
# xticks = pd.timedelta_range(pd.Timedelta(-3, unit='D'), pd.Timedelta(1, unit='D'), freq='3H')
# ax_group.set_xticks(xticks)
ax_group.axvline(0, color='k', ls='--', lw=1.5)
# ax_heat.axvline(0, color='r', ls='--')
ax_group.grid(True)
fig.tight_layout()
fig.subplots_adjust(right=0.946)
if save:
filename = 'PWV_anoms_{}_prior_flood.png'.format(days_prior)
plt.savefig(savefig_path / filename, bbox_inches='tight', pad_inches=0.1)
return ax_group
def produce_hydro_and_GNSS_stations_table(work_path=work_yuval,
hydro_path=hydro_path, gis_path=gis_path):
from PW_stations import produce_geo_gnss_solved_stations
from hydro_procedures import hydro_pw_dict, hydro_st_name_dict
from hydro_procedures import read_hydro_metadata
from hydro_procedures import get_hydro_near_GNSS
import xarray as xr
import pandas as pd
stns = [x for x in hydro_pw_dict.keys()]
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=False)
df_gnss = df_gnss.loc[stns]
df_gnss['ID'] = df_gnss.index.str.upper()
pd.options.display.float_format = '{:.2f}'.format
df = df_gnss[['name', 'ID', 'lat', 'lon', 'alt']]
df['alt'] = df['alt'].map('{:,.0f}'.format)
cols = ['GNSS Station name', 'Station ID', 'Latitude [N]',
'Longitude [E]', 'Altitude [m a.s.l]']
df.columns = cols
# df.loc['spir', 'GNSS station name'] = 'Sapir'
hydro_meta = read_hydro_metadata(hydro_path, gis_path, plot=False)
hdf = hydro_meta.loc[:, ['id', 'alt', 'lat', 'lon']]
hdf = hdf.set_index('id')
hdf = hdf.loc[[x for x in hydro_pw_dict.values()], :]
hdf['station_name'] = [x for x in hydro_st_name_dict.values()]
hdf['nearest_gnss'] = [x.upper() for x in hydro_pw_dict.keys()]
hdf1 = get_hydro_near_GNSS(radius=15, plot=False)
li = []
for st, hs_id in hydro_pw_dict.items():
dis = hdf1[hdf1['id'] == hs_id].loc[:, st]
li.append(dis.values[0])
hdf['distance_to_gnss'] = [x/1000.0 for x in li]
hdf['alt'] = hdf['alt'].map('{:,.0f}'.format)
hdf['station_number'] = [int(x) for x in hydro_pw_dict.values()]
hdf['distance_to_gnss'] = hdf['distance_to_gnss'].map('{:,.0f}'.format)
# add tide events per station:
file = hydro_path / 'hydro_tides_hourly_features_with_positives.nc'
tides = xr.load_dataset(file)['Tides']
tide_count = tides.to_dataset('GNSS').to_dataframe().count()
hdf['tides'] = [x for x in tide_count]
hdf = hdf[['station_name', 'station_number', 'lat', 'lon', 'alt', 'nearest_gnss', 'distance_to_gnss', 'tides']]
hdf.columns = ['Hydro station name', 'Station ID', 'Latitude [N]',
'Longitude [E]', 'Altitude [m a.s.l]', 'Nearest GNSS station', 'Distance to GNSS station [km]', 'Flood events near GNSS station']
return df, hdf
def plot_hydro_events_climatology(hydro_path=hydro_path, fontsize=16, save=True):
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
sns.set_style('whitegrid')
sns.set_style('ticks')
file = hydro_path / 'hydro_tides_hourly_features_with_positives.nc'
X = xr.load_dataset(file)['X_pos']
df = X['positive_sample'].groupby('positive_sample.month').count().to_dataframe()
# add July and August:
add = pd.DataFrame([0, 0], index=[7, 8])
add.index.name = 'month'
add.columns = ['positive_sample']
    # pd.concat replaces the deprecated DataFrame.append
    df = pd.concat([df, add]).sort_index()
fig, ax = plt.subplots(figsize=(8, 6))
sns.barplot(data=df, x=df.index, y='positive_sample', ax=ax, color='k', alpha=0.8)
ax.grid(True)
ax.set_ylabel('Number of unique flood events [#]', fontsize=fontsize)
ax.set_xlabel('month', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
fig.tight_layout()
if save:
filename = 'tides_count_climatology.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_hydro_GNSS_periods_and_map(path=work_yuval, gis_path=gis_path,
ims=False, dem_path=dem_path,
hydro_path=hydro_path,
fontsize=22, save=True):
from aux_gps import gantt_chart
import xarray as xr
import pandas as pd
import geopandas as gpd
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
from hydro_procedures import hydro_pw_dict
import cartopy.crs as ccrs
from hydro_procedures import read_hydro_metadata
    from hydro_procedures import prepare_tide_events_GNSS_dataset
    import matplotlib.pyplot as plt
    import matplotlib.dates as mdates
    import seaborn as sns
sns.set_style('whitegrid')
sns.set_style('ticks')
fig = plt.figure(figsize=(20, 15))
grid = plt.GridSpec(1, 2, width_ratios=[
5, 5], wspace=0.125)
ax_gantt = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_map = fig.add_subplot(grid[0, 1], projection=ccrs.PlateCarree()) # plt.subplot(122)
extent = [34, 36.0, 29.2, 32.5]
ax_map.set_extent(extent)
# fig, ax = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(20, 6))
# RINEX gantt chart:
file = hydro_path / 'hydro_tides_hourly_features.nc'
ds = xr.open_dataset(file)
just_pw = [x for x in hydro_pw_dict.keys()]
ds = ds[just_pw]
da = ds.to_array('station')
da['station'] = [x.upper() for x in da.station.values]
ds = da.to_dataset('station')
# add tide events
ds_events = prepare_tide_events_GNSS_dataset(hydro_path)
# merge in couples for keeping the original order:
li = []
for pwv, tide in zip(ds, ds_events):
first = ds[pwv]
second = ds_events[tide]
second.name = first.name + '*'
li.append(first)
li.append(second)
ds = xr.merge(li)
# colors:
# title = 'Daily RINEX files availability for the Israeli GNSS stations'
c = sns.color_palette('Dark2', n_colors=int(len(ds) / 2))
colors = []
for color in c:
colors.append(color)
colors.append(color)
ax_gantt = gantt_chart(
ds,
ax=ax_gantt,
        fw='bold', grid=True, marker='x', marker_suffix='*',
title='', colors=colors,
pe_dict=None, fontsize=fontsize, linewidth=24, antialiased=False)
years_fmt = mdates.DateFormatter('%Y')
# ax_gantt.xaxis.set_major_locator(mdates.YearLocator())
ax_gantt.xaxis.set_major_locator(mdates.YearLocator(4))
ax_gantt.xaxis.set_minor_locator(mdates.YearLocator(1))
ax_gantt.xaxis.set_major_formatter(years_fmt)
# ax_gantt.xaxis.set_minor_formatter(years_fmt)
ax_gantt.tick_params(axis='x', labelrotation=0)
# Israel gps ims map:
ax_map = plot_israel_map(
gis_path=gis_path, ax=ax_map, ticklabelsize=fontsize)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
dem = dem.sel(lat=slice(29.2, 32.5), lon=slice(34, 36.3))
fg = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
# scale_bar(ax_map, 50)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level',
size=fontsize, weight='normal')
cb.ax.tick_params(labelsize=fontsize)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
# ax_map.xaxis.set_major_locator(ticker.MaxNLocator(2))
# ax_map.yaxis.set_major_locator(ticker.MaxNLocator(5))
# ax_map.yaxis.set_major_formatter(lat_formatter)
# ax_map.xaxis.set_major_formatter(lon_formatter)
# ax_map.gridlines(draw_labels=True, dms=False, x_inline=False,
# y_inline=False, xformatter=lon_formatter, yformatter=lat_formatter,
# xlocs=ticker.MaxNLocator(2), ylocs=ticker.MaxNLocator(5))
# fig.canvas.draw()
ax_map.set_xticks([34, 35, 36])
ax_map.set_yticks([29.5, 30, 30.5, 31, 31.5, 32, 32.5])
ax_map.tick_params(top=True, bottom=True, left=True, right=True,
direction='out', labelsize=fontsize)
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
gps = gps.loc[just_pw, :]
# gps_list = [x for x in gps.index if x not in merged and x not in removed]
gps.plot(ax=ax_map, edgecolor='black', marker='s',
alpha=1.0, markersize=100, facecolor="None", linewidth=2, zorder=3)
to_plot_offset = ['nizn', 'ramo', 'nrif']
for x, y, label in zip(gps.lon, gps.lat, gps.index.str.upper()):
if label.lower() in to_plot_offset:
ax_map.annotate(label, xy=(x, y), xytext=(4, -15),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
else:
ax_map.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
# geo_annotate(ax_map, gps_normal_anno.lon, gps_normal_anno.lat,
# gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# geo_annotate(ax_map, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax_map, color='black', edgecolor='black',
marker='x', linewidth=2, zorder=2, markersize=100)
geo_annotate(ax_map, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=fontsize - 2, colorupdown=False)
# now add hydro stations:
hydro_meta = read_hydro_metadata(hydro_path, gis_path, plot=False)
hm = hydro_meta.loc[:, ['id', 'name', 'alt', 'lat', 'lon']]
hm = hm.set_index('id')
hm = hm.loc[[x for x in hydro_pw_dict.values()], :]
hmgdf = gpd.GeoDataFrame(hm, geometry=gpd.points_from_xy(hm.lon, hm.lat), crs=gps.crs)
hmgdf.plot(ax=ax_map, edgecolor='black', marker='o',
alpha=1.0, markersize=100, facecolor='tab:pink', zorder=4)
# plt.legend(['GNSS \nreceiver sites',
# 'removed \nGNSS sites',
# 'merged \nGNSS sites',
# 'radiosonde\nstation'],
# loc='upper left', framealpha=0.7, fancybox=True,
# handletextpad=0.2, handlelength=1.5)
if ims:
print('getting IMS temperature stations metadata...')
ims = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims.plot(ax=ax_map, marker='o', edgecolor='tab:orange', alpha=1.0,
markersize=35, facecolor="tab:orange", zorder=1)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
print('getting solved GNSS israeli stations metadata...')
plt.legend(['GNSS \nstations',
'radiosonde\nstation', 'IMS stations'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
else:
plt.legend(['GNSS \nstations',
'radiosonde\nstation',
'hydrometric\nstations'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
fig.subplots_adjust(top=0.95,
bottom=0.11,
left=0.05,
right=0.95,
hspace=0.2,
wspace=0.2)
# plt.legend(['IMS stations', 'GNSS stations'], loc='upper left')
filename = 'hydro_israeli_gnss_map.png'
# caption('Daily RINEX files availability for the Israeli GNSS station network at the SOPAC/GARNER website')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def produce_all_param_grid_tables():
import pandas as pd
mlp = produce_single_param_grid_table('MLP')
rf = produce_single_param_grid_table('RF')
svc = produce_single_param_grid_table('SVC')
df = pd.concat([svc, rf, mlp], axis=1)
df = df.fillna(' ')
return df
def produce_single_param_grid_table(model='MLP'):
import pandas as pd
from hydro_procedures import ML_Classifier_Switcher
numeric = ['C', 'alpha', 'gamma', 'max_depth', 'n_estimators']
numeric_type = ['log', 'log', 'log', 'int', 'int']
numeric_dict = dict(zip(numeric, numeric_type))
ml = ML_Classifier_Switcher()
ml.pick_model(model)
params = ml.param_grid
num_params = [x for x in params.keys() if x in numeric]
num_dict = dict((k, params[k]) for k in num_params)
other_params = [x for x in params.keys() if x not in numeric]
other_dict = dict((k, params[k]) for k in other_params)
di = {}
for key, val in other_dict.items():
val = [str(x) for x in val]
di[key] = ', '.join(val)
for key, val in num_dict.items():
if numeric_dict[key] != 'log':
val = '{} to {}'.format(val[0], val[-1])
else:
val = r'{} to {}'.format(sci_notation(val[0]), sci_notation(val[-1]))
di[key] = val
df = pd.Series(di).to_frame('Options')
df['Parameters'] = df.index.str.replace('_', ' ')
df = df[['Parameters', 'Options']]
df = df.reset_index(drop=True)
return df
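# Note: `sci_notation` used above is defined elsewhere in this module; a minimal
# sketch of the assumed behaviour (format a number in LaTeX-style scientific
# notation) could look like this (hypothetical helper, default precision assumed):
#
#   def sci_notation(num, decimal_digits=1):
#       from math import floor, log10
#       exponent = int(floor(log10(abs(num))))
#       coeff = round(num / 10 ** exponent, decimal_digits)
#       return r'{0}$\times$10$^{{{1}}}$'.format(coeff, exponent)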
| 44.445172
| 257
| 0.579172
|
ed27b955d92808f1f201ff30a6bdfe99c48de31c
| 3,890
|
py
|
Python
|
build_netmap.py
|
majduk/net-surveror
|
07c775e9804029ef70661168a046e10e35b75535
|
[
"MIT"
] | 4
|
2020-04-21T12:55:37.000Z
|
2021-06-08T03:24:08.000Z
|
build_netmap.py
|
majduk/net-surveror
|
07c775e9804029ef70661168a046e10e35b75535
|
[
"MIT"
] | 1
|
2020-06-17T20:06:44.000Z
|
2020-06-18T07:23:30.000Z
|
build_netmap.py
|
majduk/net-surveror
|
07c775e9804029ef70661168a046e10e35b75535
|
[
"MIT"
] | 3
|
2020-04-21T18:21:31.000Z
|
2021-06-08T03:24:11.000Z
|
#!/usr/bin/python3
import json
import os
from optparse import OptionParser
def init_netmap(netmap):
netmap['vlans'] = []
netmap['machines'] = {}
netmap['switches'] = {}
netmap['links'] = []
def add_vlan(netmap, vlan):
if vlan not in netmap['vlans']:
netmap['vlans'].append(vlan)
def add_switch_port(netmap, switch, pdata):
if switch not in netmap['switches']:
netmap['switches'][switch] = {}
if 'ports' not in netmap['switches'][switch]:
netmap['switches'][switch]['ports'] = {}
if pdata['port'] not in netmap['switches'][switch]['ports']:
netmap['switches'][switch]['ports'][pdata['port']] = pdata
if 'vlans' not in netmap['switches'][switch]:
netmap['switches'][switch]['vlans'] = {}
if 'vlan' in pdata:
vlan_id = pdata['vlan']
else:
vlan_id = 'untagged'
if vlan_id not in netmap['switches'][switch]['vlans']:
netmap['switches'][switch]['vlans'][vlan_id] = []
netmap['switches'][switch]['vlans'][vlan_id].append(pdata['port'])
def add_host_port(netmap, hostname, ifname, pdata):
if hostname not in netmap['machines']:
netmap['machines'][hostname] = {}
if 'ports' not in netmap['machines'][hostname]:
netmap['machines'][hostname]['ports'] = {}
if ifname not in netmap['machines'][hostname]['ports']:
netmap['machines'][hostname]['ports'][ifname] = pdata
if 'vlans' not in netmap['machines'][hostname]:
netmap['machines'][hostname]['vlans'] = {}
if 'vlan' in pdata:
vlan_id = pdata['vlan']
else:
vlan_id = 'untagged'
if vlan_id not in netmap['machines'][hostname]['vlans']:
netmap['machines'][hostname]['vlans'][vlan_id] = []
netmap['machines'][hostname]['vlans'][vlan_id].append(ifname)
def add_link(netmap, hostname, host_port, switch_name, switch_port):
link = {'source_host': hostname,
'source_port': host_port,
'destination_host': switch_name,
'destination_port': switch_port,
}
netmap['links'].append(link)
def parse_machine_file(netmap, work_dir, fname):
    hostname = fname.split('.')[0]
with open(work_dir + "/" + fname) as f:
data = json.load(f)
for iface in data['lldp']['interface']:
for ifname in iface.keys():
iface_lldp = iface[ifname]
pdata = {}
pdata['raw'] = iface_lldp
pdata['descr'] = iface_lldp['port']['descr']
pdata['port'] = iface_lldp['port']['id']['value']
if 'vlan' in iface_lldp:
vid = iface_lldp['vlan']['vlan-id']
pdata['vlan'] = vid
add_vlan(netmap, vid)
for chassis_name in iface_lldp['chassis'].keys():
pdata['chassis'] = chassis_name
add_switch_port(netmap, chassis_name, pdata)
add_host_port(netmap, hostname, ifname, pdata)
add_link(netmap, hostname, ifname, chassis_name, pdata['port'])
def populate_netmap(netmap, work_dir):
for fname in os.listdir(work_dir):
parse_machine_file(netmap, work_dir, fname)
def main(options):
netmap = {}
init_netmap(netmap)
populate_netmap(netmap, options.work_dir)
with open(options.outfile, 'w') as outfile:
json.dump(netmap, outfile)
if __name__ == "__main__":
    usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-d", "--dir",
action="store", type="string", dest="work_dir", default="/tmp/lldp", help="Input directory")
parser.add_option("-o", "--output",
action="store", type="string", dest="outfile", default='netmap.json', help="Output file")
(options, args) = parser.parse_args()
main(options)
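# The resulting netmap.json has roughly this shape (illustrative values only):
#
#   {
#     "vlans": [100],
#     "machines": {"host1": {"ports": {"eth0": {...}}, "vlans": {"100": ["eth0"]}}},
#     "switches": {"sw1": {"ports": {"Ethernet1/1": {...}}, "vlans": {"100": ["Ethernet1/1"]}}},
#     "links": [{"source_host": "host1", "source_port": "eth0",
#                "destination_host": "sw1", "destination_port": "Ethernet1/1"}]
#   }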
| 38.137255
| 116
| 0.586375
|
dd9a4c1fb885cb400b7002b88eec12a1923d57b0
| 138
|
py
|
Python
|
mainapp/admin.py
|
CheboiDerrick/hood-alert
|
620db39eaffa6e3c914f44e05cb853e4de99220f
|
[
"MIT"
] | null | null | null |
mainapp/admin.py
|
CheboiDerrick/hood-alert
|
620db39eaffa6e3c914f44e05cb853e4de99220f
|
[
"MIT"
] | null | null | null |
mainapp/admin.py
|
CheboiDerrick/hood-alert
|
620db39eaffa6e3c914f44e05cb853e4de99220f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from mainapp.models import Neighborhood
# Register your models here.
admin.site.register(Neighborhood)
| 19.714286
| 39
| 0.826087
|
cc4641df55ff00e26bd94b3b076cba9d146aa02e
| 1,813
|
py
|
Python
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/chardet/enums.py
|
brianherrera/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
[
"AML"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/chardet/enums.py
|
ArchitectureStudios/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
[
"AML"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/chardet/enums.py
|
ArchitectureStudios/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
[
"AML"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
"""
All of the Enums that are used throughout the chardet package.
:author: Dan Blanchard (dan.blanchard@gmail.com)
"""
class InputState(object):
"""
This enum represents the different states a universal detector can be in.
"""
PURE_ASCII = 0
ESC_ASCII = 1
HIGH_BYTE = 2
class LanguageFilter(object):
"""
This enum represents the different language filters we can apply to a
``UniversalDetector``.
"""
CHINESE_SIMPLIFIED = 0x01
CHINESE_TRADITIONAL = 0x02
JAPANESE = 0x04
KOREAN = 0x08
NON_CJK = 0x10
ALL = 0x1F
CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL
CJK = CHINESE | JAPANESE | KOREAN
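    # Example: LanguageFilter.CHINESE == 0x03, and membership can be tested with
    # a bitwise AND, e.g. (lang_filter & LanguageFilter.JAPANESE) != 0.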
class ProbingState(object):
"""
This enum represents the different states a prober can be in.
"""
DETECTING = 0
FOUND_IT = 1
NOT_ME = 2
class MachineState(object):
"""
This enum represents the different states a state machine can be in.
"""
START = 0
ERROR = 1
ITS_ME = 2
class SequenceLikelihood(object):
"""
This enum represents the likelihood of a character following the previous one.
"""
NEGATIVE = 0
UNLIKELY = 1
LIKELY = 2
POSITIVE = 3
@classmethod
def get_num_categories(cls):
""":returns: The number of likelihood categories in the enum."""
return 4
class CharacterCategory(object):
"""
This enum represents the different categories language models for
``SingleByteCharsetProber`` put characters into.
Anything less than CONTROL is considered a letter.
"""
UNDEFINED = 255
LINE_BREAK = 254
SYMBOL = 253
DIGIT = 252
CONTROL = 251
| 23.545455
| 84
| 0.600662
|
4654c7d13b785f3b0cff5fe97ea6764a5b61bace
| 11,994
|
py
|
Python
|
tracker.py
|
jokajak/infinity_tracker
|
21f83925d9899dc25bc58b198426f329a549b0e0
|
[
"Apache-2.0"
] | 1
|
2021-01-21T08:44:21.000Z
|
2021-01-21T08:44:21.000Z
|
tracker.py
|
jokajak/infinity_tracker
|
21f83925d9899dc25bc58b198426f329a549b0e0
|
[
"Apache-2.0"
] | 126
|
2020-08-03T22:07:38.000Z
|
2022-03-28T22:25:59.000Z
|
tracker.py
|
jokajak/infinity_tracker
|
21f83925d9899dc25bc58b198426f329a549b0e0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This proxies connections for a Carrier Infinity system.
# It reads the data being transferred and logs it to an influxdb server
import argparse
import json
import logging
import re
import urllib2
import urlparse
import xml.etree.ElementTree as ET
import proxy
import requests
from utils import _escape_tag, _escape_value, extract_req_body
logger = logging.getLogger()
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# global for passing influxdb URL
influxdb = None
# global for passing wunderground api key and location
api_key = None
location_query = None
# Registration of routes
routes = {"save": {}, "request": {}, "response": {}}
def route(rule, f_type="save"):
"""A decorator that is used to register a view function for a
given URL rule.
f_type allows specifying whether it handles a request, a response,
or a save
"""
def decorator(f):
routes[f_type][rule] = f
return f
return decorator
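# Illustrative use of the decorator (hypothetical path; the real handlers are
# registered further below):
#
#   @route("/systems/(?P<sn>.*)/ping", "request")
#   def ping_handler(req, req_body, sn):
#       logger.debug("ping from %s", sn)
#
# The function is simply stored in routes["request"] under that regex and later
# matched against incoming request paths by CarrierProxyRequestHandler.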
def get_current_temp(api_key, location_query):
if not (api_key and location_query):
return False
url = "http://api.wunderground.com/api/{}/geolookup/conditions/q/{}.json"
url = url.format(api_key, location_query)
f = urllib2.urlopen(url)
json_string = f.read()
parsed_json = json.loads(json_string)
temp_f = parsed_json["current_observation"]["temp_f"]
f.close()
return temp_f
def status_handler(update_text, sn="Unknown"):
if not influxdb:
return
try:
root = ET.fromstring(update_text)
except Exception:
logger.exception("Failed to parse request: %s", update_text)
return
sn = _escape_tag(sn)
lines = []
value = _escape_value(float(root.find("filtrlvl").text))
lines.append("filter,sn={} value={}".format(sn, value))
unit_mode = _escape_value(root.find("mode").text)
lines.append("mode,sn={} value={}".format(sn, unit_mode))
zones = []
known_tags = {
"enabled": None,
"currentActivity": "activity",
"rt": "temp",
"rh": "humidity",
"fan": "fan",
"htsp": "heat_set_point",
"clsp": "cool_set_point",
"hold": "hold",
"name": None,
"otmr": None,
}
transforms = {
"temp": float,
"humidity": float,
"heat_set_point": float,
"cool_set_point": float,
"fan": lambda val: val != "off", # Converts to boolean
"hold": lambda val: val != "off", # Converts to boolean
}
for zone_set in root.findall("zones"):
for zone in zone_set.findall("zone"):
if zone.find("enabled").text == "off":
continue
hvac_zone = {
"zone_id": _escape_tag(zone.attrib["id"]),
"name": _escape_tag(zone.find("name").text),
"attributes": {},
}
for tag, key in known_tags.items():
node = zone.find(tag)
if node is None:
logger.debug(
"Could not find tag %s in body: %s", tag, zone.find(tag)
)
continue
value = node.text or "0"
transform = transforms.get(key, str)
value = transform(value)
hvac_zone["attributes"][key] = _escape_value(value)
zones.append(hvac_zone)
for child in zone:
if child.tag not in known_tags:
logger.info("Unknown tag: %s: %s", child.tag, child.text)
for zone in zones:
templ = "sn={},zone={},zone_id={}".format(sn, zone["name"], zone["zone_id"])
for field, value in zone["attributes"].items():
if not field:
continue
            # Both branches previously built an identical line, so the type
            # check is unnecessary.
            line = "{},{} value={}".format(field, templ, value)
            lines.append(line)
logger.debug(unit_mode)
logger.debug(unit_mode == '"cool"')
if unit_mode == '"cool"' or unit_mode == '"dehumidify"':
logger.debug("Cooling")
field = "cooling"
value = zone["attributes"]["temp"]
line = "{},{} value={}".format(field, templ, value)
lines.append(line)
if unit_mode == '"heat"':
field = "heating"
value = zone["attributes"]["temp"]
line = "{},{} value={}".format(field, templ, value)
lines.append(line)
headers = {"Content-Type": "application/octet-stream", "Accept": "text/plain"}
try:
temp_f = get_current_temp(api_key, location_query)
if temp_f:
lines.append("outside_temp,sn={} value={}".format(sn, temp_f))
except Exception as e:
logger.exception("Failed to get current temp: %s", e)
lines = "\n".join(lines)
lines = lines + "\n"
# logger.debug('Submitting %s', lines)
r = requests.post(influxdb, headers=headers, data=lines)
logging.getLogger("requests").debug(r.text)
return
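# status_handler above posts InfluxDB line-protocol records; an illustrative
# payload (serial number, zone and values are hypothetical) looks like:
#
#   filter,sn=1234ABC value=20.0
#   mode,sn=1234ABC value="cool"
#   temp,sn=1234ABC,zone=Living,zone_id=1 value=71.5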
@route("/systems/(?P<sn>.*)/status", "request")
def systems_status_req_handler(req, req_body, sn):
"""Handle save requests for systems status."""
    content_length = int(req.headers.get("Content-Length", 0) or 0)
    if content_length == 0:
logger.debug("Status check")
else:
req_body_text = None
content_type = req.headers.get("Content-Type", "")
if content_type.startswith("application/x-www-form-urlencoded"):
if req_body is not None:
req_body_text = extract_req_body(req_body)
status_handler(req_body_text, sn)
@route("/systems/(?P<sn>.*)/status")
def systems_status_save_handler(req, req_body, res, res_body, sn):
"""Handle save requests for systems status."""
content_type = res.headers.get("Content-Type", "")
if content_type.startswith("application/xml"):
try:
root = ET.fromstring(res_body)
server_has_changes = root.find("serverHasChanges").text
if server_has_changes != "false":
logger.debug("Remote changes")
else:
logger.debug("No remote changes")
except Exception:
logger.exception("Failed to parse response: %s", res_body)
return True
@route("/systems/(?P<sn>.*)")
def config_handler(req, req_body, res, res_body, sn):
"""Handle system config updates."""
logger.info("System config update")
return True
@route("/systems/(?P<sn>.*)/idu_config")
def idu_config_handler(req, req_body, res, res_body, sn):
"""Handle InDoor Unit config updates."""
logger.info("InDoor Unit config update")
pass
@route("/systems/(?P<sn>.*)/odu_config")
def odu_config_handler(req, req_body, res, res_body, sn):
"""Handle OutDoor Unit config updates."""
logger.info("OutDoor Unit config update")
pass
@route("/systems/(?P<sn>.*)/idu_status")
def idu_status_handler(req, req_body, res, res_body, sn):
"""Handle InDoor Unit status updates."""
logger.info("InDoor Unit status update")
pass
@route("/systems/(?P<sn>.*)/odu_status")
def odu_status_handler(req, req_body, res, res_body, sn):
"""Handle OutDoor Unit status updates."""
logger.info("OutDoor Unit status update")
pass
@route("/Alive")
def alive_handler(req, req_body, res, res_body):
"""Handles Alive calls."""
logger.info("Alive called")
return True
@route("/weather/(?P<zip>.*)/forecast")
def forecast_handler(req, req_body, res, res_body, zip):
"""Handles forecast requests"""
return True
class CarrierProxyRequestHandler(proxy.ProxyRequestHandler):
def request_handler(self, req, req_body):
"""Used to modify requests."""
u = urlparse.urlsplit(req.path)
path = u.path
handler = None
handler_routes = routes["request"]
for route in handler_routes:
route_re = "^{}$".format(route) # Find exact matches only
m = re.match(route_re, path)
if m:
handler = handler_routes[route]
# From https://stackoverflow.com/q/11065419
# Convert match elements to kw args
handler(req, req_body, **m.groupdict())
pass
def response_handler(self, req, req_body, res, res_body):
"""Used to modify responses."""
u = urlparse.urlsplit(req.path)
path = u.path
handler = None
handler_routes = routes["response"]
for route in handler_routes:
route_re = "^{}$".format(route) # Find exact matches only
m = re.match(route_re, path)
if m:
handler = handler_routes[route]
# From https://stackoverflow.com/q/11065419
# Convert match elements to kw args
handler(req, req_body, res, res_body, **m.groupdict())
pass
def save_handler(self, req, req_body, res, res_body):
squelch_output = False
u = urlparse.urlsplit(req.path)
path = u.path
handler = None
handler_routes = routes["save"]
for route in handler_routes:
route_re = "^{}$".format(route) # Find exact matches only
m = re.match(route_re, path)
if m:
logger.debug("Found a save handler for %s", path)
handler = handler_routes[route]
# From https://stackoverflow.com/q/11065419
# Convert match elements to kw args
squelch_output = handler(req, req_body, res, res_body, **m.groupdict())
if not squelch_output:
self.print_info(req, req_body, res, res_body)
if handler is None:
logger.info("Unknown save path: %s", path)
return
def main():
parser = argparse.ArgumentParser(description="Proxy server.")
parser.add_argument(
"-p", "--port", default=8080, type=int, help="Port to listen on"
)
parser.add_argument(
"-a", "--address", default="", type=str, help="Address to listen on"
)
parser.add_argument(
"-s", "--server", type=str, default="", help="InfluxDB Server DSN"
)
parser.add_argument("--api_key", type=str, help="Weather Underground API Key")
parser.add_argument(
"--location", type=str, help="Weather Underground location query."
)
log_choices = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
parser.add_argument(
"-l",
"--log",
dest="logLevel",
default="INFO",
choices=log_choices,
help="Set the logging level",
)
args = parser.parse_args()
# Set up clean logging to stderr
log_level = getattr(logging, args.logLevel)
datefmt = "%m/%d/%Y %H:%M:%S"
log_format = "%(asctime)s"
if args.logLevel == "DEBUG":
log_format = "{} %(filename)s".format(log_format)
log_format = "{} %(funcName)s:%(lineno)d".format(log_format)
log_format = "{} %(levelname)s: %(message)s".format(log_format)
logging.basicConfig(level=log_level, format=log_format, datefmt=datefmt)
global influxdb
if args.server != "":
influxdb = args.server
global api_key
if args.api_key:
api_key = args.api_key
global location_query
if args.location:
location_query = args.location
server_address = (args.address, args.port)
HandlerClass = CarrierProxyRequestHandler
ServerClass = proxy.ThreadingHTTPServer
protocol = "HTTP/1.1"
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
logging.info("Serving HTTP Proxy on %s port %s ...", sa[0], sa[1])
httpd.serve_forever()
return
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
exit(1)
| 33.041322
| 87
| 0.597049
|
cb4ed701e33719db1f676a63e533054c52948626
| 1,215
|
py
|
Python
|
test/test_cloud_pools.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
test/test_cloud_pools.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
test/test_cloud_pools.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.cloud_pools import CloudPools
class TestCloudPools(unittest.TestCase):
""" CloudPools unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCloudPools(self):
"""
Test CloudPools
"""
model = swagger_client.models.cloud_pools.CloudPools()
if __name__ == '__main__':
unittest.main()
| 24.795918
| 75
| 0.722634
|
f439f07790274ec2e9fbe869dc52bdc21c593a2b
| 34,957
|
py
|
Python
|
pandas/core/arrays/timedeltas.py
|
Veronur/pandas
|
6258397047b9debc11bd77b3dd0cd60aa49762fd
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4
|
2015-06-09T07:27:52.000Z
|
2021-08-06T13:50:05.000Z
|
pandas/core/arrays/timedeltas.py
|
Veronur/pandas
|
6258397047b9debc11bd77b3dd0cd60aa49762fd
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 7
|
2015-08-30T23:51:00.000Z
|
2018-12-29T19:52:35.000Z
|
pandas/core/arrays/timedeltas.py
|
Veronur/pandas
|
6258397047b9debc11bd77b3dd0cd60aa49762fd
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 5
|
2017-10-04T22:24:49.000Z
|
2021-08-06T13:50:13.000Z
|
from datetime import timedelta
from typing import List
import numpy as np
from pandas._libs import lib, tslibs
from pandas._libs.tslibs import NaT, Period, Timedelta, Timestamp, iNaT, to_offset
from pandas._libs.tslibs.conversion import precision_from_unit
from pandas._libs.tslibs.fields import get_timedelta_field
from pandas._libs.tslibs.timedeltas import array_to_timedelta64, parse_timedelta_unit
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
TD64NS_DTYPE,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCSeries, ABCTimedeltaIndex
from pandas.core.dtypes.missing import isna
from pandas.core import nanops
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays._ranges import generate_regular_range
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.tseries.offsets import Tick
def _field_accessor(name, alias, docstring=None):
def f(self):
values = self.asi8
result = get_timedelta_field(values, alias)
if self._hasnans:
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = f"\n{docstring}\n"
return property(f)
class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps):
"""
Pandas ExtensionArray for timedelta data.
.. versionadded:: 0.24.0
.. warning::
TimedeltaArray is currently experimental, and its API may change
without warning. In particular, :attr:`TimedeltaArray.dtype` is
expected to change to be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : array-like
The timedelta data.
dtype : numpy.dtype
Currently, only ``numpy.dtype("timedelta64[ns]")`` is accepted.
freq : Offset, optional
copy : bool, default False
Whether to copy the underlying array of data.
Attributes
----------
None
Methods
-------
None
"""
_typ = "timedeltaarray"
_scalar_type = Timedelta
_recognized_scalars = (timedelta, np.timedelta64, Tick)
_is_recognized_dtype = is_timedelta64_dtype
__array_priority__ = 1000
# define my properties & methods for delegation
_other_ops: List[str] = []
_bool_ops: List[str] = []
_object_ops = ["freq"]
_field_ops = ["days", "seconds", "microseconds", "nanoseconds"]
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = [
"to_pytimedelta",
"total_seconds",
"round",
"floor",
"ceil",
]
# Note: ndim must be defined to ensure NaT.__richcmp(TimedeltaArray)
# operates pointwise.
@property
def _box_func(self):
return lambda x: Timedelta(x, unit="ns")
@property
def dtype(self):
"""
The dtype for the TimedeltaArray.
.. warning::
A future version of pandas will change dtype to be an instance
of a :class:`pandas.api.extensions.ExtensionDtype` subclass,
not a ``numpy.dtype``.
Returns
-------
numpy.dtype
"""
return TD64NS_DTYPE
# ----------------------------------------------------------------
# Constructors
def __init__(self, values, dtype=TD64NS_DTYPE, freq=lib.no_default, copy=False):
values = extract_array(values)
inferred_freq = getattr(values, "_freq", None)
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
if isinstance(values, type(self)):
if explicit_none:
# dont inherit from values
pass
elif freq is None:
freq = values.freq
elif freq and values.freq:
freq = to_offset(freq)
freq, _ = dtl.validate_inferred_freq(freq, values.freq, False)
values = values._data
if not isinstance(values, np.ndarray):
msg = (
f"Unexpected type '{type(values).__name__}'. 'values' must be a "
"TimedeltaArray ndarray, or Series or Index containing one of those."
)
raise ValueError(msg)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(TD64NS_DTYPE)
_validate_td64_dtype(values.dtype)
dtype = _validate_td64_dtype(dtype)
if freq == "infer":
msg = (
"Frequency inference not allowed in TimedeltaArray.__init__. "
"Use 'pd.array()' instead."
)
raise ValueError(msg)
if copy:
values = values.copy()
if freq:
freq = to_offset(freq)
self._data = values
self._dtype = dtype
self._freq = freq
if inferred_freq is None and freq is not None:
type(self)._validate_frequency(self, freq)
@classmethod
def _simple_new(cls, values, freq=None, dtype=TD64NS_DTYPE):
assert dtype == TD64NS_DTYPE, dtype
assert isinstance(values, np.ndarray), type(values)
if values.dtype != TD64NS_DTYPE:
assert values.dtype == "i8"
values = values.view(TD64NS_DTYPE)
result = object.__new__(cls)
result._data = values
result._freq = to_offset(freq)
result._dtype = TD64NS_DTYPE
return result
@classmethod
def _from_sequence(
cls, data, dtype=TD64NS_DTYPE, copy=False, freq=lib.no_default, unit=None
):
if dtype:
_validate_td64_dtype(dtype)
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
if explicit_none:
freq = None
result = cls._simple_new(data, freq=freq)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
cls._validate_frequency(result, freq)
elif freq_infer:
# Set _freq directly to bypass duplicative _validate_frequency
# check.
result._freq = to_offset(result.inferred_freq)
return result
@classmethod
def _generate_range(cls, start, end, periods, freq, closed=None):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
if start is not None:
start = Timedelta(start)
if end is not None:
end = Timedelta(end)
left_closed, right_closed = dtl.validate_endpoints(closed)
if freq is not None:
index = generate_regular_range(start, end, periods, freq)
else:
index = np.linspace(start.value, end.value, periods).astype("i8")
if len(index) >= 2:
# Infer a frequency
td = Timedelta(index[1] - index[0])
freq = to_offset(td)
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return cls._simple_new(index, freq=freq)
# ----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value):
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timedelta.")
self._check_compatible_with(value)
return value.value
def _scalar_from_string(self, value):
return Timedelta(value)
def _check_compatible_with(self, other, setitem: bool = False):
# we don't have anything to validate.
pass
def _maybe_clear_freq(self):
self._freq = None
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def astype(self, dtype, copy=True):
# We handle
# --> timedelta64[ns]
# --> timedelta64
# DatetimeLikeArrayMixin super call handles other cases
dtype = pandas_dtype(dtype)
if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype):
# by pandas convention, converting to non-nano timedelta64
# returns an int64-dtyped array with ints representing multiples
# of the desired timedelta unit. This is essentially division
if self._hasnans:
# avoid double-copying
result = self._data.astype(dtype, copy=False)
values = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return values
result = self._data.astype(dtype, copy=copy)
return result.astype("i8")
elif is_timedelta64_ns_dtype(dtype):
if copy:
return self.copy()
return self
return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy)
# ----------------------------------------------------------------
# Reductions
def sum(
self,
axis=None,
dtype=None,
out=None,
keepdims: bool = False,
initial=None,
skipna: bool = True,
min_count: int = 0,
):
nv.validate_sum(
(), dict(dtype=dtype, out=out, keepdims=keepdims, initial=initial)
)
if not len(self):
return NaT
if not skipna and self._hasnans:
return NaT
result = nanops.nansum(
self._data, axis=axis, skipna=skipna, min_count=min_count
)
return Timedelta(result)
def std(
self,
axis=None,
dtype=None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="std"
)
if not len(self):
return NaT
if not skipna and self._hasnans:
return NaT
result = nanops.nanstd(self._data, axis=axis, skipna=skipna, ddof=ddof)
return Timedelta(result)
def median(
self,
axis=None,
out=None,
overwrite_input: bool = False,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_median(
(), dict(out=out, overwrite_input=overwrite_input, keepdims=keepdims)
)
return nanops.nanmedian(self._data, axis=axis, skipna=skipna)
# ----------------------------------------------------------------
# Rendering Methods
def _formatter(self, boxed=False):
from pandas.io.formats.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
from pandas.io.formats.format import _get_format_timedelta64
formatter = _get_format_timedelta64(self._data, na_rep)
return np.array([formatter(x) for x in self._data.ravel()]).reshape(self.shape)
# ----------------------------------------------------------------
# Arithmetic Methods
def _add_offset(self, other):
assert not isinstance(other, Tick)
raise TypeError(
f"cannot add the type {type(other).__name__} to a {type(self).__name__}"
)
def _add_period(self, other: Period):
"""
Add a Period object.
"""
# We will wrap in a PeriodArray and defer to the reversed operation
from .period import PeriodArray
i8vals = np.broadcast_to(other.ordinal, self.shape)
oth = PeriodArray(i8vals, freq=other.freq)
return oth + self
def _add_datetime_arraylike(self, other):
"""
Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray.
"""
if isinstance(other, np.ndarray):
# At this point we have already checked that dtype is datetime64
from pandas.core.arrays import DatetimeArray
other = DatetimeArray(other)
# defer to implementation in DatetimeArray
return other + self
def _add_datetimelike_scalar(self, other):
# adding a timedeltaindex to a datetimelike
from pandas.core.arrays import DatetimeArray
assert other is not NaT
other = Timestamp(other)
if other is NaT:
# In this case we specifically interpret NaT as a datetime, not
# the timedelta interpretation we would get by returning self + NaT
result = self.asi8.view("m8[ms]") + NaT.to_datetime64()
return DatetimeArray(result)
i8 = self.asi8
result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result)
dtype = DatetimeTZDtype(tz=other.tz) if other.tz else DT64NS_DTYPE
return DatetimeArray(result, dtype=dtype, freq=self.freq)
def _addsub_object_array(self, other, op):
# Add or subtract Array-like of objects
try:
# TimedeltaIndex can only operate with a subset of DateOffset
# subclasses. Incompatible classes will raise AttributeError,
# which we re-raise as TypeError
return super()._addsub_object_array(other, op)
except AttributeError as err:
raise TypeError(
f"Cannot add/subtract non-tick DateOffset to {type(self).__name__}"
) from err
@unpack_zerodim_and_defer("__mul__")
def __mul__(self, other):
if is_scalar(other):
# numpy will accept float and int, raise TypeError for others
result = self._data * other
freq = None
if self.freq is not None and not isna(other):
freq = self.freq * other
return type(self)(result, freq=freq)
if not hasattr(other, "dtype"):
# list, tuple
other = np.array(other)
if len(other) != len(self) and not is_timedelta64_dtype(other.dtype):
# Exclude timedelta64 here so we correctly raise TypeError
# for that instead of ValueError
raise ValueError("Cannot multiply with unequal lengths")
if is_object_dtype(other.dtype):
# this multiplication will succeed only if all elements of other
# are int or float scalars, so we will end up with
# timedelta64[ns]-dtyped result
result = [self[n] * other[n] for n in range(len(self))]
result = np.array(result)
return type(self)(result)
# numpy will accept float or int dtype, raise TypeError for others
result = self._data * other
return type(self)(result)
__rmul__ = __mul__
@unpack_zerodim_and_defer("__truediv__")
def __truediv__(self, other):
# timedelta / X is well-defined for timedelta-like or numeric X
if isinstance(other, (timedelta, np.timedelta64, Tick)):
other = Timedelta(other)
if other is NaT:
# specifically timedelta64-NaT
result = np.empty(self.shape, dtype=np.float64)
result.fill(np.nan)
return result
# otherwise, dispatch to Timedelta implementation
return self._data / other
elif lib.is_scalar(other):
# assume it is numeric
result = self._data / other
freq = None
if self.freq is not None:
# Tick division is not implemented, so operate on Timedelta
freq = self.freq.delta / other
return type(self)(result, freq=freq)
if not hasattr(other, "dtype"):
# e.g. list, tuple
other = np.array(other)
if len(other) != len(self):
raise ValueError("Cannot divide vectors with unequal lengths")
elif is_timedelta64_dtype(other.dtype):
# let numpy handle it
return self._data / other
elif is_object_dtype(other.dtype):
# We operate on raveled arrays to avoid problems in inference
# on NaT
srav = self.ravel()
orav = other.ravel()
result = [srav[n] / orav[n] for n in range(len(srav))]
result = np.array(result).reshape(self.shape)
# We need to do dtype inference in order to keep DataFrame ops
# behavior consistent with Series behavior
inferred = lib.infer_dtype(result)
if inferred == "timedelta":
flat = result.ravel()
result = type(self)._from_sequence(flat).reshape(result.shape)
elif inferred == "floating":
result = result.astype(float)
return result
else:
result = self._data / other
return type(self)(result)
@unpack_zerodim_and_defer("__rtruediv__")
def __rtruediv__(self, other):
# X / timedelta is defined only for timedelta-like X
if isinstance(other, (timedelta, np.timedelta64, Tick)):
other = Timedelta(other)
if other is NaT:
# specifically timedelta64-NaT
result = np.empty(self.shape, dtype=np.float64)
result.fill(np.nan)
return result
# otherwise, dispatch to Timedelta implementation
return other / self._data
elif lib.is_scalar(other):
raise TypeError(
f"Cannot divide {type(other).__name__} by {type(self).__name__}"
)
if not hasattr(other, "dtype"):
# e.g. list, tuple
other = np.array(other)
if len(other) != len(self):
raise ValueError("Cannot divide vectors with unequal lengths")
elif is_timedelta64_dtype(other.dtype):
# let numpy handle it
return other / self._data
elif is_object_dtype(other.dtype):
# Note: unlike in __truediv__, we do not _need_ to do type
# inference on the result. It does not raise, a numeric array
# is returned. GH#23829
result = [other[n] / self[n] for n in range(len(self))]
return np.array(result)
else:
raise TypeError(
f"Cannot divide {other.dtype} data by {type(self).__name__}"
)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
if is_scalar(other):
if isinstance(other, (timedelta, np.timedelta64, Tick)):
other = Timedelta(other)
if other is NaT:
# treat this specifically as timedelta-NaT
result = np.empty(self.shape, dtype=np.float64)
result.fill(np.nan)
return result
# dispatch to Timedelta implementation
result = other.__rfloordiv__(self._data)
return result
# at this point we should only have numeric scalars; anything
# else will raise
result = self.asi8 // other
result[self._isnan] = iNaT
freq = None
if self.freq is not None:
# Note: freq gets division, not floor-division
freq = self.freq / other
if freq.nanos == 0 and self.freq.nanos != 0:
# e.g. if self.freq is Nano(1) then dividing by 2
# rounds down to zero
freq = None
return type(self)(result.view("m8[ns]"), freq=freq)
if not hasattr(other, "dtype"):
# list, tuple
other = np.array(other)
if len(other) != len(self):
raise ValueError("Cannot divide with unequal lengths")
elif is_timedelta64_dtype(other.dtype):
other = type(self)(other)
# numpy timedelta64 does not natively support floordiv, so operate
# on the i8 values
result = self.asi8 // other.asi8
mask = self._isnan | other._isnan
if mask.any():
result = result.astype(np.int64)
result[mask] = np.nan
return result
elif is_object_dtype(other.dtype):
result = [self[n] // other[n] for n in range(len(self))]
result = np.array(result)
if lib.infer_dtype(result, skipna=False) == "timedelta":
result, _ = sequence_to_td64ns(result)
return type(self)(result)
return result
elif is_integer_dtype(other.dtype) or is_float_dtype(other.dtype):
result = self._data // other
return type(self)(result)
else:
dtype = getattr(other, "dtype", type(other).__name__)
raise TypeError(f"Cannot divide {dtype} by {type(self).__name__}")
@unpack_zerodim_and_defer("__rfloordiv__")
def __rfloordiv__(self, other):
if is_scalar(other):
if isinstance(other, (timedelta, np.timedelta64, Tick)):
other = Timedelta(other)
if other is NaT:
# treat this specifically as timedelta-NaT
result = np.empty(self.shape, dtype=np.float64)
result.fill(np.nan)
return result
# dispatch to Timedelta implementation
result = other.__floordiv__(self._data)
return result
raise TypeError(
f"Cannot divide {type(other).__name__} by {type(self).__name__}"
)
if not hasattr(other, "dtype"):
# list, tuple
other = np.array(other)
if len(other) != len(self):
raise ValueError("Cannot divide with unequal lengths")
elif is_timedelta64_dtype(other.dtype):
other = type(self)(other)
# numpy timedelta64 does not natively support floordiv, so operate
# on the i8 values
result = other.asi8 // self.asi8
mask = self._isnan | other._isnan
if mask.any():
result = result.astype(np.int64)
result[mask] = np.nan
return result
elif is_object_dtype(other.dtype):
result = [other[n] // self[n] for n in range(len(self))]
result = np.array(result)
return result
else:
dtype = getattr(other, "dtype", type(other).__name__)
raise TypeError(f"Cannot divide {dtype} by {type(self).__name__}")
@unpack_zerodim_and_defer("__mod__")
def __mod__(self, other):
# Note: This is a naive implementation, can likely be optimized
if isinstance(other, (timedelta, np.timedelta64, Tick)):
other = Timedelta(other)
return self - (self // other) * other
@unpack_zerodim_and_defer("__rmod__")
def __rmod__(self, other):
# Note: This is a naive implementation, can likely be optimized
if isinstance(other, (timedelta, np.timedelta64, Tick)):
other = Timedelta(other)
return other - (other // self) * self
@unpack_zerodim_and_defer("__divmod__")
def __divmod__(self, other):
# Note: This is a naive implementation, can likely be optimized
if isinstance(other, (timedelta, np.timedelta64, Tick)):
other = Timedelta(other)
res1 = self // other
res2 = self - res1 * other
return res1, res2
@unpack_zerodim_and_defer("__rdivmod__")
def __rdivmod__(self, other):
# Note: This is a naive implementation, can likely be optimized
if isinstance(other, (timedelta, np.timedelta64, Tick)):
other = Timedelta(other)
res1 = other // self
res2 = other - res1 * self
return res1, res2
def __neg__(self):
if self.freq is not None:
return type(self)(-self._data, freq=-self.freq)
return type(self)(-self._data)
def __pos__(self):
return type(self)(self._data, freq=self.freq)
def __abs__(self):
# Note: freq is not preserved
return type(self)(np.abs(self._data))
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timedelta methods
def total_seconds(self):
"""
Return total duration of each element expressed in seconds.
This method is available directly on TimedeltaArray, TimedeltaIndex
and on Series containing timedelta values under the ``.dt`` namespace.
Returns
-------
seconds : [ndarray, Float64Index, Series]
When the calling object is a TimedeltaArray, the return type
is ndarray. When the calling object is a TimedeltaIndex,
the return type is a Float64Index. When the calling object
is a Series, the return type is Series of type `float64` whose
index is the same as the original.
See Also
--------
datetime.timedelta.total_seconds : Standard library version
of this method.
TimedeltaIndex.components : Return a DataFrame with components of
each Timedelta.
Examples
--------
**Series**
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
>>> s
0 0 days
1 1 days
2 2 days
3 3 days
4 4 days
dtype: timedelta64[ns]
>>> s.dt.total_seconds()
0 0.0
1 86400.0
2 172800.0
3 259200.0
4 345600.0
dtype: float64
**TimedeltaIndex**
>>> idx = pd.to_timedelta(np.arange(5), unit='d')
>>> idx
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
>>> idx.total_seconds()
Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0],
dtype='float64')
"""
return self._maybe_mask_results(1e-9 * self.asi8, fill_value=None)
def to_pytimedelta(self) -> np.ndarray:
"""
Return Timedelta Array/Index as object ndarray of datetime.timedelta
objects.
Returns
-------
datetimes : ndarray
"""
return tslibs.ints_to_pytimedelta(self.asi8)
days = _field_accessor("days", "days", "Number of days for each element.")
seconds = _field_accessor(
"seconds",
"seconds",
"Number of seconds (>= 0 and less than 1 day) for each element.",
)
microseconds = _field_accessor(
"microseconds",
"microseconds",
"Number of microseconds (>= 0 and less than 1 second) for each element.",
)
nanoseconds = _field_accessor(
"nanoseconds",
"nanoseconds",
"Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.",
)
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = [
"days",
"hours",
"minutes",
"seconds",
"milliseconds",
"microseconds",
"nanoseconds",
]
hasnans = self._hasnans
if hasnans:
def f(x):
if isna(x):
return [np.nan] * len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self], columns=columns)
if not hasnans:
result = result.astype("int64")
return result
# ---------------------------------------------------------------------
# Constructor Helpers
def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"):
"""
Parameters
----------
data : list-like
copy : bool, default False
unit : str, default "ns"
The timedelta unit to treat integers as multiples of.
errors : {"raise", "coerce", "ignore"}, default "raise"
How to handle elements that cannot be converted to timedelta64[ns].
See ``pandas.to_timedelta`` for details.
Returns
-------
converted : numpy.ndarray
The sequence converted to a numpy array with dtype ``timedelta64[ns]``.
inferred_freq : Tick or None
The inferred frequency of the sequence.
Raises
------
ValueError : Data cannot be converted to timedelta64[ns].
Notes
-----
Unlike `pandas.to_timedelta`, if setting ``errors=ignore`` will not cause
errors to be ignored; they are caught and subsequently ignored at a
higher level.
"""
inferred_freq = None
unit = parse_timedelta_unit(unit)
# Unwrap whatever we have into a np.ndarray
if not hasattr(data, "dtype"):
# e.g. list, tuple
if np.ndim(data) == 0:
# i.e. generator
data = list(data)
data = np.array(data, copy=False)
elif isinstance(data, ABCSeries):
data = data._values
elif isinstance(data, (ABCTimedeltaIndex, TimedeltaArray)):
inferred_freq = data.freq
data = data._data
# Convert whatever we have into timedelta64[ns] dtype
if is_object_dtype(data.dtype) or is_string_dtype(data.dtype):
# no need to make a copy, need to convert if string-dtyped
data = objects_to_td64ns(data, unit=unit, errors=errors)
copy = False
elif is_integer_dtype(data.dtype):
# treat as multiples of the given unit
data, copy_made = ints_to_td64ns(data, unit=unit)
copy = copy and not copy_made
elif is_float_dtype(data.dtype):
# cast the unit, multiply base/frac separately
# to avoid precision issues from float -> int
mask = np.isnan(data)
m, p = precision_from_unit(unit)
base = data.astype(np.int64)
frac = data - base
if p:
frac = np.round(frac, p)
data = (base * m + (frac * m).astype(np.int64)).view("timedelta64[ns]")
data[mask] = iNaT
copy = False
elif is_timedelta64_dtype(data.dtype):
if data.dtype != TD64NS_DTYPE:
# non-nano unit
# TODO: watch out for overflows
data = data.astype(TD64NS_DTYPE)
copy = False
else:
# This includes datetime64-dtype, see GH#23539, GH#29794
raise TypeError(f"dtype {data.dtype} cannot be converted to timedelta64[ns]")
data = np.array(data, copy=copy)
assert data.dtype == "m8[ns]", data
return data, inferred_freq
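# Illustrative behaviour (hypothetical inputs):
#   sequence_to_td64ns(["1 days", "00:00:01"])
#   -> (array([86400000000000, 1000000000], dtype='timedelta64[ns]'), None)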
def ints_to_td64ns(data, unit="ns"):
"""
Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating
the integers as multiples of the given timedelta unit.
Parameters
----------
data : numpy.ndarray with integer-dtype
unit : str, default "ns"
The timedelta unit to treat integers as multiples of.
Returns
-------
numpy.ndarray : timedelta64[ns] array converted from data
bool : whether a copy was made
"""
copy_made = False
unit = unit if unit is not None else "ns"
if data.dtype != np.int64:
# converting to int64 makes a copy, so we can avoid
# re-copying later
data = data.astype(np.int64)
copy_made = True
if unit != "ns":
dtype_str = f"timedelta64[{unit}]"
data = data.view(dtype_str)
# TODO: watch out for overflows when converting from lower-resolution
data = data.astype("timedelta64[ns]")
# the astype conversion makes a copy, so we can avoid re-copying later
copy_made = True
else:
data = data.view("timedelta64[ns]")
return data, copy_made
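# Illustrative behaviour (hypothetical inputs):
#   ints_to_td64ns(np.array([1, 2], dtype=np.int64), unit="s")
#   -> (array([1000000000, 2000000000], dtype='timedelta64[ns]'), True)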
def objects_to_td64ns(data, unit="ns", errors="raise"):
"""
Convert a object-dtyped or string-dtyped array into an
timedelta64[ns]-dtyped array.
Parameters
----------
data : ndarray or Index
unit : str, default "ns"
The timedelta unit to treat integers as multiples of.
errors : {"raise", "coerce", "ignore"}, default "raise"
How to handle elements that cannot be converted to timedelta64[ns].
See ``pandas.to_timedelta`` for details.
Returns
-------
numpy.ndarray : timedelta64[ns] array converted from data
Raises
------
ValueError : Data cannot be converted to timedelta64[ns].
Notes
-----
Unlike `pandas.to_timedelta`, if setting `errors=ignore` will not cause
errors to be ignored; they are caught and subsequently ignored at a
higher level.
"""
# coerce Index to np.ndarray, converting string-dtype if necessary
values = np.array(data, dtype=np.object_, copy=False)
result = array_to_timedelta64(values, unit=unit, errors=errors)
return result.view("timedelta64[ns]")
def _validate_td64_dtype(dtype):
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, np.dtype("timedelta64")):
# no precision disallowed GH#24806
msg = (
"Passing in 'timedelta' dtype with no precision is not allowed. "
"Please pass in 'timedelta64[ns]' instead."
)
raise ValueError(msg)
if not is_dtype_equal(dtype, TD64NS_DTYPE):
raise ValueError(f"dtype {dtype} cannot be converted to timedelta64[ns]")
return dtype
| 33.166034
| 87
| 0.585605
|
5c66a0c3a2af22062989e68159d19b15242cd23c
| 3,651
|
py
|
Python
|
airflow/providers/amazon/aws/sensors/cloud_formation.py
|
tinyclues/incubator-airflow
|
3caa539092d3a4196083d1db829fa1ed7d83fa95
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/amazon/aws/sensors/cloud_formation.py
|
tinyclues/incubator-airflow
|
3caa539092d3a4196083d1db829fa1ed7d83fa95
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/amazon/aws/sensors/cloud_formation.py
|
tinyclues/incubator-airflow
|
3caa539092d3a4196083d1db829fa1ed7d83fa95
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains sensors for AWS CloudFormation."""
from airflow.providers.amazon.aws.hooks.cloud_formation import AWSCloudFormationHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class CloudFormationCreateStackSensor(BaseSensorOperator):
"""
Waits for a stack to be created successfully on AWS CloudFormation.
:param stack_name: The name of the stack to wait for (templated)
:type stack_name: str
:param aws_conn_id: ID of the Airflow connection where credentials and extra configuration are
stored
:type aws_conn_id: str
:param poke_interval: Time in seconds that the job should wait between each try
:type poke_interval: int
"""
template_fields = ['stack_name']
ui_color = '#C5CAE9'
@apply_defaults
def __init__(self, *, stack_name, aws_conn_id='aws_default', region_name=None, **kwargs):
super().__init__(**kwargs)
self.stack_name = stack_name
self.hook = AWSCloudFormationHook(aws_conn_id=aws_conn_id, region_name=region_name)
def poke(self, context):
stack_status = self.hook.get_stack_status(self.stack_name)
if stack_status == 'CREATE_COMPLETE':
return True
if stack_status in ('CREATE_IN_PROGRESS', None):
return False
raise ValueError(f'Stack {self.stack_name} in bad state: {stack_status}')
class CloudFormationDeleteStackSensor(BaseSensorOperator):
"""
Waits for a stack to be deleted successfully on AWS CloudFormation.
:param stack_name: The name of the stack to wait for (templated)
:type stack_name: str
:param aws_conn_id: ID of the Airflow connection where credentials and extra configuration are
stored
:type aws_conn_id: str
:param poke_interval: Time in seconds that the job should wait between each try
:type poke_interval: int
"""
template_fields = ['stack_name']
ui_color = '#C5CAE9'
@apply_defaults
def __init__(self, *, stack_name, aws_conn_id='aws_default', region_name=None, **kwargs):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.stack_name = stack_name
self.hook = None
def poke(self, context):
stack_status = self.get_hook().get_stack_status(self.stack_name)
if stack_status in ('DELETE_COMPLETE', None):
return True
if stack_status == 'DELETE_IN_PROGRESS':
return False
raise ValueError(f'Stack {self.stack_name} in bad state: {stack_status}')
def get_hook(self):
"""Create and return an AWSCloudFormationHook"""
if not self.hook:
self.hook = AWSCloudFormationHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
return self.hook
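# --- Editor's hedged usage sketch (not part of the provider module) ---
# Shows how the two sensors above are typically wired into a DAG; the DAG id,
# stack name and start date below are hypothetical placeholders.
def _example_cloudformation_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG("example_cloudformation_sensors", schedule_interval=None,
             start_date=datetime(2021, 1, 1)) as dag:
        wait_create = CloudFormationCreateStackSensor(
            task_id="wait_for_stack_create",
            stack_name="my-stack",
            poke_interval=30,
        )
        wait_delete = CloudFormationDeleteStackSensor(
            task_id="wait_for_stack_delete",
            stack_name="my-stack",
            poke_interval=30,
        )
        wait_create >> wait_delete
    return dag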
| 39.684783
| 105
| 0.718707
|
9577644620319acdc2d25646033ea877f95daf52
| 64,200
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_network_interfaces_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_network_interfaces_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_network_interfaces_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations:
"""NetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
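    # --- Editor's hedged usage sketch (not part of the generated code) ---
    # Typical application-side call for the begin_delete LRO above, assuming "client"
    # is an already-constructed azure.mgmt.network.aio.NetworkManagementClient and the
    # resource names are hypothetical:
    #
    #     poller = await client.network_interfaces.begin_delete("my-rg", "my-nic")
    #     await poller.result()   # completes once the network interface is deleted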
async def get(
self,
resource_group_name: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NetworkInterface":
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
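    # --- Editor's hedged usage sketch (not part of the generated code) ---
    # Plain (non-LRO) call of the get operation above; "client" and the resource
    # names are the same hypothetical assumptions as in the sketch for begin_delete:
    #
    #     nic = await client.network_interfaces.get("my-rg", "my-nic")
    #     print(nic.name, nic.location, nic.provisioning_state)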
async def _create_or_update_initial(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.NetworkInterface",
**kwargs: Any
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkInterface')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.NetworkInterface",
**kwargs: Any
) -> AsyncLROPoller["_models.NetworkInterface"]:
"""Creates or updates a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to the create or update network interface operation.
:type parameters: ~azure.mgmt.network.v2019_06_01.models.NetworkInterface
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
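    # --- Editor's hedged usage sketch (not part of the generated code) ---
    # Creating or updating a NIC with the LRO above; the subnet id and the ip
    # configuration name are hypothetical, the model classes come from the
    # v2019_06_01 models package referenced at the top of this file:
    #
    #     params = _models.NetworkInterface(
    #         location="westus",
    #         ip_configurations=[_models.NetworkInterfaceIPConfiguration(
    #             name="ipconfig1", subnet=_models.Subnet(id=subnet_id))],
    #     )
    #     poller = await client.network_interfaces.begin_create_or_update("my-rg", "my-nic", params)
    #     nic = await poller.result()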
async def _update_tags_initial(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.NetworkInterface"]:
"""Updates a network interface tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to update network interface tags.
:type parameters: ~azure.mgmt.network.v2019_06_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
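    # --- Editor's hedged usage sketch (not part of the generated code) ---
    # Patching only the tags of a NIC with the LRO above; the tag values are hypothetical:
    #
    #     poller = await client.network_interfaces.begin_update_tags(
    #         "my-rg", "my-nic", _models.TagsObject(tags={"env": "dev"}))
    #     nic = await poller.result()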
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
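    # --- Editor's hedged usage sketch (not part of the generated code) ---
    # list_all returns an AsyncItemPaged immediately (it is not awaited); only the
    # iteration itself is asynchronous:
    #
    #     async for nic in client.network_interfaces.list_all():
    #         print(nic.id)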
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
async def _get_effective_route_table_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> Optional["_models.EffectiveRouteListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveRouteListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self._get_effective_route_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
async def begin_get_effective_route_table(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.EffectiveRouteListResult"]:
"""Gets all route tables applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EffectiveRouteListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.EffectiveRouteListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.EffectiveRouteListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_effective_route_table_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
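    # --- Editor's hedged usage sketch (not part of the generated code) ---
    # Retrieving the effective routes of a NIC via the LRO above; the attribute names
    # are assumed to follow the EffectiveRouteListResult/EffectiveRoute models named
    # in the docstring:
    #
    #     poller = await client.network_interfaces.begin_get_effective_route_table("my-rg", "my-nic")
    #     routes = (await poller.result()).value
    #     for route in routes:
    #         print(route.source, route.address_prefix, route.next_hop_type)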
async def _list_effective_network_security_groups_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> Optional["_models.EffectiveNetworkSecurityGroupListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveNetworkSecurityGroupListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self._list_effective_network_security_groups_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
async def begin_list_effective_network_security_groups(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.EffectiveNetworkSecurityGroupListResult"]:
"""Gets all network security groups applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.EffectiveNetworkSecurityGroupListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.EffectiveNetworkSecurityGroupListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_effective_network_security_groups_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
def list_virtual_machine_scale_set_vm_network_interfaces(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets information about all network interfaces in a virtual machine in a virtual machine scale
set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'} # type: ignore
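    # --- Editor's hedged usage sketch (not part of the generated code) ---
    # Listing the NICs of one VM instance in a scale set with the pager above; the
    # scale set name and the instance index ("0") are hypothetical:
    #
    #     async for nic in client.network_interfaces.list_virtual_machine_scale_set_vm_network_interfaces(
    #             "my-rg", "my-vmss", "0"):
    #         print(nic.name)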
def list_virtual_machine_scale_set_network_interfaces(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'} # type: ignore
async def get_virtual_machine_scale_set_network_interface(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NetworkInterface":
"""Get the specified network interface in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_network_interface.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'} # type: ignore
def list_virtual_machine_scale_set_ip_configurations(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_ip_configurations.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_ip_configurations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
async def get_virtual_machine_scale_set_ip_configuration(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NetworkInterfaceIPConfiguration":
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration.
:type ip_configuration_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_ip_configuration.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_ip_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
| 52.622951
| 354
| 0.680374
|
24bd2daec439cf4b8498c7aa025922f280a9771a | 2,570 | py | Python | generate.py | elleryqueenhomels/fast_neural_style_transfer | b0a7668f091e248d3bee27b3d7f213794f82ec86 | ["MIT"] | 6 | 2017-12-07T06:31:35.000Z | 2018-11-22T23:51:34.000Z | generate.py | elleryqueenhomels/fast_neural_style_transfer | b0a7668f091e248d3bee27b3d7f213794f82ec86 | ["MIT"] | 1 | 2017-12-14T06:31:09.000Z | 2017-12-14T06:31:09.000Z | generate.py | elleryqueenhomels/fast_neural_style_transfer | b0a7668f091e248d3bee27b3d7f213794f82ec86 | ["MIT"] | 2 | 2018-01-15T23:05:53.000Z | 2022-03-01T21:07:56.000Z |
# Use a trained Image Transform Net to generate
# a style transferred image with a specific style

import tensorflow as tf
import image_transform_net as itn

from utils import get_images, save_images


def generate(contents_path, model_path, is_same_size=False, resize_height=None, resize_width=None, save_path=None, prefix='stylized-', suffix=None):
    if isinstance(contents_path, str):
        contents_path = [contents_path]

    if is_same_size or (resize_height is not None and resize_width is not None):
        outputs = _handler1(contents_path, model_path, resize_height=resize_height, resize_width=resize_width, save_path=save_path, prefix=prefix, suffix=suffix)
        return list(outputs)
    else:
        outputs = _handler2(contents_path, model_path, save_path=save_path, prefix=prefix, suffix=suffix)
        return outputs


def _handler1(content_path, model_path, resize_height=None, resize_width=None, save_path=None, prefix=None, suffix=None):
    # get the actual image data, output shape: (num_images, height, width, color_channels)
    content_target = get_images(content_path, resize_height, resize_width)

    with tf.Graph().as_default(), tf.Session() as sess:
        # build the dataflow graph
        content_image = tf.placeholder(tf.float32, shape=content_target.shape, name='content_image')

        output_image = itn.transform(content_image)

        # restore the trained model and run the style transferring
        saver = tf.train.Saver()
        saver.restore(sess, model_path)

        output = sess.run(output_image, feed_dict={content_image: content_target})

    if save_path is not None:
        save_images(content_path, output, save_path, prefix=prefix, suffix=suffix)

    return output


def _handler2(content_path, model_path, save_path=None, prefix=None, suffix=None):
    with tf.Graph().as_default(), tf.Session() as sess:
        # build the dataflow graph
        content_image = tf.placeholder(tf.float32, shape=(1, None, None, 3), name='content_image')
        output_image = itn.transform(content_image)

        # restore the trained model and run the style transferring
        saver = tf.train.Saver()
        saver.restore(sess, model_path)

        output = []
        for content in content_path:
            content_target = get_images(content)
            result = sess.run(output_image, feed_dict={content_image: content_target})
            output.append(result[0])

    if save_path is not None:
        save_images(content_path, output, save_path, prefix=prefix, suffix=suffix)

    return output
| 38.939394 | 161 | 0.717899 |
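A hypothetical usage sketch for the generate() helper above; the checkpoint and image paths are placeholders rather than files shipped with the repository:

from generate import generate

# Stylize two photos with a trained Image Transform Net checkpoint (placeholder paths).
stylized = generate(
    ['examples/photo1.jpg', 'examples/photo2.jpg'],
    'models/wave.ckpt-done',
    save_path='outputs',
    prefix='stylized-',
)
print('generated %d stylized images' % len(stylized))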
11e68501dbd93ebde8442d2db4dff612c0ee3497 | 819 | py | Python | botstory/integrations/mocktracker/tracker_test.py | botstory/bot-story | 9c5b2fc7f7a14dbd467d70f60d5ba855ef89dac3 | ["MIT"] | 5 | 2017-01-14T13:42:13.000Z | 2021-07-27T21:52:04.000Z | botstory/integrations/mocktracker/tracker_test.py | botstory/bot-story | 9c5b2fc7f7a14dbd467d70f60d5ba855ef89dac3 | ["MIT"] | 235 | 2016-11-07T23:33:28.000Z | 2018-03-13T11:27:33.000Z | botstory/integrations/mocktracker/tracker_test.py | hyzhak/bot-story | 9c5b2fc7f7a14dbd467d70f60d5ba855ef89dac3 | ["MIT"] | 5 | 2017-01-14T13:42:14.000Z | 2020-11-06T08:33:20.000Z |
import pytest

from . import tracker
from .. import mocktracker
from ... import di, Story

story = None


def teardown_function(function):
    story and story.clear()


def test_event():
    t = tracker.MockTracker()
    t.event()


def test_new_message():
    t = tracker.MockTracker()
    t.new_message()


def test_new_user():
    t = tracker.MockTracker()
    t.new_user()


def test_story():
    t = tracker.MockTracker()
    t.story()


def test_get_mock_tracker_as_dep():
    global story
    story = Story()
    story.use(mocktracker.MockTracker())

    with di.child_scope():
        @di.desc()
        class OneClass:
            @di.inject()
            def deps(self, tracker):
                self.tracker = tracker

        assert isinstance(di.injector.get('one_class').tracker, mocktracker.MockTracker)
| 17.425532 | 88 | 0.636142 |
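These tests are written for pytest; a minimal sketch of invoking just this module programmatically (assuming pytest is installed and the repository-relative path shown in the row above):

import pytest

# Run only this test module, verbosely; an exit code of 0 means every test passed.
exit_code = pytest.main(['botstory/integrations/mocktracker/tracker_test.py', '-v'])
print('pytest exit code:', exit_code)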