Dataset columns (one row per source file):
- hexsha: string (length 40)
- size: int64 (4 to 1.02M)
- ext: string (8 classes)
- lang: string (1 value)
- max_stars_repo_path: string (length 4 to 209)
- max_stars_repo_name: string (length 5 to 121)
- max_stars_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses: list (1 to 10 entries)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 4 to 209)
- max_issues_repo_name: string (length 5 to 121)
- max_issues_repo_head_hexsha: string (length 40)
- max_issues_repo_licenses: list (1 to 10 entries)
- max_issues_count: int64 (1 to 67k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 4 to 209)
- max_forks_repo_name: string (length 5 to 121)
- max_forks_repo_head_hexsha: string (length 40)
- max_forks_repo_licenses: list (1 to 10 entries)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 4 to 1.02M)
- avg_line_length: float64 (1.07 to 66.1k)
- max_line_length: int64 (4 to 266k)
- alphanum_fraction: float64 (0.01 to 1)

hexsha: 424c4cb5ef573a261b8c06538da73fe2cb6674e4 | size: 2,398 | ext: py | lang: Python
max_stars: path=var/spack/repos/builtin/packages/py-pyfr/package.py | repo=loumalouomega/spack | head=b5fa1b2a78d94e6d057534e25d4d8ab9b31eda1e | licenses=["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | count=null | min_datetime=null | max_datetime=null
max_issues: path=var/spack/repos/builtin/packages/py-pyfr/package.py | repo=loumalouomega/spack | head=b5fa1b2a78d94e6d057534e25d4d8ab9b31eda1e | licenses=["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | count=null | min_datetime=null | max_datetime=null
max_forks: path=var/spack/repos/builtin/packages/py-pyfr/package.py | repo=loumalouomega/spack | head=b5fa1b2a78d94e6d057534e25d4d8ab9b31eda1e | licenses=["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | count=null | min_datetime=null | max_datetime=null
content:
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyfr(PythonPackage):
"""PyFR is an open-source Python based framework for solving
advection-diffusion type problems on streaming architectures
using the Flux Reconstruction approach of Huynh."""
homepage = "http://www.pyfr.org/"
pypi = "pyfr/pyfr-1.13.0.tar.gz"
git = "https://github.com/PyFR/PyFR/"
maintainers = ["MichaelLaufer"]
# git branches
version("develop", branch="develop")
version("master", branch="master")
# pypi releases
version(
"1.14.0",
sha256="ebf40ce0896cce9ac802e03fd9430b5be30ea837c31224531a6d5fd68f820766",
)
version(
"1.13.0",
sha256="ac6ecec738d4e23799ab8c50dea9bdbd7d37bc971bd33f22720c5a230b8e7b2f",
)
variant("metis", default=True, description="Metis for mesh partitioning")
variant("scotch", default=False, description="Scotch for mesh partitioning")
variant("cuda", default=False, description="CUDA backend support")
variant("hip", default=False, description="HIP backend support")
variant("libxsmm", default=True, description="LIBXSMM for OpenMP backend")
variant("scipy", default=True, description="Scipy acceleration for point sampling")
# Required dependencies
depends_on("python@3.9:", type=("build", "run"))
depends_on("py-setuptools", type="build")
depends_on("py-gimmik@2.2:2", type=('build', 'run'))
depends_on("py-gimmik@2.3:2", when='@1.14.0:', type=('build', 'run'))
depends_on("py-h5py@2.10:", type=('build', 'run'))
depends_on("py-mako@1.0.0:", type=('build', 'run'))
depends_on("py-mpi4py@3.1.0:", type=('build', 'run'))
depends_on("py-numpy@1.20:+blas", type=('build', 'run'))
depends_on("py-platformdirs@2.2.0:", type=('build', 'run'))
depends_on("py-pytools@2016.2.1:", type=('build', 'run'))
# Optional dependencies
depends_on("py-scipy", when="+scipy", type=('build', 'run'))
depends_on("scotch@6.0:", when="+scotch", type=('run'))
depends_on("cuda@8.0:", when="+cuda", type=('run'))
depends_on("rocblas@4.5.0:", when="+hip", type=('run'))
depends_on("libxsmm@1.18:+shared blas=0 +large_jit_buffer", when="+libxsmm", type=('run'))
avg_line_length: 40.644068 | max_line_length: 94 | alphanum_fraction: 0.665972

hexsha: 7e421f5fcd6fa350e551997089fa97cff36589dc | size: 129 | ext: py | lang: Python
max_stars: path=kaa/filetype/python/__init__.py | repo=atsuoishimoto/kaaedit | head=5233fdb70a04783c6513a5ec339452450e62e995 | licenses=["Unlicense"] | count=1 | min_datetime=2015-11-04T13:37:08.000Z | max_datetime=2015-11-04T13:37:08.000Z
max_issues: path=kaa/filetype/python/__init__.py | repo=atsuoishimoto/kaaedit | head=5233fdb70a04783c6513a5ec339452450e62e995 | licenses=["Unlicense"] | count=null | min_datetime=null | max_datetime=null
max_forks: path=kaa/filetype/python/__init__.py | repo=atsuoishimoto/kaaedit | head=5233fdb70a04783c6513a5ec339452450e62e995 | licenses=["Unlicense"] | count=null | min_datetime=null | max_datetime=null
content:
FILE_EXT = {'.py', '.pyw'}
def get_modetype():
from kaa.filetype.python.pythonmode import PythonMode
return PythonMode
avg_line_length: 18.428571 | max_line_length: 57 | alphanum_fraction: 0.705426

hexsha: 1f406f471c0bd4e577fd6dcff8c7911aefb2b9c7 | size: 15,237 | ext: py | lang: Python
max_stars: path=orttraining/orttraining/python/training/postprocess.py | repo=dennyac/onnxruntime | head=d5175795d2b7f2db18b0390f394a49238f814668 | licenses=["MIT"] | count=6,036 | min_datetime=2019-05-07T06:03:57.000Z | max_datetime=2022-03-31T17:59:54.000Z
max_issues: path=orttraining/orttraining/python/training/postprocess.py | repo=dennyac/onnxruntime | head=d5175795d2b7f2db18b0390f394a49238f814668 | licenses=["MIT"] | count=5,730 | min_datetime=2019-05-06T23:04:55.000Z | max_datetime=2022-03-31T23:55:56.000Z
max_forks: path=orttraining/orttraining/python/training/postprocess.py | repo=dennyac/onnxruntime | head=d5175795d2b7f2db18b0390f394a49238f814668 | licenses=["MIT"] | count=1,566 | min_datetime=2019-05-07T01:30:07.000Z | max_datetime=2022-03-31T17:06:50.000Z
content:
import sys
import os.path
from onnx import *
import onnx
import numpy as np
import struct
from onnx import helper
from onnx import numpy_helper
def run_postprocess(model):
# this post pass is not required for pytorch >= 1.5
# where add_node_name in torch.onnx.export is default to True
model = add_name(model)
# this post pass is not required for pytorch > 1.6
model = fuse_softmaxNLL_to_softmaxCE(model)
model = fix_expand_shape(model)
model = fix_expand_shape_pt_1_5(model)
return model
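# Example usage of run_postprocess (hypothetical file names):
#   model = onnx.load("model_exported.onnx")
#   model = run_postprocess(model)
#   onnx.save(model, "model_postprocessed.onnx")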
def find_input_node(model, arg):
result = []
for node in model.graph.node:
for output in node.output:
if output == arg:
result.append(node)
return result[0] if len(result)== 1 else None
def find_output_node(model, arg):
result = []
for node in model.graph.node:
for input in node.input:
if input == arg:
result.append(node)
return result[0] if len(result) == 1 else result
def add_name(model):
i = 0
for node in model.graph.node:
node.name = '%s_%d' %(node.op_type, i)
i += 1
return model
# Expand Shape PostProcess
def fix_expand_shape(model):
expand_nodes = [n for n in model.graph.node if n.op_type == 'Expand']
model_inputs_names = [i.name for i in model.graph.input]
for expand_node in expand_nodes:
shape = find_input_node(model, expand_node.input[1])
if shape.op_type == 'Shape':
# an expand subgraph
# Input Input2
# | |
# | Shape
# | |
# |__ __|
# | |
# Expand
# |
# output
#
# Only if Input2 is one of the model inputs, assign Input2's shape to output of expand.
shape_input_name = shape.input[0]
if shape_input_name in model_inputs_names:
index = model_inputs_names.index(shape_input_name)
expand_out = model.graph.value_info.add()
expand_out.name = expand_node.output[0]
expand_out.type.CopyFrom(model.graph.input[index].type)
return model
def fix_expand_shape_pt_1_5(model):
# expand subgraph
# Constant
# +
# ConstantOfShape
# | + |
# | + |
# (Reshape subgraph) Mul |
# |___ _________| |
# + | | |
# + Equal |
# +++++|++++++++++++++|++
# |____________ | +
# | | +
# (subgraph) Where
# | |
# |_____ ___________|
# | |
# Expand
# |
# output
#
# where the Reshape subgraph is
#
# Input
# | |
# | |___________________
# | |
# Shape Constant Shape Constant
# | ______| | ______|
# | | | |
# Gather Gather
# | |
# Unsqueeze Unsqueeze
# | |
# | ..Number of dims.. |
# | _________________|
# |...|
# Concat Constant
# | |
# |______ __________________|
# | |
# Reshape
# |
# output
#
# This pass will copy Input's shape to the output of Expand.
expand_nodes = [n for n in model.graph.node if n.op_type == 'Expand']
model_inputs_names = [i.name for i in model.graph.input]
for expand_node in expand_nodes:
n_where = find_input_node(model, expand_node.input[1])
if n_where.op_type != 'Where':
continue
n_equal = find_input_node(model, n_where.input[0])
n_cos = find_input_node(model, n_where.input[1])
n_reshape = find_input_node(model, n_where.input[2])
if n_equal.op_type != 'Equal' or n_cos.op_type != 'ConstantOfShape' or n_reshape.op_type != 'Reshape':
continue
n_reshape_e = find_input_node(model, n_equal.input[0])
n_mul = find_input_node(model, n_equal.input[1])
if n_reshape_e != n_reshape or n_mul.op_type != 'Mul':
continue
n_cos_m = find_input_node(model, n_mul.input[0])
n_constant = find_input_node(model, n_mul.input[1])
if n_cos_m != n_cos or n_constant.op_type != 'Constant':
continue
n_concat = find_input_node(model, n_reshape.input[0])
n_constant_r = find_input_node(model, n_reshape.input[1])
if n_concat.op_type != 'Concat' or n_constant_r.op_type != 'Constant':
continue
n_input_candidates = []
for concat_in in n_concat.input:
n_unsqueeze = find_input_node(model, concat_in)
if n_unsqueeze.op_type != 'Unsqueeze':
break
n_gather = find_input_node(model, n_unsqueeze.input[0])
if n_gather.op_type != 'Gather':
break
n_shape = find_input_node(model, n_gather.input[0])
n_constant_g = find_input_node(model, n_gather.input[1])
if n_shape.op_type != 'Shape' or n_constant_g.op_type != 'Constant':
break
n_input = n_shape.input[0]
if not n_input in model_inputs_names:
break
n_input_candidates.append(n_input)
if not n_input_candidates or not all(elem == n_input_candidates[0] for elem in n_input_candidates):
continue
index = model_inputs_names.index(n_input_candidates[0])
expand_out = model.graph.value_info.add()
expand_out.name = expand_node.output[0]
expand_out.type.CopyFrom(model.graph.input[index].type)
return model
# LayerNorm PostProcess
def find_nodes(graph, op_type):
nodes = []
for node in graph.node:
if node.op_type == op_type:
nodes.append(node)
return nodes
def is_type(node, op_type):
if node is None or isinstance(node, list):
return False
return node.op_type == op_type
def add_const(model, name, output, t_value = None, f_value = None):
const_node = model.graph.node.add()
const_node.op_type = 'Constant'
const_node.name = name
const_node.output.extend([output])
attr = const_node.attribute.add()
attr.name = 'value'
if t_value is not None:
attr.type = 4
attr.t.CopyFrom(t_value)
else:
attr.type = 1
attr.f = f_value
return const_node
def layer_norm_transform(model):
# DEPRECATED: This pass is no longer needed as the transform is handled at the backend.
# Converting below subgraph
#
# input
# |
# ReduceMean
# |
# Sub Constant
# _||_____ |
# | | |
# | | |
# | (optional) Cast (optional) Cast
# | | |
# | | ____________________|
# | | |
# | Pow
# | |
# | ReduceMean
# | |
# | Add
# | |
# |__ __Sqrt
# | |
# Div (weight)
# | |
# | _____|
# | |
# Mul (bias)
# | |
# | _____|
# | |
# Add
# |
# output
#
# to the below subgraph
#
# input (weight) (bias)
# | | |
# | _______| |
# | | ________________|
# | | |
# LayerNormalization
# |
# output
graph = model.graph
nodes_ReduceMean = find_nodes(graph, "ReduceMean")
id = 0
layer_norm_nodes = []
remove_nodes = []
for reduce_mean in nodes_ReduceMean:
# check that reduce_mean output is Sub
sub = find_output_node(model, reduce_mean.output[0])
if not is_type(sub, "Sub"):
continue
# check that sub output[0] is Div and output[1] is Pow
pow, div = find_output_node(model, sub.output[0])
if is_type(pow, "Cast"):
# During an update in PyTorch, Cast nodes are inserted between Sub and Pow.
remove_nodes += [pow]
pow = find_output_node(model, pow.output[0])
if not is_type(pow, "Pow"):
continue
cast_pow = find_input_node(model, pow.input[1])
if not is_type(cast_pow, "Cast"):
continue
remove_nodes += [cast_pow]
if not is_type(div, "Div") or not is_type(pow, "Pow"):
continue
# check that pow output is ReduceMean
reduce_mean2 = find_output_node(model, pow.output[0])
if not is_type(reduce_mean2, "ReduceMean"):
continue
# check that reduce_mean2 output is Add
add = find_output_node(model, reduce_mean2.output[0])
if not is_type(add, "Add"):
continue
# check that add output is Sqrt
sqrt = find_output_node(model, add.output[0])
if not is_type(sqrt, "Sqrt"):
continue
# check that sqrt output is div
if div != find_output_node(model, sqrt.output[0]):
continue
# check if div output is Mul
optional_mul = find_output_node(model, div.output[0])
if not is_type(optional_mul, "Mul"):
optional_mul = None
continue # default bias and weight not supported
# check if mul output is Add
if optional_mul is not None:
optional_add = find_output_node(model, optional_mul.output[0])
else:
optional_add = find_output_node(model, div.output[0])
if not is_type(optional_add, "Add"):
optional_add = None
continue # default bias and weight not supported
# add nodes to remove_nodes
remove_nodes.extend([reduce_mean, sub, div, pow, reduce_mean2, add, sqrt])
# create LayerNorm node
layer_norm_input = []
layer_norm_output = []
layer_norm_input.append(reduce_mean.input[0])
if optional_mul is not None:
remove_nodes.append(optional_mul)
weight = optional_mul.input[1]
layer_norm_input.append(weight)
if optional_add is not None:
remove_nodes.append(optional_add)
bias = optional_add.input[1]
layer_norm_input.append(bias)
if optional_add is not None:
layer_norm_output.append(optional_add.output[0])
elif optional_mul is not None:
layer_norm_output.append(optional_mul.output[0])
else:
layer_norm_output.append(div.output[0])
layer_norm_output.append('saved_mean_' + str(id))
layer_norm_output.append('saved_inv_std_var_' + str(id))
epsilon_node = find_input_node(model, add.input[1])
epsilon = epsilon_node.attribute[0].t.raw_data
epsilon = struct.unpack('f', epsilon)[0]
layer_norm = helper.make_node("LayerNormalization",
layer_norm_input,
layer_norm_output,
"LayerNormalization_" + str(id),
None,
axis = reduce_mean.attribute[0].ints[0],
epsilon = epsilon)
layer_norm_nodes.append(layer_norm)
id += 1
# remove orphan constant nodes
for constant in graph.node:
if constant.op_type == "Constant" and constant not in remove_nodes:
is_orphan = True
for out_name in constant.output:
out = find_output_node(model, out_name)
if out not in remove_nodes:
is_orphan = False
if is_orphan:
remove_nodes.append(constant)
all_nodes = []
for node in graph.node:
if node not in remove_nodes:
all_nodes.append(node)
for node in layer_norm_nodes:
all_nodes.append(node)
graph.ClearField("node")
graph.node.extend(all_nodes)
return model
# Fuse SoftmaxCrossEntropy
def fuse_softmaxNLL_to_softmaxCE(onnx_model):
# Converting below subgraph
#
# (subgraph)
# |
# LogSoftmax (target) (optional weight)
# | | |
# nll_loss/NegativeLogLikelihoodLoss
# |
# output
#
# to the following
#
# (subgraph) (target) (optional weight)
# | | _____|
# | | |
# SparseSoftmaxCrossEntropy
# |
# output
nll_count = 0
while True:
nll_count = nll_count + 1
nll_loss_node = None
nll_loss_node_index = 0
for nll_loss_node_index, node in enumerate(onnx_model.graph.node):
if node.op_type == "nll_loss" or node.op_type == "NegativeLogLikelihoodLoss":
nll_loss_node = node
break
if nll_loss_node is None:
break
softmax_node = None
softmax_node_index = 0
label_input_name = None
weight_input_name = None
for softmax_node_index, node in enumerate(onnx_model.graph.node):
if node.op_type == "LogSoftmax":
# has to be connected to nll_loss
if len(nll_loss_node.input) > 2:
weight_input_name = nll_loss_node.input[2]
if node.output[0] == nll_loss_node.input[0]:
softmax_node = node
label_input_name = nll_loss_node.input[1]
break
elif node.output[0] == nll_loss_node.input[1]:
softmax_node = node
label_input_name = nll_loss_node.input[0]
break
else:
if softmax_node is not None:
break
if softmax_node is None:
break
# delete nll_loss and LogSoftmax nodes in order
if nll_loss_node_index < softmax_node_index:
del onnx_model.graph.node[softmax_node_index]
del onnx_model.graph.node[nll_loss_node_index]
else:
del onnx_model.graph.node[nll_loss_node_index]
del onnx_model.graph.node[softmax_node_index]
probability_output_name = softmax_node.output[0]
node = onnx_model.graph.node.add()
inputs = [softmax_node.input[0], label_input_name, weight_input_name] if weight_input_name else [softmax_node.input[0], label_input_name]
node.CopyFrom(onnx.helper.make_node("SparseSoftmaxCrossEntropy", inputs,
[nll_loss_node.output[0], probability_output_name],
"nll_loss_node_" + str(nll_count)))
return onnx_model
avg_line_length: 33.341357 | max_line_length: 145 | alphanum_fraction: 0.537442

hexsha: 2b6411d8cadcdd18cd8d915bf430709cc768ef5b | size: 5,738 | ext: py | lang: Python
max_stars: path=quickDDM/processingCore.py | repo=CSymes/quickDDM | head=4c55cbc62f36020ec90c69cd9440b12bc6b4a6a5 | licenses=["MIT"] | count=4 | min_datetime=2019-03-20T22:10:57.000Z | max_datetime=2021-06-07T21:15:53.000Z
max_issues: path=quickDDM/processingCore.py | repo=CSymes/quickDDM | head=4c55cbc62f36020ec90c69cd9440b12bc6b4a6a5 | licenses=["MIT"] | count=3 | min_datetime=2021-03-19T00:54:02.000Z | max_datetime=2021-09-08T01:01:13.000Z
max_forks: path=quickDDM/processingCore.py | repo=CSymes/quickDDM | head=4c55cbc62f36020ec90c69cd9440b12bc6b4a6a5 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#processingCore.py
"""
Created on Tue Mar 26 14:26:28 2019
This is the most basic way of running the process. It will call each part
of the process in turn, chaining them together.
@author: Lionel
"""
import sys
import numpy as np
import quickDDM.readVideo as rV
import quickDDM.twoDFourier as tDF
import quickDDM.calculateQCurves as cQC
import quickDDM.calculateCorrelation as cC
from collections import deque
"""
Defaults to 1GB
Does not currently use spacings, simplest possible version
Based on the process outlined in figure 2 in the technical note
"""
def sequentialChunkerMain(videoPath, spacings, outputPath = None, RAMGB = 1, progress=None, abortFlag=None):
if progress is not None:
progress.setText('Reading Video from Disk')
progress.cycle()
videoInput = rV.readVideo(videoPath)
numFrames = videoInput.shape[0]
correlations = [None] * (numFrames - 1)
#Number of pixels per frame, times 128 for the size of a complex float,
#but halved because the real transform is used
RAMBytes = RAMGB * np.power(2.0,30.0)
complexFrameByteSize = videoInput.shape[1] * videoInput.shape[2] * 128 / 2
#TODO: adjust for the other RAM using variables
#one frame's RAM in reserve for the head
framesPerSlice = int((RAMBytes // complexFrameByteSize) - 1)
#The number of different slice intervals that must be taken
numSpacingSets = int(np.ceil((numFrames -1) / framesPerSlice))
print('numSpacingSets:', numSpacingSets)
print('framesPerSlice:', framesPerSlice)
print('complexFrameByteSize:', complexFrameByteSize)
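#Worked example of the budgeting above (hypothetical frame size): for 512x512
#frames, complexFrameByteSize = 512 * 512 * 128 / 2 = 16,777,216, so the
#default 1GB budget gives framesPerSlice = 2**30 // 16777216 - 1 = 63 and,
#with 100 frames, numSpacingSets = ceil(99 / 63) = 2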
# Used to show progress in the UI
framesProcessed = 0
target = numFrames * (numFrames - 1) / 2 # algorithm complexity
# allow 10% extra time to calculate the q curves
qProgress = target * 0.1 / numSpacingSets # per-slice q-curve allowance
target += qProgress * numSpacingSets
#For each diagonal section
for sliceSpacing in range(0, numSpacingSets):
if progress is not None: progress.setText(f'Working on Slice {sliceSpacing+1}/{numSpacingSets}')
#A double ended queue, more efficient than a list for queue operations
currentSlice = deque()
#The index by which new frames are grabbed for the slice
baseIndex = 0
#Finding the expected shape of the transform results
#trying something new, dropping a couple of samples to match matlab (1 in each dimension)
if (videoInput.shape[2] - 1) % 2 == 0:
transformShape = (videoInput.shape[1] - 1, (videoInput.shape[2] - 1)//2 + 1)
else:
#+1 for the real transform correction, -1 to drop a sample based on MATLAB
transformShape = (videoInput.shape[1] - 1,(videoInput.shape[2]+1-1)//2)
totalDifferencesShape = (framesPerSlice, transformShape[0], transformShape[1])
#Preparing the destination of the frame differences
totalDifferences = np.zeros(totalDifferencesShape)
numDifferences = np.zeros((framesPerSlice,))
#For each head
for headIndex in range((sliceSpacing * framesPerSlice) + 1, numFrames):
#If the queue is full, remove the oldest element
if len(currentSlice) == framesPerSlice:
currentSlice.popleft()
#Get a new value into the slice queue
#Also drops a row and column
currentSlice.append(tDF.realTwoDFourierUnnormalized(videoInput[baseIndex,:-1,:-1]))
baseIndex += 1
#Drops a row and column
head = videoInput[headIndex,:-1,:-1]
head = tDF.realTwoDFourierUnnormalized(head)
#time difference between this frame and the first in the queue
relativeDifference = 0
#iterating backwards through the list, oldest element first
for sliceFrameIndex in range(len(currentSlice) - 1, -1, -1):
# Update progress tracker
if progress is not None:
framesProcessed += 1
progress.setProgress(framesProcessed, target)
difference = head - currentSlice[sliceFrameIndex]
totalDifferences[relativeDifference,:,:] += tDF.castToReal(difference)
numDifferences[relativeDifference] += 1
relativeDifference += 1
if abortFlag: return None
for relativeDifference in range(0,len(currentSlice)):
if progress is not None:
framesProcessed += qProgress / len(currentSlice)
progress.setProgress(framesProcessed, target)
meanDifference = (totalDifferences[relativeDifference,:,:] / numDifferences[relativeDifference])
timeDifference = relativeDifference + sliceSpacing * framesPerSlice
correlations[timeDifference] = cQC.calculateRealQCurves(meanDifference)
if abortFlag: return None
if progress is not None:
progress.cycle()
progress.setText('Calculating Correlation Curves')
correlations = cC.calculateCorrelation(correlations)
frameRate = rV.readFramerate(videoPath)
timeSpacings = np.array(np.arange(1,len(correlations) + 1)) / frameRate
#This is the way to stack arrays in numpy
outputMatrix = np.c_[timeSpacings, correlations]
if abortFlag: return None
if outputPath is not None:
if progress is not None: progress.setText('Saving to Disk')
np.savetxt(outputPath, outputMatrix)
if progress is not None:
progress.setPercentage(100)
progress.setText('Done!')
return outputMatrix
if __name__ == '__main__':
sequentialChunkerMain('..\\tests\\data\\10frames.avi', None)
avg_line_length: 40.408451 | max_line_length: 108 | alphanum_fraction: 0.672011

hexsha: ae5d87c3eef2018dbaf36c08aeef9f756ae2be15 | size: 627 | ext: py | lang: Python
max_stars: path=src/client/python/env.py | repo=jenny0322/food-safety-sample | head=de630e373480c25fdcd5b52f6358468ae2821bd5 | licenses=["MIT"] | count=8 | min_datetime=2019-10-21T19:37:45.000Z | max_datetime=2022-03-07T17:41:53.000Z
max_issues: path=src/client/python/env.py | repo=jenny0322/food-safety-sample | head=de630e373480c25fdcd5b52f6358468ae2821bd5 | licenses=["MIT"] | count=6 | min_datetime=2019-11-14T21:28:47.000Z | max_datetime=2022-01-22T12:36:06.000Z
max_forks: path=src/client/python/env.py | repo=jenny0322/food-safety-sample | head=de630e373480c25fdcd5b52f6358468ae2821bd5 | licenses=["MIT"] | count=5 | min_datetime=2019-11-06T20:04:09.000Z | max_datetime=2021-12-13T16:40:06.000Z
content:
import os
import pprint
import platform
import yaml
def main():
z = add(4, 5)
print("The result of add is %s" %z)
with open("config.yml", 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
for section in cfg:
print(section)
print(cfg['iothub'])
print(cfg['other'])
print(cfg['iothub']['connectionstring'])
env_var = os.environ
print('The operating system version is ' + platform.platform())
print("User Env Variables:")
pprint.pprint(dict(env_var), width = 1)
def add(x, y):
return x + y
if __name__ == "__main__":
main()
avg_line_length: 19.59375 | max_line_length: 68 | alphanum_fraction: 0.575758

hexsha: 9e67023234210f9c4fd37b2068c636424c2ad646 | size: 23,006 | ext: py | lang: Python
max_stars: path=smriprep/workflows/surfaces.py | repo=Zeigar/smriprep | head=bf1086b69f2b672d165f2c0741f2e4bd9a6dcf1d | licenses=["Apache-2.0"] | count=1 | min_datetime=2020-09-18T23:45:51.000Z | max_datetime=2020-09-18T23:45:51.000Z
max_issues: path=smriprep/workflows/surfaces.py | repo=Zeigar/smriprep | head=bf1086b69f2b672d165f2c0741f2e4bd9a6dcf1d | licenses=["Apache-2.0"] | count=null | min_datetime=null | max_datetime=null
max_forks: path=smriprep/workflows/surfaces.py | repo=Zeigar/smriprep | head=bf1086b69f2b672d165f2c0741f2e4bd9a6dcf1d | licenses=["Apache-2.0"] | count=null | min_datetime=null | max_datetime=null
content:
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Surface preprocessing workflows.
**sMRIPrep** uses FreeSurfer to reconstruct surfaces from T1w/T2w
structural images.
"""
from nipype.pipeline import engine as pe
from nipype.interfaces.base import Undefined
from nipype.interfaces import (
io as nio,
utility as niu,
freesurfer as fs,
)
from ..interfaces.freesurfer import ReconAll
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.freesurfer import (
FSDetectInputs,
FSInjectBrainExtracted,
MakeMidthickness,
PatchedLTAConvert as LTAConvert,
PatchedRobustRegister as RobustRegister,
RefineBrainMask,
)
from niworkflows.interfaces.surf import NormalizeSurf
def init_surface_recon_wf(omp_nthreads, hires, name='surface_recon_wf'):
r"""
Reconstruct anatomical surfaces using FreeSurfer's ``recon-all``.
Reconstruction is performed in three phases.
The first phase initializes the subject with T1w and T2w (if available)
structural images and performs basic reconstruction (``autorecon1``) with the
exception of skull-stripping.
For example, a subject with only one session with T1w and T2w images
would be processed by the following command::
$ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-i <bids-root>/sub-<subject_label>/anat/sub-<subject_label>_T1w.nii.gz \
-T2 <bids-root>/sub-<subject_label>/anat/sub-<subject_label>_T2w.nii.gz \
-autorecon1 \
-noskullstrip
The second phase imports an externally computed skull-stripping mask.
This workflow refines the external brainmask using the internal mask
implicit in FreeSurfer's ``aseg.mgz`` segmentation,
to reconcile ANTs' and FreeSurfer's brain masks.
First, the ``aseg.mgz`` mask from FreeSurfer is refined in two
steps, using binary morphological operations:
1. With a binary closing operation the sulci are included
into the mask. This results in a smoother brain mask
that does not exclude deep, wide sulci.
2. Fill any holes (typically, there could be a hole next to
the pineal gland and the corpora quadrigemina if the great
cerebral vein is segmented out).
Second, the brain mask is grown, including pixels that have a high likelihood
of belonging to the GM tissue distribution:
3. Dilate and subtract the brain mask, defining the region to search for candidate
pixels that likely belong to cortical GM.
4. Pixels found in the search region that are labeled as GM by ANTs
(during ``antsBrainExtraction.sh``) are directly added to the new mask.
5. Otherwise, estimate GM tissue parameters locally in patches of ``ww`` size,
and test the likelihood that the pixel belongs to the GM distribution.
This procedure is inspired by mindboggle's solution to the problem:
https://github.com/nipy/mindboggle/blob/7f91faaa7664d820fe12ccc52ebaf21d679795e2/mindboggle/guts/segment.py#L1660
The final phase resumes reconstruction, using the T2w image to assist
in finding the pial surface, if available.
See :py:func:`~smriprep.workflows.surfaces.init_autorecon_resume_wf` for details.
Memory annotations for FreeSurfer are based off `their documentation
<https://surfer.nmr.mgh.harvard.edu/fswiki/SystemRequirements>`_.
They specify an allocation of 4GB per subject. Here we define 5GB
to have a certain margin.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from smriprep.workflows.surfaces import init_surface_recon_wf
wf = init_surface_recon_wf(omp_nthreads=1, hires=True)
Parameters
----------
omp_nthreads : int
Maximum number of threads an individual process may use
hires : bool
Enable sub-millimeter preprocessing in FreeSurfer
Inputs
------
t1w
List of T1-weighted structural images
t2w
List of T2-weighted structural images (only first used)
flair
List of FLAIR images
skullstripped_t1
Skull-stripped T1-weighted image (or mask of image)
ants_segs
Brain tissue segmentation from ANTS ``antsBrainExtraction.sh``
corrected_t1
INU-corrected, merged T1-weighted image
subjects_dir
FreeSurfer SUBJECTS_DIR
subject_id
FreeSurfer subject ID
Outputs
-------
subjects_dir
FreeSurfer SUBJECTS_DIR
subject_id
FreeSurfer subject ID
t1w2fsnative_xfm
LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
fsnative2t1w_xfm
LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w
surfaces
GIFTI surfaces for gray/white matter boundary, pial surface,
midthickness (or graymid) surface, and inflated surfaces
out_brainmask
Refined brainmask, derived from FreeSurfer's ``aseg`` volume
out_aseg
FreeSurfer's aseg segmentation, in native T1w space
out_aparc
FreeSurfer's aparc+aseg segmentation, in native T1w space
See also
--------
* :py:func:`~smriprep.workflows.surfaces.init_autorecon_resume_wf`
* :py:func:`~smriprep.workflows.surfaces.init_gifti_surface_wf`
"""
workflow = Workflow(name=name)
workflow.__desc__ = """\
Brain surfaces were reconstructed using `recon-all` [FreeSurfer {fs_ver},
RRID:SCR_001847, @fs_reconall], and the brain mask estimated
previously was refined with a custom variation of the method to reconcile
ANTs-derived and FreeSurfer-derived segmentations of the cortical
gray-matter of Mindboggle [RRID:SCR_002438, @mindboggle].
""".format(fs_ver=fs.Info().looseversion() or '<ver>')
inputnode = pe.Node(
niu.IdentityInterface(
fields=['t1w', 't2w', 'flair', 'skullstripped_t1', 'corrected_t1', 'ants_segs',
'subjects_dir', 'subject_id']), name='inputnode')
outputnode = pe.Node(
niu.IdentityInterface(
fields=['subjects_dir', 'subject_id', 't1w2fsnative_xfm',
'fsnative2t1w_xfm', 'surfaces', 'out_brainmask',
'out_aseg', 'out_aparc']),
name='outputnode')
recon_config = pe.Node(FSDetectInputs(hires_enabled=hires), name='recon_config')
fov_check = pe.Node(niu.Function(function=_check_cw256), name='fov_check')
autorecon1 = pe.Node(
ReconAll(directive='autorecon1', openmp=omp_nthreads),
name='autorecon1', n_procs=omp_nthreads, mem_gb=5)
autorecon1.interface._can_resume = False
autorecon1.interface._always_run = True
skull_strip_extern = pe.Node(FSInjectBrainExtracted(), name='skull_strip_extern')
fsnative2t1w_xfm = pe.Node(RobustRegister(auto_sens=True, est_int_scale=True),
name='fsnative2t1w_xfm')
t1w2fsnative_xfm = pe.Node(LTAConvert(out_lta=True, invert=True),
name='t1w2fsnative_xfm')
autorecon_resume_wf = init_autorecon_resume_wf(omp_nthreads=omp_nthreads)
gifti_surface_wf = init_gifti_surface_wf()
aseg_to_native_wf = init_segs_to_native_wf()
aparc_to_native_wf = init_segs_to_native_wf(segmentation='aparc_aseg')
refine = pe.Node(RefineBrainMask(), name='refine')
workflow.connect([
# Configuration
(inputnode, recon_config, [('t1w', 't1w_list'),
('t2w', 't2w_list'),
('flair', 'flair_list')]),
# Passing subjects_dir / subject_id enforces serial order
(inputnode, autorecon1, [('subjects_dir', 'subjects_dir'),
('subject_id', 'subject_id')]),
(autorecon1, skull_strip_extern, [('subjects_dir', 'subjects_dir'),
('subject_id', 'subject_id')]),
(skull_strip_extern, autorecon_resume_wf, [('subjects_dir', 'inputnode.subjects_dir'),
('subject_id', 'inputnode.subject_id')]),
(autorecon_resume_wf, gifti_surface_wf, [
('outputnode.subjects_dir', 'inputnode.subjects_dir'),
('outputnode.subject_id', 'inputnode.subject_id')]),
# Reconstruction phases
(inputnode, autorecon1, [('t1w', 'T1_files')]),
(inputnode, fov_check, [('t1w', 'in_files')]),
(fov_check, autorecon1, [('out', 'flags')]),
(recon_config, autorecon1, [('t2w', 'T2_file'),
('flair', 'FLAIR_file'),
('hires', 'hires'),
# First run only (recon-all saves expert options)
('mris_inflate', 'mris_inflate')]),
(inputnode, skull_strip_extern, [('skullstripped_t1', 'in_brain')]),
(recon_config, autorecon_resume_wf, [('use_t2w', 'inputnode.use_T2'),
('use_flair', 'inputnode.use_FLAIR')]),
# Construct transform from FreeSurfer conformed image to sMRIPrep
# reoriented image
(inputnode, fsnative2t1w_xfm, [('t1w', 'target_file')]),
(autorecon1, fsnative2t1w_xfm, [('T1', 'source_file')]),
(fsnative2t1w_xfm, gifti_surface_wf, [
('out_reg_file', 'inputnode.fsnative2t1w_xfm')]),
(fsnative2t1w_xfm, t1w2fsnative_xfm, [('out_reg_file', 'in_lta')]),
# Refine ANTs mask, deriving new mask from FS' aseg
(inputnode, refine, [('corrected_t1', 'in_anat'),
('ants_segs', 'in_ants')]),
(inputnode, aseg_to_native_wf, [('corrected_t1', 'inputnode.in_file')]),
(autorecon_resume_wf, aseg_to_native_wf, [
('outputnode.subjects_dir', 'inputnode.subjects_dir'),
('outputnode.subject_id', 'inputnode.subject_id')]),
(inputnode, aparc_to_native_wf, [('corrected_t1', 'inputnode.in_file')]),
(autorecon_resume_wf, aparc_to_native_wf, [
('outputnode.subjects_dir', 'inputnode.subjects_dir'),
('outputnode.subject_id', 'inputnode.subject_id')]),
(aseg_to_native_wf, refine, [('outputnode.out_file', 'in_aseg')]),
# Output
(autorecon_resume_wf, outputnode, [('outputnode.subjects_dir', 'subjects_dir'),
('outputnode.subject_id', 'subject_id')]),
(gifti_surface_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
(t1w2fsnative_xfm, outputnode, [('out_lta', 't1w2fsnative_xfm')]),
(fsnative2t1w_xfm, outputnode, [('out_reg_file', 'fsnative2t1w_xfm')]),
(refine, outputnode, [('out_file', 'out_brainmask')]),
(aseg_to_native_wf, outputnode, [('outputnode.out_file', 'out_aseg')]),
(aparc_to_native_wf, outputnode, [('outputnode.out_file', 'out_aparc')]),
])
return workflow
def init_autorecon_resume_wf(omp_nthreads, name='autorecon_resume_wf'):
r"""
Resume recon-all execution, assuming the `-autorecon1` stage has been completed.
In order to utilize resources efficiently, this is broken down into seven
sub-stages; after the first stage, the second and third stages may be run
simultaneously, and the fifth and sixth stages may be run simultaneously,
if resources permit; the fourth stage must be run prior to the fifth and
sixth, and the seventh must be run after::
$ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-autorecon2-volonly
$ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-autorecon-hemi lh -T2pial \
-noparcstats -noparcstats2 -noparcstats3 -nohyporelabel -nobalabels
$ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-autorecon-hemi rh -T2pial \
-noparcstats -noparcstats2 -noparcstats3 -nohyporelabel -nobalabels
$ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-cortribbon
$ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-autorecon-hemi lh -nohyporelabel
$ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-autorecon-hemi rh -nohyporelabel
$ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-autorecon3
The parcellation statistics steps are excluded from the second and third
stages, because they require calculation of the cortical ribbon volume
(the fourth stage).
Hypointensity relabeling is excluded from hemisphere-specific steps to avoid
race conditions, as it is a volumetric operation.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from smriprep.workflows.surfaces import init_autorecon_resume_wf
wf = init_autorecon_resume_wf(omp_nthreads=1)
Inputs
------
subjects_dir
FreeSurfer SUBJECTS_DIR
subject_id
FreeSurfer subject ID
use_T2
Refine pial surface using T2w image
use_FLAIR
Refine pial surface using FLAIR image
Outputs
-------
subjects_dir
FreeSurfer SUBJECTS_DIR
subject_id
FreeSurfer subject ID
"""
workflow = Workflow(name=name)
inputnode = pe.Node(
niu.IdentityInterface(
fields=['subjects_dir', 'subject_id', 'use_T2', 'use_FLAIR']),
name='inputnode')
outputnode = pe.Node(
niu.IdentityInterface(
fields=['subjects_dir', 'subject_id']),
name='outputnode')
autorecon2_vol = pe.Node(
ReconAll(directive='autorecon2-volonly', openmp=omp_nthreads),
n_procs=omp_nthreads, mem_gb=5, name='autorecon2_vol')
autorecon2_vol.interface._always_run = True
autorecon_surfs = pe.MapNode(
ReconAll(
directive='autorecon-hemi',
flags=['-noparcstats', '-noparcstats2', '-noparcstats3',
'-nohyporelabel', '-nobalabels'],
openmp=omp_nthreads),
iterfield='hemi', n_procs=omp_nthreads, mem_gb=5,
name='autorecon_surfs')
autorecon_surfs.inputs.hemi = ['lh', 'rh']
autorecon_surfs.interface._always_run = True
# -cortribbon is a prerequisite for -parcstats, -parcstats2, -parcstats3
# Claiming two threads because pial refinement can be split by hemisphere
# if -T2pial or -FLAIRpial is enabled.
# Parallelizing by hemisphere saves ~30 minutes over simply enabling
# OpenMP on an 8 core machine.
cortribbon = pe.Node(ReconAll(directive=Undefined, steps=['cortribbon'],
parallel=True),
n_procs=2, name='cortribbon')
cortribbon.interface._always_run = True
# -parcstats* can be run per-hemisphere
# -hyporelabel is volumetric, even though it's part of -autorecon-hemi
parcstats = pe.MapNode(
ReconAll(
directive='autorecon-hemi',
flags=['-nohyporelabel'],
openmp=omp_nthreads),
iterfield='hemi', n_procs=omp_nthreads, mem_gb=5,
name='parcstats')
parcstats.inputs.hemi = ['lh', 'rh']
parcstats.interface._always_run = True
# Runs: -hyporelabel -aparc2aseg -apas2aseg -segstats -wmparc
# All volumetric, so don't parallelize
autorecon3 = pe.Node(
ReconAll(directive='autorecon3', openmp=omp_nthreads),
n_procs=omp_nthreads, mem_gb=5,
name='autorecon3')
autorecon3.interface._always_run = True
def _dedup(in_list):
vals = set(in_list)
if len(vals) > 1:
raise ValueError(
"Non-identical values can't be deduplicated:\n{!r}".format(in_list))
return vals.pop()
workflow.connect([
(inputnode, cortribbon, [('use_T2', 'use_T2'),
('use_FLAIR', 'use_FLAIR')]),
(inputnode, autorecon2_vol, [('subjects_dir', 'subjects_dir'),
('subject_id', 'subject_id')]),
(autorecon2_vol, autorecon_surfs, [('subjects_dir', 'subjects_dir'),
('subject_id', 'subject_id')]),
(autorecon_surfs, cortribbon, [(('subjects_dir', _dedup), 'subjects_dir'),
(('subject_id', _dedup), 'subject_id')]),
(cortribbon, parcstats, [('subjects_dir', 'subjects_dir'),
('subject_id', 'subject_id')]),
(parcstats, autorecon3, [(('subjects_dir', _dedup), 'subjects_dir'),
(('subject_id', _dedup), 'subject_id')]),
(autorecon3, outputnode, [('subjects_dir', 'subjects_dir'),
('subject_id', 'subject_id')]),
])
return workflow
def init_gifti_surface_wf(name='gifti_surface_wf'):
r"""
Prepare GIFTI surfaces from a FreeSurfer subjects directory.
If midthickness (or graymid) surfaces do not exist, they are generated and
saved to the subject directory as ``lh/rh.midthickness``.
These, along with the gray/white matter boundary (``lh/rh.smoothwm``), pial
surfaces (``lh/rh.pial``) and inflated surfaces (``lh/rh.inflated``) are
converted to GIFTI files.
Additionally, the vertex coordinates are :py:class:`recentered
<smriprep.interfaces.NormalizeSurf>` to align with native T1w space.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from smriprep.workflows.surfaces import init_gifti_surface_wf
wf = init_gifti_surface_wf()
Inputs
------
subjects_dir
FreeSurfer SUBJECTS_DIR
subject_id
FreeSurfer subject ID
fsnative2t1w_xfm
LTA formatted affine transform file (inverse)
Outputs
-------
surfaces
GIFTI surfaces for gray/white matter boundary, pial surface,
midthickness (or graymid) surface, and inflated surfaces
"""
workflow = Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(['subjects_dir', 'subject_id',
'fsnative2t1w_xfm']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(['surfaces']), name='outputnode')
get_surfaces = pe.Node(nio.FreeSurferSource(), name='get_surfaces')
midthickness = pe.MapNode(
MakeMidthickness(thickness=True, distance=0.5, out_name='midthickness'),
iterfield='in_file',
name='midthickness')
save_midthickness = pe.Node(nio.DataSink(parameterization=False),
name='save_midthickness')
surface_list = pe.Node(niu.Merge(4, ravel_inputs=True),
name='surface_list', run_without_submitting=True)
fs2gii = pe.MapNode(fs.MRIsConvert(out_datatype='gii'),
iterfield='in_file', name='fs2gii')
fix_surfs = pe.MapNode(NormalizeSurf(), iterfield='in_file', name='fix_surfs')
workflow.connect([
(inputnode, get_surfaces, [('subjects_dir', 'subjects_dir'),
('subject_id', 'subject_id')]),
(inputnode, save_midthickness, [('subjects_dir', 'base_directory'),
('subject_id', 'container')]),
# Generate midthickness surfaces and save to FreeSurfer derivatives
(get_surfaces, midthickness, [('smoothwm', 'in_file'),
('graymid', 'graymid')]),
(midthickness, save_midthickness, [('out_file', 'surf.@graymid')]),
# Produce valid GIFTI surface files (dense mesh)
(get_surfaces, surface_list, [('smoothwm', 'in1'),
('pial', 'in2'),
('inflated', 'in3')]),
(save_midthickness, surface_list, [('out_file', 'in4')]),
(surface_list, fs2gii, [('out', 'in_file')]),
(fs2gii, fix_surfs, [('converted', 'in_file')]),
(inputnode, fix_surfs, [('fsnative2t1w_xfm', 'transform_file')]),
(fix_surfs, outputnode, [('out_file', 'surfaces')]),
])
return workflow
def init_segs_to_native_wf(name='segs_to_native', segmentation='aseg'):
"""
Get a segmentation from FreeSurfer conformed space into native T1w space.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from smriprep.workflows.surfaces import init_segs_to_native_wf
wf = init_segs_to_native_wf()
Parameters
----------
segmentation
The name of a segmentation ('aseg' or 'aparc_aseg' or 'wmparc')
Inputs
------
in_file
Anatomical, merged T1w image after INU correction
subjects_dir
FreeSurfer SUBJECTS_DIR
subject_id
FreeSurfer subject ID
Outputs
-------
out_file
The selected segmentation, after resampling in native space
"""
workflow = Workflow(name='%s_%s' % (name, segmentation))
inputnode = pe.Node(niu.IdentityInterface([
'in_file', 'subjects_dir', 'subject_id']), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(['out_file']), name='outputnode')
# Extract the aseg and aparc+aseg outputs
fssource = pe.Node(nio.FreeSurferSource(), name='fs_datasource')
tonative = pe.Node(fs.Label2Vol(), name='tonative')
tonii = pe.Node(fs.MRIConvert(out_type='niigz', resample_type='nearest'), name='tonii')
if segmentation.startswith('aparc'):
if segmentation == 'aparc_aseg':
def _sel(x): return [parc for parc in x if 'aparc+' in parc][0] # noqa
elif segmentation == 'aparc_a2009s':
def _sel(x): return [parc for parc in x if 'a2009s+' in parc][0] # noqa
elif segmentation == 'aparc_dkt':
def _sel(x): return [parc for parc in x if 'DKTatlas+' in parc][0] # noqa
segmentation = (segmentation, _sel)
workflow.connect([
(inputnode, fssource, [
('subjects_dir', 'subjects_dir'),
('subject_id', 'subject_id')]),
(inputnode, tonii, [('in_file', 'reslice_like')]),
(fssource, tonative, [(segmentation, 'seg_file'),
('rawavg', 'template_file'),
('aseg', 'reg_header')]),
(tonative, tonii, [('vol_label_file', 'in_file')]),
(tonii, outputnode, [('out_file', 'out_file')]),
])
return workflow
def _check_cw256(in_files):
import numpy as np
from nibabel.funcs import concat_images
if isinstance(in_files, str):
in_files = [in_files]
summary_img = concat_images(in_files)
fov = np.array(summary_img.shape[:3]) * summary_img.header.get_zooms()[:3]
if np.any(fov > 256):
return ['-noskullstrip', '-cw256']
return '-noskullstrip'
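# Worked example for _check_cw256 (hypothetical inputs): a 176x240x256 image at
# 1 mm isotropic has a field of view of at most 256 mm, so only '-noskullstrip'
# is returned; a 320x320x320 image at 0.9 mm (288 mm FOV) also gets '-cw256'.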
avg_line_length: 41.527076 | max_line_length: 117 | alphanum_fraction: 0.634747

hexsha: 4518aed41324212c63ed2caaf9e1c98ea5d7f305 | size: 463 | ext: py | lang: Python
max_stars: path=Introduction-to-data-visualization-with-matplotlib/3. Quantitative comparisons and statistical visualizations/script_6.py | repo=nhutnamhcmus/datacamp-playground | head=25457e813b1145e1d335562286715eeddd1c1a7b | licenses=["MIT"] | count=1 | min_datetime=2021-05-08T11:09:27.000Z | max_datetime=2021-05-08T11:09:27.000Z
max_issues: path=Introduction-to-data-visualization-with-matplotlib/3. Quantitative comparisons and statistical visualizations/script_6.py | repo=nhutnamhcmus/datacamp-playground | head=25457e813b1145e1d335562286715eeddd1c1a7b | licenses=["MIT"] | count=1 | min_datetime=2022-03-12T15:42:14.000Z | max_datetime=2022-03-12T15:42:14.000Z
max_forks: path=Introduction-to-data-visualization-with-matplotlib/3. Quantitative comparisons and statistical visualizations/script_6.py | repo=nhutnamhcmus/datacamp-playground | head=25457e813b1145e1d335562286715eeddd1c1a7b | licenses=["MIT"] | count=1 | min_datetime=2021-04-30T18:24:19.000Z | max_datetime=2021-04-30T18:24:19.000Z
content:
import matplotlib.pyplot as plt
# seattle_weather and austin_weather DataFrames are assumed to be pre-loaded
fig, ax = plt.subplots()
# Add Seattle temperature data in each month with error bars
ax.errorbar(seattle_weather["MONTH"], seattle_weather["MLY-TAVG-NORMAL"], yerr=seattle_weather["MLY-TAVG-STDDEV"])
# Add Austin temperature data in each month with error bars
ax.errorbar(austin_weather["MONTH"], austin_weather["MLY-TAVG-NORMAL"], yerr=austin_weather["MLY-TAVG-STDDEV"])
# Set the y-axis label
ax.set_ylabel("Temperature (Fahrenheit)")
plt.show()
avg_line_length: 38.583333 | max_line_length: 115 | alphanum_fraction: 0.7473

hexsha: 9a1e833eb12dd76b516960369b4fc67fda8ea344 | size: 8,429 | ext: py | lang: Python
max_stars: path=build.py | repo=dllu/dllup | head=3d5e69754fb568cd70f01d277964df3dc7441a6b | licenses=["MIT"] | count=2 | min_datetime=2021-09-14T18:33:00.000Z | max_datetime=2021-12-02T07:08:46.000Z
max_issues: path=build.py | repo=dllu/dllup | head=3d5e69754fb568cd70f01d277964df3dc7441a6b | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_forks: path=build.py | repo=dllu/dllup | head=3d5e69754fb568cd70f01d277964df3dc7441a6b | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
content:
#!/usr/bin/python
# The templating engine and the parser for the dllup markup language are hereby
# released open-source under the MIT License.
#
# Copyright (c) 2015 Daniel Lawrence Lu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import dllup
import hashlib
import os
import re
import struct
import time
from operator import itemgetter
RASTER_IMG = ['.png', '.jpg']
# navigation markup
PORTFOLIO_NAV = '<a href="{child}/"><figure><img src="{pic}" alt="{child}"/><figcaption>{title} ({subtitle})</figcaption></figure></a>'
BLOG_NAV = '<a href="{child}/"><span class="blogdate">{date}</span><span class="blogtitle">{title}</span></a>'
ROOT_NAV = '<a href="/{child}/">{child}</a>'
# the first breadcrumb
BREAD = '<a href="/"><span id="dllu"><span style="display:none;">dllu</span><span id="D"></span><span id="L0"></span><span id="L1"></span><span id="U"></span></span></a><span>/</span>'
BREAD_HERO = '<a href="/" id="hero-a"><span id="dllu-hero"><span style="display:none;">dllu</span><span id="D"></span><span id="L0"></span><span id="L1"></span><span id="U"></span></span></a>'
# all consecutive breadcrumbs
CRUMB = '<a href="{cpath}">{child}</a><span>/</span>'
# page markup
PAGE = '<!DOCTYPE html>\n{sig}\n{htmlhead}<nav id="breadcrumbs">{breadcrumbs}</nav><nav id="rootnav">{rootnav}</nav><nav id="{navtype}">{nav}</nav><main>{output}<footer><p>© Daniel Lawrence Lu. Page generated on {time} by <a href="/programming/dllup/">dllup</a>. (<a href="{text}">text version</a>)</footer></main>{htmlfoot}'
PAGE_HERO = PAGE.replace('id="breadcrumbs"', 'id="hero"')
def readconfig(configpath):
# Reads a config file which is a simple text file of key-value pairs.
# One key-value pair per line, key (no whitespaces) is separated from
# value by whitespace.
# Valid keys are: type, root
if not os.path.exists(configpath):
return {}
config = open(configpath).read()
configsplit = [cc.split(None, 1) for cc in config.split('\n')]
return {c[0]: c[1] for c in configsplit if len(c) >= 2}
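# A minimal config file for this reader looks like the following (hypothetical
# values; 'type' and 'root' are the keys recognized above):
#
#   type blogposts
#   root https://example.com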
def recurse(path='', rootnav='', root=''):
global htmlhead
global htmlfoot
children = os.listdir(path)
folderdata = [
get_folderdata(os.path.join(path, c)) for c in children
if os.path.isdir(os.path.join(path, c))
]
config = readconfig(os.path.join(path, 'config'))
if 'root' in config:
root = config['root']
navtype = config['type'] if 'type' in config else None
# generate navigation markup
nav = ''
if navtype == 'blogposts':
folderdata = sorted(
[f for f in folderdata if 'date' in f],
key=itemgetter('date'),
reverse=True,
)
elif navtype == 'portfolio':
folderdata = sorted(
[f for f in folderdata if 'subtitle' in f],
key=itemgetter('subtitle'),
reverse=True,
)
else:
folderdata = sorted([f for f in folderdata if 'child' in f],
key=itemgetter('child'))
for f in folderdata:
try:
if navtype == 'root':
rootnav += ROOT_NAV.format(**f)
elif navtype == 'blogposts':
nav += BLOG_NAV.format(**f)
elif navtype == 'portfolio':
nav += PORTFOLIO_NAV.format(**f)
except KeyError:
pass # ignore folders without complete data
breadcrumbs = crumbify(path)
# recurse through children
for child in children:
cpath = os.path.join(path, child)
if os.path.isdir(cpath):
recurse(cpath, rootnav, root)
if child[-4:] in RASTER_IMG and not '_600' in child:
resize_images(path, child)
pass
elif child[-5:] == '.dllu':
markup = open(os.path.join(path, child)).read()
sig = '<!--%s-->' % hashlib.sha1(
struct.pack('f', os.path.getmtime(cpath))).hexdigest()
sig2 = None
try:
with open(os.path.join(path, child[:-5] + '.html')) as f:
f.readline()
sig2 = f.readline()
except FileNotFoundError:
pass
if sig == sig2:
continue
output = dllup.parse(markup)
f = open(os.path.join(path, child[:-5] + '.html'), 'w')
PP = PAGE
if path == '.':
PP = PAGE_HERO
ss = markup.split('\n===\n', 1)
if len(ss) > 1:
title = ss[0].strip()
else:
title = path.split('/')[-1]
head = htmlhead.format(title=title)
f.write(
PP.format(htmlhead=head,
htmlfoot=htmlfoot,
breadcrumbs=breadcrumbs,
rootnav=rootnav,
navtype=navtype,
output=output,
time=time.strftime('%Y-%m-%d', time.gmtime()),
child=child,
nav=nav,
sig=sig,
text=child).replace(
' src="/',
' src="%s/' % root,
).replace(
' href="/',
' href="%s/' % root,
))
f.close()
def resize_images(path, child):
filename = os.path.join(path, child)
filename600 = os.path.join(path, child[:-4] + '_600' + child[-4:])
filename600x2 = os.path.join(path, child[:-4] + '_600@2x' + child[-4:])
for f in (filename600, filename600x2):
scale = 600
if '@2x' in f:
scale = 1200
if not os.path.exists(f):
os.system(f'gm convert "{filename}" -resize {scale} "{f}"')
def crumbify(path):
if path == '.':
return BREAD_HERO
breadcrumbs = BREAD
crumbs = '/'
for crumb in path.split('/')[1:]:
crumbs += crumb + '/'
breadcrumbs += CRUMB.format(cpath=crumbs, child=crumb)
return breadcrumbs
def get_folderdata(path):
if os.path.exists(os.path.join(path, 'private')):
return {}
folderdata = {'child': os.path.split(path)[1]}
index = os.path.join(path, 'index.dllu')
if os.path.exists(index):
content = open(index).read().split('\n===\n', 1)[0]
content = [d for d in content.split('\n') if d.strip() != '']
if len(content) >= 1:
folderdata['title'] = dllup.parsetext(content[0])
if len(content) >= 2:
folderdata['subtitle'] = dllup.parsetext(content[1])
else:
return {}
for extension in RASTER_IMG:
if os.path.exists(path + extension):
folderdata['pic'] = os.path.split(path)[1] + extension
if re.match(r'y\d\d\d\dm\d\dd\d\d', os.path.split(path)[1]):
folderdata['date'] = re.sub('m|d', '-', os.path.split(path)[1][1:])
return folderdata
def main():
global htmlhead, htmlfoot
htmlhead = open('html/head.html').read()
htmlfoot = open('html/foot.html').read()
cssname = 'dllu-%s.css' % hashlib.sha1(
struct.pack('f', os.path.getmtime('css'))).hexdigest()
os.system('sassc -t compressed css/dllu.scss > %s' % cssname)
htmlhead = htmlhead.replace('dllu.css', cssname)
recurse('.')
if __name__ == '__main__':
main()
avg_line_length: 39.023148 | max_line_length: 330 | alphanum_fraction: 0.566971

hexsha: c22e7ba6b7de8b53914f15f9c65b2350d3e4309e | size: 61 | ext: py | lang: Python
max_stars: path=export_action/__init__.py | repo=vakhov/django-export-action | head=25dfdd960523818dbbd38f8ccb3fa2f1516fc4c6 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_issues: path=export_action/__init__.py | repo=vakhov/django-export-action | head=25dfdd960523818dbbd38f8ccb3fa2f1516fc4c6 | licenses=["MIT"] | count=1 | min_datetime=2021-05-24T08:18:42.000Z | max_datetime=2021-05-24T08:18:42.000Z
max_forks: path=export_action/__init__.py | repo=forum-group/django-export-action | head=1dcdefbdb5487161be68993d9d54ea7a04c97042 | licenses=["MIT"] | count=1 | min_datetime=2021-05-24T06:27:41.000Z | max_datetime=2021-05-24T06:27:41.000Z
content:
default_app_config = 'export_action.apps.ExportActionConfig'
avg_line_length: 30.5 | max_line_length: 60 | alphanum_fraction: 0.868852

hexsha: ad0275b69cfc03006cd2882bf90dee3438186567 | size: 3,004 | ext: py | lang: Python
max_stars: path=build_tools/find_effected_packages.py | repo=abujalski/Webports | head=47bea8edcb0a5e00baab53f1e850d25a5beeb5d3 | licenses=["BSD-3-Clause"] | count=4 | min_datetime=2016-12-01T05:19:53.000Z | max_datetime=2019-03-09T12:30:08.000Z
max_issues: path=build_tools/find_effected_packages.py | repo=abujalski/Webports | head=47bea8edcb0a5e00baab53f1e850d25a5beeb5d3 | licenses=["BSD-3-Clause"] | count=3 | min_datetime=2016-12-26T03:43:16.000Z | max_datetime=2021-01-28T10:41:45.000Z
max_forks: path=build_tools/find_effected_packages.py | repo=abujalski/Webports | head=47bea8edcb0a5e00baab53f1e850d25a5beeb5d3 | licenses=["BSD-3-Clause"] | count=4 | min_datetime=2016-08-03T19:32:01.000Z | max_datetime=2020-03-09T01:29:58.000Z
content:
#!/usr/bin/env python
# Copyright 2015 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Find which packages are effected by a given change.
Accepts a list of changed files and outputs a list of effected
packages. Outputs 'all' if any shared/non-package-specific
file is changed."""
import argparse
import fnmatch
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACLPORTS_ROOT = os.path.dirname(SCRIPT_DIR)
sys.path.append(os.path.join(NACLPORTS_ROOT, 'lib'))
import webports
import webports.source_package
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--deps', action='store_true',
help='include dependencies of effected packages.')
parser.add_argument('files', nargs='+', help='Changed files.')
options = parser.parse_args(args)
webports.set_verbose(options.verbose)
if options.deps:
package_filter = sys.stdin.read().split()
else:
package_filter = None
effected_packages = find_effected_packages(options.files, options.deps,
package_filter)
print('\n'.join(effected_packages))
return 0
# Normally, changing files outside of the 'ports' directory will
# trigger the rebuilding of all packages. However, certain files are
# known to not affect the building of packages, and those are listed here.
IGNORE_FILES = [
'build_tools/find_effected_packages.py',
'build_tools/partition*.txt',
'AUTHORS',
'*/test_*.py',
'docs/*.md',
'*.md',
]
def find_effected_packages(files, include_deps, package_filter):
packages = []
to_resolve = []
def add_package(package):
if package_filter and package.NAME not in package_filter:
webports.log_verbose('Filtered out package: %s' % package.NAME)
return
if package.NAME not in packages:
if include_deps:
for dep in package.transitive_dependencies():
if dep.NAME not in packages:
packages.append(dep.NAME)
packages.append(package.NAME)
to_resolve.append(package)
for filename in files:
parts = filename.split(os.path.sep)
if parts[0] != 'ports':
webports.log_verbose('effected file outside of ports tree: %s' % filename)
if any(fnmatch.fnmatch(filename, ignore) for ignore in IGNORE_FILES):
continue
return ['all']
package_name = parts[1]
pkg = webports.source_package.create_package(package_name)
add_package(pkg)
while to_resolve:
pkg = to_resolve.pop()
for r in pkg.reverse_dependencies():
add_package(r)
if package_filter:
packages = [p for p in packages if p in package_filter]
return packages
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except webports.Error as e:
sys.stderr.write('%s\n' % e)
sys.exit(-1)
avg_line_length: 30.04 | max_line_length: 80 | alphanum_fraction: 0.697403

hexsha: 908c78ac9e6c74469ef7518a4379349c62d35ba7 | size: 3,982 | ext: py | lang: Python
max_stars: path=pi4home/components/stepper/__init__.py | repo=khzd/pi4home | head=937bcdcf77bab111cca10af1fe45c63a55c29aae | licenses=["MIT"] | count=1 | min_datetime=2019-05-16T02:52:12.000Z | max_datetime=2019-05-16T02:52:12.000Z
max_issues: path=pi4home/components/stepper/__init__.py | repo=khzd/pi4home | head=937bcdcf77bab111cca10af1fe45c63a55c29aae | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_forks: path=pi4home/components/stepper/__init__.py | repo=khzd/pi4home | head=937bcdcf77bab111cca10af1fe45c63a55c29aae | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
content:
import voluptuous as vol
from pi4home.automation import ACTION_REGISTRY
import pi4home.config_validation as cv
from pi4home.const import CONF_ACCELERATION, CONF_DECELERATION, CONF_ID, CONF_MAX_SPEED, \
CONF_POSITION, CONF_TARGET
from pi4home.core import CORE
from pi4home.cpp_generator import Pvariable, add, get_variable, templatable
from pi4home.cpp_types import Action, pi4home_ns, int32
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
})
# pylint: disable=invalid-name
stepper_ns = pi4home_ns.namespace('stepper')
Stepper = stepper_ns.class_('Stepper')
SetTargetAction = stepper_ns.class_('SetTargetAction', Action)
ReportPositionAction = stepper_ns.class_('ReportPositionAction', Action)
def validate_acceleration(value):
value = cv.string(value)
for suffix in ('steps/s^2', 'steps/s*s', 'steps/s/s', 'steps/ss', 'steps/(s*s)'):
if value.endswith(suffix):
value = value[:-len(suffix)]
if value == 'inf':
return 1e6
try:
value = float(value)
except ValueError:
raise vol.Invalid("Expected acceleration as floating point number, got {}".format(value))
if value <= 0:
raise vol.Invalid("Acceleration must be larger than 0 steps/s^2!")
return value
def validate_speed(value):
value = cv.string(value)
    for suffix in ('steps/s',):
if value.endswith(suffix):
value = value[:-len(suffix)]
if value == 'inf':
return 1e6
try:
value = float(value)
except ValueError:
raise vol.Invalid("Expected speed as floating point number, got {}".format(value))
if value <= 0:
raise vol.Invalid("Speed must be larger than 0 steps/s!")
return value
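# A quick sketch of how the validators above behave (hypothetical inputs):
#   validate_speed('250 steps/s')            # -> 250.0
#   validate_speed('inf')                    # -> 1e6
#   validate_acceleration('1000 steps/s^2')  # -> 1000.0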
STEPPER_SCHEMA = cv.Schema({
vol.Required(CONF_MAX_SPEED): validate_speed,
vol.Optional(CONF_ACCELERATION): validate_acceleration,
vol.Optional(CONF_DECELERATION): validate_acceleration,
})
STEPPER_PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(STEPPER_SCHEMA.schema)
def setup_stepper_core_(stepper_var, config):
if CONF_ACCELERATION in config:
add(stepper_var.set_acceleration(config[CONF_ACCELERATION]))
if CONF_DECELERATION in config:
add(stepper_var.set_deceleration(config[CONF_DECELERATION]))
if CONF_MAX_SPEED in config:
add(stepper_var.set_max_speed(config[CONF_MAX_SPEED]))
def setup_stepper(stepper_var, config):
CORE.add_job(setup_stepper_core_, stepper_var, config)
BUILD_FLAGS = '-DUSE_STEPPER'
CONF_STEPPER_SET_TARGET = 'stepper.set_target'
STEPPER_SET_TARGET_ACTION_SCHEMA = cv.Schema({
vol.Required(CONF_ID): cv.use_variable_id(Stepper),
vol.Required(CONF_TARGET): cv.templatable(cv.int_),
})
@ACTION_REGISTRY.register(CONF_STEPPER_SET_TARGET, STEPPER_SET_TARGET_ACTION_SCHEMA)
def stepper_set_target_to_code(config, action_id, template_arg, args):
for var in get_variable(config[CONF_ID]):
yield None
rhs = var.make_set_target_action(template_arg)
type = SetTargetAction.template(template_arg)
action = Pvariable(action_id, rhs, type=type)
for template_ in templatable(config[CONF_TARGET], args, int32):
yield None
add(action.set_target(template_))
yield action
CONF_STEPPER_REPORT_POSITION = 'stepper.report_position'
STEPPER_REPORT_POSITION_ACTION_SCHEMA = cv.Schema({
vol.Required(CONF_ID): cv.use_variable_id(Stepper),
vol.Required(CONF_POSITION): cv.templatable(cv.int_),
})
@ACTION_REGISTRY.register(CONF_STEPPER_REPORT_POSITION, STEPPER_REPORT_POSITION_ACTION_SCHEMA)
def stepper_report_position_to_code(config, action_id, template_arg, args):
for var in get_variable(config[CONF_ID]):
yield None
rhs = var.make_report_position_action(template_arg)
type = ReportPositionAction.template(template_arg)
action = Pvariable(action_id, rhs, type=type)
for template_ in templatable(config[CONF_POSITION], args, int32):
yield None
add(action.set_position(template_))
yield action
| 31.856
| 97
| 0.73782
|
4ac09dffb1c0c179c00f694837f6ceba43402fc5
| 1,397
|
py
|
Python
|
gchatautorespond/urls.py
|
merrlyne/gchatautorespond
|
a7f8d7b715ca9851a65588a268ce39addb906b6d
|
[
"BSD-2-Clause"
] | null | null | null |
gchatautorespond/urls.py
|
merrlyne/gchatautorespond
|
a7f8d7b715ca9851a65588a268ce39addb906b6d
|
[
"BSD-2-Clause"
] | null | null | null |
gchatautorespond/urls.py
|
merrlyne/gchatautorespond
|
a7f8d7b715ca9851a65588a268ce39addb906b6d
|
[
"BSD-2-Clause"
] | 1
|
2018-12-03T19:12:24.000Z
|
2018-12-03T19:12:24.000Z
|
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import RedirectView
from registration.backends.default.views import RegistrationView
from registration.forms import RegistrationFormTermsOfService as RegTOS
from .apps.autorespond import urls as autorespond_urls
from .apps.licensing import urls as licensing_urls
from .apps.autorespond.views import LoggedOutView, PrivacyView, TermsView
urlpatterns = [
url(r'^autorespond/', include(autorespond_urls)),
url(r'^payment/', include(licensing_urls)),
# Override default login to redirect if already logged in.
# TODO do this for signup as well?
url(r'^accounts/login', 'gchatautorespond.apps.autorespond.views.login'),
url(r'^admin/', include(admin.site.urls)),
# Override the default registration form.
url(r'^accounts/register/$', RegistrationView.as_view(form_class=RegTOS), name='registration_register'),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^accounts/', include('django.contrib.auth.urls')),
# post-login will redirect here.
url(r'^accounts/profile/$', RedirectView.as_view(pattern_name='autorespond', permanent=False)),
url(r'^privacy/$', PrivacyView.as_view(), name='privacy'),
url(r'^terms/$', TermsView.as_view(), name='terms'),
url(r'^$', LoggedOutView.as_view(), name='logged_out'),
]
| 41.088235
| 108
| 0.743021
|
8e10edb8e9ed00a20afb44a9fc2e81951cbbfe19
| 24,041
|
py
|
Python
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2019_10_17/operations/_private_link_scopes_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2019_10_17/operations/_private_link_scopes_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 2
|
2020-03-03T23:11:13.000Z
|
2020-03-30T18:50:55.000Z
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2019_10_17/operations/_private_link_scopes_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkScopesOperations(object):
"""PrivateLinkScopesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2019_10_17.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.AzureMonitorPrivateLinkScopeListResult"]
"""Gets a list of all Azure Monitor PrivateLinkScopes within a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureMonitorPrivateLinkScopeListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2019_10_17.models.AzureMonitorPrivateLinkScopeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureMonitorPrivateLinkScopeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-17-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AzureMonitorPrivateLinkScopeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/privateLinkScopes'} # type: ignore
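    # A minimal usage sketch (hypothetical client variable): the pager returned
    # by list() fetches further pages lazily while being iterated.
    #   for scope in monitor_client.private_link_scopes.list():
    #       print(scope.name)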
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.AzureMonitorPrivateLinkScopeListResult"]
"""Gets a list of Azure Monitor PrivateLinkScopes within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureMonitorPrivateLinkScopeListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2019_10_17.models.AzureMonitorPrivateLinkScopeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureMonitorPrivateLinkScopeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-17-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AzureMonitorPrivateLinkScopeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/privateLinkScopes'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
scope_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-17-preview"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'scopeName': self._serialize.url("scope_name", scope_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/privateLinkScopes/{scopeName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
scope_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a Azure Monitor PrivateLinkScope.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param scope_name: The name of the Azure Monitor PrivateLinkScope resource.
:type scope_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
scope_name=scope_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/privateLinkScopes/{scopeName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
scope_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.AzureMonitorPrivateLinkScope"
"""Returns a Azure Monitor PrivateLinkScope.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param scope_name: The name of the Azure Monitor PrivateLinkScope resource.
:type scope_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureMonitorPrivateLinkScope, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2019_10_17.models.AzureMonitorPrivateLinkScope
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureMonitorPrivateLinkScope"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-17-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'scopeName': self._serialize.url("scope_name", scope_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AzureMonitorPrivateLinkScope', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/privateLinkScopes/{scopeName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
scope_name, # type: str
azure_monitor_private_link_scope_payload, # type: "models.AzureMonitorPrivateLinkScope"
**kwargs # type: Any
):
# type: (...) -> "models.AzureMonitorPrivateLinkScope"
"""Creates (or updates) a Azure Monitor PrivateLinkScope. Note: You cannot specify a different
value for InstrumentationKey nor AppId in the Put operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param scope_name: The name of the Azure Monitor PrivateLinkScope resource.
:type scope_name: str
:param azure_monitor_private_link_scope_payload: Properties that need to be specified to create
         or update an Azure Monitor PrivateLinkScope.
:type azure_monitor_private_link_scope_payload: ~$(python-base-namespace).v2019_10_17.models.AzureMonitorPrivateLinkScope
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureMonitorPrivateLinkScope, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2019_10_17.models.AzureMonitorPrivateLinkScope
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureMonitorPrivateLinkScope"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-17-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'scopeName': self._serialize.url("scope_name", scope_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(azure_monitor_private_link_scope_payload, 'AzureMonitorPrivateLinkScope')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AzureMonitorPrivateLinkScope', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AzureMonitorPrivateLinkScope', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/privateLinkScopes/{scopeName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
scope_name, # type: str
private_link_scope_tags, # type: "models.TagsResource"
**kwargs # type: Any
):
# type: (...) -> "models.AzureMonitorPrivateLinkScope"
"""Updates an existing PrivateLinkScope's tags. To update other fields use the CreateOrUpdate
method.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param scope_name: The name of the Azure Monitor PrivateLinkScope resource.
:type scope_name: str
:param private_link_scope_tags: Updated tag information to set into the PrivateLinkScope
instance.
:type private_link_scope_tags: ~$(python-base-namespace).v2019_10_17.models.TagsResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureMonitorPrivateLinkScope, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2019_10_17.models.AzureMonitorPrivateLinkScope
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureMonitorPrivateLinkScope"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-17-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'scopeName': self._serialize.url("scope_name", scope_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(private_link_scope_tags, 'TagsResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AzureMonitorPrivateLinkScope', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/privateLinkScopes/{scopeName}'} # type: ignore
| 49.063265
| 184
| 0.667776
|
59719918a749000c35190491dc7abcfb5972042d
| 10,708
|
py
|
Python
|
src/lava/lib/dl/slayer/axon/delta.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 37
|
2021-09-30T16:47:15.000Z
|
2022-03-07T22:29:21.000Z
|
src/lava/lib/dl/slayer/axon/delta.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 36
|
2021-11-04T16:54:55.000Z
|
2022-03-31T02:26:29.000Z
|
src/lava/lib/dl/slayer/axon/delta.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 20
|
2021-10-29T22:55:58.000Z
|
2022-03-22T17:27:16.000Z
|
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
"""Delta encoder implementation."""
import torch
class _DeltaUnit(torch.autograd.Function):
""" """
@staticmethod
def forward(
ctx,
input, threshold,
pre_state, residual_state, error_state,
cum_error,
tau_grad, scale_grad
):
"""
"""
# ignoring the cumulative error and residual state,
# the delta forward unit can be formulated as
# delta_x[t] = x[t] - x[t-1]
# y[t] = delta_x[t] * H(|delta_x[t]| - threshold)
output = torch.zeros_like(input)
delta_input = torch.zeros_like(input)
error = error_state
if cum_error is True:
for t in range(input.shape[-1]):
delta = input[..., t] - pre_state + residual_state
delta_input[..., t] = delta
error += delta
output[..., t] = torch.where(
torch.abs(error) > threshold,
delta,
0 * delta
).to(input.dtype)
error *= 1 - (torch.abs(output[..., t]) > 0)
residual_state = (delta - output[..., t]).detach()
pre_state = input[..., t].detach()
else:
for t in range(input.shape[-1]):
delta = input[..., t] - pre_state + residual_state
delta_input[..., t] = delta
output[..., t] = torch.where(
torch.abs(delta) > threshold,
delta,
0 * delta
).to(input.dtype)
residual_state = (delta - output[..., t]).detach()
pre_state = input[..., t].detach()
ctx.save_for_backward(
delta_input, threshold,
torch.autograd.Variable(
torch.tensor(
tau_grad,
device=input.device,
dtype=input.dtype
),
requires_grad=False
),
torch.autograd.Variable(
torch.tensor(
scale_grad,
device=input.device,
dtype=input.dtype
),
requires_grad=False
),
)
return output, residual_state, error
@staticmethod
def backward(ctx, grad_output, *_):
"""
"""
# the backward computation is
# delta_val = delta(|delta_x[t]| - threshold)
# grad_threshold[t] = (- delta_x[t] * delta_val) * grad_y[t]
# grad_delta_x[t] = (|delta_x[t]| * delta_val) * grad_y[t]
# grad_x[t] = grad_delta_x[t] - grad_delta_x[t+1]
delta_input, threshold, tau_grad, scale_grad = ctx.saved_tensors
# different relaxation options
# nascent_delta = lambda x: scale_grad * torch.sinc(x/tau_grad)
def nascent_delta(x):
return scale_grad * torch.exp(
-torch.abs(x) / tau_grad
)
# nascent_delta = lambda x: scale_grad *
# (1 - torch.clamp(torch.abs(x), max=tau_grad) / tau_grad)
delta_sub = torch.abs(delta_input) - threshold
grad_threshold = torch.where(
delta_sub > 0,
-delta_input * nascent_delta(delta_sub) / threshold,
-torch.sign(delta_input)
) * grad_output
grad_delta_input = grad_output
grad_input = torch.zeros_like(grad_output)
grad_input[..., :-1] = grad_delta_input[..., :-1] \
- grad_delta_input[..., 1:]
grad_input[..., -1] = grad_delta_input[..., -1]
if threshold.shape[0] == 1: # shared_param is true
grad_threshold = torch.unsqueeze(torch.sum(grad_threshold), dim=0)
else:
# shared_param is false. In this case,
# the threshold needs to be reduced along
# all dimensions except channel
grad_threshold = torch.sum(
grad_threshold.reshape(
delta_input.shape[0], delta_input.shape[1], -1
),
dim=[0, -1]
).reshape(threshold.shape[:-1])
# threshold dim was unsqueezed in the last dim
return grad_input, grad_threshold, None, None, None, None, None, None
class Delta(torch.nn.Module):
"""Implements delta differential encoding followed by thresholding.
The thresholds are learnable, individually or as a group.
.. math::
\\Delta x[t] &= x[t] - x[t-1] + r[t-1] \\\\
y[t] &= \\begin{cases}
\\Delta x[t] &\\text{ if } \\Delta x[t] \\geq \\vartheta \\\\
0 &\\text{ otherwise}
\\end{cases}\\\\
r[t] &= \\Delta x[t] - y[t]
For cumulative error, output evaluation is changed to
.. math::
        e[t] &= e[t] + \\Delta x[t]\\\\
        y[t] &= \\begin{cases}
            \\Delta x[t] &\\text{ if } |e[t]| \\geq \\vartheta \\\\
            0 &\\text{ otherwise}
        \\end{cases}\\\\
        e[t] &= e[t] * (1 - \\mathcal{H}(|y[t]|))
Parameters
----------
threshold : float
threshold value.
min_threshold : float
minimum threshold value. Defaults to 0.
tau_grad : float
threshold gradient relaxation parameter. Defaults to 1.
scale_grad : float
threshold gradient scaling parameter. Defaults to 1.
cum_error : bool
flag to enable cumulative error before thresholding.
Defaults to False.
shared_param : bool
flag to enable shared threshold. Defaults to True.
persistent_state : bool
flag to enable persistent delta states. Defaults to False.
requires_grad : bool
flag to enable threshold gradient. Defaults to False.
Attributes
----------
min_threshold
tau_grad
scale_grad
cum_error
shared_param
persistent_state
requires_grad
    quantizer : function ptr or None
        quantizer method to be applied. If None, it is ignored. It needs to be
        explicitly set.
    shape : torch shape
        shape of delta block. It is identified at runtime. The value is None
        before that.
pre_state: torch tensor
previous state of delta unit.
residual_state : torch tensor
residual state of delta unit.
error_state : torch tensor
error state of delta unit.
Examples
--------
>> delta = Delta(threshold=1)
>> y = delta(x) # differential threshold encoding
"""
def __init__(
self, threshold, min_threshold=0,
tau_grad=1, scale_grad=1,
cum_error=False, shared_param=True, persistent_state=False,
requires_grad=False
):
super(Delta, self).__init__()
self.min_threshold = min_threshold
self.tau_grad = tau_grad
self.scale_grad = scale_grad
self.cum_error = cum_error
self.shared_param = shared_param
self.persistent_state = persistent_state
self.requires_grad = requires_grad
self.quantizer = None
self.shape = None
self.register_buffer(
'pre_state',
torch.zeros(1, dtype=torch.float),
persistent=False
)
self.register_buffer(
'residual_state',
torch.zeros(1, dtype=torch.float),
persistent=False
)
self.register_buffer(
'error_state',
torch.zeros(1, dtype=torch.float),
persistent=False
)
self.register_parameter(
'threshold',
torch.nn.Parameter(
torch.FloatTensor([threshold]),
requires_grad=self.requires_grad
),
)
def clamp(self):
"""Clamps the threshold value to
:math:`[\\verb~min_threshold~, \\infty)`."""
self.threshold.data.clamp_(self.min_threshold)
@property
def device(self):
"""Device property of object
Parameters
----------
Returns
-------
torch.device
returns the device memory where the object lives.
"""
# return self.inv_threshold.device
return self.threshold.device
def forward(self, input):
"""
"""
if self.shape is None:
self.shape = input.shape[1:-1]
if len(self.shape) == 0:
raise AssertionError(
f"Expected input to have at least 3 dimensions: "
f"[Batch, Spatial dims ..., Time]. "
f"It's shape is {input.shape}."
)
if self.shared_param is False:
self.threshold.data = self.threshold.data \
* torch.ones(self.shape[0]).to(self.device)
self.threshold.data = self.threshold.data.reshape(
[self.shape[0]] + [1 for _ in self.shape[1:]]
)
else:
if input.shape[1:-1] != self.shape:
raise AssertionError(
                    f'Input tensor shape ({input.shape}) does not match with '
                    f'neuron shape ({self.shape}).'
)
if self.pre_state.shape[0] != input.shape[0]:
# persistent state cannot proceed due to change in batch dimension.
# this likely indicates change from training to testing set
self.pre_state = torch.zeros(input.shape[:-1]).to(
self.pre_state.dtype
).to(self.pre_state.device)
self.error_state = torch.zeros(input.shape[:-1]).to(
self.error_state.dtype
).to(self.error_state.device)
self.clamp()
if self.persistent_state is True:
_pre_state = self.pre_state
_residual_state = self.residual_state
else:
_pre_state = torch.zeros_like(input[..., 0])
_residual_state = torch.zeros_like(input[..., 0])
if self.cum_error is True:
_error_state = self.error_state
else:
_error_state = torch.zeros_like(input[..., 0])
output, residual_state, error_state = _DeltaUnit.apply(
input, self.threshold,
_pre_state,
_residual_state,
_error_state,
self.cum_error, self.tau_grad, self.scale_grad
)
if self.persistent_state is True:
self.pre_state = input[..., -1].clone().detach()
self.residual_state = residual_state.clone().detach()
if self.cum_error is True:
self.error_state = error_state.clone().detach()
return output
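# A minimal usage sketch (hypothetical shapes): the input must have at least
# three dimensions, [batch, spatial dims ..., time].
#   delta = Delta(threshold=0.1)
#   x = torch.rand(2, 4, 100)   # [batch, channel, time]
#   y = delta(x)                # sparse differential encoding, same shape as x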
| 33.567398
| 79
| 0.537449
|
8d6dcda0f2f1cc8228a4706a43c877acabeee495
| 6,127
|
py
|
Python
|
examples/decoding/plot_oasis_vbm.py
|
fabianp/nilearn
|
367516822f538965368b936704a5346199331457
|
[
"BSD-2-Clause"
] | null | null | null |
examples/decoding/plot_oasis_vbm.py
|
fabianp/nilearn
|
367516822f538965368b936704a5346199331457
|
[
"BSD-2-Clause"
] | null | null | null |
examples/decoding/plot_oasis_vbm.py
|
fabianp/nilearn
|
367516822f538965368b936704a5346199331457
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Voxel-Based Morphometry on Oasis dataset
========================================
This example uses Voxel-Based Morphometry (VBM) to study the relationship
between aging and gray matter density.
The data come from the `OASIS <http://www.oasis-brains.org/>`_ project.
If you use it, you need to agree with the data usage agreement available
on the website.
It has been run through a standard VBM pipeline (using SPM8 and
NewSegment) to create VBM maps, which we study here.
Predictive modeling analysis: VBM bio-markers of aging?
--------------------------------------------------------
We run a standard SVM-ANOVA nilearn pipeline to predict age from the VBM
data. We use only 100 subjects from the OASIS dataset to limit the memory
usage.
Note that for an actual predictive modeling study of aging, the study
should be run on the full set of subjects. Also, parameters such as the
smoothing applied to the data and the number of features selected by the
Anova step should be set by nested cross-validation, as they significantly
impact the prediction score.
Brain mapping with mass univariate
-----------------------------------
SVM weights are very noisy, partly because heavy smoothing is detrimental
for the prediction here. A standard analysis using mass-univariate GLM
(here permuted to have exact correction for multiple comparisons) gives a
much clearer view of the important regions.
____
"""
# Authors: Elvis Dhomatob, <elvis.dohmatob@inria.fr>, Apr. 2014
# Virgile Fritsch, <virgile.fritsch@inria.fr>, Apr 2014
# Gael Varoquaux, Apr 2014
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
from nilearn import datasets
from nilearn.input_data import NiftiMasker
n_subjects = 100 # more subjects requires more memory
### Load Oasis dataset ########################################################
oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=n_subjects)
gray_matter_map_filenames = oasis_dataset.gray_matter_maps
age = oasis_dataset.ext_vars['age'].astype(float)
### Preprocess data ###########################################################
nifti_masker = NiftiMasker(
standardize=False,
smoothing_fwhm=2,
memory='nilearn_cache') # cache options
# remove features with too low between-subject variance
gm_maps_masked = nifti_masker.fit_transform(gray_matter_map_filenames)
gm_maps_masked[:, gm_maps_masked.var(0) < 0.01] = 0.
# final masking
new_images = nifti_masker.inverse_transform(gm_maps_masked)
gm_maps_masked = nifti_masker.fit_transform(new_images)
n_samples, n_features = gm_maps_masked.shape
print("%d samples, %d features" % (n_subjects, n_features))
### Prediction with SVR #######################################################
print("ANOVA + SVR")
### Define the prediction function to be used.
# Here we use Support Vector Regression (SVR) with a linear kernel
from sklearn.svm import SVR
svr = SVR(kernel='linear')
### Dimension reduction
from sklearn.feature_selection import SelectKBest, f_regression
# Here we use a classical univariate feature selection based on F-test,
# namely Anova.
feature_selection = SelectKBest(f_regression, k=2000)
# We have our predictor (SVR), our feature selection (SelectKBest), and now,
# we can plug them together in a *pipeline* that performs the two operations
# successively:
from sklearn.pipeline import Pipeline
anova_svr = Pipeline([('anova', feature_selection), ('svr', svr)])
### Fit and predict
anova_svr.fit(gm_maps_masked, age)
age_pred = anova_svr.predict(gm_maps_masked)
### Visualization
### Look at the SVR's discriminating weights
coef = svr.coef_
# reverse feature selection
coef = feature_selection.inverse_transform(coef)
# reverse masking
weight_img = nifti_masker.inverse_transform(coef)
### Create the figure
from nilearn.plotting import plot_stat_map
bg_filename = gray_matter_map_filenames[0]
z_slice = 0
from nilearn.image.resampling import coord_transform
affine = weight_img.get_affine()
_, _, k_slice = coord_transform(0, 0, z_slice,
linalg.inv(affine))
k_slice = int(np.round(k_slice))
fig = plt.figure(figsize=(5.5, 7.5), facecolor='k')
weight_slice_data = weight_img.get_data()[..., k_slice, 0]
vmax = max(-np.min(weight_slice_data), np.max(weight_slice_data)) * 0.5
display = plot_stat_map(weight_img, bg_img=bg_filename,
display_mode='z', cut_coords=[z_slice],
figure=fig, vmax=vmax)
display.title('SVM weights', y=1.2)
### Measure accuracy with cross validation
from sklearn.cross_validation import cross_val_score
cv_scores = cross_val_score(anova_svr, gm_maps_masked, age)
### Return the corresponding mean prediction accuracy
prediction_accuracy = np.mean(cv_scores)
print("=== ANOVA ===")
print("Prediction accuracy: %f" % prediction_accuracy)
print("")
### Inference with massively univariate model #################################
print("Massively univariate model")
### Statistical inference
from nilearn.mass_univariate import permuted_ols
neg_log_pvals, t_scores_original_data, _ = permuted_ols(
age, gm_maps_masked, # + intercept as a covariate by default
n_perm=1000, # 1,000 in the interest of time; 10000 would be better
n_jobs=1) # can be changed to use more CPUs
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
signed_neg_log_pvals)
### Show results
threshold = -np.log10(0.1) # 10% corrected
fig = plt.figure(figsize=(5.5, 7.5), facecolor='k')
display = plot_stat_map(signed_neg_log_pvals_unmasked, bg_img=bg_filename,
threshold=threshold, cmap=plt.cm.RdBu_r,
display_mode='z', cut_coords=[z_slice],
figure=fig)
title = ('Negative $\log_{10}$ p-values'
'\n(Non-parametric + max-type correction)')
display.title(title, y=1.2)
signed_neg_log_pvals_slice_data = \
signed_neg_log_pvals_unmasked.get_data()[..., k_slice, 0]
n_detections = (np.abs(signed_neg_log_pvals_slice_data) > threshold).sum()
print('\n%d detections' % n_detections)
plt.show()
| 38.055901
| 79
| 0.715195
|
6f602fcf644bf954a7c97b159e6873bbe140a67e
| 1,427
|
py
|
Python
|
tests/loudness/test_loudness_ecma.py
|
TutorialMaker/MoSQITo
|
d5805d3f9748db7b49836b1c7f7a4aaf4c1cb839
|
[
"Apache-2.0"
] | null | null | null |
tests/loudness/test_loudness_ecma.py
|
TutorialMaker/MoSQITo
|
d5805d3f9748db7b49836b1c7f7a4aaf4c1cb839
|
[
"Apache-2.0"
] | null | null | null |
tests/loudness/test_loudness_ecma.py
|
TutorialMaker/MoSQITo
|
d5805d3f9748db7b49836b1c7f7a4aaf4c1cb839
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Optional package import
try:
import pytest
except ImportError:
raise RuntimeError(
"In order to perform the tests you need the 'pytest' package."
)
import numpy as np
# Local application imports
from mosqito.functions.shared.sine_wave_generator import (
sine_wave_generator,
)
from mosqito.functions.loudness_ecma.comp_loudness import comp_loudness
@pytest.mark.loudness_ecma # to skip or run only loudness ecma tests
def test_loudness_ecma():
"""Test function for the Loudness_ecma calculation"""
# Generate a 1kHz / 80 dB test tone and compute loudness
signal, _ = sine_wave_generator(
fs=48000,
t=0.25,
spl_value=80,
freq=1000,
)
n_specific, _ = comp_loudness(signal)
n_specific = np.array(n_specific)
n_tot = np.sum(n_specific, axis=0)
n_tot_mean_1kHz = np.mean(n_tot[5:])
# Generate a 5kHz / 78.7 dB test tone and compute loudness
signal, _ = sine_wave_generator(
fs=48000,
t=0.25,
spl_value=78.73977248964925,
freq=5000,
)
n_specific, _ = comp_loudness(signal)
n_specific = np.array(n_specific)
n_tot = np.sum(n_specific, axis=0)
n_tot_mean_5kHz = np.mean(n_tot[5:])
# Compare loudness
np.testing.assert_almost_equal(n_tot_mean_1kHz, n_tot_mean_5kHz)
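# A usage note (assuming the custom marker is registered in the pytest config):
#   pytest -m loudness_ecma tests/loudness/test_loudness_ecma.py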
# test the function
if __name__ == "__main__":
test_loudness_ecma()
| 26.425926
| 71
| 0.682551
|
e1309311f8f5dc30f1f81dccaad63bf387e79ab9
| 5,172
|
py
|
Python
|
pw_docgen/py/pw_docgen/docgen.py
|
silvergasp/pigweed
|
b095218bcd7064ddcc5af5f689ce235fc9e4cc91
|
[
"Apache-2.0"
] | null | null | null |
pw_docgen/py/pw_docgen/docgen.py
|
silvergasp/pigweed
|
b095218bcd7064ddcc5af5f689ce235fc9e4cc91
|
[
"Apache-2.0"
] | 1
|
2021-06-18T13:54:41.000Z
|
2021-06-18T13:54:41.000Z
|
pw_docgen/py/pw_docgen/docgen.py
|
silvergasp/pigweed
|
b095218bcd7064ddcc5af5f689ce235fc9e4cc91
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Renders HTML documentation using Sphinx."""
# TODO(frolv): Figure out a solution for installing all library dependencies
# to run Sphinx and build RTD docs.
import argparse
import collections
import json
import os
import shutil
import subprocess
import sys
from pathlib import Path
from typing import Dict, List, Tuple
SCRIPT_HEADER: str = '''
██████╗ ██╗ ██████╗ ██╗ ██╗███████╗███████╗██████╗ ██████╗ ██████╗ ██████╗███████╗
██╔══██╗██║██╔════╝ ██║ ██║██╔════╝██╔════╝██╔══██╗ ██╔══██╗██╔═══██╗██╔════╝██╔════╝
██████╔╝██║██║ ███╗██║ █╗ ██║█████╗ █████╗ ██║ ██║ ██║ ██║██║ ██║██║ ███████╗
██╔═══╝ ██║██║ ██║██║███╗██║██╔══╝ ██╔══╝ ██║ ██║ ██║ ██║██║ ██║██║ ╚════██║
██║ ██║╚██████╔╝╚███╔███╔╝███████╗███████╗██████╔╝ ██████╔╝╚██████╔╝╚██████╗███████║
╚═╝ ╚═╝ ╚═════╝ ╚══╝╚══╝ ╚══════╝╚══════╝╚═════╝ ╚═════╝ ╚═════╝ ╚═════╝╚══════╝
'''
def parse_args() -> argparse.Namespace:
"""Parses command-line arguments."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--sphinx-build-dir',
required=True,
help='Directory in which to build docs')
parser.add_argument('--conf',
required=True,
help='Path to conf.py file for Sphinx')
parser.add_argument('--gn-root',
required=True,
help='Root of the GN build tree')
parser.add_argument('--gn-gen-root',
required=True,
help='Root of the GN gen tree')
parser.add_argument('sources',
nargs='+',
help='Paths to the root level rst source files')
parser.add_argument('--out-dir',
required=True,
help='Output directory for rendered HTML docs')
parser.add_argument('--metadata',
required=True,
type=argparse.FileType('r'),
help='Metadata JSON file')
return parser.parse_args()
def build_docs(src_dir: str, dst_dir: str) -> int:
"""Runs Sphinx to render HTML documentation from a doc tree."""
# TODO(frolv): Specify the Sphinx script from a prebuilts path instead of
# requiring it in the tree.
command = [
'sphinx-build', '-W', '-b', 'html', '-d', f'{dst_dir}/help', src_dir,
f'{dst_dir}/html'
]
return subprocess.call(command)
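# For example (hypothetical paths), build_docs('out/docs_tree', 'out/docs')
# runs the following command:
#   sphinx-build -W -b html -d out/docs/help out/docs_tree out/docs/html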
def mkdir(dirname: str, exist_ok: bool = False) -> None:
"""Wrapper around os.makedirs that prints the operation."""
print(f'MKDIR {dirname}')
os.makedirs(dirname, exist_ok=exist_ok)
def copy_doc_tree(args: argparse.Namespace) -> None:
"""Copies doc source and input files into a build tree."""
def build_path(path):
"""Converts a source path to a filename in the build directory."""
if path.startswith(args.gn_root):
path = os.path.relpath(path, args.gn_root)
elif path.startswith(args.gn_gen_root):
path = os.path.relpath(path, args.gn_gen_root)
return os.path.join(args.sphinx_build_dir, path)
source_files = json.load(args.metadata)
copy_paths = [build_path(f) for f in source_files]
mkdir(args.sphinx_build_dir)
for source_path in args.sources:
os.link(source_path,
f'{args.sphinx_build_dir}/{Path(source_path).name}')
os.link(args.conf, f'{args.sphinx_build_dir}/conf.py')
# Map of directory path to list of source and destination file paths.
dirs: Dict[str, List[Tuple[str, str]]] = collections.defaultdict(list)
for source_file, copy_path in zip(source_files, copy_paths):
dirname = os.path.dirname(copy_path)
dirs[dirname].append((source_file, copy_path))
for directory, file_pairs in dirs.items():
mkdir(directory, exist_ok=True)
for src, dst in file_pairs:
os.link(src, dst)
def main() -> int:
"""Script entry point."""
args = parse_args()
# Clear out any existing docs for the target.
if os.path.exists(args.sphinx_build_dir):
shutil.rmtree(args.sphinx_build_dir)
# TODO(pwbug/164): Printing the header causes unicode problems on Windows.
# Disabled for now; re-enable once the root issue is fixed.
# print(SCRIPT_HEADER)
copy_doc_tree(args)
# Flush all script output before running Sphinx.
print('-' * 80, flush=True)
return build_docs(args.sphinx_build_dir, args.out_dir)
if __name__ == '__main__':
sys.exit(main())
| 36.422535
| 91
| 0.573859
|
3855efe1af5ac5e9cd6bb36b99dc1b32d3a74c3c
| 286
|
py
|
Python
|
skeleton/main.py
|
elsampsa/skeleton
|
30679fc787014e347e0b21a6f74193237c0ffe61
|
[
"MIT"
] | 4
|
2022-02-18T10:59:53.000Z
|
2022-03-21T10:28:32.000Z
|
skeleton/main.py
|
elsampsa/skeleton
|
30679fc787014e347e0b21a6f74193237c0ffe61
|
[
"MIT"
] | null | null | null |
skeleton/main.py
|
elsampsa/skeleton
|
30679fc787014e347e0b21a6f74193237c0ffe61
|
[
"MIT"
] | null | null | null |
"""
NAME.py : Description of the file
* Copyright: 2017 [copyright holder]
* Authors : Sampsa Riikonen
* Date : 2017
* Version : 0.1
This file is part of the skeleton library
[copy-paste your license here]
"""
import logging
def app(command, options, config):
pass
| 14.3
| 41
| 0.674825
|
c9f34c980abb5d25bb3d5c328454a26d00a5da8b
| 19,594
|
py
|
Python
|
sdk/python/pulumi_sysdig/monitor/notification_channel_email.py
|
Sysdig-Hackathon-Picasso/pulumi-sysdig
|
e25b655f6ad4a5f52678bc445be2d59f28f5bb4b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_sysdig/monitor/notification_channel_email.py
|
Sysdig-Hackathon-Picasso/pulumi-sysdig
|
e25b655f6ad4a5f52678bc445be2d59f28f5bb4b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_sysdig/monitor/notification_channel_email.py
|
Sysdig-Hackathon-Picasso/pulumi-sysdig
|
e25b655f6ad4a5f52678bc445be2d59f28f5bb4b
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-12-01T08:57:09.000Z
|
2021-12-01T08:57:09.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['NotificationChannelEmailArgs', 'NotificationChannelEmail']
@pulumi.input_type
class NotificationChannelEmailArgs:
def __init__(__self__, *,
recipients: pulumi.Input[Sequence[pulumi.Input[str]]],
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
notify_when_ok: Optional[pulumi.Input[bool]] = None,
notify_when_resolved: Optional[pulumi.Input[bool]] = None,
send_test_notification: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a NotificationChannelEmail resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] recipients: List of recipients that will receive
the message.
:param pulumi.Input[bool] enabled: If false, the channel will not emit notifications. Default is true.
:param pulumi.Input[str] name: The name of the Notification Channel. Must be unique.
:param pulumi.Input[bool] notify_when_ok: Send a new notification when the alert condition is
no longer triggered. Default is false.
:param pulumi.Input[bool] notify_when_resolved: Send a new notification when the alert is manually
acknowledged by a user. Default is false.
:param pulumi.Input[bool] send_test_notification: Send an initial test notification to check
if the notification channel is working. Default is false.
"""
pulumi.set(__self__, "recipients", recipients)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if name is not None:
pulumi.set(__self__, "name", name)
if notify_when_ok is not None:
pulumi.set(__self__, "notify_when_ok", notify_when_ok)
if notify_when_resolved is not None:
pulumi.set(__self__, "notify_when_resolved", notify_when_resolved)
if send_test_notification is not None:
pulumi.set(__self__, "send_test_notification", send_test_notification)
@property
@pulumi.getter
def recipients(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of recipients that will receive
the message.
"""
return pulumi.get(self, "recipients")
@recipients.setter
def recipients(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "recipients", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
If false, the channel will not emit notifications. Default is true.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Notification Channel. Must be unique.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="notifyWhenOk")
def notify_when_ok(self) -> Optional[pulumi.Input[bool]]:
"""
Send a new notification when the alert condition is
no longer triggered. Default is false.
"""
return pulumi.get(self, "notify_when_ok")
@notify_when_ok.setter
def notify_when_ok(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "notify_when_ok", value)
@property
@pulumi.getter(name="notifyWhenResolved")
def notify_when_resolved(self) -> Optional[pulumi.Input[bool]]:
"""
Send a new notification when the alert is manually
acknowledged by a user. Default is false.
"""
return pulumi.get(self, "notify_when_resolved")
@notify_when_resolved.setter
def notify_when_resolved(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "notify_when_resolved", value)
@property
@pulumi.getter(name="sendTestNotification")
def send_test_notification(self) -> Optional[pulumi.Input[bool]]:
"""
Send an initial test notification to check
if the notification channel is working. Default is false.
"""
return pulumi.get(self, "send_test_notification")
@send_test_notification.setter
def send_test_notification(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "send_test_notification", value)
@pulumi.input_type
class _NotificationChannelEmailState:
def __init__(__self__, *,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
notify_when_ok: Optional[pulumi.Input[bool]] = None,
notify_when_resolved: Optional[pulumi.Input[bool]] = None,
recipients: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
send_test_notification: Optional[pulumi.Input[bool]] = None,
version: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering NotificationChannelEmail resources.
:param pulumi.Input[bool] enabled: If false, the channel will not emit notifications. Default is true.
:param pulumi.Input[str] name: The name of the Notification Channel. Must be unique.
:param pulumi.Input[bool] notify_when_ok: Send a new notification when the alert condition is
no longer triggered. Default is false.
:param pulumi.Input[bool] notify_when_resolved: Send a new notification when the alert is manually
acknowledged by a user. Default is false.
:param pulumi.Input[Sequence[pulumi.Input[str]]] recipients: List of recipients that will receive
the message.
:param pulumi.Input[bool] send_test_notification: Send an initial test notification to check
if the notification channel is working. Default is false.
:param pulumi.Input[int] version: (Computed) The current version of the Notification Channel.
"""
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if name is not None:
pulumi.set(__self__, "name", name)
if notify_when_ok is not None:
pulumi.set(__self__, "notify_when_ok", notify_when_ok)
if notify_when_resolved is not None:
pulumi.set(__self__, "notify_when_resolved", notify_when_resolved)
if recipients is not None:
pulumi.set(__self__, "recipients", recipients)
if send_test_notification is not None:
pulumi.set(__self__, "send_test_notification", send_test_notification)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
If false, the channel will not emit notifications. Default is true.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Notification Channel. Must be unique.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="notifyWhenOk")
def notify_when_ok(self) -> Optional[pulumi.Input[bool]]:
"""
Send a new notification when the alert condition is
no longer triggered. Default is false.
"""
return pulumi.get(self, "notify_when_ok")
@notify_when_ok.setter
def notify_when_ok(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "notify_when_ok", value)
@property
@pulumi.getter(name="notifyWhenResolved")
def notify_when_resolved(self) -> Optional[pulumi.Input[bool]]:
"""
Send a new notification when the alert is manually
acknowledged by a user. Default is false.
"""
return pulumi.get(self, "notify_when_resolved")
@notify_when_resolved.setter
def notify_when_resolved(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "notify_when_resolved", value)
@property
@pulumi.getter
def recipients(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of recipients that will receive
the message.
"""
return pulumi.get(self, "recipients")
@recipients.setter
def recipients(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "recipients", value)
@property
@pulumi.getter(name="sendTestNotification")
def send_test_notification(self) -> Optional[pulumi.Input[bool]]:
"""
Send an initial test notification to check
if the notification channel is working. Default is false.
"""
return pulumi.get(self, "send_test_notification")
@send_test_notification.setter
def send_test_notification(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "send_test_notification", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[int]]:
"""
(Computed) The current version of the Notification Channel.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "version", value)
class NotificationChannelEmail(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
notify_when_ok: Optional[pulumi.Input[bool]] = None,
notify_when_resolved: Optional[pulumi.Input[bool]] = None,
recipients: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
send_test_notification: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
## Import
Email notification channels for Monitor can be imported using the ID, e.g.
```sh
$ pulumi import sysdig:Monitor/notificationChannelEmail:NotificationChannelEmail example 12345
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] enabled: If false, the channel will not emit notifications. Default is true.
:param pulumi.Input[str] name: The name of the Notification Channel. Must be unique.
:param pulumi.Input[bool] notify_when_ok: Send a new notification when the alert condition is
no longer triggered. Default is false.
:param pulumi.Input[bool] notify_when_resolved: Send a new notification when the alert is manually
acknowledged by a user. Default is false.
:param pulumi.Input[Sequence[pulumi.Input[str]]] recipients: List of recipients that will receive
the message.
:param pulumi.Input[bool] send_test_notification: Send an initial test notification to check
if the notification channel is working. Default is false.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NotificationChannelEmailArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
Email notification channels for Monitor can be imported using the ID, e.g.
```sh
$ pulumi import sysdig:Monitor/notificationChannelEmail:NotificationChannelEmail example 12345
```
:param str resource_name: The name of the resource.
:param NotificationChannelEmailArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NotificationChannelEmailArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
notify_when_ok: Optional[pulumi.Input[bool]] = None,
notify_when_resolved: Optional[pulumi.Input[bool]] = None,
recipients: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
send_test_notification: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NotificationChannelEmailArgs.__new__(NotificationChannelEmailArgs)
__props__.__dict__["enabled"] = enabled
__props__.__dict__["name"] = name
__props__.__dict__["notify_when_ok"] = notify_when_ok
__props__.__dict__["notify_when_resolved"] = notify_when_resolved
if recipients is None and not opts.urn:
raise TypeError("Missing required property 'recipients'")
__props__.__dict__["recipients"] = recipients
__props__.__dict__["send_test_notification"] = send_test_notification
__props__.__dict__["version"] = None
super(NotificationChannelEmail, __self__).__init__(
'sysdig:Monitor/notificationChannelEmail:NotificationChannelEmail',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
notify_when_ok: Optional[pulumi.Input[bool]] = None,
notify_when_resolved: Optional[pulumi.Input[bool]] = None,
recipients: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
send_test_notification: Optional[pulumi.Input[bool]] = None,
version: Optional[pulumi.Input[int]] = None) -> 'NotificationChannelEmail':
"""
Get an existing NotificationChannelEmail resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] enabled: If false, the channel will not emit notifications. Default is true.
:param pulumi.Input[str] name: The name of the Notification Channel. Must be unique.
:param pulumi.Input[bool] notify_when_ok: Send a new notification when the alert condition is
no longer triggered. Default is false.
:param pulumi.Input[bool] notify_when_resolved: Send a new notification when the alert is manually
acknowledged by a user. Default is false.
:param pulumi.Input[Sequence[pulumi.Input[str]]] recipients: List of recipients that will receive
the message.
:param pulumi.Input[bool] send_test_notification: Send an initial test notification to check
if the notification channel is working. Default is false.
:param pulumi.Input[int] version: (Computed) The current version of the Notification Channel.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _NotificationChannelEmailState.__new__(_NotificationChannelEmailState)
__props__.__dict__["enabled"] = enabled
__props__.__dict__["name"] = name
__props__.__dict__["notify_when_ok"] = notify_when_ok
__props__.__dict__["notify_when_resolved"] = notify_when_resolved
__props__.__dict__["recipients"] = recipients
__props__.__dict__["send_test_notification"] = send_test_notification
__props__.__dict__["version"] = version
return NotificationChannelEmail(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
If false, the channel will not emit notifications. Default is true.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Notification Channel. Must be unique.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notifyWhenOk")
def notify_when_ok(self) -> pulumi.Output[Optional[bool]]:
"""
Send a new notification when the alert condition is
no longer triggered. Default is false.
"""
return pulumi.get(self, "notify_when_ok")
@property
@pulumi.getter(name="notifyWhenResolved")
def notify_when_resolved(self) -> pulumi.Output[Optional[bool]]:
"""
Send a new notification when the alert is manually
acknowledged by a user. Default is false.
"""
return pulumi.get(self, "notify_when_resolved")
@property
@pulumi.getter
def recipients(self) -> pulumi.Output[Sequence[str]]:
"""
List of recipients that will receive
the message.
"""
return pulumi.get(self, "recipients")
@property
@pulumi.getter(name="sendTestNotification")
def send_test_notification(self) -> pulumi.Output[Optional[bool]]:
"""
Send an initial test notification to check
if the notification channel is working. Default is false.
"""
return pulumi.get(self, "send_test_notification")
@property
@pulumi.getter
def version(self) -> pulumi.Output[int]:
"""
(Computed) The current version of the Notification Channel.
"""
return pulumi.get(self, "version")
| 43.349558
| 134
| 0.651577
|
722e02a356501ba219b1ceaba7fed17d1901278a
| 354
|
py
|
Python
|
data/store.py
|
toutpuissantged/BestOfWeb
|
fa4b62d26ef7ae73bdd69c5d65f6bfab46232f10
|
[
"MIT"
] | 4
|
2020-07-25T15:36:21.000Z
|
2022-02-17T21:38:48.000Z
|
data/store.py
|
toutpuissantged/BestOfWeb
|
fa4b62d26ef7ae73bdd69c5d65f6bfab46232f10
|
[
"MIT"
] | null | null | null |
data/store.py
|
toutpuissantged/BestOfWeb
|
fa4b62d26ef7ae73bdd69c5d65f6bfab46232f10
|
[
"MIT"
] | null | null | null |
class Store():
    '''
    Used to record states.
    '''
    def __init__(self, id: int):
        data = {
            'filedir': '',
        }
        self.id: int = id
        self.data = data
        self.TabState = []
    def get_data(self):
        return self.data
    def set_data(self, newdata):
        self.data = newdata
        return 0
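# Minimal usage sketch (illustrative only; the path below is a placeholder):
if __name__ == "__main__":
    store = Store(1)
    print(store.get_data())                       # {'filedir': ''}
    store.set_data({'filedir': '/tmp/bestofweb'})
    print(store.get_data())                       # {'filedir': '/tmp/bestofweb'}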
| 16.857143
| 35
| 0.488701
|
9edddd240d329529f9c74259bc2218aad3e6cc5f
| 26,284
|
py
|
Python
|
haystack/pipeline.py
|
peterdemin/haystack
|
9ec2406a05aac3dc8afab68945a6afc2871bd2a3
|
[
"Apache-2.0"
] | null | null | null |
haystack/pipeline.py
|
peterdemin/haystack
|
9ec2406a05aac3dc8afab68945a6afc2871bd2a3
|
[
"Apache-2.0"
] | null | null | null |
haystack/pipeline.py
|
peterdemin/haystack
|
9ec2406a05aac3dc8afab68945a6afc2871bd2a3
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC
import os
from copy import deepcopy
from pathlib import Path
from typing import List, Optional, Dict
import networkx as nx
import yaml
from networkx import DiGraph
from networkx.drawing.nx_agraph import to_agraph
from haystack import BaseComponent
from haystack.generator.base import BaseGenerator
from haystack.reader.base import BaseReader
from haystack.retriever.base import BaseRetriever
from haystack.summarizer.base import BaseSummarizer
from haystack.translator.base import BaseTranslator
class Pipeline(ABC):
"""
Pipeline brings together building blocks to build a complex search pipeline with Haystack & user-defined components.
Under-the-hood, a pipeline is represented as a directed acyclic graph of component nodes. It enables custom query
flows with options to branch queries(eg, extractive qa vs keyword match query), merge candidate documents for a
Reader from multiple Retrievers, or re-ranking of candidate documents.
"""
def __init__(self, pipeline_type: str = "Query"):
self.graph = DiGraph()
if pipeline_type == "Query":
self.root_node_id = "Query"
self.graph.add_node("Query", component=RootNode())
elif pipeline_type == "Indexing":
self.root_node_id = "File"
self.graph.add_node("File", component=RootNode())
else:
raise Exception(f"pipeline_type '{pipeline_type}' is not valid. Supported types are 'Query' & 'Indexing'.")
self.pipeline_type = pipeline_type
self.components: dict = {}
def add_node(self, component, name: str, inputs: List[str]):
"""
Add a new node to the pipeline.
:param component: The object to be called when the data is passed to the node. It can be a Haystack component
(like Retriever, Reader, or Generator) or a user-defined object that implements a run()
method to process incoming data from predecessor node.
:param name: The name for the node. It must not contain any dots.
:param inputs: A list of inputs to the node. If the predecessor node has a single outgoing edge, just the name
of node is sufficient. For instance, a 'ElasticsearchRetriever' node would always output a single
edge with a list of documents. It can be represented as ["ElasticsearchRetriever"].
In cases when the predecessor node has multiple outputs, e.g., a "QueryClassifier", the output
must be specified explicitly as "QueryClassifier.output_2".
"""
self.graph.add_node(name, component=component, inputs=inputs)
if len(self.graph.nodes) == 2: # first node added; connect with Root
self.graph.add_edge(self.root_node_id, name, label="output_1")
return
for i in inputs:
if "." in i:
[input_node_name, input_edge_name] = i.split(".")
assert "output_" in input_edge_name, f"'{input_edge_name}' is not a valid edge name."
outgoing_edges_input_node = self.graph.nodes[input_node_name]["component"].outgoing_edges
assert int(input_edge_name.split("_")[1]) <= outgoing_edges_input_node, (
f"Cannot connect '{input_edge_name}' from '{input_node_name}' as it only has "
f"{outgoing_edges_input_node} outgoing edge(s)."
)
else:
outgoing_edges_input_node = self.graph.nodes[i]["component"].outgoing_edges
assert outgoing_edges_input_node == 1, (
f"Adding an edge from {i} to {name} is ambiguous as {i} has {outgoing_edges_input_node} edges. "
f"Please specify the output explicitly."
)
input_node_name = i
input_edge_name = "output_1"
self.graph.add_edge(input_node_name, name, label=input_edge_name)
def get_node(self, name: str):
"""
Get a node from the Pipeline.
:param name: The name of the node.
"""
component = self.graph.nodes[name]["component"]
return component
def set_node(self, name: str, component):
"""
Set the component for a node in the Pipeline.
:param name: The name of the node.
:param component: The component object to be set at the node.
"""
self.graph.nodes[name]["component"] = component
def run(self, **kwargs):
node_output = None
stack = {
self.root_node_id: {"pipeline_type": self.pipeline_type, **kwargs}
} # ordered dict with "node_id" -> "input" mapping that acts as a FIFO stack
nodes_executed = set()
i = -1 # the last item is popped off the stack unless it is a join node with unprocessed predecessors
while stack:
node_id = list(stack.keys())[i]
node_input = stack[node_id]
predecessors = set(self.graph.predecessors(node_id))
if predecessors.issubset(nodes_executed): # only execute if predecessor nodes are executed
nodes_executed.add(node_id)
node_output, stream_id = self.graph.nodes[node_id]["component"].run(**node_input)
stack.pop(node_id)
next_nodes = self.get_next_nodes(node_id, stream_id)
for n in next_nodes: # add successor nodes with corresponding inputs to the stack
if stack.get(n): # concatenate inputs if it's a join node
existing_input = stack[n]
if "inputs" not in existing_input.keys():
updated_input = {"inputs": [existing_input, node_output]}
else:
updated_input = existing_input["inputs"].append(node_output)
stack[n] = updated_input
else:
stack[n] = node_output
i = -1
else: # attempt executing lower nodes in the stack as `node_id` has unprocessed predecessors
i -= 1
return node_output
def get_next_nodes(self, node_id: str, stream_id: str):
current_node_edges = self.graph.edges(node_id, data=True)
next_nodes = [
next_node
for _, next_node, data in current_node_edges
if not stream_id or data["label"] == stream_id
]
return next_nodes
def draw(self, path: Path = Path("pipeline.png")):
"""
Create a Graphviz visualization of the pipeline.
:param path: the path to save the image.
"""
try:
import pygraphviz
except ImportError:
raise ImportError(f"Could not import `pygraphviz`. Please install via: \n"
f"pip install pygraphviz\n"
f"(You might need to run this first: apt install libgraphviz-dev graphviz )")
graphviz = to_agraph(self.graph)
graphviz.layout("dot")
graphviz.draw(path)
@classmethod
def load_from_yaml(cls, path: Path, pipeline_name: Optional[str] = None, overwrite_with_env_variables: bool = True):
"""
Load Pipeline from a YAML file defining the individual components and how they're tied together to form
a Pipeline. A single YAML can declare multiple Pipelines, in which case an explicit `pipeline_name` must
be passed.
Here's a sample configuration:
```yaml
| version: '0.7'
|
| components: # define all the building-blocks for Pipeline
| - name: MyReader # custom-name for the component; helpful for visualization & debugging
| type: FARMReader # Haystack Class name for the component
| params:
| no_ans_boost: -10
| model_name_or_path: deepset/roberta-base-squad2
| - name: MyESRetriever
| type: ElasticsearchRetriever
| params:
| document_store: MyDocumentStore # params can reference other components defined in the YAML
| custom_query: null
| - name: MyDocumentStore
| type: ElasticsearchDocumentStore
| params:
| index: haystack_test
|
| pipelines: # multiple Pipelines can be defined using the components from above
| - name: my_query_pipeline # a simple extractive-qa Pipeline
| nodes:
| - name: MyESRetriever
| inputs: [Query]
| - name: MyReader
| inputs: [MyESRetriever]
```
:param path: path of the YAML file.
:param pipeline_name: if the YAML contains multiple pipelines, the pipeline_name to load must be set.
:param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
to change index name param for an ElasticsearchDocumentStore, an env
variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
"""
with open(path, "r", encoding='utf-8') as stream:
data = yaml.safe_load(stream)
if pipeline_name is None:
if len(data["pipelines"]) == 1:
pipeline_config = data["pipelines"][0]
else:
raise Exception("The YAML contains multiple pipelines. Please specify the pipeline name to load.")
else:
pipelines_in_yaml = list(filter(lambda p: p["name"] == pipeline_name, data["pipelines"]))
if not pipelines_in_yaml:
raise Exception(f"Cannot find any pipeline with name '{pipeline_name}' declared in the YAML file.")
pipeline_config = pipelines_in_yaml[0]
definitions = {} # definitions of each component from the YAML.
for definition in data["components"]:
if overwrite_with_env_variables:
cls._overwrite_with_env_variables(definition)
name = definition.pop("name")
definitions[name] = definition
pipeline = cls(pipeline_type=pipeline_config["type"])
components: dict = {} # instances of component objects.
for node_config in pipeline_config["nodes"]:
name = node_config["name"]
component = cls._load_or_get_component(name=name, definitions=definitions, components=components)
pipeline.add_node(component=component, name=node_config["name"], inputs=node_config.get("inputs", []))
return pipeline
@classmethod
def _load_or_get_component(cls, name: str, definitions: dict, components: dict):
"""
Load a component from the definition or return if component object already present in `components` dict.
:param name: name of the component to load or get.
:param definitions: dict containing definitions of all components retrieved from the YAML.
:param components: dict containing component objects.
"""
if name in components.keys(): # check if component is already loaded.
return components[name]
component_params = definitions[name]["params"]
component_type = definitions[name]["type"]
for key, value in component_params.items():
# Component params can reference to other components. For instance, a Retriever can reference a
# DocumentStore defined in the YAML. All references should be recursively resolved.
if value in definitions.keys(): # check if the param value is a reference to another component.
if value not in components.keys(): # check if the referenced component is already loaded.
cls._load_or_get_component(name=value, definitions=definitions, components=components)
component_params[key] = components[value] # substitute reference (string) with the component object.
instance = BaseComponent.load_from_args(component_type=component_type, **component_params)
components[name] = instance
return instance
@classmethod
def _overwrite_with_env_variables(cls, definition: dict):
"""
Overwrite the YAML configuration with environment variables. For example, to change index name param for an
ElasticsearchDocumentStore, an env variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
:param definition: a dictionary containing the YAML definition of a component.
"""
env_prefix = f"{definition['name']}_params_".upper()
for key, value in os.environ.items():
if key.startswith(env_prefix):
param_name = key.replace(env_prefix, "").lower()
definition["params"][param_name] = value
class BaseStandardPipeline(ABC):
pipeline: Pipeline
def add_node(self, component, name: str, inputs: List[str]):
"""
Add a new node to the pipeline.
:param component: The object to be called when the data is passed to the node. It can be a Haystack component
(like Retriever, Reader, or Generator) or a user-defined object that implements a run()
method to process incoming data from predecessor node.
:param name: The name for the node. It must not contain any dots.
:param inputs: A list of inputs to the node. If the predecessor node has a single outgoing edge, just the name
of node is sufficient. For instance, a 'ElasticsearchRetriever' node would always output a single
edge with a list of documents. It can be represented as ["ElasticsearchRetriever"].
In cases when the predecessor node has multiple outputs, e.g., a "QueryClassifier", the output
must be specified explicitly as "QueryClassifier.output_2".
"""
self.pipeline.add_node(component=component, name=name, inputs=inputs)
def get_node(self, name: str):
"""
Get a node from the Pipeline.
:param name: The name of the node.
"""
component = self.pipeline.get_node(name)
return component
def set_node(self, name: str, component):
"""
Set the component for a node in the Pipeline.
:param name: The name of the node.
:param component: The component object to be set at the node.
"""
self.pipeline.set_node(name, component)
def draw(self, path: Path = Path("pipeline.png")):
"""
Create a Graphviz visualization of the pipeline.
:param path: the path to save the image.
"""
self.pipeline.draw(path)
class ExtractiveQAPipeline(BaseStandardPipeline):
def __init__(self, reader: BaseReader, retriever: BaseRetriever):
"""
Initialize a Pipeline for Extractive Question Answering.
:param reader: Reader instance
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=reader, name="Reader", inputs=["Retriever"])
def run(self, query: str, filters: Optional[Dict] = None, top_k_retriever: int = 10, top_k_reader: int = 10):
output = self.pipeline.run(
query=query, filters=filters, top_k_retriever=top_k_retriever, top_k_reader=top_k_reader
)
return output
class DocumentSearchPipeline(BaseStandardPipeline):
def __init__(self, retriever: BaseRetriever):
"""
Initialize a Pipeline for semantic document search.
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
def run(self, query: str, filters: Optional[Dict] = None, top_k_retriever: Optional[int] = None):
output = self.pipeline.run(query=query, filters=filters, top_k_retriever=top_k_retriever)
document_dicts = [doc.to_dict() for doc in output["documents"]]
output["documents"] = document_dicts
return output
class GenerativeQAPipeline(BaseStandardPipeline):
def __init__(self, generator: BaseGenerator, retriever: BaseRetriever):
"""
Initialize a Pipeline for Generative Question Answering.
:param generator: Generator instance
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=generator, name="Generator", inputs=["Retriever"])
def run(
self,
query: str,
filters: Optional[Dict] = None,
top_k_retriever: Optional[int] = None,
top_k_generator: Optional[int] = None
):
output = self.pipeline.run(
query=query, filters=filters, top_k_retriever=top_k_retriever, top_k_generator=top_k_generator
)
return output
class SearchSummarizationPipeline(BaseStandardPipeline):
def __init__(self, summarizer: BaseSummarizer, retriever: BaseRetriever):
"""
Initialize a Pipeline that retrieves documents for a query and then summarizes those documents.
:param summarizer: Summarizer instance
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=summarizer, name="Summarizer", inputs=["Retriever"])
def run(
self,
query: str,
filters: Optional[Dict] = None,
top_k_retriever: Optional[int] = None,
generate_single_summary: Optional[bool] = None,
return_in_answer_format: bool = False,
):
"""
:param query: Your search query
:param filters:
:param top_k_retriever: Number of top docs the retriever should pass to the summarizer.
The higher this value, the slower your pipeline.
:param generate_single_summary: Whether to generate single summary from all retrieved docs (True) or one per doc (False).
:param return_in_answer_format: Whether the results should be returned as documents (False) or in the answer format used in other QA pipelines (True).
With the latter, you can use this pipeline as a "drop-in replacement" for other QA pipelines.
"""
output = self.pipeline.run(
query=query, filters=filters, top_k_retriever=top_k_retriever, generate_single_summary=generate_single_summary
)
# Convert to answer format to allow "drop-in replacement" for other QA pipelines
if return_in_answer_format:
results: Dict = {"query": query, "answers": []}
docs = deepcopy(output["documents"])
for doc in docs:
cur_answer = {
"query": query,
"answer": doc.text,
"document_id": doc.id,
"context": doc.meta.pop("context"),
"score": None,
"probability": None,
"offset_start": None,
"offset_end": None,
"meta": doc.meta,
}
results["answers"].append(cur_answer)
else:
results = output
return results
class FAQPipeline(BaseStandardPipeline):
def __init__(self, retriever: BaseRetriever):
"""
Initialize a Pipeline for finding similar FAQs using semantic document search.
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
def run(self, query: str, filters: Optional[Dict] = None, top_k_retriever: Optional[int] = None):
output = self.pipeline.run(query=query, filters=filters, top_k_retriever=top_k_retriever)
documents = output["documents"]
results: Dict = {"query": query, "answers": []}
for doc in documents:
# TODO proper calibration of pseudo probabilities
cur_answer = {
"query": doc.text,
"answer": doc.meta["answer"],
"document_id": doc.id,
"context": doc.meta["answer"],
"score": doc.score,
"probability": doc.probability,
"offset_start": 0,
"offset_end": len(doc.meta["answer"]),
"meta": doc.meta,
}
results["answers"].append(cur_answer)
return results
class TranslationWrapperPipeline(BaseStandardPipeline):
"""
Takes an existing search pipeline and adds one "input translation node" after the Query and one
"output translation" node just before returning the results
"""
def __init__(
self,
input_translator: BaseTranslator,
output_translator: BaseTranslator,
pipeline: BaseStandardPipeline
):
"""
Wrap a given `pipeline` with the `input_translator` and `output_translator`.
:param input_translator: A Translator node that shall translate the input query from language A to B
:param output_translator: A Translator node that shall translate the pipeline results from language B to A
:param pipeline: The pipeline object (e.g. ExtractiveQAPipeline) you want to "wrap".
Note that pipelines with split or merge nodes are currently not supported.
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=input_translator, name="InputTranslator", inputs=["Query"])
graph = pipeline.pipeline.graph
previous_node_name = ["InputTranslator"]
# Traverse in BFS
for node in graph.nodes:
if node == "Query":
continue
            # TODO: Does not work properly for Join nodes and the answer format
if graph.nodes[node]["inputs"] and len(graph.nodes[node]["inputs"]) > 1:
raise AttributeError("Split and merge nodes are not supported currently")
self.pipeline.add_node(name=node, component=graph.nodes[node]["component"], inputs=previous_node_name)
previous_node_name = [node]
self.pipeline.add_node(component=output_translator, name="OutputTranslator", inputs=previous_node_name)
def run(self, **kwargs):
output = self.pipeline.run(**kwargs)
return output
class RootNode:
outgoing_edges = 1
def run(self, **kwargs):
return kwargs, "output_1"
class JoinDocuments:
"""
A node to join documents outputted by multiple retriever nodes.
The node allows multiple join modes:
* concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.
* merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different
`weight` & a `top_k` limit can be set. This mode can also be used for "reranking" retrieved documents.
"""
outgoing_edges = 1
def __init__(
self, join_mode: str = "concatenate", weights: Optional[List[float]] = None, top_k_join: Optional[int] = None
):
"""
:param join_mode: `concatenate` to combine documents from multiple retrievers or `merge` to aggregate scores of
individual documents.
:param weights: A node-wise list(length of list must be equal to the number of input nodes) of weights for
adjusting document scores when using the `merge` join_mode. By default, equal weight is given
to each retriever score. This param is not compatible with the `concatenate` join_mode.
:param top_k_join: Limit documents to top_k based on the resulting scores of the join.
"""
assert join_mode in ["concatenate", "merge"], f"JoinDocuments node does not support '{join_mode}' join_mode."
assert not (
weights is not None and join_mode == "concatenate"
), "Weights are not compatible with 'concatenate' join_mode."
self.join_mode = join_mode
self.weights = weights
self.top_k = top_k_join
def run(self, **kwargs):
inputs = kwargs["inputs"]
if self.join_mode == "concatenate":
document_map = {}
for input_from_node in inputs:
for doc in input_from_node["documents"]:
document_map[doc.id] = doc
elif self.join_mode == "merge":
document_map = {}
if self.weights:
weights = self.weights
else:
weights = [1/len(inputs)] * len(inputs)
for input_from_node, weight in zip(inputs, weights):
for doc in input_from_node["documents"]:
if document_map.get(doc.id): # document already exists; update score
document_map[doc.id].score += doc.score * weight
else: # add the document in map
document_map[doc.id] = deepcopy(doc)
document_map[doc.id].score *= weight
else:
raise Exception(f"Invalid join_mode: {self.join_mode}")
documents = sorted(document_map.values(), key=lambda d: d.score, reverse=True)
if self.top_k:
documents = documents[: self.top_k]
output = {"query": inputs[0]["query"], "documents": documents}
return output, "output_1"
| 44.70068
| 158
| 0.619959
|
f2b64b448eb064f5e46475ac9cb3bc5f6e9dc238
| 5,907
|
py
|
Python
|
utils/scripts/dce_caffe2_model.py
|
jackm321/glow
|
756673683dccbf579b641dade7169f37032bba92
|
[
"Apache-2.0"
] | 2
|
2021-08-02T22:39:33.000Z
|
2021-11-17T11:00:17.000Z
|
utils/scripts/dce_caffe2_model.py
|
bertmaher/glow
|
7677eeaef263de4610a747069980f038f5b90b70
|
[
"Apache-2.0"
] | null | null | null |
utils/scripts/dce_caffe2_model.py
|
bertmaher/glow
|
7677eeaef263de4610a747069980f038f5b90b70
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from caffe2.proto import caffe2_pb2
from google.protobuf import text_format
import argparse
def read_model_from_file(path):
m = caffe2_pb2.NetDef()
with open(path, "rb") as f:
if ".pbtxt" in path:
text_format.Merge(f.read(), m)
else:
m.ParseFromString(f.read())
return m
def write_model_to_file(path, m):
    with open(path, "wb") as f:
        if ".pbtxt" in path:
            # MessageToString returns str; encode so it can be written in binary mode
            f.write(text_format.MessageToString(m).encode())
        else:
            f.write(m.SerializeToString())
# Perform dead code elimination on predict_net removing any nodes that aren't
# used for producing values in predict_net.external_output. Remove any nodes in
# init_net that produce values that are no longer needed by predict_net.
def dce(init_net, predict_net):
num_predict_net_ops_original = len(predict_net.op)
num_predict_net_inputs_original = len(predict_net.external_input)
# Find the set of tensors used in the computation of the outputs.
live_predict_net_op_outputs = set(predict_net.external_output)
prev_num_live_predict_net_op_outputs = len(live_predict_net_op_outputs)
while True:
for op in predict_net.op:
for output_tensor in op.output:
if output_tensor in live_predict_net_op_outputs:
for input_tensor in op.input:
live_predict_net_op_outputs.add(input_tensor)
num_live_predict_net_op_outputs = len(live_predict_net_op_outputs)
if num_live_predict_net_op_outputs == prev_num_live_predict_net_op_outputs:
break
prev_num_live_predict_net_op_outputs = num_live_predict_net_op_outputs
# Find the ops that are required to compute the tensors used during
# computation of the outputs.
live_predict_net_ops = []
for op in predict_net.op:
for output_tensor in op.output:
if output_tensor in live_predict_net_op_outputs:
live_predict_net_ops.append(op)
# Delete all unused ops in predict_net.
num_predict_net_ops_eliminated = len(predict_net.op) - len(live_predict_net_ops)
del predict_net.op[:]
predict_net.op.extend(live_predict_net_ops)
# Find the set of all used inputs tensors in predict_net.
live_predict_net_op_inputs = set()
for op in predict_net.op:
for input_tensor in op.input:
live_predict_net_op_inputs.add(input_tensor)
# Find the set of used external_inputs.
live_predict_net_external_inputs = set()
for external_input in predict_net.external_input:
if external_input in live_predict_net_op_inputs:
live_predict_net_external_inputs.add(external_input)
# Delete unused external_inputs in predict_net.
num_predict_net_inputs_eliminated = len(predict_net.external_input) - len(live_predict_net_external_inputs)
del predict_net.external_input[:]
predict_net.external_input.extend(live_predict_net_external_inputs)
print("predict_net ops eliminated: {}/{}".format(num_predict_net_ops_eliminated, num_predict_net_ops_original))
print("predict_net external_inputs eliminated: {}/{}".format(num_predict_net_inputs_eliminated, num_predict_net_inputs_original))
# Everything below pertains to removing unused outputs in the init_net,
# if no init net was provided then stop here.
    if init_net is None:
return
num_init_net_ops_original = len(init_net.op)
# Find the set of init_net ops with outputs needed by the init_net
live_init_net_ops = []
for op in init_net.op:
for output_tensor in op.output:
if output_tensor in live_predict_net_external_inputs:
live_init_net_ops.append(op)
# Eliminate dead init_net ops
num_init_net_ops_eliminated = len(init_net.op) - len(live_init_net_ops)
del init_net.op[:]
init_net.op.extend(live_init_net_ops)
# Update init_net external_outputs
live_init_net_op_outputs = set()
for op in init_net.op:
for output_tensor in op.output:
live_init_net_op_outputs.add(output_tensor)
live_init_net_external_outputs = set()
for output_tensor in init_net.external_output:
if output_tensor in live_init_net_op_outputs:
live_init_net_external_outputs.add(output_tensor)
del init_net.external_output[:]
init_net.external_output.extend(live_init_net_external_outputs)
print("init_net ops eliminated: {}/{}".format(num_init_net_ops_eliminated, num_init_net_ops_original))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Caffe2 model dead code elimination")
parser.add_argument('--input_init_net_path', type=str)
parser.add_argument('--input_predict_net_path', type=str, required=True)
parser.add_argument('--output_init_net_path', type=str)
parser.add_argument('--output_predict_net_path', type=str, required=True)
args = parser.parse_args()
predict_net = read_model_from_file(args.input_predict_net_path)
init_net = None
    if args.input_init_net_path is not None:
init_net = read_model_from_file(args.input_init_net_path)
dce(init_net, predict_net)
write_model_to_file(args.output_predict_net_path, predict_net)
    if args.output_init_net_path is not None:
write_model_to_file(args.output_init_net_path, init_net)
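# Example invocation (file names below are placeholders):
#   python dce_caffe2_model.py \
#       --input_init_net_path init_net.pb \
#       --input_predict_net_path predict_net.pb \
#       --output_init_net_path init_net_dce.pb \
#       --output_predict_net_path predict_net_dce.pb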
| 40.458904
| 133
| 0.735568
|
5e9fe067977a252899a4349a86250674ec312a4b
| 34,827
|
py
|
Python
|
ocp_resources/resource.py
|
akalenyu/openshift-python-wrapper
|
f403350b32b556ef15480e14f24cd40afcb8bbc5
|
[
"Apache-2.0"
] | null | null | null |
ocp_resources/resource.py
|
akalenyu/openshift-python-wrapper
|
f403350b32b556ef15480e14f24cd40afcb8bbc5
|
[
"Apache-2.0"
] | null | null | null |
ocp_resources/resource.py
|
akalenyu/openshift-python-wrapper
|
f403350b32b556ef15480e14f24cd40afcb8bbc5
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import os
import re
from distutils.version import Version
import kubernetes
import urllib3
import yaml
from openshift.dynamic import DynamicClient
from openshift.dynamic.exceptions import (
InternalServerError,
NotFoundError,
ServerTimeoutError,
)
from urllib3.exceptions import ProtocolError
from ocp_resources.utils import TimeoutExpiredError, TimeoutSampler
DEFAULT_CLUSTER_RETRY_EXCEPTIONS = {
ConnectionAbortedError: [],
ConnectionResetError: [],
InternalServerError: ["etcdserver: leader changed"],
ServerTimeoutError: [],
}
LOGGER = logging.getLogger(__name__)
TIMEOUT = 240
MAX_SUPPORTED_API_VERSION = "v1"
def _collect_instance_data(directory, resource_object):
with open(os.path.join(directory, f"{resource_object.name}.yaml"), "w") as fd:
fd.write(resource_object.instance.to_str())
def _collect_pod_logs(dyn_client, resource_item, **kwargs):
kube_v1_api = kubernetes.client.CoreV1Api(api_client=dyn_client.client)
return kube_v1_api.read_namespaced_pod_log(
name=resource_item.metadata.name,
namespace=resource_item.metadata.namespace,
**kwargs,
)
def _collect_virt_launcher_data(dyn_client, directory, resource_object):
if resource_object.kind == "VirtualMachineInstance":
for pod in dyn_client.resources.get(kind="Pod").get().items:
pod_name = pod.metadata.name
pod_instance = dyn_client.resources.get(
api_version=pod.apiVersion, kind=pod.kind
).get(name=pod_name, namespace=pod.metadata.namespace)
if pod_name.startswith("virt-launcher"):
with open(os.path.join(directory, f"{pod_name}.log"), "w") as fd:
fd.write(
_collect_pod_logs(
dyn_client=dyn_client,
resource_item=pod,
container="compute",
)
)
with open(os.path.join(directory, f"{pod_name}.yaml"), "w") as fd:
fd.write(pod_instance.to_str())
def _collect_data_volume_data(dyn_client, directory, resource_object):
if resource_object.kind == "DataVolume":
cdi_worker_prefixes = ("importer", "cdi-upload")
for pod in dyn_client.resources.get(kind="Pod").get().items:
pod_name = pod.metadata.name
pod_instance = dyn_client.resources.get(
api_version=pod.apiVersion, kind=pod.kind
).get(name=pod_name, namespace=pod.metadata.namespace)
if pod_name.startswith(cdi_worker_prefixes) or pod_name.endswith(
"source-pod"
):
with open(os.path.join(directory, f"{pod_name}.log"), "w") as fd:
fd.write(
_collect_pod_logs(dyn_client=dyn_client, resource_item=pod)
)
with open(os.path.join(directory, f"{pod_name}.yaml"), "w") as fd:
fd.write(pod_instance.to_str())
def _collect_data(resource_object, dyn_client=None):
dyn_client = (
dyn_client
if dyn_client
else DynamicClient(kubernetes.config.new_client_from_config())
)
directory = os.environ.get("TEST_DIR_LOG")
_collect_instance_data(directory=directory, resource_object=resource_object)
_collect_virt_launcher_data(
dyn_client=dyn_client, directory=directory, resource_object=resource_object
)
_collect_data_volume_data(
dyn_client=dyn_client, directory=directory, resource_object=resource_object
)
def _find_supported_resource(dyn_client, api_group, kind):
results = dyn_client.resources.search(group=api_group, kind=kind)
sorted_results = sorted(
results, key=lambda result: KubeAPIVersion(result.api_version), reverse=True
)
for result in sorted_results:
if KubeAPIVersion(result.api_version) <= KubeAPIVersion(
MAX_SUPPORTED_API_VERSION
):
return result
def _get_api_version(dyn_client, api_group, kind):
# Returns api_group/api_version
res = _find_supported_resource(
dyn_client=dyn_client, api_group=api_group, kind=kind
)
if not res:
log = f"Couldn't find {kind} in {api_group} api group"
LOGGER.warning(log)
raise NotImplementedError(log)
return res.group_version
def sub_resource_level(current_class, owner_class, parent_class):
# return the name of the last class in MRO list that is not one of base
# classes; otherwise return None
for class_iterator in reversed(
list(
class_iterator
for class_iterator in current_class.mro()
if class_iterator not in owner_class.mro()
and issubclass(class_iterator, parent_class)
)
):
return class_iterator.__name__
class KubeAPIVersion(Version):
"""
Implement the Kubernetes API versioning scheme from
https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-versioning
"""
component_re = re.compile(r"(\d+ | [a-z]+)", re.VERBOSE)
def __init__(self, vstring=None):
self.vstring = vstring
self.version = None
super().__init__(vstring=vstring)
def parse(self, vstring):
components = [x for x in self.component_re.split(vstring) if x]
for i, obj in enumerate(components):
try:
components[i] = int(obj)
except ValueError:
pass
errmsg = f"version '{vstring}' does not conform to kubernetes api versioning guidelines"
if (
len(components) not in (2, 4)
or components[0] != "v"
or not isinstance(components[1], int)
):
raise ValueError(errmsg)
if len(components) == 4 and (
components[2] not in ("alpha", "beta") or not isinstance(components[3], int)
):
raise ValueError(errmsg)
self.version = components
def __str__(self):
return self.vstring
def __repr__(self):
return "KubeAPIVersion ('{0}')".format(str(self))
def _cmp(self, other):
if isinstance(other, str):
other = KubeAPIVersion(vstring=other)
myver = self.version
otherver = other.version
for ver in myver, otherver:
if len(ver) == 2:
ver.extend(["zeta", 9999])
if myver == otherver:
return 0
if myver < otherver:
return -1
if myver > otherver:
return 1
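# Illustrative orderings under the scheme implemented above
# (alpha < beta < GA within a major version; GA < the next major's alpha):
#   KubeAPIVersion("v1alpha1") < KubeAPIVersion("v1beta1") < KubeAPIVersion("v1")
#   KubeAPIVersion("v1") < KubeAPIVersion("v2alpha1")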
class classproperty(object): # noqa: N801
def __init__(self, func):
self.func = func
def __get__(self, obj, owner):
return self.func(owner)
class ValueMismatch(Exception):
"""
    Raised when a value doesn't match the class value
"""
class Resource(object):
"""
Base class for API resources
"""
api_group = None
api_version = None
singular_name = None
class Status:
SUCCEEDED = "Succeeded"
FAILED = "Failed"
DELETING = "Deleting"
DEPLOYED = "Deployed"
PENDING = "Pending"
COMPLETED = "Completed"
RUNNING = "Running"
TERMINATING = "Terminating"
class Condition:
UPGRADEABLE = "Upgradeable"
AVAILABLE = "Available"
DEGRADED = "Degraded"
PROGRESSING = "Progressing"
CREATED = "Created"
RECONCILE_COMPLETE = "ReconcileComplete"
READY = "Ready"
class Status:
TRUE = "True"
FALSE = "False"
UNKNOWN = "Unknown"
class Phase:
INSTALL_READY = "InstallReady"
SUCCEEDED = "Succeeded"
class Reason:
ALL_REQUIREMENTS_MET = "AllRequirementsMet"
INSTALL_SUCCEEDED = "InstallSucceeded"
class Interface:
class State:
UP = "up"
DOWN = "down"
ABSENT = "absent"
class ApiGroup:
ADMISSIONREGISTRATION_K8S_IO = "admissionregistration.k8s.io"
APIEXTENSIONS_K8S_IO = "apiextensions.k8s.io"
APIREGISTRATION_K8S_IO = "apiregistration.k8s.io"
APP_KUBERNETES_IO = "app.kubernetes.io"
APPS = "apps"
CDI_KUBEVIRT_IO = "cdi.kubevirt.io"
CONFIG_OPENSHIFT_IO = "config.openshift.io"
CONSOLE_OPENSHIFT_IO = "console.openshift.io"
FORKLIFT_KONVEYOR_IO = "forklift.konveyor.io"
HCO_KUBEVIRT_IO = "hco.kubevirt.io"
HOSTPATHPROVISIONER_KUBEVIRT_IO = "hostpathprovisioner.kubevirt.io"
IMAGE_OPENSHIFT_IO = "image.openshift.io"
K8S_CNI_CNCF_IO = "k8s.cni.cncf.io"
K8S_V1_CNI_CNCF_IO = "k8s.v1.cni.cncf.io"
KUBERNETES_IO = "kubernetes.io"
KUBEVIRT_IO = "kubevirt.io"
KUBEVIRT_KUBEVIRT_IO = "kubevirt.kubevirt.io"
LITMUS_IO = "litmuschaos.io"
MACHINE_OPENSHIFT_IO = "machine.openshift.io"
MACHINECONFIGURATION_OPENSHIFT_IO = "machineconfiguration.openshift.io"
MAISTRA_IO = "maistra.io"
MONITORING_COREOS_COM = "monitoring.coreos.com"
NETWORKADDONSOPERATOR_NETWORK_KUBEVIRT_IO = (
"networkaddonsoperator.network.kubevirt.io"
)
NETWORKING_ISTIO_IO = "networking.istio.io"
NETWORKING_K8S_IO = "networking.k8s.io"
NMSTATE_IO = "nmstate.io"
NODEMAINTENANCE_KUBEVIRT_IO = "nodemaintenance.kubevirt.io"
OPERATOR_OPENSHIFT_IO = "operator.openshift.io"
OPERATORS_COREOS_COM = "operators.coreos.com"
OS_TEMPLATE_KUBEVIRT_IO = "os.template.kubevirt.io"
PACKAGES_OPERATORS_COREOS_COM = "packages.operators.coreos.com"
PROJECT_OPENSHIFT_IO = "project.openshift.io"
RBAC_AUTHORIZATION_K8S_IO = "rbac.authorization.k8s.io"
RIPSAW_CLOUDBULLDOZER_IO = "ripsaw.cloudbulldozer.io"
ROUTE_OPENSHIFT_IO = "route.openshift.io"
SCHEDULING_K8S_IO = "scheduling.k8s.io"
SECURITY_OPENSHIFT_IO = "security.openshift.io"
SNAPSHOT_STORAGE_K8S_IO = "snapshot.storage.k8s.io"
SNAPSHOT_KUBEVIRT_IO = "snapshot.kubevirt.io"
SRIOVNETWORK_OPENSHIFT_IO = "sriovnetwork.openshift.io"
SSP_KUBEVIRT_IO = "ssp.kubevirt.io"
STORAGE_K8S_IO = "storage.k8s.io"
STORAGECLASS_KUBERNETES_IO = "storageclass.kubernetes.io"
TEMPLATE_KUBEVIRT_IO = "template.kubevirt.io"
TEMPLATE_OPENSHIFT_IO = "template.openshift.io"
UPLOAD_CDI_KUBEVIRT_IO = "upload.cdi.kubevirt.io"
V2V_KUBEVIRT_IO = "v2v.kubevirt.io"
VM_KUBEVIRT_IO = "vm.kubevirt.io"
class ApiVersion:
V1 = "v1"
V1BETA1 = "v1beta1"
V1ALPHA1 = "v1alpha1"
V1ALPHA3 = "v1alpha3"
def __init__(
self,
name=None,
client=None,
teardown=True,
timeout=TIMEOUT,
privileged_client=None,
yaml_file=None,
):
"""
        Create an API resource
Args:
name (str): Resource name
"""
if not self.api_group and not self.api_version:
raise NotImplementedError(
"Subclasses of Resource require self.api_group or self.api_version to be defined"
)
self.namespace = None
self.name = name
self.client = client
self.privileged_client = privileged_client
self.yaml_file = yaml_file
self.resource_dict = None # Filled in case yaml_file is not None
if not (self.name or self.yaml_file):
raise ValueError("name or yaml file is required")
if not self.client:
try:
self.client = DynamicClient(
client=kubernetes.config.new_client_from_config()
)
except (
kubernetes.config.ConfigException,
urllib3.exceptions.MaxRetryError,
):
LOGGER.error(
"You need to be logged into a cluster or have $KUBECONFIG env configured"
)
raise
if not self.api_version:
self.api_version = _get_api_version(
dyn_client=self.client, api_group=self.api_group, kind=self.kind
)
self.teardown = teardown
self.timeout = timeout
@classproperty
def kind(cls): # noqa: N805
return sub_resource_level(cls, NamespacedResource, Resource)
def _base_body(self):
"""
Generate resource dict from yaml if self.yaml_file else return base resource dict.
Returns:
dict: Resource dict.
"""
if self.yaml_file:
with open(self.yaml_file, "r") as stream:
self.resource_dict = yaml.safe_load(stream=stream.read())
self.name = self.resource_dict["metadata"]["name"]
return self.resource_dict
return {
"apiVersion": self.api_version,
"kind": self.kind,
"metadata": {"name": self.name},
}
def to_dict(self):
"""
Generate intended dict representation of the resource.
"""
return self._base_body()
def __enter__(self):
return self.deploy()
def __exit__(self, exception_type, exception_value, traceback):
if self.teardown:
self.clean_up()
def deploy(self):
self.create()
return self
def clean_up(self):
if os.environ.get("CNV_TEST_COLLECT_LOGS", "0") == "1":
try:
_collect_data(resource_object=self)
except Exception as exception_:
LOGGER.warning(exception_)
data = self.to_dict()
LOGGER.info(f"Deleting {data}")
self.delete(wait=True, timeout=self.timeout)
@classmethod
def _prepare_resources(cls, dyn_client, singular_name, *args, **kwargs):
if not cls.api_version:
cls.api_version = _get_api_version(
dyn_client=dyn_client, api_group=cls.api_group, kind=cls.kind
)
get_kwargs = {"singular_name": singular_name} if singular_name else {}
return dyn_client.resources.get(
kind=cls.kind, api_version=cls.api_version, **get_kwargs
).get(*args, **kwargs)
def api(self, **kwargs):
"""
Get resource API
Keyword Args:
pretty
_continue
include_uninitialized
field_selector
label_selector
limit
resource_version
timeout_seconds
watch
async_req
Returns:
Resource: Resource object.
"""
if self.singular_name:
kwargs["singular_name"] = self.singular_name
return self.client.resources.get(
api_version=self.api_version, kind=self.kind, **kwargs
)
def wait(self, timeout=TIMEOUT, sleep=1):
"""
Wait for resource
Args:
timeout (int): Time to wait for the resource.
sleep (int): Time to wait between retries
Raises:
TimeoutExpiredError: If resource not exists.
"""
LOGGER.info(f"Wait until {self.kind} {self.name} is created")
samples = TimeoutSampler(
wait_timeout=timeout,
sleep=sleep,
exceptions=(ProtocolError, NotFoundError),
func=lambda: self.exists,
)
for sample in samples:
if sample:
return
def wait_deleted(self, timeout=TIMEOUT):
"""
Wait until resource is deleted
Args:
timeout (int): Time to wait for the resource.
Raises:
TimeoutExpiredError: If resource still exists.
"""
LOGGER.info(f"Wait until {self.kind} {self.name} is deleted")
return self.client_wait_deleted(timeout=timeout)
@property
def exists(self):
"""
Whether self exists on the server
"""
try:
return self.instance
except NotFoundError:
return None
def client_wait_deleted(self, timeout):
"""
        Client-side wait until the resource is deleted
Args:
timeout (int): Time to wait for the resource.
Raises:
TimeoutExpiredError: If resource still exists.
"""
samples = TimeoutSampler(
wait_timeout=timeout, sleep=1, func=lambda: self.exists
)
for sample in samples:
if not sample:
return
def wait_for_status(self, status, timeout=TIMEOUT, stop_status=None, sleep=1):
"""
Wait for resource to be in status
Args:
status (str): Expected status.
timeout (int): Time to wait for the resource.
            stop_status (str): Status which should stop the wait and fail it.
        Raises:
            TimeoutExpiredError: If the resource is not in the desired status.
"""
stop_status = stop_status if stop_status else self.Status.FAILED
LOGGER.info(f"Wait for {self.kind} {self.name} status to be {status}")
samples = TimeoutSampler(
wait_timeout=timeout,
sleep=sleep,
exceptions=ProtocolError,
func=self.api().get,
field_selector=f"metadata.name=={self.name}",
namespace=self.namespace,
)
current_status = None
try:
for sample in samples:
if sample.items:
sample_status = sample.items[0].status
if sample_status:
current_status = sample_status.phase
if current_status == status:
return
if current_status == stop_status:
raise TimeoutExpiredError(
f"Status of {self.kind} {self.name} is {current_status}"
)
except TimeoutExpiredError:
if current_status:
LOGGER.error(f"Status of {self.kind} {self.name} is {current_status}")
raise
def create(self, body=None, wait=False):
"""
Create resource.
Args:
body (dict): Resource data to create.
wait (bool) : True to wait for resource status.
Returns:
bool: True if create succeeded, False otherwise.
Raises:
ValueMismatch: When body value doesn't match class value
"""
data = self.to_dict()
if body:
kind = body["kind"]
name = body.get("name")
api_version = body["apiVersion"]
            if kind != self.kind:
                raise ValueMismatch(f"{kind} != {self.kind}")
            if name and name != self.name:
                raise ValueMismatch(f"{name} != {self.name}")
            if api_version != self.api_version:
                raise ValueMismatch(f"{api_version} != {self.api_version}")
data.update(body)
LOGGER.info(f"Posting {data}")
LOGGER.info(f"Create {self.kind} {self.name}")
res = self.api().create(body=data, namespace=self.namespace)
if wait and res:
return self.wait()
return res
def delete(self, wait=False, timeout=TIMEOUT):
resource_list = self.api()
try:
res = resource_list.delete(name=self.name, namespace=self.namespace)
except NotFoundError:
return False
LOGGER.info(f"Delete {self.kind} {self.name}")
if wait and res:
return self.wait_deleted(timeout=timeout)
return res
@property
def status(self):
"""
Get resource status
Status: Running, Scheduling, Pending, Unknown, CrashLoopBackOff
Returns:
str: Status
"""
LOGGER.info(f"Get {self.kind} {self.name} status")
return self.instance.status.phase
def update(self, resource_dict):
"""
Update resource with resource dict
Args:
resource_dict: Resource dictionary
"""
LOGGER.info(f"Update {self.kind} {self.name}: {resource_dict}")
self.api().patch(
body=resource_dict,
namespace=self.namespace,
content_type="application/merge-patch+json",
)
def update_replace(self, resource_dict):
"""
Replace resource metadata.
Use this to remove existing field. (update() will only update existing fields)
"""
LOGGER.info(f"Replace {self.kind} {self.name}: {resource_dict}")
self.api().replace(body=resource_dict, name=self.name, namespace=self.namespace)
@staticmethod
def _retry_cluster_exceptions(func):
sampler = TimeoutSampler(
wait_timeout=10,
sleep=1,
func=func,
exceptions_dict=DEFAULT_CLUSTER_RETRY_EXCEPTIONS,
print_log=False,
)
for sample in sampler:
return sample
@classmethod
def get(cls, dyn_client, singular_name=None, *args, **kwargs):
"""
Get resources
Args:
dyn_client (DynamicClient): Open connection to remote cluster
singular_name (str): Resource kind (in lowercase), in use where we have multiple matches for resource
Returns:
generator: Generator of Resources of cls.kind
"""
def _get():
_resources = cls._prepare_resources(
dyn_client=dyn_client, singular_name=singular_name, *args, **kwargs
)
try:
for resource_field in _resources.items:
yield cls(client=dyn_client, name=resource_field.metadata.name)
except TypeError:
yield cls(client=dyn_client, name=_resources.metadata.name)
return Resource._retry_cluster_exceptions(func=_get)
@property
def instance(self):
"""
Get resource instance
Returns:
openshift.dynamic.client.ResourceInstance
"""
def _instance():
return self.api().get(name=self.name)
return self._retry_cluster_exceptions(func=_instance)
@property
def labels(self):
"""
Method to get dict of labels for this resource
Returns:
labels(dict): dict labels
"""
return self.instance["metadata"]["labels"]
def wait_for_condition(self, condition, status, timeout=300):
"""
        Wait for Pod condition to be in the desired status.
Args:
condition (str): Condition to query.
status (str): Expected condition status.
timeout (int): Time to wait for the resource.
Raises:
            TimeoutExpiredError: If the Pod condition is not in the desired status.
"""
LOGGER.info(
f"Wait for {self.kind}/{self.name}'s '{condition}' condition to be '{status}'"
)
samples = TimeoutSampler(
wait_timeout=timeout,
sleep=1,
exceptions=ProtocolError,
func=self.api().get,
field_selector=f"metadata.name=={self.name}",
namespace=self.namespace,
)
for sample in samples:
if (
sample.items
and sample.items[0].get("status")
and sample.items[0].status.get("conditions")
):
sample_conditions = sample.items[0].status.conditions
if sample_conditions:
for cond in sample_conditions:
if cond.type == condition and cond.status == status:
return
def api_request(self, method, action, url, **params):
"""
Handle API requests to resource.
Args:
method (str): Request method (GET/PUT etc.).
action (str): Action to perform (stop/start/guestosinfo etc.).
url (str): URL of resource.
Returns:
data(dict): response data
"""
client = self.privileged_client or self.client
response = client.client.request(
method=method,
url=f"{url}/{action}",
headers=self.client.configuration.api_key,
**params,
)
try:
return json.loads(response.data)
except json.decoder.JSONDecodeError:
return response.data
def wait_for_conditions(self):
samples = TimeoutSampler(
wait_timeout=30, sleep=1, func=lambda: self.instance.status.conditions
)
for sample in samples:
if sample:
return
class NamespacedResource(Resource):
"""
Namespaced object, inherited from Resource.
"""
def __init__(
self,
name=None,
namespace=None,
client=None,
teardown=True,
timeout=TIMEOUT,
privileged_client=None,
yaml_file=None,
):
super().__init__(
name=name,
client=client,
teardown=teardown,
timeout=timeout,
privileged_client=privileged_client,
yaml_file=yaml_file,
)
self.namespace = namespace
if not (self.name and self.namespace) and not self.yaml_file:
raise ValueError("name and namespace or yaml file is required")
@classmethod
def get(cls, dyn_client, singular_name=None, *args, **kwargs):
"""
Get resources
Args:
dyn_client (DynamicClient): Open connection to remote cluster
singular_name (str): Resource kind (in lowercase), in use where we have multiple matches for resource
Returns:
generator: Generator of Resources of cls.kind
"""
_resources = cls._prepare_resources(
dyn_client=dyn_client, singular_name=singular_name, *args, **kwargs
)
try:
for resource_field in _resources.items:
yield cls(
client=dyn_client,
name=resource_field.metadata.name,
namespace=resource_field.metadata.namespace,
)
except TypeError:
yield cls(
client=dyn_client,
name=_resources.metadata.name,
namespace=_resources.metadata.namespace,
)
@property
def instance(self):
"""
Get resource instance
Returns:
openshift.dynamic.client.ResourceInstance
"""
return self.api().get(name=self.name, namespace=self.namespace)
def _base_body(self):
res = super(NamespacedResource, self)._base_body()
if self.yaml_file:
self.namespace = self.resource_dict["metadata"].get(
"namespace", self.namespace
)
if not self.namespace:
raise ValueError("Namespace must be passed or specified in the YAML file.")
if not self.yaml_file:
res["metadata"]["namespace"] = self.namespace
return res
def to_dict(self):
return self._base_body()
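# Illustrative pattern (the subclass below is hypothetical): resources are typically
# defined by subclassing and are often used as context managers so that teardown
# happens automatically on exit.
#
#   class ConfigMap(NamespacedResource):
#       api_version = NamespacedResource.ApiVersion.V1
#
#   with ConfigMap(name="example", namespace="default") as cm:
#       print(cm.instance.metadata.uid)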
class ResourceEditor(object):
def __init__(self, patches, action="update", user_backups=None):
"""
Args:
patches (dict): {<Resource object>: <yaml patch as dict>}
                e.g. {<Resource object>:
                {'metadata': {'labels': {'label1': 'true'}}}}
Allows for temporary edits to cluster resources for tests. During
__enter__ user-specified patches (see args) are applied and old values
are backed up, and during __exit__ these backups are used to reverse
all changes made.
Flow:
1) apply patches
2) automation runs
3) edits made to resources are reversed
May also be used without being treated as a context manager by
calling the methods update() and restore() after instantiation.
*** the DynamicClient object used to get the resources must not be
using an unprivileged_user; use default_client or similar instead.***
"""
self._patches = patches
self.action = action
self.user_backups = user_backups
self._backups = {}
@property
def backups(self):
"""Returns a dict {<Resource object>: <backup_as_dict>}
The backup dict kept for each resource edited"""
return self._backups
@property
def patches(self):
"""Returns the patches dict provided in the constructor"""
return self._patches
def update(self, backup_resources=False):
"""Prepares backup dicts (where necessary) and applies patches"""
# prepare update dicts and backups
resource_to_patch = []
if backup_resources:
LOGGER.info("ResourceEdit: Backing up old data")
if self.user_backups:
resource_to_patch = self._patches
self._backups = self.user_backups
else:
for resource, update in self._patches.items():
namespace = None
# prepare backup
try:
original_resource_dict = resource.instance.to_dict()
except NotFoundError:
# Some resource cannot be found by name.
# happens in 'ServiceMonitor' resource.
original_resource_dict = list(
resource.get(
dyn_client=resource.client,
field_selector=f"metadata.name={resource.name}",
)
)[0].to_dict()
namespace = update.get("metadata", {}).get("namespace")
backup = self._create_backup(
original=original_resource_dict, patch=update
)
if namespace:
# Add namespace to metadata for restore.
backup["metadata"]["namespace"] = namespace
# no need to back up if no changes have been made
# if action is 'replace' we need to update even if no backup (replace update can be empty )
if backup or self.action == "replace":
resource_to_patch.append(resource)
self._backups[resource] = backup
else:
LOGGER.warning(
f"ResourceEdit: no diff found in patch for "
f"{resource.name} -- skipping"
)
if not resource_to_patch:
return
else:
resource_to_patch = self._patches
patches_to_apply = {
resource: self._patches[resource] for resource in resource_to_patch
}
# apply changes
self._apply_patches(
patches=patches_to_apply, action_text="Updating", action=self.action
)
def restore(self):
self._apply_patches(
patches=self._backups, action_text="Restoring", action=self.action
)
def __enter__(self):
self.update(backup_resources=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# restore backups
self.restore()
@staticmethod
def _create_backup(original, patch):
"""
Args:
original (dict*): source of values to back up if necessary
patch (dict*): 'new' values; keys needn't necessarily all be
contained in original
        Returns a dict containing the fields in original that differ from patch.
        Places None for fields in patch that don't appear in
        original (because that's how the API knows to remove those fields from
        the yaml).
* the first call will be with both of these arguments as dicts but
this will not necessarily be the case during recursion"""
# when both are dicts, get the diff (recursively if need be)
if isinstance(original, dict) and isinstance(patch, dict):
diff_dict = {}
for key, value in patch.items():
if key not in original:
diff_dict[key] = None
continue
# recursive call
key_diff = ResourceEditor._create_backup(
original=original[key], patch=value
)
if key_diff is not None:
diff_dict[key] = key_diff
return diff_dict
# for one or more non-dict values, just compare them
if patch != original:
return original
else:
# this return value will be received by key_diff above
return None
@staticmethod
def _apply_patches(patches, action_text, action):
"""
Updates provided Resource objects with provided yaml patches
Args:
patches (dict): {<Resource object>: <yaml patch as dict>}
action_text (str):
"ResourceEdit <action_text> for resource <resource name>"
will be printed for each resource; see below
"""
for resource, patch in patches.items():
LOGGER.info(
f"ResourceEdits: {action_text} data for "
f"resource {resource.kind} {resource.name}"
)
# add name to patch
if "metadata" not in patch:
patch["metadata"] = {}
# the api requires this field to be present in a yaml patch for
# some resource kinds even if it is not changed
if "name" not in patch["metadata"]:
patch["metadata"]["name"] = resource.name
if action == "update":
resource.update(resource_dict=patch) # update the resource
if action == "replace":
if "metadata" not in patch:
patch["metadata"] = {}
patch["metadata"]["name"] = resource.name
patch["metadata"]["namespace"] = resource.namespace
patch["metadata"][
"resourceVersion"
] = resource.instance.metadata.resourceVersion
patch["kind"] = resource.kind
patch["apiVersion"] = resource.api_version
resource.update_replace(
resource_dict=patch
) # replace the resource metadata
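# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows the context-manager flow described in ResourceEditor's docstring: patches are
# applied on __enter__ and the backed-up values are restored on __exit__. The
# `my_pod` argument and the label key/value are assumptions for illustration only.
def _example_resource_editor_usage(my_pod):
    """Temporarily add a label to an existing resource, then revert it."""
    with ResourceEditor(patches={my_pod: {"metadata": {"labels": {"under-test": "true"}}}}):
        ...  # while inside the block the label is present on the cluster object
    # on exit the backup is re-applied, so the temporary label edit is reverted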
| 32.824694
| 113
| 0.579148
|
1a7dede70d7b426a24d562cedb0ca8c823ef1224
| 3,197
|
py
|
Python
|
roulette/evaluation/utils.py
|
miararoy/bliz
|
0d197b6790ccfa0c71682abf551aa4df83a9b589
|
[
"MIT"
] | null | null | null |
roulette/evaluation/utils.py
|
miararoy/bliz
|
0d197b6790ccfa0c71682abf551aa4df83a9b589
|
[
"MIT"
] | null | null | null |
roulette/evaluation/utils.py
|
miararoy/bliz
|
0d197b6790ccfa0c71682abf551aa4df83a9b589
|
[
"MIT"
] | null | null | null |
import numpy as np
def validate_multiple_lists_length(*lists) -> bool:
"""Validates that a list of lists is of the same length
Args:
lists(list): a list of lists to be checked
    Returns:
        b(bool): True if all lists have the same length, False otherwise.
"""
list_len = -1
for l in lists:
try:
iter(l)
except BaseException:
return False
if list_len == -1: # first list
list_len = len(l)
else:
if list_len != len(l):
return False
return True
def close_enough(a, b, precision=3) -> bool:
    """Checks whether two array-likes are close enough to each other
Args:
a: first argument
b: second argument
precision(int): the decimal level of accuracy, 3 == 0.001
Returns:
        out(bool): True if a and b are close enough at the given precision, False otherwise.
"""
def _check_precision(x, y):
if round(x, precision) == round(y, precision):
return True
else:
return False
try:
if a == b:
return True
except ValueError: # comparing numpy.ndarray for example
pass
try:
iter(a)
iter(b)
except BaseException: # objects are not iterable == scalar
return _check_precision(a, b)
out = True
for i, j in zip(
a,
b): # if we got here we need to iterate over all args in the array
out = out and _check_precision(i, j)
return out
def _sample_to_bin(a, bins):
    """Return the bin number of each sample
    Args:
        a: array-like of samples
        bins: list of bin edges
    Returns:
        l: list of the same length as 'a' containing the bin number of each value
"""
bins_list = []
for x in a:
x_bin, _ = np.histogram(x, bins)
bins_list.append(x_bin.argmax())
return bins_list
def samples_to_bin_numbers(*lists, bins):
"""return the bin number of each sample
Args:
lists: list of array like, same length
bins: list of bin edges
Raises:
ValueError: if lists not of same length
    Returns:
lists_out: list same length as *lists containing the bin number instead of values
"""
    if validate_multiple_lists_length(*lists):  # unpack so each list is checked individually
lists_out = []
for l in lists:
lists_out.append(_sample_to_bin(l, bins))
return tuple(lists_out)
else:
raise ValueError("lists should all have the same length!")
def parse_ndarray_as_float_list(arr: np.ndarray) -> list:
"""returns a list of python floats rather than numpy.ndarray of float32
Args:
arr(numpy.ndarray): an array of float32
Returns:
lst(list): list of float
"""
return arr.tolist()
def is_binary(a) -> bool:
    if len(np.asarray(a).shape) > 1:  # reject multi-dimensional input
        return False
    if (len(np.bincount(a)) <= 2  # only the values 0 and/or 1 occur
        and (np.max(a) == 1 or np.max(a) == 0)
        and (np.min(a) == 0 or np.min(a) == 1)):
return True
else:
return False
__all__ = [
"samples_to_bin_numbers", "_sample_to_bin", "close_enough",
"validate_multiple_lists_length"
]
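# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Exercises the helpers above on tiny hand-made inputs; the concrete values and the
# bin edges are assumptions chosen only to make the behaviour visible.
def _example_usage():
    a = [0.1004, 0.52, 0.90]
    b = [0.1002, 0.52, 0.90]
    assert close_enough(a, b, precision=3)  # 0.1004 and 0.1002 both round to 0.100
    bins = [0.0, 0.5, 1.0]  # two bins: [0.0, 0.5) and [0.5, 1.0]
    binned_a, binned_b = samples_to_bin_numbers(a, b, bins=bins)
    assert binned_a == [0, 1, 1] and binned_b == [0, 1, 1]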
| 24.976563
| 89
| 0.587426
|
e5942f9713961c1a2f927ed1e5be000437de758b
| 10,906
|
py
|
Python
|
homeassistant/components/xiaomi_aqara.py
|
mfrueh/home-assistant
|
5d64628b5bf4713016883282fd54de9c7d5089d0
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/xiaomi_aqara.py
|
mfrueh/home-assistant
|
5d64628b5bf4713016883282fd54de9c7d5089d0
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/xiaomi_aqara.py
|
mfrueh/home-assistant
|
5d64628b5bf4713016883282fd54de9c7d5089d0
|
[
"Apache-2.0"
] | null | null | null |
"""
Support for Xiaomi Gateways.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/xiaomi_aqara/
"""
import asyncio
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_XIAOMI_GW
from homeassistant.const import (
ATTR_BATTERY_LEVEL, CONF_HOST, CONF_MAC, CONF_PORT,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.util import slugify
REQUIREMENTS = ['PyXiaomiGateway==0.8.1']
_LOGGER = logging.getLogger(__name__)
ATTR_GW_MAC = 'gw_mac'
ATTR_RINGTONE_ID = 'ringtone_id'
ATTR_RINGTONE_VOL = 'ringtone_vol'
ATTR_DEVICE_ID = 'device_id'
CONF_DISCOVERY_RETRY = 'discovery_retry'
CONF_GATEWAYS = 'gateways'
CONF_INTERFACE = 'interface'
CONF_KEY = 'key'
DOMAIN = 'xiaomi_aqara'
PY_XIAOMI_GATEWAY = "xiaomi_gw"
TIME_TILL_UNAVAILABLE = timedelta(minutes=150)
SERVICE_PLAY_RINGTONE = 'play_ringtone'
SERVICE_STOP_RINGTONE = 'stop_ringtone'
SERVICE_ADD_DEVICE = 'add_device'
SERVICE_REMOVE_DEVICE = 'remove_device'
GW_MAC = vol.All(
cv.string,
lambda value: value.replace(':', '').lower(),
vol.Length(min=12, max=12)
)
SERVICE_SCHEMA_PLAY_RINGTONE = vol.Schema({
vol.Required(ATTR_RINGTONE_ID):
vol.All(vol.Coerce(int), vol.NotIn([9, 14, 15, 16, 17, 18, 19])),
vol.Optional(ATTR_RINGTONE_VOL):
vol.All(vol.Coerce(int), vol.Clamp(min=0, max=100))
})
SERVICE_SCHEMA_REMOVE_DEVICE = vol.Schema({
vol.Required(ATTR_DEVICE_ID):
vol.All(cv.string, vol.Length(min=14, max=14))
})
GATEWAY_CONFIG = vol.Schema({
vol.Optional(CONF_MAC, default=None): vol.Any(GW_MAC, None),
vol.Optional(CONF_KEY):
vol.All(cv.string, vol.Length(min=16, max=16)),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=9898): cv.port,
})
def _fix_conf_defaults(config):
"""Update some configuration defaults."""
config['sid'] = config.pop(CONF_MAC, None)
if config.get(CONF_KEY) is None:
_LOGGER.warning(
'Key is not provided for gateway %s. Controlling the gateway '
'will not be possible', config['sid'])
if config.get(CONF_HOST) is None:
config.pop(CONF_PORT)
return config
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_GATEWAYS, default={}):
vol.All(cv.ensure_list, [GATEWAY_CONFIG], [_fix_conf_defaults]),
vol.Optional(CONF_INTERFACE, default='any'): cv.string,
vol.Optional(CONF_DISCOVERY_RETRY, default=3): cv.positive_int
})
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the Xiaomi component."""
gateways = []
interface = 'any'
discovery_retry = 3
if DOMAIN in config:
gateways = config[DOMAIN][CONF_GATEWAYS]
interface = config[DOMAIN][CONF_INTERFACE]
discovery_retry = config[DOMAIN][CONF_DISCOVERY_RETRY]
@asyncio.coroutine
def xiaomi_gw_discovered(service, discovery_info):
"""Perform action when Xiaomi Gateway device(s) has been found."""
# We don't need to do anything here, the purpose of Home Assistant's
# discovery service is to just trigger loading of this
# component, and then its own discovery process kicks in.
discovery.listen(hass, SERVICE_XIAOMI_GW, xiaomi_gw_discovered)
from xiaomi_gateway import XiaomiGatewayDiscovery
xiaomi = hass.data[PY_XIAOMI_GATEWAY] = XiaomiGatewayDiscovery(
hass.add_job, gateways, interface)
_LOGGER.debug("Expecting %s gateways", len(gateways))
for k in range(discovery_retry):
_LOGGER.info("Discovering Xiaomi Gateways (Try %s)", k + 1)
xiaomi.discover_gateways()
if len(xiaomi.gateways) >= len(gateways):
break
if not xiaomi.gateways:
_LOGGER.error("No gateway discovered")
return False
xiaomi.listen()
_LOGGER.debug("Gateways discovered. Listening for broadcasts")
for component in ['binary_sensor', 'sensor', 'switch', 'light', 'cover']:
discovery.load_platform(hass, component, DOMAIN, {}, config)
def stop_xiaomi(event):
"""Stop Xiaomi Socket."""
_LOGGER.info("Shutting down Xiaomi Hub")
xiaomi.stop_listen()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)
def play_ringtone_service(call):
"""Service to play ringtone through Gateway."""
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {'mid': ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if ring_vol is not None:
kwargs['vol'] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs)
def stop_ringtone_service(call):
"""Service to stop playing ringtone on Gateway."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000)
def add_device_service(call):
"""Service to add a new sub-device within the next 30 seconds."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission='yes')
hass.components.persistent_notification.async_create(
'Join permission enabled for 30 seconds! '
'Please press the pairing button of the new device once.',
title='Xiaomi Aqara Gateway')
def remove_device_service(call):
"""Service to remove a sub-device from the gateway."""
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id)
gateway_only_schema = _add_gateway_to_schema(xiaomi, vol.Schema({}))
hass.services.async_register(
DOMAIN, SERVICE_PLAY_RINGTONE, play_ringtone_service,
schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_PLAY_RINGTONE))
hass.services.async_register(
DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service,
schema=gateway_only_schema)
hass.services.async_register(
DOMAIN, SERVICE_ADD_DEVICE, add_device_service,
schema=gateway_only_schema)
hass.services.async_register(
DOMAIN, SERVICE_REMOVE_DEVICE, remove_device_service,
schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_REMOVE_DEVICE))
return True
class XiaomiDevice(Entity):
"""Representation a base Xiaomi device."""
def __init__(self, device, device_type, xiaomi_hub):
"""Initialize the Xiaomi device."""
self._state = None
self._is_available = True
self._sid = device['sid']
self._name = '{}_{}'.format(device_type, self._sid)
self._type = device_type
self._write_to_hub = xiaomi_hub.write_to_hub
self._get_from_hub = xiaomi_hub.get_from_hub
self._device_state_attributes = {}
self._remove_unavailability_tracker = None
xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
self.parse_data(device['data'], device['raw_data'])
self.parse_voltage(device['data'])
if hasattr(self, '_data_key') \
and self._data_key: # pylint: disable=no-member
self._unique_id = slugify("{}-{}".format(
self._data_key, # pylint: disable=no-member
self._sid))
else:
self._unique_id = slugify("{}-{}".format(self._type, self._sid))
def _add_push_data_job(self, *args):
self.hass.add_job(self.push_data, *args)
@asyncio.coroutine
def async_added_to_hass(self):
"""Start unavailability tracking."""
self._async_track_unavailable()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return an unique ID."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def should_poll(self):
"""Return the polling state. No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._device_state_attributes
@callback
def _async_set_unavailable(self, now):
"""Set state to UNAVAILABLE."""
self._remove_unavailability_tracker = None
self._is_available = False
self.async_schedule_update_ha_state()
@callback
def _async_track_unavailable(self):
if self._remove_unavailability_tracker:
self._remove_unavailability_tracker()
self._remove_unavailability_tracker = async_track_point_in_utc_time(
self.hass, self._async_set_unavailable,
utcnow() + TIME_TILL_UNAVAILABLE)
if not self._is_available:
self._is_available = True
return True
return False
@callback
def push_data(self, data, raw_data):
"""Push from Hub."""
_LOGGER.debug("PUSH >> %s: %s", self, data)
was_unavailable = self._async_track_unavailable()
is_data = self.parse_data(data, raw_data)
is_voltage = self.parse_voltage(data)
if is_data or is_voltage or was_unavailable:
self.async_schedule_update_ha_state()
def parse_voltage(self, data):
"""Parse battery level data sent by gateway."""
if 'voltage' not in data:
return False
max_volt = 3300
min_volt = 2800
voltage = data['voltage']
voltage = min(voltage, max_volt)
voltage = max(voltage, min_volt)
percent = ((voltage - min_volt) / (max_volt - min_volt)) * 100
self._device_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
return True
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
raise NotImplementedError()
def _add_gateway_to_schema(xiaomi, schema):
"""Extend a voluptuous schema with a gateway validator."""
def gateway(sid):
"""Convert sid to a gateway."""
sid = str(sid).replace(':', '').lower()
for gateway in xiaomi.gateways.values():
if gateway.sid == sid:
return gateway
raise vol.Invalid('Unknown gateway sid {}'.format(sid))
gateways = list(xiaomi.gateways.values())
kwargs = {}
# If the user has only 1 gateway, make it the default for services.
if len(gateways) == 1:
kwargs['default'] = gateways[0]
return schema.extend({
vol.Required(ATTR_GW_MAC, **kwargs): gateway
})
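# --- Hedged illustration (added for clarity; not part of the original component) ---
# Shows what _fix_conf_defaults() does to a single gateway entry: the MAC moves to
# 'sid' and the port is dropped when no host is configured. The MAC and key values
# below are made-up examples.
def _example_fix_conf_defaults():
    raw = {CONF_MAC: '34ce008bfc5b', CONF_KEY: 'q1w2e3r4t5y6u7i8', CONF_PORT: 9898}
    fixed = _fix_conf_defaults(dict(raw))
    assert fixed['sid'] == '34ce008bfc5b'
    assert CONF_PORT not in fixed  # port only makes sense together with CONF_HOST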
| 32.849398
| 77
| 0.673941
|
345ddf1f426831d67b76b734466164c048c9e3a3
| 4,791
|
py
|
Python
|
example/sigmorphon2021-shared-tasks/augment.py
|
cuichenx/neural-transducer
|
cdd3d0ecfa5cc5acec04eafa2de1ff15fbac3f9f
|
[
"MIT"
] | 49
|
2019-06-29T21:19:59.000Z
|
2022-03-25T15:28:19.000Z
|
example/sigmorphon2021-shared-tasks/augment.py
|
cuichenx/neural-transducer
|
cdd3d0ecfa5cc5acec04eafa2de1ff15fbac3f9f
|
[
"MIT"
] | 5
|
2020-03-15T12:51:44.000Z
|
2021-09-08T17:45:45.000Z
|
example/sigmorphon2021-shared-tasks/augment.py
|
cuichenx/neural-transducer
|
cdd3d0ecfa5cc5acec04eafa2de1ff15fbac3f9f
|
[
"MIT"
] | 15
|
2019-09-23T21:35:16.000Z
|
2022-03-30T01:53:22.000Z
|
"""
Borrowed from https://github.com/antonisa/inflection
"""
import argparse
import codecs
import os
import sys
from random import choice, random
from typing import Any, List
sys.path.append("src")
import align # noqa: E402
def read_data(filename):
with codecs.open(filename, "r", "utf-8") as inp:
lines = inp.readlines()
inputs = []
outputs = []
tags = []
for line in lines:
line = line.strip().split("\t")
if line:
inputs.append(list(line[0].strip()))
outputs.append(list(line[1].strip()))
tags.append(line[2].strip().split(";"))
return inputs, outputs, tags
def find_good_range(a, b):
mask = [(a[i] == b[i] and a[i] != u" ") for i in range(len(a))]
if sum(mask) == 0:
return []
    # Sometimes the alignment is off-by-one
b = " " + b
mask = [(a[i] == b[i] and a[i] != u" ") for i in range(len(a))]
ranges = []
prev = False
for i, k in enumerate(mask):
if k and prev:
prev = True
elif k and not prev:
start = i
prev = True
elif prev and not k:
end = i
ranges.append((start, end))
prev = False
elif not prev and not k:
prev = False
if prev:
ranges.append((start, i + 1))
ranges = [c for c in ranges if c[1] - c[0] > 2]
return ranges
def augment(inputs, outputs, tags, characters):
temp = [("".join(inputs[i]), "".join(outputs[i])) for i in range(len(outputs))]
aligned = align.Aligner(temp, align_symbol=" ").alignedpairs
vocab = list(characters)
try:
vocab.remove(u" ")
except ValueError:
pass
new_inputs = []
new_outputs = []
new_tags = []
for k, item in enumerate(aligned):
# print(''.join(inputs[k]) + '\t' + ''.join(outputs[k]))
i, o = item[0], item[1]
good_range = find_good_range(i, o)
# print(good_range)
if good_range:
new_i, new_o = list(i), list(o)
for r in good_range:
s = r[0]
e = r[1]
if e - s > 5: # arbitrary value
s += 1
e -= 1
for j in range(s, e):
if random() > 0.5: # arbitrary value
nc = choice(vocab)
new_i[j] = nc
new_o[j] = nc
new_i1 = [
c
for idx, c in enumerate(new_i)
if (c.strip() or (new_o[idx] == " " and new_i[idx] == " "))
]
new_o1 = [
c
for idx, c in enumerate(new_o)
if (c.strip() or (new_i[idx] == " " and new_o[idx] == " "))
]
new_inputs.append(new_i1)
new_outputs.append(new_o1)
new_tags.append(tags[k])
else:
new_inputs.append([])
new_outputs.append([])
new_tags.append([])
return new_inputs, new_outputs, new_tags
def get_chars(words):
flat_list = [char for word in words for char in word]
return list(set(flat_list))
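# --- Hedged illustration (added; not part of the original script) ---
# get_chars() flattens the word lists into a character vocabulary (as an unordered
# list); the toy words below are assumptions for illustration.
def _example_get_chars():
    vocab = get_chars([list("walk"), list("walked")])
    assert sorted(vocab) == ['a', 'd', 'e', 'k', 'l', 'w']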
parser = argparse.ArgumentParser()
parser.add_argument("datapath", help="path to data", type=str)
parser.add_argument("language", help="language", type=str)
parser.add_argument(
"--examples",
help="number of hallucinated examples to create (def: 10000)",
default=10000,
type=int,
)
parser.add_argument(
"--use_dev",
help="whether to use the development set (def: False)",
action="store_true",
)
args = parser.parse_args()
DATA_PATH = args.datapath
L2 = args.language
LOW_PATH = os.path.join(DATA_PATH, L2 + ".train")
DEV_PATH = os.path.join(DATA_PATH, L2 + ".dev")
N = args.examples
usedev = args.use_dev
lowi, lowo, lowt = read_data(LOW_PATH)
devi, devo, devt = read_data(DEV_PATH)
if usedev:
vocab = get_chars(lowi + lowo + devi + devo)
else:
vocab = get_chars(lowi + lowo)
i: List[Any] = []
o: List[Any] = []
t: List[Any] = []
while len(i) < N:
if usedev:
# Do augmentation also using examples from dev
ii, oo, tt = augment(devi + lowi, devo + lowo, devt + lowt, vocab)
else:
# Just augment the training set
ii, oo, tt = augment(lowi, lowo, lowt, vocab)
ii = [c for c in ii if c]
oo = [c for c in oo if c]
tt = [c for c in tt if c]
i += ii
o += oo
t += tt
if len(ii) == 0:
break
# Wait is this needed?
i = [c for c in i if c]
o = [c for c in o if c]
t = [c for c in t if c]
with codecs.open(os.path.join(DATA_PATH, L2 + ".hall"), "w", "utf-8") as outp:
for k in range(min(N, len(i))):
outp.write("".join(i[k]) + "\t" + "".join(o[k]) + "\t" + ";".join(t[k]) + "\n")
| 27.693642
| 87
| 0.526821
|
f53f43d2d39fb72623ddbc3419ded3b5380f7fa7
| 6,012
|
py
|
Python
|
interview-questions/harness-to-test-binaries.py
|
davemungo/various
|
ed7c17f8b75a27fc59b0a5cad6125d64d00cd3ce
|
[
"MIT"
] | 1
|
2020-01-19T01:21:56.000Z
|
2020-01-19T01:21:56.000Z
|
interview-questions/harness-to-test-binaries.py
|
davemungo/various
|
ed7c17f8b75a27fc59b0a5cad6125d64d00cd3ce
|
[
"MIT"
] | null | null | null |
interview-questions/harness-to-test-binaries.py
|
davemungo/various
|
ed7c17f8b75a27fc59b0a5cad6125d64d00cd3ce
|
[
"MIT"
] | 1
|
2021-07-02T14:40:01.000Z
|
2021-07-02T14:40:01.000Z
|
''' Explore functionality of some unknown binary files.
Binary files are known to have some errors; construct a minimal test to find
the errors in each file.
Verify expected functionality
-- entry for each word in the input file
-- line number for each word
-- line numbers ascend
-- one entry per line for a given word
Tests below highlight errors found during exploratory testing phase
'''
import os
import re
import hashlib
import subprocess
def get_binary_list():
executables = []
for filename in os.listdir('.'):
if re.search(r'.bin$', filename):
executables.append(filename)
return executables
def get_unique_filename(data):
return hashlib.md5(data.encode('utf-8')).hexdigest()
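# --- Hedged illustration (added; not part of the original harness) ---
# get_unique_filename() is deterministic: the same input text always maps to the
# same 32-character md5 hex digest, so a rerun reuses the same temporary file name.
def _example_unique_filename():
    name = get_unique_filename("aaa\nbbb\n")
    assert len(name) == 32
    assert name == get_unique_filename("aaa\nbbb\n")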
def get_output(binary_file, input_file):
cmd_line = "./" + binary_file + " < " + input_file
process_output = str(subprocess.check_output(cmd_line, shell=True), 'utf-8')
output_lines = process_output.split('\n')
# Last item is emptyline from output
if output_lines[-1] == '':
output_lines.pop()
return(output_lines)
def create_input_file(test_input_data):
test_file = get_unique_filename(test_input_data)
try:
test_input_file = open(test_file, 'w')
except IOError as e:
print('ERROR: {} - {}'.format(e.filename, e.strerror))
else:
try:
test_input_file.write(test_input_data)
except IOError as e:
print('ERROR: {} - {}'.format(e.filename, e.strerror))
test_input_file.close()
return(test_file)
def clean_up(filename):
try:
os.remove(filename)
except OSError as e:
print('ERROR: {} - {}'.format(e.filename, e.strerror))
def test_read_all_lines(binary_to_test):
# Setup
test_input = '''aaa
bbb
ccc
'''
input_for_test = create_input_file(test_input)
program_output = get_output(binary_to_test, input_for_test)
# Test
expected_length = len(test_input.split('\n')) - 1
if len(program_output) != expected_length:
return "Fail {}, {} != {}, output: {}, input: {}".format(
"read_all_lines",
len(program_output),
expected_length,
program_output,
input_for_test)
# Don't cleanup failed tests
clean_up(input_for_test)
return "PASS read all lines"
def test_no_final_line_return(binary_to_test):
# Setup
test_input = '''aaa
ccc '''
input_for_test = create_input_file(test_input)
program_output = get_output(binary_to_test, input_for_test)
# Test
expected_length = len(test_input.split('\n'))
if len(program_output) != expected_length:
return "Fail {}, {} != {}, output: {}, input: {}".format(
"no final line return",
len(program_output),
expected_length,
program_output,
input_for_test)
# Don't cleanup failed tests
clean_up(input_for_test)
return "PASS no final line return"
def test_multiple_words(binary_to_test):
# Setup
test_input = '''aaa
bbb bbb
ccc
ddd bbb
aaa bbb fff eee
'''
input_for_test = create_input_file(test_input)
program_output = get_output(binary_to_test, input_for_test)
# Test
expected_lengths = {"aaa":2, "bbb":3, "ccc":1, "ddd":1, "eee":1, "fff":1}
output_dict = {}
for entry in program_output:
word_lines = entry.split()
output_dict[word_lines[0]] = len(word_lines) -1
failed_list = []
for key, value in output_dict.items():
if value != expected_lengths[key]:
failed_list.append((key, value, expected_lengths[key]))
if len(failed_list) > 0:
return "Fail {} output: {}, input: {}".format("multiple words",
failed_list,
input_for_test)
# Don't cleanup failed tests
clean_up(input_for_test)
return "PASS multiple words"
def test_alphabetical_order(binary_to_test):
# Setup
test_input = '''bbb
abc
aaa
abb
ddd
'''
input_for_test = create_input_file(test_input)
program_output = get_output(binary_to_test, input_for_test)
# Test
expected_order = ["aaa", "abb", "abc", "bbb", "ddd"]
output_order = []
for entry in program_output:
word_lines = entry.split()
output_order.append(word_lines[0])
if output_order != expected_order:
        return "Fail {} output: {}, input: {}".format("alphabetical order",
output_order,
input_for_test)
# Don't cleanup failed tests
clean_up(input_for_test)
return "PASS alphabetical order"
########################
### RUN IF MAIN ###
########################
if __name__ == "__main__":
binaries = get_binary_list()
#binaries = ['main1.bin']
for b in binaries:
print("\nTESTING {}".format(b))
print(test_read_all_lines(b))
print(test_no_final_line_return(b))
print(test_multiple_words(b))
print(test_alphabetical_order(b))
# EOF
| 29.043478
| 81
| 0.526281
|
e0ac6a98868a6d9e89f39f94a357860189336cf5
| 3,694
|
py
|
Python
|
tag_counter/console_app.py
|
armansyz/python_task
|
5d2d884ef350440d6264a1ff30c829c994d430fa
|
[
"MIT"
] | null | null | null |
tag_counter/console_app.py
|
armansyz/python_task
|
5d2d884ef350440d6264a1ff30c829c994d430fa
|
[
"MIT"
] | 1
|
2021-06-02T03:07:29.000Z
|
2021-06-02T03:07:29.000Z
|
tag_counter/console_app.py
|
armansyz/tag_counter
|
a569f805e8aa78af01969da193044f4c6072b194
|
[
"MIT"
] | null | null | null |
from tag_counter.process_url import count_tags, url_format, url_name
from tag_counter.db_manager import TagManager
from loguru import logger
import pickle
import argparse
import yaml
def get_insert(argsa):
"""Get tags from db or insert if they do not exist"""
t = TagManager()
logger.info('Attempting to create tables if they do not exist')
t.create_tables()
url = None
logger.info('Scanning yaml synonyms for the key:{}'.format(argsa.get))
try:
syn = yaml.load(open("tag_counter/synonyms.yaml"), yaml.SafeLoader)
url = syn[argsa.get] if (argsa.get in syn) else argsa.get
except KeyError as ke:
logger.error('Wrong key! \n Exception:{}'.format(ke))
except FileNotFoundError as fe:
logger.error('File was not found \n Exception:{}'.format(fe))
except Exception as exc:
logger.error('Something bad happened \n Exception:{}'.format(exc))
logger.info('Attempting to retrieve data from db')
s = t.get_tag(full_url=url_format(url)).first()
if not s:
try:
logger.info('No such tag info in the database')
logger.info('Attempting to process the tags')
tag_data = count_tags(url_format(url)).items()
logger.info('Attempting to insert tags into db')
            t.insert_tag(url_name(url),
                         url_format(url),
                         pickle.dumps(list(tag_data)))
for tag, count in tag_data:
print(tag, ':', count)
except Exception as e:
logger.error('Error has occurred \n Exception:{}'.format(e))
else:
logger.info('Tags found in the database')
for tag, count in pickle.loads(s.tag_data):
print(tag, ':', count)
def view_data(argsb):
"""Retrieve the info on tags from the database"""
t = TagManager()
logger.info('Attempting to create tables if they do not exist')
t.create_tables()
url = None
logger.info('Scanning yaml synonyms for the key:{}'.format(argsb.view))
try:
syn = yaml.load(open("tag_counter/synonyms.yaml"), yaml.SafeLoader)
url = syn[argsb.view] if (argsb.view in syn) else argsb.view
except KeyError as ke:
logger.error('Wrong key! \n Exception:{}'.format(ke))
except FileNotFoundError as fe:
logger.error('File was not found \n Exception:{}'.format(fe))
except Exception as exc:
logger.error('Something bad happened \n Exception:{}'.format(exc))
logger.info('Attempting to retrieve data from db')
s = t.get_tag(full_url=url_format(url)).first()
if not s:
logger.info('No such tag info in the database')
print('no data on {}'.format(url))
else:
logger.info('Tags found in the database')
print("id:{} \nsite_name:{} \nfull_url:{} \nquery_date:{} \ntag_data:".
format(s.id, s.site_name, s.full_url, s.query_date))
for tag, count in pickle.loads(s.tag_data):
print(tag, ':', count)
def console():
"""Console app"""
parser = argparse.ArgumentParser(description="This is a tag counter app")
parser.add_argument('-g',
'--get',
metavar='url', default="", action='store', help='count html tags in a web page')
parser.add_argument('-vw',
'--view',
metavar='url', default="", action='store', help='view data stored in db')
args = parser.parse_args()
if args.get:
logger.info('Received a get command')
get_insert(args)
elif args.view:
logger.info('Received a view command')
view_data(args)
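# --- Hedged illustration (added; not part of the original module) ---
# Both handlers above first translate the CLI argument through
# tag_counter/synonyms.yaml, so a short key can stand in for a full URL. The file
# contents below are assumed purely for illustration:
#
#   # tag_counter/synonyms.yaml
#   example: https://example.com
#   docs: https://docs.python.org
#
# With such a file, running the console app with `--get example` would count the
# tags of https://example.com; an argument that is not a key is used as the URL itself.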
| 36.215686
| 104
| 0.610179
|
c9e839656fc1989292ce06bbd70a198e99f14715
| 878
|
py
|
Python
|
mrfetcher/__init__.py
|
benlast/mrfetcher
|
1a2c701b6b01ae14d03f7b4ccf505abbf2c947c5
|
[
"MIT"
] | null | null | null |
mrfetcher/__init__.py
|
benlast/mrfetcher
|
1a2c701b6b01ae14d03f7b4ccf505abbf2c947c5
|
[
"MIT"
] | null | null | null |
mrfetcher/__init__.py
|
benlast/mrfetcher
|
1a2c701b6b01ae14d03f7b4ccf505abbf2c947c5
|
[
"MIT"
] | null | null | null |
import collections
import os
import sys
from .forwarder import forward
ENV_VARS = [
('POP3_HOST', None),
('POP3_PORT', 110),
('POP3_USER', None),
('POP3_PASSWORD', None),
('SMTP_HOST', 'smtp.gmail.com'),
('SMTP_PORT', 465),
('SMTP_USER', None),
('SMTP_PASSWORD', None),
('TARGET_EMAIL_ADDRESS', None)
]
def main():
values = collections.OrderedDict(
(varname, os.getenv(varname, default))
for varname, default in ENV_VARS
)
missing = [
varname
for varname, value in values.iteritems()
if not value
]
if missing:
sys.stderr.write(
'Missing environment variable(s): %s\n' % ', '.join(missing)
)
sys.exit(1)
forward(
**dict(
(varname.lower(), value)
for varname, value in values.iteritems()
)
)
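# --- Hedged usage note (added; not part of the original module) ---
# main() is driven entirely by the environment variables listed in ENV_VARS; a
# minimal invocation could look like the following, assuming something calls
# main() (all values below are placeholders):
#
#   POP3_HOST=pop.example.com POP3_USER=me POP3_PASSWORD=secret \
#   SMTP_USER=me@gmail.com SMTP_PASSWORD=app-password \
#   TARGET_EMAIL_ADDRESS=dest@example.com \
#   python -c "import mrfetcher; mrfetcher.main()"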
| 19.954545
| 72
| 0.555809
|
26a010934871017c04b9c14c35320bd23b7f78ad
| 704
|
py
|
Python
|
tools/model_downloader/arguments.py
|
russell-cooks/video-analytics-serving
|
517645a1555b66147879de786606dd13ec3afd5b
|
[
"BSD-3-Clause"
] | null | null | null |
tools/model_downloader/arguments.py
|
russell-cooks/video-analytics-serving
|
517645a1555b66147879de786606dd13ec3afd5b
|
[
"BSD-3-Clause"
] | null | null | null |
tools/model_downloader/arguments.py
|
russell-cooks/video-analytics-serving
|
517645a1555b66147879de786606dd13ec3afd5b
|
[
"BSD-3-Clause"
] | null | null | null |
'''
* Copyright (C) 2019-2020 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
import argparse
def parse_args(args=None):
parser = argparse.ArgumentParser(description="Download & Convert models from Open Model Zoo.")
parser.add_argument('--output-dir', required=False, default=".",
help='path where to save models')
parser.add_argument('--model-list', default="models/models.list.yml",
help='input file with model names')
parser.add_argument("--force", required=False, dest="force", action="store_true",
default=False, help='force the download')
args = parser.parse_args(args)
return args
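# --- Hedged usage sketch (added; not part of the original module) ---
# parse_args() accepts an explicit argv list, which makes the defaults easy to
# check without touching sys.argv; the paths below are illustrative.
if __name__ == "__main__":
    _args = parse_args(["--output-dir", "/tmp/models", "--force"])
    assert _args.force and _args.output_dir == "/tmp/models"
    assert _args.model_list == "models/models.list.yml"  # default kept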
| 35.2
| 98
| 0.646307
|
8b4645c32ab3ef0ecb114ced866bbdd816e42a72
| 2,795
|
py
|
Python
|
main.py
|
Sinowrt/checkDuplicate
|
cc0806ca6f5bacd16bf9ca5a47d0510643648e1c
|
[
"MIT"
] | null | null | null |
main.py
|
Sinowrt/checkDuplicate
|
cc0806ca6f5bacd16bf9ca5a47d0510643648e1c
|
[
"MIT"
] | null | null | null |
main.py
|
Sinowrt/checkDuplicate
|
cc0806ca6f5bacd16bf9ca5a47d0510643648e1c
|
[
"MIT"
] | null | null | null |
import sys,os,checkDuplicate
from Ui_MainWindow import Ui_MainWindow
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class MainWindow(QMainWindow, Ui_MainWindow):
appendSignal = pyqtSignal(str)
updateProgressSignal = pyqtSignal(float)
string_list=['公共基础知识', '判断推理', '数量关系', '言语理解与表达', '资料分析']
def __init__(self):
super(MainWindow, self).__init__()
self.setupUi(self)
self.browseButton.clicked.connect(self.open_directory)
self.checkButton.clicked.connect(self.check_duplicate)
self.addTitleBtn.clicked.connect(self.addTitle)
self.delTitleBtn.clicked.connect(self.delTitle)
self.appendSignal.connect(self.appendText)
self.updateProgressSignal.connect(self.updataProgress)
        self.stringlistmodel = QStringListModel()  # create the string list model
        self.stringlistmodel.setStringList(self.string_list)  # load the data into the model
        self.titleListView.setModel(self.stringlistmodel)  # attach the model to the view
self.stringlistmodel.dataChanged.connect(self.save)
self.enableTitleCheckBox.setChecked(True)
def save(self):
self.string_list = self.stringlistmodel.stringList()
print(self.string_list)
def addTitle(self):
kw = self.titleLineEdit.text()
if kw == '':
self.appendSignal.emit('无法添加空标题!')
return
row = self.stringlistmodel.rowCount()
self.stringlistmodel.insertRow(row)
self.stringlistmodel.setData(self.stringlistmodel.index(row), kw)
self.save()
def delTitle(self):
index = self.titleListView.currentIndex()
print(index.row())
self.stringlistmodel.removeRow(index.row())
self.save()
print(self.string_list)
def open_directory(self):
path = QtWidgets.QFileDialog.getExistingDirectory(self, "选取文件夹", "./")
print(path)
self.lineEdit.setText(path)
def check_duplicate(self):
self.textBrowser.clear()
self.checkButton.setEnabled(False)
self.browseButton.setEnabled(False)
self.lineEdit.setEnabled(False)
if self.enableTitleCheckBox.isChecked():
checkDuplicate.ckduplicate(self.lineEdit.text(), self)
else:
checkDuplicate.checkDuplicateWithoutTitle(self.lineEdit.text(), self)
self.checkButton.setEnabled(True)
self.browseButton.setEnabled(True)
self.lineEdit.setEnabled(True)
def appendText(self,str):
self.textBrowser.append(str)
def updataProgress(self,percent):
self.progressBar.setProperty("value", percent)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
win = MainWindow()
win.show()
sys.exit(app.exec_())
| 34.9375
| 81
| 0.68229
|
a89a53e13ecb73b4d2bf5fea96f5746f2db7b3f1
| 162
|
py
|
Python
|
app/config/views.py
|
maro99/yapen
|
0de7aa9d4b152aadd18511be6e536e89645452d9
|
[
"MIT"
] | 1
|
2019-04-28T12:21:51.000Z
|
2019-04-28T12:21:51.000Z
|
app/config/views.py
|
maro99/yapen
|
0de7aa9d4b152aadd18511be6e536e89645452d9
|
[
"MIT"
] | 5
|
2018-07-30T05:44:44.000Z
|
2020-06-05T18:56:41.000Z
|
app/config/views.py
|
maro99/yapen
|
0de7aa9d4b152aadd18511be6e536e89645452d9
|
[
"MIT"
] | 5
|
2018-07-23T05:21:41.000Z
|
2018-08-08T05:00:42.000Z
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
def index(request):
return render(request, 'pensions/pensions_list.html')
| 27
| 57
| 0.802469
|
8fee9b1eb4625c205d63b802513a953a281fbc36
| 98
|
py
|
Python
|
exercise_2017/5th_week/test.py
|
Taewan-P/python_study
|
f347e370aaa33aba5ab233252bcd759b94615348
|
[
"MIT"
] | null | null | null |
exercise_2017/5th_week/test.py
|
Taewan-P/python_study
|
f347e370aaa33aba5ab233252bcd759b94615348
|
[
"MIT"
] | null | null | null |
exercise_2017/5th_week/test.py
|
Taewan-P/python_study
|
f347e370aaa33aba5ab233252bcd759b94615348
|
[
"MIT"
] | null | null | null |
def fastmult3(m,n):
ans = 0
while(n > 0):
ans += m
n = n-1
return ans
print(fastmult3(3,6))
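# --- Hedged addition (not part of the original exercise) ---
# fastmult3 above performs n additions (O(n)); a genuinely fast multiply can halve
# n on every step (double-and-add, a.k.a. Russian peasant multiplication), which
# needs only O(log n) iterations for the same result.
def fastmult_log(m, n):
    ans = 0
    while n > 0:
        if n % 2 == 1:  # add m whenever the low bit of n is set
            ans += m
        m += m          # double m
        n //= 2         # halve n
    return ans

print(fastmult_log(3, 6))  # 18, matching fastmult3(3, 6)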
| 14
| 21
| 0.581633
|
7359b3cbe63cdadb190ff7ef5f16ad6ace751886
| 5,676
|
py
|
Python
|
neighbourhood/views.py
|
Ken-mbira/THE_WATCH
|
a6bfb65b2f134adf3b2e584ea8ebfc79588ef0b5
|
[
"MIT"
] | null | null | null |
neighbourhood/views.py
|
Ken-mbira/THE_WATCH
|
a6bfb65b2f134adf3b2e584ea8ebfc79588ef0b5
|
[
"MIT"
] | null | null | null |
neighbourhood/views.py
|
Ken-mbira/THE_WATCH
|
a6bfb65b2f134adf3b2e584ea8ebfc79588ef0b5
|
[
"MIT"
] | null | null | null |
from rest_framework.decorators import api_view,permission_classes
from rest_framework.permissions import IsAuthenticated,IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from neighbourhood.models import *
from account.serializers import *
from neighbourhood.serializers import *
@api_view(['GET','POST'])
@permission_classes([IsAuthenticatedOrReadOnly])
def neighbour_view(request):
data = {}
if request.method == 'GET':
neighbourhoods = Neighbourhood.objects.all()
data = GetNeighbourhoodSerializer(neighbourhoods,many=True).data
return Response(data,status = status.HTTP_200_OK)
elif request.method == 'POST':
serializer = NeighbourhoodSerializer(data = request.data)
if serializer.is_valid():
serializer.save(request)
data['success'] = "The neighbourhood was created successfully"
return Response(data,status = status.HTTP_201_CREATED)
else:
data = serializer.errors
return Response(data,status = status.HTTP_400_BAD_REQUEST)
class LocationList(generics.ListAPIView):
queryset = Location.objects.all()
serializer_class = LocationSerializer
permission_classes = [IsAuthenticated]
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def join_neighbourhood(request,pk):
data = {}
profile = Profile.objects.get(user = request.user)
neighbourhood = Neighbourhood.objects.get(pk=pk)
profile.neighbourhood = neighbourhood
profile.save()
    data['success'] = f"You successfully joined {neighbourhood.slogan}"
return Response(data,status = status.HTTP_200_OK)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def move_out(request):
data = {}
profile = Profile.objects.get(user = request.user)
profile.neighbourhood = None
profile.save()
data['success'] = "You are no longer a member of the neighbourhood!"
return Response(data,status = status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_neighbourhood(request):
data = {}
profile = Profile.objects.get(user = request.user)
print(profile.neighbourhood)
data = ProfileSerializer(profile).data
return Response(data,status = status.HTTP_200_OK)
@api_view(['GET','POST'])
@permission_classes([IsAuthenticatedOrReadOnly])
def business_view(request):
data ={}
if request.method == 'GET':
businesses = Business.objects.all()
data = BusinessSerializer(businesses,many=True).data
return Response(data,status = status.HTTP_200_OK)
elif request.method == 'POST':
serializer = BusinessSerializer(data = request.data)
if serializer.is_valid():
serializer.save(request)
            data['success'] = "The business was created successfully"
return Response(data,status = status.HTTP_201_CREATED)
else:
data = serializer.errors
return Response(data,status = status.HTTP_400_BAD_REQUEST)
@api_view(['POST','GET'])
@permission_classes([IsAuthenticatedOrReadOnly])
def occurence_view(request,pk):
data = {}
try:
neighbourhood = Neighbourhood.objects.get(pk=pk)
    except Neighbourhood.DoesNotExist:
data['not found'] = "The neighbourhood was not found"
return Response(data,status = status.HTTP_404_NOT_FOUND)
if request.method == 'POST':
serializer = OccurrenceSerializer(data = request.data)
if serializer.is_valid():
serializer.save(request,neighbourhood)
data['success'] = "The occurrence was successfully reported"
return Response(data,status = status.HTTP_200_OK)
else:
data = serializer.errors
print(data)
return Response(data,status = status.HTTP_400_BAD_REQUEST)
elif request.method == 'GET':
events = Occurrence.get_events(pk)
data = OccurrenceSerializer(events,many=True).data
return Response(data,status= status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_businesses(request,pk):
"""The view for getting all businesses in a neighbourhood
Args:
        request (Request): incoming HTTP request
        pk (int): primary key of the neighbourhood
"""
businesses = Business.get_bussinesses(pk)
data = {}
data['businesses'] = BusinessSerializer(businesses,many=True).data
return Response(data,status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_residents(request,pk):
"""This parses the request to get the users in a certain neighbourhood
Args:
        request (Request): incoming HTTP request
        pk (int): primary key of the neighbourhood whose residents are listed
"""
data = {}
users = Profile.get_residents(pk)
data['users'] = UserSerializer(users,many=True).data
return Response(data,status = status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def search_business(request,term):
"""This parses the view request for getting the businesses via a search term
Args:
        request (Request): incoming HTTP request
        term (str): business name to search for
"""
data = {}
results = Business.search_by_name(term)
data['businesses'] = BusinessSerializer(results,many=True).data
return Response(data,status=status.HTTP_200_OK)
class EventTypeList(generics.ListAPIView):
queryset = EventType.objects.all()
serializer_class = EventSerializer
permission_classes=[IsAuthenticated]
class ServiceList(generics.ListAPIView):
queryset = Services.objects.all()
serializer_class = ServiceSerializer
permission_classes = [IsAuthenticated]
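# --- Hedged routing sketch (added; the project's real urls.py is not shown here) ---
# One plausible way to wire the views above into URLs; the path strings and prefixes
# are assumptions, not taken from the project.
#
# from django.urls import path
# from neighbourhood import views
#
# urlpatterns = [
#     path('neighbourhoods/', views.neighbour_view),
#     path('neighbourhoods/<int:pk>/join/', views.join_neighbourhood),
#     path('neighbourhoods/<int:pk>/occurrences/', views.occurence_view),
#     path('businesses/', views.business_view),
#     path('businesses/search/<str:term>/', views.search_business),
# ]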
| 31.186813
| 80
| 0.696617
|
19393d9ec478c05f6d108dd680ce641f7addc0f6
| 3,347
|
py
|
Python
|
haproxysessionmon/backends/graylog.py
|
thanethomson/haproxy-session-mon
|
7b19051ad7cc4814e874cfc0270607c9d1271831
|
[
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null |
haproxysessionmon/backends/graylog.py
|
thanethomson/haproxy-session-mon
|
7b19051ad7cc4814e874cfc0270607c9d1271831
|
[
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null |
haproxysessionmon/backends/graylog.py
|
thanethomson/haproxy-session-mon
|
7b19051ad7cc4814e874cfc0270607c9d1271831
|
[
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
from datetime import datetime
from haproxysessionmon.backends.base import StorageBackend
import logging
logger = logging.getLogger(__name__)
__all__ = [
"GraylogBackend"
]
class GraylogBackend(StorageBackend):
"""Uses Graylog to store statistics."""
def __init__(self, remote_addr, loop, facility="haproxy-session-mon"):
self.remote_addr = remote_addr
self.loop = loop
self.facility = facility
logger.debug("Connecting to Graylog server at {}:{}".format(*self.remote_addr))
self.transport, self.protocol = loop.run_until_complete(loop.create_datagram_endpoint(
lambda: GraylogProtocol(self, facility=facility),
remote_addr=remote_addr
))
async def store_stats(self, stats):
logger.debug("Sending {} metrics to Graylog".format(len(stats)))
sent = 0
for metric in stats:
try:
self.protocol.send_metric(metric)
sent += 1
except Exception as e:
logger.exception("Exception caught while attempting to log to Graylog: {}".format(e))
return sent
def close(self):
self.transport.close()
async def _reconnect(self):
logger.warning("Reconnecting to Graylog server at {}:{}".format(*self.remote_addr))
self.transport, self.protocol = await self.loop.create_datagram_endpoint(
lambda: GraylogProtocol(self, facility=self.facility),
remote_addr=self.remote_addr
)
def reconnect(self):
        self.loop.create_task(self._reconnect())  # schedule the reconnect coroutine on the loop
class GraylogProtocol(object):
"""Our simple protocol for interacting with Graylog."""
def __init__(self, backend, reconnect_on_failure=True, facility="haproxy-session-mon"):
self.backend = backend
self.transport = None
self.facility = facility
self.reconnect_on_failure = reconnect_on_failure
def connection_made(self, transport):
self.transport = transport
def send_metric(self, metric):
# GELF payload format, as per http://docs.graylog.org/en/stable/pages/gelf.html
payload = {
"version": "1.1",
"host": metric.server_id,
"short_message": "{} concurrent requests measured for backend \"{}\"".format(
metric.sessions,
metric.backend
),
"timestamp": datetime.now().timestamp(),
"level": 6, # INFO
"_facility": self.facility,
"_sessions": metric.sessions,
"_backend": metric.backend,
"_queued_sessions": metric.queued_sessions,
"_active_backends": metric.active_backends,
"_http_4xx": metric.http_4xx,
"_http_5xx": metric.http_5xx
}
self.transport.sendto(json.dumps(payload).encode())
def error_received(self, exc):
logger.exception("Error while communicating with Graylog server: {}".format(exc))
self.close_and_reconnect()
def connection_lost(self, exc):
logger.exception("Connection to Graylog server lost: {}".format(exc))
self.close_and_reconnect()
def close_and_reconnect(self):
self.transport.close()
if self.reconnect_on_failure:
self.backend.reconnect()
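# --- Hedged usage sketch (added; not part of the original module) ---
# GraylogBackend opens its UDP endpoint eagerly in __init__, so it needs an event
# loop at construction time; the host and GELF UDP port below are assumptions.
def _example_backend_setup():
    import asyncio
    loop = asyncio.new_event_loop()
    backend = GraylogBackend(remote_addr=("graylog.example.com", 12201), loop=loop)
    # later: loop.run_until_complete(backend.store_stats(metrics)); backend.close()
    return backend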
| 33.808081
| 101
| 0.632507
|
23aa5d56030df1e1cb2b6cd824124924ee6016a2
| 3,091
|
py
|
Python
|
reader.py
|
xtarx/cycleGAN-tensorflow
|
8e73ae8c6473149fb930bb05299ed64d2b5dd5b3
|
[
"MIT"
] | null | null | null |
reader.py
|
xtarx/cycleGAN-tensorflow
|
8e73ae8c6473149fb930bb05299ed64d2b5dd5b3
|
[
"MIT"
] | null | null | null |
reader.py
|
xtarx/cycleGAN-tensorflow
|
8e73ae8c6473149fb930bb05299ed64d2b5dd5b3
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import utils
class Reader():
def __init__(self, tfrecords_file, image_size=256,
min_queue_examples=1000, batch_size=1, num_threads=8, name=''):
"""
Args:
tfrecords_file: string, tfrecords file path
min_queue_examples: integer, minimum number of samples to retain in the queue that provides of batches of examples
batch_size: integer, number of images per batch
num_threads: integer, number of preprocess threads
"""
self.tfrecords_file = tfrecords_file
self.image_size = image_size
self.min_queue_examples = min_queue_examples
self.batch_size = batch_size
self.num_threads = num_threads
self.reader = tf.TFRecordReader()
self.name = name
def feed(self):
"""
Returns:
images: 4D tensor [batch_size, image_width, image_height, image_depth]
"""
with tf.name_scope(self.name):
filename_queue = tf.train.string_input_producer([self.tfrecords_file])
reader = tf.TFRecordReader()
_, serialized_example = self.reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'image/file_name': tf.FixedLenFeature([], tf.string),
'image/encoded_image': tf.FixedLenFeature([], tf.string),
})
image_buffer = features['image/encoded_image']
image = tf.image.decode_png(image_buffer, channels=3)
image = self._preprocess(image)
images = tf.train.shuffle_batch(
[image], batch_size=self.batch_size, num_threads=self.num_threads,
capacity=self.min_queue_examples + 3*self.batch_size,
min_after_dequeue=self.min_queue_examples
)
tf.summary.image('_input', images)
return images
def _preprocess(self, image):
image = tf.image.resize_images(image, size=(self.image_size, self.image_size))
image = utils.convert2float(image)
image.set_shape([self.image_size, self.image_size, 3])
return image
def test_reader():
TRAIN_FILE_1 = 'data/tfrecords/apple.tfrecords'
TRAIN_FILE_2 = 'data/tfrecords/orange.tfrecords'
with tf.Graph().as_default():
reader1 = Reader(TRAIN_FILE_1, batch_size=2)
reader2 = Reader(TRAIN_FILE_2, batch_size=2)
images_op1 = reader1.feed()
images_op2 = reader2.feed()
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
step = 0
while not coord.should_stop():
batch_images1, batch_images2 = sess.run([images_op1, images_op2])
print("image shape: {}".format(batch_images1))
print("image shape: {}".format(batch_images2))
print("="*10)
step += 1
except KeyboardInterrupt:
print('Interrupted')
coord.request_stop()
except Exception as e:
coord.request_stop(e)
finally:
# When done, ask the threads to stop.
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
test_reader()
| 32.882979
| 120
| 0.676804
|
f4c1a8fac2cdf8fcbf900b1444275efe5d50524a
| 1,093
|
py
|
Python
|
var/spack/repos/builtin/packages/conserver/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/conserver/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/conserver/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Conserver(AutotoolsPackage):
"""Conserver is an application that allows multiple users to
watch a serial console at the same time. """
homepage = "https://www.conserver.com/"
url = "https://github.com/bstansell/conserver/releases/download/v8.2.5/conserver-8.2.5.tar.gz"
version('8.2.5', sha256='7db192f304126d7e5c15421c4c83cd5c08039f2f2b3c61b2998e71881ae47eea')
version('8.2.4', sha256='a591eabb4abb632322d2f3058a2f0bd6502754069a99a153efe2d6d05bd97f6f')
version('8.2.3', sha256='764443b2798047f7429747510eeb3207240260590551700d13dbbad8a5bdee08')
version('8.2.2', sha256='05ea1693bf92b42ad2f0a9389c60352ccd35c2ea93c8fc8e618d0153362a7d81')
version('8.2.1', sha256='251ae01997e8f3ee75106a5b84ec6f2a8eb5ff2f8092438eba34384a615153d0')
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix.sbin)
| 45.541667
| 103
| 0.772187
|
39d69081266240a20fde31d33a2ce4d57d00adbc
| 3,775
|
py
|
Python
|
python/examples/lowlevel.py
|
angiglesias/rpi_ws281x
|
013f38e62f804347b442e2144c14ee16a3c7e191
|
[
"BSD-2-Clause"
] | 1,583
|
2015-01-01T14:39:44.000Z
|
2022-03-28T19:21:19.000Z
|
python/examples/lowlevel.py
|
angiglesias/rpi_ws281x
|
013f38e62f804347b442e2144c14ee16a3c7e191
|
[
"BSD-2-Clause"
] | 428
|
2015-01-01T17:43:16.000Z
|
2022-03-24T21:20:21.000Z
|
python/examples/lowlevel.py
|
angiglesias/rpi_ws281x
|
013f38e62f804347b442e2144c14ee16a3c7e191
|
[
"BSD-2-Clause"
] | 637
|
2015-01-03T19:53:09.000Z
|
2022-03-31T16:33:52.000Z
|
# Example of low-level Python wrapper for rpi_ws281x library.
# Author: Tony DiCola (tony@tonydicola.com), Jeremy Garff (jer@jers.net)
#
# This is an example of how to use the SWIG-generated _rpi_ws281x module.
# You probably don't want to use this unless you are building your own library,
# because the SWIG generated module is clunky and verbose. Instead look at the
# high level Python port of Adafruit's NeoPixel Arduino library in strandtest.py.
#
# This code will animate a number of WS281x LEDs displaying rainbow colors.
import time
import _rpi_ws281x as ws
# LED configuration.
LED_CHANNEL = 0
LED_COUNT = 16 # How many LEDs to light.
LED_FREQ_HZ = 800000 # Frequency of the LED signal. Should be 800khz or 400khz.
LED_DMA_NUM = 10 # DMA channel to use, can be 0-14.
LED_GPIO = 18 # GPIO connected to the LED signal line. Must support PWM!
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = 0 # Set to 1 to invert the LED signal, good if using NPN
# transistor as a 3.3V->5V level converter. Keep at 0
# for a normal/non-inverted signal.
# Define colors which will be used by the example. Each color is an unsigned
# 32-bit value where the lower 24 bits define the red, green, blue data (each
# being 8 bits long).
DOT_COLORS = [ 0x200000, # red
0x201000, # orange
0x202000, # yellow
0x002000, # green
0x002020, # lightblue
0x000020, # blue
0x100010, # purple
0x200010 ] # pink
# Create a ws2811_t structure from the LED configuration.
# Note that this structure will be created on the heap so you need to be careful
# that you delete its memory by calling delete_ws2811_t when it's not needed.
leds = ws.new_ws2811_t()
# Initialize all channels to off
for channum in range(2):
channel = ws.ws2811_channel_get(leds, channum)
ws.ws2811_channel_t_count_set(channel, 0)
ws.ws2811_channel_t_gpionum_set(channel, 0)
ws.ws2811_channel_t_invert_set(channel, 0)
ws.ws2811_channel_t_brightness_set(channel, 0)
channel = ws.ws2811_channel_get(leds, LED_CHANNEL)
ws.ws2811_channel_t_count_set(channel, LED_COUNT)
ws.ws2811_channel_t_gpionum_set(channel, LED_GPIO)
ws.ws2811_channel_t_invert_set(channel, LED_INVERT)
ws.ws2811_channel_t_brightness_set(channel, LED_BRIGHTNESS)
ws.ws2811_t_freq_set(leds, LED_FREQ_HZ)
ws.ws2811_t_dmanum_set(leds, LED_DMA_NUM)
# Initialize library with LED configuration.
resp = ws.ws2811_init(leds)
if resp != ws.WS2811_SUCCESS:
message = ws.ws2811_get_return_t_str(resp)
raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(resp, message))
# Wrap following code in a try/finally to ensure cleanup functions are called
# after library is initialized.
try:
offset = 0
while True:
# Update each LED color in the buffer.
for i in range(LED_COUNT):
# Pick a color based on LED position and an offset for animation.
color = DOT_COLORS[(i + offset) % len(DOT_COLORS)]
# Set the LED color buffer value.
ws.ws2811_led_set(channel, i, color)
# Send the LED color data to the hardware.
resp = ws.ws2811_render(leds)
if resp != ws.WS2811_SUCCESS:
message = ws.ws2811_get_return_t_str(resp)
raise RuntimeError('ws2811_render failed with code {0} ({1})'.format(resp, message))
# Delay for a small period of time.
time.sleep(0.25)
# Increase offset to animate colors moving. Will eventually overflow, which
# is fine.
offset += 1
finally:
# Ensure ws2811_fini is called before the program quits.
ws.ws2811_fini(leds)
# Example of calling delete function to clean up structure memory. Isn't
# strictly necessary at the end of the program execution here, but is good practice.
ws.delete_ws2811_t(leds)
| 38.131313
| 87
| 0.735099
|
ed5018ca32591177c227a04374c3f597c748775d
| 4,777
|
py
|
Python
|
portal/apps/comunidad/forms.py
|
Artis-Physis/utopia-cms
|
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
|
[
"BSD-3-Clause"
] | 8
|
2020-12-15T17:11:08.000Z
|
2021-12-13T22:08:33.000Z
|
portal/apps/comunidad/forms.py
|
Artis-Physis/utopia-cms
|
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
|
[
"BSD-3-Clause"
] | 28
|
2020-12-15T17:34:03.000Z
|
2022-02-01T04:09:10.000Z
|
portal/apps/comunidad/forms.py
|
Artis-Physis/utopia-cms
|
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
|
[
"BSD-3-Clause"
] | 7
|
2020-12-15T19:59:17.000Z
|
2021-11-24T16:47:06.000Z
|
# -*- coding: utf-8 -*-
from django import forms
from comunidad.models import SubscriberEvento, SubscriberArticle, Registro
from thedaily.models import Subscriber
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, Button, Field, Fieldset, HTML, MultiField
from crispy_forms.bootstrap import FormActions
class ArticleForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_id = 'articulo'
# self.helper.form_class = 'form-horizontal'
self.helper.form_style = 'inline'
self.helper.form_method = 'post'
# self.helper.form_action = reverse( 'community-article' )
self.helper.help_text_inline = True
self.helper.error_text_inline = True
self.helper.render_unmentioned_fields = False
self.helper.layout = Layout(
Field('sections'),
Field('headline'),
Field('deck'),
Field('body'),
FormActions(
Submit('save', u'Publicar'),
)
)
super(ArticleForm, self).__init__(*args, **kwargs)
class Meta:
model = SubscriberArticle
        fields = ['headline', 'deck', 'body']
class EventoForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_id = 'evento'
self.helper.form_class = 'form-horizontal'
self.helper.form_style = 'inline'
self.helper.form_method = 'post'
# self.helper.form_action = reverse( 'community-event' )
self.helper.help_text_inline = True
self.helper.error_text_inline = True
self.helper.render_unmentioned_fields = True
self.helper.layout = Layout(
Field('categoria'),
Field('title'),
Field('description'),
Field('start', css_class="datepicker", readonly=True),
Field('end', css_class="datepicker", readonly=True),
Field('precio'),
Field('poster'),
FormActions(
Submit('save', u'Publicar'),
)
)
super(EventoForm, self).__init__(*args, **kwargs)
class Meta:
model = SubscriberEvento
        fields = ['categoria',
'title',
'description',
'start',
'end',
'precio',
'poster']
class RegistroForm(forms.ModelForm):
""" Registro de la utilizacion de un beneficio """
document = forms.CharField(max_length=50, required=False,
label='Documento', help_text=u'Número de cédula sin puntos ni guiones '
'u otro documento registrado en la diaria.')
def __init__(self, benefit_qs, *args, **kwargs):
self.helper = FormHelper()
self.helper.layout = Layout(
Field('document'), Field('benefit'),
FormActions(Submit('', u'Consultar')))
super(RegistroForm, self).__init__(*args, **kwargs)
self.fields['benefit'].queryset = benefit_qs
self.fields['benefit'].label = 'Beneficio'
def clean(self):
cleaned_data = super(RegistroForm, self).clean()
# first check for the benefit's limit left
benefit, cupo_restante = cleaned_data.get("benefit"), None
if benefit:
if benefit.limit:
cupo_restante = benefit.limit - len(benefit.registro_set.all())
if cupo_restante <= 0:
raise forms.ValidationError("No hay cupo")
# second: identify the subscriber and check the per subscriber quota
document = cleaned_data.get("document")
if document:
try:
subscriber = Subscriber.objects.get(document=document)
if not subscriber.is_subscriber():
raise forms.ValidationError("No es suscriptor activo")
else:
cleaned_data['subscriber'] = subscriber
if benefit and benefit.quota and benefit.quota - len(
subscriber.registro_set.filter(benefit=benefit)) <= 0:
raise forms.ValidationError("Ya utilizó el beneficio")
except Subscriber.DoesNotExist:
raise forms.ValidationError("Documento no encontrado")
except Subscriber.MultipleObjectsReturned:
raise forms.ValidationError("Muchos suscriptores encontrados")
elif benefit:
raise forms.ValidationError("Cupo restante: %s" % (cupo_restante \
if benefit.limit else 'ilimitado'))
return cleaned_data
class Meta:
model = Registro
fields = ['document', 'benefit']
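# A minimal, hypothetical view-side sketch for RegistroForm: `benefit_qs` is
# assumed to be a queryset of the model referenced by Registro.benefit and
# `request` a Django request object; neither is defined in this module.
#     form = RegistroForm(benefit_qs, request.POST or None)
#     if form.is_valid():
#         registro = form.save(commit=False)
#         registro.subscriber = form.cleaned_data.get('subscriber')
#         registro.save()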
| 38.524194
| 94
| 0.592841
|
caa216ad0ca56046b9179b0aae233772866b8c04
| 2,069
|
py
|
Python
|
lib/galaxy/work/context.py
|
maikenp/galaxy
|
eb3f3c816f1f94bc328d092f30c8966d41a56a0d
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/work/context.py
|
maikenp/galaxy
|
eb3f3c816f1f94bc328d092f30c8966d41a56a0d
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/work/context.py
|
maikenp/galaxy
|
eb3f3c816f1f94bc328d092f30c8966d41a56a0d
|
[
"CC-BY-3.0"
] | null | null | null |
from galaxy.managers.context import (
ProvidesHistoryContext,
)
class WorkRequestContext(ProvidesHistoryContext):
""" Stripped down implementation of Galaxy web transaction god object for
work request handling outside of web threads - uses mix-ins shared with
GalaxyWebTransaction to provide app, user, and history context convenience
methods - but nothing related to HTTP handling, mako views, etc....
Things that only need app shouldn't be consuming trans - but there is a
need for actions potentially tied to users and histories and hopefully
this can define that stripped down interface providing access to user and
history information - but not dealing with web request and response
objects.
"""
def __init__(self, app, user=None, history=None, workflow_building_mode=False):
self._app = app
self.__user = user
self.__user_current_roles = None
self.__history = history
self.workflow_building_mode = workflow_building_mode
@property
def app(self):
return self._app
def get_history(self, create=False):
return self.__history
@property
def history(self):
return self.get_history()
def get_user(self):
"""Return the current user if logged in or None."""
return self.__user
def get_current_user_roles(self):
if self.__user_current_roles is None:
self.__user_current_roles = super().get_current_user_roles()
return self.__user_current_roles
def set_user(self, user):
"""Set the current user."""
raise NotImplementedError("Cannot change users from a work request context.")
user = property(get_user, set_user)
class SessionRequestContext(WorkRequestContext):
"""Like WorkRequestContext, but provides access to galaxy session and session."""
def __init__(self, **kwargs):
self.galaxy_session = kwargs.pop('galaxy_session', None)
super().__init__(**kwargs)
def get_galaxy_session(self):
return self.galaxy_session
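# A minimal usage sketch, assuming `app` is a Galaxy application object and
# `user`/`history` are model instances obtained elsewhere (none are defined here):
#     trans = WorkRequestContext(app=app, user=user, history=history)
#     trans.history is trans.get_history()    # True; the history is fixed at construction
#     roles = trans.get_current_user_roles()  # cached after the first call
#     trans.user = None                       # raises NotImplementedError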
| 33.918033
| 85
| 0.704688
|
238220b5d56a0fa6c3032317ca6820d6a96857c4
| 826
|
py
|
Python
|
tsg/views.py
|
kityang/tushuguan
|
81ed68833250b16251ff9656f10cc44929c26fc1
|
[
"MIT"
] | null | null | null |
tsg/views.py
|
kityang/tushuguan
|
81ed68833250b16251ff9656f10cc44929c26fc1
|
[
"MIT"
] | null | null | null |
tsg/views.py
|
kityang/tushuguan
|
81ed68833250b16251ff9656f10cc44929c26fc1
|
[
"MIT"
] | null | null | null |
import time
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from tsg.models import 书籍
# Create your views here.
def index(request):
return render(request, 'index.html')
def 图书添加(request):
书籍列表 = 书籍.objects.all().order_by('添加时间')
return render(request, '图书添加.html',context={'书籍列表':书籍列表})
@csrf_exempt
def 图书入库(a):
书名 = a.POST['书名']
作者 = a.POST['作者']
出版社 = a.POST['出版社']
ISBN = "0"
添加时间 = timezone.now()
书籍.objects.create(书名=书名, 作者=作者, 出版社=出版社,ISBN=ISBN, 添加时间=添加时间)
return redirect('图书添加.html')
@csrf_exempt
def 书号提交(request):
书号 = request.POST['ISBN']
添加时间 = timezone.now()
书籍.objects.create(ISBN=书号, 添加时间=添加时间)
return HttpResponse(status=200)
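# A hypothetical urls.py wiring for the views above (module path and URL
# patterns are guesses, not part of this file):
#     from django.urls import path
#     from tsg import views
#     urlpatterns = [
#         path('', views.index),
#         path('图书添加/', views.图书添加),
#         path('图书入库/', views.图书入库),
#         path('书号提交/', views.书号提交),
#     ]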
| 25.030303
| 65
| 0.691283
|
a911f42ef0c7a0178b38a94af52d8b803df7d08b
| 3,652
|
py
|
Python
|
backend/randomtest.py
|
ezg/PanoramicDataWin8
|
229e9ab64cda30a0bd1c6d39a70754ba4651ad43
|
[
"Apache-2.0"
] | 2
|
2015-09-29T03:25:03.000Z
|
2021-04-29T03:17:28.000Z
|
backend/randomtest.py
|
ezg/PanoramicDataWin8
|
229e9ab64cda30a0bd1c6d39a70754ba4651ad43
|
[
"Apache-2.0"
] | 1
|
2016-09-25T21:45:21.000Z
|
2016-09-25T21:45:21.000Z
|
backend/randomtest.py
|
ezg/PanoramicDataWin8
|
229e9ab64cda30a0bd1c6d39a70754ba4651ad43
|
[
"Apache-2.0"
] | 2
|
2015-10-26T17:21:48.000Z
|
2016-10-29T15:45:32.000Z
|
from dataprovider import SequentialDataProvider
from databinner import DataBinner
import json
import time
import numpy as np
job = {
"type": "execute",
"dataset": "facts.csv_5000",
"task": {
"filter": "",
"aggregateFunctions": [
"Count"
],
"type": "visualization",
"chunkSize": 10000,
"aggregateDimensions": [
"asian"
],
"nrOfBins": [
10.0,
10.0
],
"brushes": [
"( county == 'F46107' ) or ( county == 'F46129' ) or ( county == 'F8011' ) or ( county == 'F8017' ) or ( county == 'F8061' ) or ( county == 'F8063' ) or ( county == 'F8095' ) or ( county == 'F8099' ) or ( county == 'F8115' ) or ( county == 'F8125' ) or ( county == 'F31049' ) or ( county == 'F31069' ) or ( county == 'F5005' ) or ( county == 'F5029' ) or ( county == 'F5051' ) or ( county == 'F5089' ) or ( county == 'F5105' ) or ( county == 'F5115' ) or ( county == 'F5125' ) or ( county == 'F5129' ) or ( county == 'F5141' ) or ( county == 'F5149' ) or ( county == 'F17001' ) or ( county == 'F17067' ) or ( county == 'F19087' ) or ( county == 'F19103' ) or ( county == 'F19111' ) or ( county == 'F19113' ) or ( county == 'F19115' ) or ( county == 'F19177' ) or ( county == 'F19183' ) or ( county == 'F29007' ) or ( county == 'F29019' ) or ( county == 'F29027' ) or ( county == 'F29045' ) or ( county == 'F29051' ) or ( county == 'F29067' ) or ( county == 'F29105' ) or ( county == 'F29111' ) or ( county == 'F29125' ) or ( county == 'F29127' ) or ( county == 'F29131' ) or ( county == 'F29137' ) or ( county == 'F29151' ) or ( county == 'F29153' ) or ( county == 'F29169' ) or ( county == 'F29173' ) or ( county == 'F29205' ) or ( county == 'F29215' ) or ( county == 'F29229' )"
],
"dimensionAggregateFunctions": [
"None",
"Count"
],
"dimensions": [
"asian",
"asian"
]
}
}
print json.dumps(job)
task = job['task']
dp = SequentialDataProvider(job['dataset'], 'C:\\data\\', task['chunkSize'], 0)
db = DataBinner(task['dimensions'], task['dimensionAggregateFunctions'], task['nrOfBins'], task['aggregateDimensions'], task['aggregateFunctions'], task['brushes'])
aggName = 'mpg'
aggFunc = 'Count'
aggKey1 = (aggName, aggFunc, 0)
aggKey2 = (aggName, aggFunc, 1)
aggKey3 = (aggName, aggFunc, 1)
aggKey4 = (aggName, aggFunc, 1)
def default(o):
if isinstance(o, np.integer): return int(o)
raise TypeError
while True:
start = time.time()
c, df = dp.getDataFrame()
if not task['filter'] == '':
df = df.query(task['filter'])
print 'progress', str(c * 100.0) + '%'
    if df is not None:
db.bin(df, c)
br0 = db.binStructure.binRanges[0]
br1 = db.binStructure.binRanges[1]
for b0 in br0.getBins():
print ' ' + br0.getLabel(b0)
bkey = (br0.getIndex(b0),)
for b1 in br1.getBins():
bkey = (br0.getIndex(b0), br1.getIndex(b1))
print ' ' + br1.getLabel(b1)
print ' ',
for k in [aggKey1, aggKey2, aggKey3, aggKey4]:
print str(db.binStructure.bins[bkey].values[k]) +',',
print
print ' ',
for k in [aggKey1, aggKey2, aggKey3, aggKey4]:
print str(db.binStructure.bins[bkey].counts[k]) +',',
print
#print len(db.binStructure.bins)
#for g in db.binStructure.bins:
# print g
if df is None or c == 1.0:
break
end = time.time()
print "time", (end - start)
print
| 36.888889
| 1,286
| 0.525192
|
e022397dcf37e3b4d2458120b14932b8a643eff1
| 11,710
|
py
|
Python
|
utils/workers.py
|
kushwahashivam/alphazero-othello
|
66fe081ac98a34a1c49db7ca3af3d67c7ee6a88b
|
[
"Apache-2.0"
] | 1
|
2021-07-02T15:52:14.000Z
|
2021-07-02T15:52:14.000Z
|
utils/workers.py
|
kushwahashivam/alphazero-othello
|
66fe081ac98a34a1c49db7ca3af3d67c7ee6a88b
|
[
"Apache-2.0"
] | 1
|
2020-10-11T18:23:49.000Z
|
2020-10-11T18:23:49.000Z
|
utils/workers.py
|
kushwahashivam/alphazero-othello
|
66fe081ac98a34a1c49db7ca3af3d67c7ee6a88b
|
[
"Apache-2.0"
] | 1
|
2020-10-11T18:21:00.000Z
|
2020-10-11T18:21:00.000Z
|
from __future__ import annotations
from typing import Union, OrderedDict, Dict
import numpy as np
import pickle
import time
import torch
from torch.multiprocessing import Process, Queue
from config import OthelloConfig
from players.azplayer import AZPlayer
from utils.game import Othello
from utils.model import Network
from utils.util import ReplayBuffer, image_to_tensor, Node, mcts, generate_training_data, calculate_loss
from players.vmctsplayer import VMCTSPlayer
class SelfPlayWorker(Process):
def __init__(
self, name: str, message_queue: Queue, log_queue: Queue,
shared_state_dicts: Dict[str, Union[Dict[str, torch.Tensor], OrderedDict[str, torch.Tensor], int]],
replay_buffer: ReplayBuffer, device_name: str, cfg: OthelloConfig
):
super().__init__(name=name)
self._message_queue = message_queue
self._log_queue = log_queue
self._shared_state_dicts = shared_state_dicts
self._replay_buffer = replay_buffer
self._cfg = cfg
self._device = torch.device(device_name)
self._network = Network()
self._game = Othello(self._cfg)
self._interrupted = False
def run(self):
print(super().name, "started.")
self._network.to(self._device).eval()
while True:
self._load_latest_network()
t1 = time.time()
self._game.reset()
target_policies = []
node, *_ = Node.get_new_node(self._cfg, self._game, self._network, self._device)
while not self._game.is_terminal():
self._check_message_queue()
if self._interrupted:
break
for _ in range(self._cfg.num_simulations):
mcts(node, self._cfg, self._network, self._device)
target_policy = node.get_policy()
action = node.select_optimal_action()
target_policies.append(target_policy)
child = node.child(action)
self._game = child.game()
node = child
if self._interrupted:
break
final_returns = np.array(self._game.returns()).astype(np.float32)
target_policies = np.array(target_policies).astype(np.float32)
training_data = generate_training_data(self._cfg, self._game, target_policies, final_returns)
self._replay_buffer.save_training_data(training_data)
t2 = time.time()
if self._cfg.debug:
print(super().name, "completed one simulation in", t2 - t1, "seconds.")
print(super().name, "terminated.")
def _check_message_queue(self):
if not self._message_queue.empty():
msg = self._message_queue.get()
if msg == self._cfg.message_interrupt:
self._interrupted = True
# noinspection DuplicatedCode
def _load_latest_network(self):
while True:
try:
state_dict = self._shared_state_dicts["network"]
for k, v in state_dict.items():
state_dict[k] = v.to(self._device)
self._network.load_state_dict(state_dict)
self._network.eval()
return
except KeyError:
pass
self._check_message_queue()
if self._interrupted:
return
time.sleep(1.0)
class TrainingWorker(Process):
def __init__(
self, name: str, message_queue: Queue, log_queue: Queue,
shared_state_dicts: Dict[str, Union[Dict[str, torch.Tensor], OrderedDict[str, torch.Tensor], int]],
replay_buffer: ReplayBuffer, device_name: str, cfg: OthelloConfig, resume: bool
):
super().__init__(name=name)
self._message_queue = message_queue
self._log_queue = log_queue
self._shared_state_dicts = shared_state_dicts
self._replay_buffer = replay_buffer
self._cfg = cfg
self._resume = resume
self._device = torch.device(device_name)
self._network = Network()
# noinspection PyUnresolvedReferences
self._optim = torch.optim.RMSprop(
self._network.parameters(), lr=self._cfg.learning_rate_schedule[1], weight_decay=self._cfg.weight_decay
)
self._gs = 1
self._interrupted = False
def run(self):
print(super().name, "started.")
self._network.to(self._device).train()
# noinspection PyUnresolvedReferences
self._optim = torch.optim.RMSprop(
self._network.parameters(), lr=self._cfg.learning_rate_schedule[1], weight_decay=self._cfg.weight_decay
)
if self._resume:
self._load_parameters()
self._flush_network()
for epoch in range(self._gs, self._cfg.training_steps + 1):
self._check_message_queue()
self._check_replay_buffer()
if self._interrupted:
break
t1 = time.time()
self._reschedule_lr()
images, target_action_probs, target_values, action_masks = self._replay_buffer.sample_batch()
images = image_to_tensor(images, self._device)
target_action_probs = torch.as_tensor(target_action_probs, dtype=torch.float32).to(self._device)
target_values = torch.as_tensor(target_values, dtype=torch.float32).to(self._device)
self._optim.zero_grad()
predicted_action_probs, predicted_values = self._network(images)
policy_loss, value_loss, total_loss = calculate_loss(predicted_action_probs, predicted_values,
target_action_probs, target_values)
total_loss.backward()
self._optim.step()
self._flush_network()
log = {
"type": "scalar",
"losses/policy_loss": policy_loss.item(),
"losses/value_loss": value_loss.item(),
"losses/total_loss": total_loss.item(),
"gs": self._gs
}
self._log_queue.put(log)
self._gs = self._gs + 1
if epoch % self._cfg.checkpoint_interval == 0:
self._save_parameters()
t2 = time.time()
if self._cfg.debug:
print("One training epoch completed in", t2 - t1, "seconds.")
print(log)
print(super().name, "terminated.")
def _load_parameters(self):
print("Loading parameters...")
with open(self._cfg.dir_gs, "rb") as f:
self._gs = pickle.load(f)
self._network.load_state_dict(torch.load(self._cfg.dir_network, map_location=self._device))
self._optim.load_state_dict(torch.load(self._cfg.dir_optim, map_location=self._device))
self._network.train()
print("Parameters loaded successfully.")
def _save_parameters(self):
with open(self._cfg.dir_gs, "wb") as f:
pickle.dump(self._gs, f)
torch.save(self._network.state_dict(), self._cfg.dir_network)
torch.save(self._optim.state_dict(), self._cfg.dir_optim)
def _reschedule_lr(self):
if self._gs in self._cfg.learning_rate_schedule.keys():
# noinspection PyUnresolvedReferences
self._optim = torch.optim.RMSprop(
self._network.parameters(), lr=self._cfg.learning_rate_schedule[self._gs],
weight_decay=self._cfg.weight_decay
)
def _flush_network(self):
network_state_dict = self._network.state_dict()
for k, v in network_state_dict.items():
network_state_dict[k] = v.detach().cpu()
self._shared_state_dicts["network"] = network_state_dict
def _check_message_queue(self):
if not self._message_queue.empty():
msg = self._message_queue.get()
if msg == self._cfg.message_interrupt:
self._interrupted = True
def _check_replay_buffer(self):
while self._replay_buffer.empty():
self._check_message_queue()
if self._interrupted:
return
time.sleep(1.0)
class EvaluationWorker(Process):
def __init__(
self, name: str, message_queue: Queue, log_queue: Queue,
shared_state_dicts: Dict[str, Union[Dict[str, torch.Tensor], OrderedDict[str, torch.Tensor], int]],
device_name: str, cfg: OthelloConfig, resume: bool
):
super().__init__(name=name)
self._message_queue = message_queue
self._log_queue = log_queue
self._shared_state_dicts = shared_state_dicts
self._cfg = cfg
self._device = torch.device(device_name)
self._network = Network()
self._gs = 1
self._interrupted = False
self._resume = resume
def run(self):
print(super().name, "started.")
self._network.to(self._device)
if self._resume:
with open(self._cfg.dir_eval_gs, "rb") as f:
self._gs = pickle.load(f)
az_first = True
while True:
self._load_latest_network()
if self._interrupted:
break
t1 = time.time()
az_player = AZPlayer(self._cfg, self._network, self._device)
vmcts_player = VMCTSPlayer(self._cfg)
az_turn = az_first
while not az_player.game().is_terminal():
self._check_message_queue()
if self._interrupted:
break
if az_turn:
action = az_player.choose_action()
az_player.play(action)
vmcts_player.play(action)
az_turn = False
else:
action = vmcts_player.choose_action()
vmcts_player.play(action)
az_player.play(action)
az_turn = True
if self._interrupted:
break
winner = az_player.game().winner()
if winner == 2:
az_score = 0
elif (az_first and winner == 0) or ((not az_first) and winner == 1):
az_score = 1
else:
az_score = -1
log = {
"type": "scalar",
"az_score": az_score,
"gs": self._gs
}
self._log_queue.put(log)
az_first = False if az_first else True
self._gs = self._gs + 1
with open(self._cfg.dir_eval_gs, "wb") as f:
pickle.dump(self._gs, f)
t2 = time.time()
if self._cfg.debug:
print(super().name, "completed one evaluation round in", t2 - t1, "seconds.")
print(log)
print(super().name, "terminated.")
def _check_message_queue(self):
if not self._message_queue.empty():
msg = self._message_queue.get()
if msg == self._cfg.message_interrupt:
self._interrupted = True
# noinspection DuplicatedCode
def _load_latest_network(self):
while True:
try:
state_dict = self._shared_state_dicts["network"]
for k, v in state_dict.items():
state_dict[k] = v.to(self._device)
self._network.load_state_dict(state_dict)
self._network.eval()
return
except KeyError:
pass
self._check_message_queue()
if self._interrupted:
return
time.sleep(1.0)
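# A wiring sketch for these workers, assuming it runs inside the project's own
# entry point; the OthelloConfig/ReplayBuffer construction and device names are
# illustrative guesses, and the shared dict would normally come from a
# multiprocessing Manager so every process sees the latest network weights:
#     from torch.multiprocessing import Manager, Queue
#     cfg = OthelloConfig()
#     msg_q, log_q = Queue(), Queue()
#     shared = Manager().dict()
#     buf = ReplayBuffer(cfg)
#     workers = [
#         TrainingWorker("trainer", msg_q, log_q, shared, buf, "cuda:0", cfg, resume=False),
#         SelfPlayWorker("self-play-0", msg_q, log_q, shared, buf, "cpu", cfg),
#     ]
#     for w in workers:
#         w.start()
#     ...
#     for w in workers:
#         msg_q.put(cfg.message_interrupt)  # ask each worker to stop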
| 39.829932
| 115
| 0.583433
|
77093a57df4a35c85875ce2743eeaa5cce2c8fb7
| 2,298
|
py
|
Python
|
lib/ops/prepare_labels_for_prn_and_update_refine_blobs.py
|
xieshuqin/RefineNet
|
94d7f53db82aec2e3969850fe7c1d1b76ce7ad48
|
[
"Apache-2.0"
] | null | null | null |
lib/ops/prepare_labels_for_prn_and_update_refine_blobs.py
|
xieshuqin/RefineNet
|
94d7f53db82aec2e3969850fe7c1d1b76ce7ad48
|
[
"Apache-2.0"
] | null | null | null |
lib/ops/prepare_labels_for_prn_and_update_refine_blobs.py
|
xieshuqin/RefineNet
|
94d7f53db82aec2e3969850fe7c1d1b76ce7ad48
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from core.config import cfg
import roi_data.prn
import utils.blob as blob_utils
class PrepareLabelsForPRNAndUpdateRefineBlobsOp(object):
""" Prepare labels for PRN. And also update labels for the refinement
tasks.
inputs is [mask_ious, labels_int32]
if training, then inputs include labels for refinement task,
such as [refined_masks_int32]
outputs is [prn_labels_int32, roi_needs_refine_int32, refine_ratio]
and also includes labels for refinement task, such as
[refined_masks_int32]
"""
def __init__(self):
pass
def forward(self, inputs, outputs):
# prepare blobs_in
blobs_in = convert_inputs_to_dict(inputs)
# prepare blobs_out
blob_out_names = get_op_blob_out_names()
blobs_out = {k: [] for k in blob_out_names}
# add blobs for prn
roi_data.prn.add_prn_blobs(blobs_out, blobs_in)
# update refine blobs
update_refine_blobs(blobs_out, blobs_in)
# add to outputs
for i, k in enumerate(blob_out_names):
blob_utils.py_op_copy_blob(blobs_out[k], outputs[i])
def convert_inputs_to_dict(inputs):
blobs_in_names = get_op_blob_in_names()
blobs_in = {k: [] for k in blobs_in_names}
for i, k in enumerate(blobs_in_names):
blobs_in[k] = inputs[i].data
return blobs_in
def get_refine_blob_names():
blob_names = []
if cfg.MODEL.REFINE_MASK_ON:
blob_names += ['refined_masks_int32']
return blob_names
def get_op_blob_in_names():
blob_names = ['mask_ious', 'labels_int32']
blob_names += get_refine_blob_names()
return blob_names
def get_op_blob_out_names():
blob_names = roi_data.prn.get_prn_blob_names()
blob_names += get_refine_blob_names()
return blob_names
def update_refine_blobs(blobs_out, blobs_in):
# convert roi_needs_refine_int32 to bool
roi_needs_refine = blobs_out['roi_needs_refine_int32'].astype(np.bool)
# update the refine blobs
blob_names = get_refine_blob_names()
for k in blob_names:
blobs_out[k] = blobs_in[k][roi_needs_refine]
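if __name__ == '__main__':
    # Small self-contained illustration of the row selection performed by
    # update_refine_blobs: a boolean mask derived from roi_needs_refine_int32
    # keeps only the RoIs flagged for refinement (the shapes here are made up).
    roi_needs_refine = np.array([1, 0, 1], dtype=np.int32).astype(bool)
    refined_masks = np.arange(3 * 4).reshape(3, 4)
    print(refined_masks[roi_needs_refine])  # rows 0 and 2 only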
| 29.844156
| 76
| 0.709312
|
eda574dfa2fd600b997c2ac83639297f90e25a08
| 2,906
|
py
|
Python
|
koko/cam/machines/epilog.py
|
TheBeachLab/kokopelli
|
529b8149a951363d2a027946464ea0bb22346428
|
[
"MIT"
] | 196
|
2015-01-01T08:48:04.000Z
|
2022-03-29T13:21:45.000Z
|
koko/cam/machines/epilog.py
|
Inban-pythonic/kokopelli
|
c99b7909e138c42c7d5c99927f5031f021bffd77
|
[
"MIT"
] | 3
|
2015-01-14T09:55:15.000Z
|
2016-12-28T21:01:49.000Z
|
koko/cam/machines/epilog.py
|
Inban-pythonic/kokopelli
|
c99b7909e138c42c7d5c99927f5031f021bffd77
|
[
"MIT"
] | 26
|
2015-03-10T11:12:23.000Z
|
2022-02-12T17:12:17.000Z
|
"""
@namespace epilog
@brief Output details and UI panel for an Epilog laser cutter.
"""
NAME = 'Epilog'
import tempfile
import subprocess
import koko
from koko.cam.panel import OutputPanel
class EpilogOutput(OutputPanel):
""" @class EpilogOutput UI Panel for Epilog laser
"""
""" @var extension File extension for Epilog laser file
"""
extension = '.epi'
def __init__(self, parent):
OutputPanel.__init__(self, parent)
self.construct('Epilog laser cutter', [
('2D power (%)', 'power', int, lambda f: 0 <= f <= 100),
('Speed (%)', 'speed', int, lambda f: 0 <= f <= 100),
('Rate','rate', int, lambda f: f > 0),
('xmin (mm)', 'xmin', float, lambda f: f >= 0),
('ymin (mm)', 'ymin', float, lambda f: f >= 0),
('autofocus', 'autofocus', bool)
], start=True)
def run(self, paths):
''' Convert the path from the previous panel into an epilog
laser file (with .epi suffix).
'''
values = self.get_values()
if not values: return False
koko.FRAME.status = 'Converting to .epi file'
self.file = tempfile.NamedTemporaryFile(suffix=self.extension)
job_name = koko.APP.filename if koko.APP.filename else 'untitled'
self.file.write("%%-12345X@PJL JOB NAME=%s\r\nE@PJL ENTER LANGUAGE=PCL\r\n&y%iA&l0U&l0Z&u600D*p0X*p0Y*t600R*r0F&y50P&z50S*r6600T*r5100S*r1A*rC%%1BIN;XR%d;YP%d;ZS%d;\n" %
(job_name, 1 if values['autofocus'] else 0,
values['rate'], values['power'], values['speed']))
scale = 600/25.4 # The laser's tick is 600 DPI
xoffset = values['xmin']*scale
yoffset = values['ymin']*scale
xy = lambda x,y: (xoffset + scale*x, yoffset + scale*y)
for path in paths:
self.file.write("PU%d,%d;" % xy(*path.points[0][0:2]))
for pt in path.points[1:]:
self.file.write("PD%d,%d;" % xy(*pt[0:2]))
self.file.write("\n")
self.file.write("%%0B%%1BPUtE%%-12345X@PJL EOJ \r\n")
self.file.flush()
koko.FRAME.status = ''
return True
def send(self):
subprocess.call('printer=laser; lpr -P$printer "%s"'
% self.file.name, shell=True)
################################################################################
from koko.cam.path_panels import ContourPanel
INPUT = ContourPanel
PANEL = EpilogOutput
################################################################################
from koko.cam.inputs.cad import CadImgPanel
DEFAULTS = [
('<None>', {}),
('Cardboard',
{CadImgPanel: [('res',5)],
ContourPanel: [('diameter', 0.25)],
EpilogOutput: [('power', 25), ('speed', 75),
('rate', 500), ('xmin', 0), ('ymin', 0)]
}
)
]
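# Worked unit-conversion example for run() above: path coordinates arrive in
# millimetres and the cutter is addressed in 600 DPI ticks, so
# scale = 600 / 25.4 ≈ 23.62 ticks per mm. A point at x = 10 mm with xmin = 0
# therefore becomes int(23.62 * 10) = 236 in the emitted "PU"/"PD" commands.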
| 29.958763
| 194
| 0.522023
|
d09847efd81c1e91dcec1f9bf7f49026196c785e
| 893
|
py
|
Python
|
ext/opentelemetry-ext-redis/setup.py
|
sethmaxwl/opentelemetry-python
|
224974879c286062d9a5e494fa7d1469832eabbe
|
[
"Apache-2.0"
] | 2
|
2020-08-13T21:10:48.000Z
|
2020-09-30T00:55:05.000Z
|
ext/opentelemetry-ext-redis/setup.py
|
sethmaxwl/opentelemetry-python
|
224974879c286062d9a5e494fa7d1469832eabbe
|
[
"Apache-2.0"
] | 1
|
2021-02-24T01:32:32.000Z
|
2021-02-24T01:32:32.000Z
|
ext/opentelemetry-ext-redis/setup.py
|
sethmaxwl/opentelemetry-python
|
224974879c286062d9a5e494fa7d1469832eabbe
|
[
"Apache-2.0"
] | 2
|
2020-06-01T12:38:23.000Z
|
2021-01-07T10:55:47.000Z
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
BASE_DIR = os.path.dirname(__file__)
VERSION_FILENAME = os.path.join(
BASE_DIR, "src", "opentelemetry", "ext", "redis", "version.py"
)
PACKAGE_INFO = {}
with open(VERSION_FILENAME) as f:
exec(f.read(), PACKAGE_INFO)
setuptools.setup(version=PACKAGE_INFO["__version__"])
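# For reference, the exec() above only needs version.py to define a module-level
# __version__ string, e.g. (value illustrative only):
#     __version__ = "0.0.0"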
| 33.074074
| 74
| 0.753639
|
4032533845d7dd0d709e60765f106c9a3aaeae9b
| 465
|
py
|
Python
|
PostManagement/models.py
|
officialrafsan/travelxpRED
|
2209fba418496c7e64dde8cba271cfb8020856ac
|
[
"MIT"
] | null | null | null |
PostManagement/models.py
|
officialrafsan/travelxpRED
|
2209fba418496c7e64dde8cba271cfb8020856ac
|
[
"MIT"
] | null | null | null |
PostManagement/models.py
|
officialrafsan/travelxpRED
|
2209fba418496c7e64dde8cba271cfb8020856ac
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
from UserManagement.models import User
class Post(models.Model):
Post_title = models.CharField(max_length=100)
Post_location = models.CharField(max_length=100)
Post_tags = models.CharField(max_length=500)
Post_description = models.TextField(max_length=1000000)
User = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
def __str__(self):
return self.Post_title
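# A minimal usage sketch, assuming a User row named `author` already exists
# (field names follow the model above; the literal values are placeholders):
#     post = Post.objects.create(
#         Post_title='Sample trip',
#         Post_location='Somewhere',
#         Post_tags='travel,example',
#         Post_description='...',
#         User=author,
#     )
#     str(post)  # -> 'Sample trip'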
| 27.352941
| 72
| 0.754839
|
f6db2f4027a21ad1664c447d31e061f9aa389ae2
| 18,620
|
py
|
Python
|
pywikibot/flow.py
|
anukaal/pywikibot
|
086e99d686ceebb40cb2e3dc7989e78ce6de3b85
|
[
"MIT"
] | 1
|
2021-07-28T11:48:03.000Z
|
2021-07-28T11:48:03.000Z
|
pywikibot/flow.py
|
anukaal/pywikibot
|
086e99d686ceebb40cb2e3dc7989e78ce6de3b85
|
[
"MIT"
] | null | null | null |
pywikibot/flow.py
|
anukaal/pywikibot
|
086e99d686ceebb40cb2e3dc7989e78ce6de3b85
|
[
"MIT"
] | null | null | null |
"""Objects representing Flow entities, like boards, topics, and posts."""
#
# (C) Pywikibot team, 2015-2020
#
# Distributed under the terms of the MIT license.
#
import abc
import logging
from urllib.parse import parse_qs, urlparse
from pywikibot.exceptions import (
LockedPageError,
NoPageError,
UnknownExtensionError,
)
from pywikibot.page import BasePage, User
from pywikibot.tools import deprecate_arg
logger = logging.getLogger('pywiki.wiki.flow')
# Flow page-like objects (boards and topics)
class FlowPage(BasePage, abc.ABC):
"""The base page meta class for the Flow extension.
It cannot be instantiated directly.
"""
def __init__(self, source, title: str = ''):
"""Initializer.
:param source: A Flow-enabled site or a Link or Page on such a site
:type source: Site, pywikibot.page.Link, or pywikibot.page.Page
:param title: normalized title of the page
:raises TypeError: incorrect use of parameters
:raises ValueError: use of non-Flow-enabled Site
"""
super().__init__(source, title)
if not self.site.has_extension('Flow'):
raise UnknownExtensionError('site is not Flow-enabled')
@abc.abstractmethod
def _load(self, force: bool = False):
"""Abstract method to load and cache the Flow data.
Subclasses must overwrite _load() method to load and cache
the object's internal data from the API.
"""
raise NotImplementedError
@property
def uuid(self) -> str:
"""Return the UUID of the page.
:return: UUID of the page
"""
if not hasattr(self, '_uuid'):
self._uuid = self._load()['workflowId']
return self._uuid
def get(self, force=False, get_redirect=False):
"""Get the page's content."""
if get_redirect or force:
raise NotImplementedError(
"Neither 'force' nor 'get_redirect' parameter is implemented "
'in {}.get()'.format(self.__class__.__name__))
# TODO: Return more useful data
return self._data
class Board(FlowPage):
"""A Flow discussion board."""
def _load(self, force: bool = False):
"""Load and cache the Board's data, derived from its topic list.
:param force: Whether to force a reload if the data is already loaded
"""
if not hasattr(self, '_data') or force:
self._data = self.site.load_board(self)
return self._data
def _parse_url(self, links):
"""Parse a URL retrieved from the API."""
rule = links['fwd']
parsed_url = urlparse(rule['url'])
params = parse_qs(parsed_url.query)
new_params = {}
for key, value in params.items():
if key != 'title':
key = key.replace('topiclist_', '').replace('-', '_')
if key == 'offset_dir':
new_params['reverse'] = (value == 'rev')
else:
new_params[key] = value
return new_params
@deprecate_arg('format', 'content_format')
def topics(self, content_format: str = 'wikitext', limit: int = 100,
sort_by: str = 'newest', offset=None, offset_uuid: str = '',
reverse: bool = False, include_offset: bool = False,
toc_only: bool = False):
"""Load this board's topics.
:param content_format: The content format to request the data in;
must be either 'wikitext', 'html', or 'fixed-html'
:param limit: The number of topics to fetch in each request.
:param sort_by: Algorithm to sort topics by;
must be either 'newest' or 'updated'
:param offset: The timestamp to start at (when sortby is 'updated').
:type offset: Timestamp or equivalent str
:param offset_uuid: The UUID to start at (when sortby is 'newest').
:param reverse: Whether to reverse the topic ordering.
:param include_offset: Whether to include the offset topic.
:param toc_only: Whether to only include information for the TOC.
:return: A generator of this board's topics.
:rtype: generator of Topic objects
"""
data = self.site.load_topiclist(self, content_format=content_format,
limit=limit, sortby=sort_by,
toconly=toc_only, offset=offset,
offset_id=offset_uuid, reverse=reverse,
include_offset=include_offset)
while data['roots']:
for root in data['roots']:
topic = Topic.from_topiclist_data(self, root, data)
yield topic
cont_args = self._parse_url(data['links']['pagination'])
data = self.site.load_topiclist(self, **cont_args)
@deprecate_arg('format', 'content_format')
def new_topic(self, title: str, content: str,
content_format: str = 'wikitext'):
"""Create and return a Topic object for a new topic on this Board.
:param title: The title of the new topic (must be in plaintext)
:param content: The content of the topic's initial post
:param content_format: The content format of the supplied content;
either 'wikitext' or 'html'
:return: The new topic
:rtype: Topic
"""
return Topic.create_topic(self, title, content, content_format)
class Topic(FlowPage):
"""A Flow discussion topic."""
def _load(self, force: bool = False, content_format: str = 'wikitext'):
"""Load and cache the Topic's data.
:param force: Whether to force a reload if the data is already loaded
:param content_format: The post format in which to load
"""
if not hasattr(self, '_data') or force:
self._data = self.site.load_topic(self, content_format)
return self._data
def _reload(self):
"""Forcibly reload the topic's root post."""
self.root._load(load_from_topic=True)
@classmethod
@deprecate_arg('format', 'content_format')
def create_topic(cls, board, title: str, content: str,
content_format: str = 'wikitext'):
"""Create and return a Topic object for a new topic on a Board.
:param board: The topic's parent board
:type board: Board
:param title: The title of the new topic (must be in plaintext)
:param content: The content of the topic's initial post
:param content_format: The content format of the supplied content;
either 'wikitext' or 'html'
:return: The new topic
:rtype: Topic
"""
data = board.site.create_new_topic(board, title, content,
content_format)
return cls(board.site, data['topic-page'])
@classmethod
def from_topiclist_data(cls, board, root_uuid: str, topiclist_data: dict):
"""Create a Topic object from API data.
:param board: The topic's parent Flow board
:type board: Board
:param root_uuid: The UUID of the topic and its root post
:param topiclist_data: The data returned by view-topiclist
:return: A Topic object derived from the supplied data
:rtype: Topic
:raises TypeError: any passed parameters have wrong types
:raises ValueError: the passed topiclist_data is missing required data
"""
if not isinstance(board, Board):
raise TypeError('board must be a pywikibot.flow.Board object.')
if not isinstance(root_uuid, str):
raise TypeError('Topic/root UUID must be a string.')
topic = cls(board.site, 'Topic:' + root_uuid)
topic._root = Post.fromJSON(topic, root_uuid, topiclist_data)
topic._uuid = root_uuid
return topic
@property
def root(self):
"""The root post of this topic."""
if not hasattr(self, '_root'):
self._root = Post.fromJSON(self, self.uuid, self._data)
return self._root
@property
def is_locked(self):
"""Whether this topic is locked."""
return self.root._current_revision['isLocked']
@property
def is_moderated(self):
"""Whether this topic is moderated."""
return self.root._current_revision['isModerated']
@deprecate_arg('format', 'content_format')
def replies(self, content_format: str = 'wikitext', force: bool = False):
"""A list of replies to this topic's root post.
:param content_format: Content format to return contents in;
must be 'wikitext', 'html', or 'fixed-html'
:param force: Whether to reload from the API instead of using the cache
:return: The replies of this topic's root post
:rtype: list of Posts
"""
return self.root.replies(content_format=content_format, force=force)
@deprecate_arg('format', 'content_format')
def reply(self, content: str, content_format: str = 'wikitext'):
"""A convenience method to reply to this topic's root post.
:param content: The content of the new post
:param content_format: The format of the given content;
must be 'wikitext' or 'html')
:return: The new reply to this topic's root post
:rtype: Post
"""
return self.root.reply(content, content_format)
# Moderation
def lock(self, reason: str):
"""Lock this topic.
:param reason: The reason for locking this topic
"""
self.site.lock_topic(self, True, reason)
self._reload()
def unlock(self, reason: str):
"""Unlock this topic.
:param reason: The reason for unlocking this topic
"""
self.site.lock_topic(self, False, reason)
self._reload()
def delete_mod(self, reason: str):
"""Delete this topic through the Flow moderation system.
:param reason: The reason for deleting this topic.
"""
self.site.delete_topic(self, reason)
self._reload()
def hide(self, reason: str):
"""Hide this topic.
:param reason: The reason for hiding this topic.
"""
self.site.hide_topic(self, reason)
self._reload()
def suppress(self, reason: str):
"""Suppress this topic.
:param reason: The reason for suppressing this topic.
"""
self.site.suppress_topic(self, reason)
self._reload()
def restore(self, reason: str):
"""Restore this topic.
:param reason: The reason for restoring this topic.
"""
self.site.restore_topic(self, reason)
self._reload()
# Flow non-page-like objects
class Post:
"""A post to a Flow discussion topic."""
def __init__(self, page, uuid: str):
"""
Initializer.
:param page: Flow topic
:type page: Topic
:param uuid: UUID of a Flow post
:raises TypeError: incorrect types of parameters
"""
if not isinstance(page, Topic):
raise TypeError('Page must be a Topic object')
if not page.exists():
raise NoPageError(page, 'Topic must exist: %s')
if not isinstance(uuid, str):
raise TypeError('Post UUID must be a string')
self._page = page
self._uuid = uuid
self._content = {}
@classmethod
def fromJSON(cls, page, post_uuid: str, data: dict): # noqa: N802
"""
Create a Post object using the data returned from the API call.
:param page: A Flow topic
:type page: Topic
:param post_uuid: The UUID of the post
:param data: The JSON data returned from the API
:return: A Post object
:raises TypeError: data is not a dict
:raises ValueError: data is missing required entries
"""
post = cls(page, post_uuid)
post._set_data(data)
return post
def _set_data(self, data: dict):
"""Set internal data and cache content.
:param data: The data to store internally
:raises TypeError: data is not a dict
:raises ValueError: missing data entries or post/revision not found
"""
if not isinstance(data, dict):
raise TypeError('Illegal post data (must be a dictionary).')
if ('posts' not in data) or ('revisions' not in data):
raise ValueError('Illegal post data (missing required data).')
if self.uuid not in data['posts']:
raise ValueError('Post not found in supplied data.')
current_revision_id = data['posts'][self.uuid][0]
if current_revision_id not in data['revisions']:
            raise ValueError('Current revision of post '
                             'not found in supplied data.')
self._current_revision = data['revisions'][current_revision_id]
if 'content' in self._current_revision:
content = self._current_revision.pop('content')
assert isinstance(content, dict)
assert isinstance(content['content'], str)
self._content[content['format']] = content['content']
def _load(self, force: bool = True, content_format: str = 'wikitext',
load_from_topic: bool = False):
"""Load and cache the Post's data using the given content format.
:param load_from_topic: Whether to load the post from the whole topic
"""
if load_from_topic:
data = self.page._load(force=force, content_format=content_format)
else:
data = self.site.load_post_current_revision(self.page, self.uuid,
content_format)
self._set_data(data)
return self._current_revision
@property
def uuid(self) -> str:
"""Return the UUID of the post.
:return: UUID of the post
"""
return self._uuid
@property
def site(self):
"""Return the site associated with the post.
:return: Site associated with the post
:rtype: Site
"""
return self._page.site
@property
def page(self):
"""Return the page associated with the post.
:return: Page associated with the post
:rtype: Topic
"""
return self._page
@property
def is_moderated(self):
"""Whether this post is moderated."""
if not hasattr(self, '_current_revision'):
self._load()
return self._current_revision['isModerated']
@property
def creator(self):
"""The creator of this post."""
if not hasattr(self, '_current_revision'):
self._load()
if not hasattr(self, '_creator'):
self._creator = User(self.site,
self._current_revision['creator']['name'])
return self._creator
@deprecate_arg('format', 'content_format')
def get(self, content_format: str = 'wikitext',
force: bool = False) -> str:
"""Return the contents of the post in the given format.
:param force: Whether to reload from the API instead of using the cache
:param content_format: Content format to return contents in
:return: The contents of the post in the given content format
"""
if content_format not in self._content or force:
self._load(content_format=content_format)
return self._content[content_format]
@deprecate_arg('format', 'content_format')
def replies(self, content_format: str = 'wikitext', force: bool = False):
"""Return this post's replies.
:param content_format: Content format to return contents in;
must be 'wikitext', 'html', or 'fixed-html'
:param force: Whether to reload from the API instead of using the cache
:return: This post's replies
:rtype: list of Posts
"""
if content_format not in ('wikitext', 'html', 'fixed-html'):
raise ValueError('Invalid content format.')
if hasattr(self, '_replies') and not force:
return self._replies
# load_from_topic workaround due to T106733
# (replies not returned by view-post)
if not hasattr(self, '_current_revision') or force:
self._load(content_format=content_format, load_from_topic=True)
reply_uuids = self._current_revision['replies']
self._replies = [Post(self.page, uuid) for uuid in reply_uuids]
return self._replies
@deprecate_arg('format', 'content_format')
def reply(self, content: str, content_format: str = 'wikitext'):
"""Reply to this post.
:param content: The content of the new post
:param content_format: The format of the given content;
must be 'wikitext' or 'html'
:return: The new reply post
:rtype: Post
"""
self._load()
if self.page.is_locked:
raise LockedPageError(self.page, 'Topic %s is locked.')
reply_url = self._current_revision['actions']['reply']['url']
parsed_url = urlparse(reply_url)
params = parse_qs(parsed_url.query)
reply_to = params['topic_postId']
if self.uuid == reply_to:
del self._current_revision
del self._replies
data = self.site.reply_to_post(self.page, reply_to, content,
content_format)
post = Post(self.page, data['post-id'])
return post
# Moderation
def delete(self, reason: str):
"""Delete this post through the Flow moderation system.
:param reason: The reason for deleting this post.
"""
self.site.delete_post(self, reason)
self._load()
def hide(self, reason: str):
"""Hide this post.
:param reason: The reason for hiding this post.
"""
self.site.hide_post(self, reason)
self._load()
def suppress(self, reason: str):
"""Suppress this post.
:param reason: The reason for suppressing this post.
"""
self.site.suppress_post(self, reason)
self._load()
def restore(self, reason: str):
"""Restore this post.
:param reason: The reason for restoring this post.
"""
self.site.restore_post(self, reason)
self._load()
def thank(self):
"""Thank the user who made this post."""
self.site.thank_post(self)
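# A minimal usage sketch, assuming `site` is a Flow-enabled pywikibot Site and
# that the board title below exists on that wiki (both are assumptions):
#     board = Board(site, 'Project talk:Sandbox')
#     for topic in board.topics(limit=10):
#         if not topic.is_locked:
#             topic.reply('Example reply', content_format='wikitext')
#             break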
| 34.934334
| 79
| 0.605639
|
bb160473c853b1e4b272db132845421ca557b82f
| 6,114
|
py
|
Python
|
convrnn/rcnn_sat/bl_net.py
|
esizikova/anytime-prediction
|
5c2672d6454a91873ca2b40796a29c6f5db5ec99
|
[
"MIT"
] | 15
|
2019-10-09T07:14:51.000Z
|
2022-02-07T16:42:28.000Z
|
convrnn/rcnn_sat/bl_net.py
|
esizikova/anytime-prediction
|
5c2672d6454a91873ca2b40796a29c6f5db5ec99
|
[
"MIT"
] | 1
|
2021-03-25T15:33:18.000Z
|
2021-03-25T15:33:18.000Z
|
convrnn/rcnn_sat/bl_net.py
|
esizikova/anytime-prediction
|
5c2672d6454a91873ca2b40796a29c6f5db5ec99
|
[
"MIT"
] | 4
|
2019-09-12T02:27:31.000Z
|
2021-06-17T21:03:03.000Z
|
'''
Keras implementation of BL network
'''
import tensorflow as tf
class BLConvLayer(object):
'''BL recurrent convolutional layer
Note that this is NOT A KERAS LAYER but is an object containing Keras layers
Args:
filters: Int, number of output filters in convolutions
kernel_size: Int or tuple/list of 2 integers, specifying the height and
width of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
layer_name: String, prefix for layers in the RCL
'''
def __init__(self, filters, kernel_size, layer_name):
# initialise convolutional layers
self.b_conv = tf.keras.layers.Conv2D(
filters, kernel_size, padding='same', use_bias=False,
kernel_initializer='glorot_uniform',
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
name='{}_BConv'.format(layer_name))
self.l_conv = tf.keras.layers.Conv2D(
filters, kernel_size, padding='same', use_bias=False,
kernel_initializer='glorot_uniform',
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
name='{}_LConv'.format(layer_name))
# layer for summing convolutions
self.sum_convs = tf.keras.layers.Lambda(
tf.add_n, name='{}_ConvSum'.format(layer_name))
# holds the most recent bottom-up conv
# useful when the bottom-up input does not change, e.g. input image
self.previous_b_conv = None
def __call__(self, b_input=None, l_input=None):
conv_list = []
        if b_input is not None:
# run bottom-up conv and save result
conv_list.append(self.b_conv(b_input))
self.previous_b_conv = conv_list[-1]
        elif self.previous_b_conv is not None:
# use the most recent bottom-up conv
conv_list.append(self.previous_b_conv)
else:
raise ValueError('b_input must be given on first pass')
# run lateral conv
if l_input is not None:
conv_list.append(self.l_conv(l_input))
# return element-wise sum of convolutions
return self.sum_convs(conv_list)
def bl_net(input_tensor, classes, n_timesteps=8, cumulative_readout=False):
'''Build the computational graph for the model
Note that evaluations based on model outputs will reflect instantaneous
rather than cumulative readouts
Args:
input_tensor: Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
classes: int, number of classes to classify images into
n_timesteps: int, number of model time steps to build
        cumulative_readout: Bool, if True then the outputs correspond to a
            cumulative readout on each time step; if False then they
            correspond to an instant readout
Returns:
model
'''
data_format = tf.keras.backend.image_data_format()
norm_axis = -1 if data_format == 'channels_last' else -3
# initialise trainable layers (RCLs and linear readout)
layers = [
BLConvLayer(96, 7, 'RCL_0'),
BLConvLayer(128, 5, 'RCL_1'),
BLConvLayer(192, 3, 'RCL_2'),
BLConvLayer(256, 3, 'RCL_3'),
BLConvLayer(512, 3, 'RCL_4'),
BLConvLayer(1024, 3, 'RCL_5'),
BLConvLayer(2048, 1, 'RCL_6'),
]
readout_dense = tf.keras.layers.Dense(
classes, kernel_initializer='glorot_uniform',
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
name='ReadoutDense')
# initialise list for activations and outputs
n_layers = len(layers)
activations = [[None for _ in range(n_layers)]
for _ in range(n_timesteps)]
presoftmax = [None for _ in range(n_timesteps)]
outputs = [None for _ in range(n_timesteps)]
# build the model
for t in range(n_timesteps):
for n, layer in enumerate(layers):
# get the bottom-up input
if n == 0:
# B conv on the image does not need to be recomputed
b_input = input_tensor if t == 0 else None
else:
# pool b_input for all layers apart from input
b_input = tf.keras.layers.MaxPool2D(
pool_size=(2, 2),
name='MaxPool_Layer_{}_Time_{}'.format(n, t)
)(activations[t][n-1])
# get the lateral input
if t == 0:
l_input = None
else:
l_input = activations[t-1][n]
# convolutions
x_tn = layer(b_input, l_input)
# batch-normalisation
x_tn = tf.keras.layers.BatchNormalization(
norm_axis,
name='BatchNorm_Layer_{}_Time_{}'.format(n, t))(x_tn)
# ReLU
activations[t][n] = tf.keras.layers.Activation(
'relu', name='ReLU_Layer_{}_Time_{}'.format(n, t))(x_tn)
# add the readout layers
x = tf.keras.layers.GlobalAvgPool2D(
name='GlobalAvgPool_Time_{}'.format(t)
)(activations[t][-1])
presoftmax[t] = readout_dense(x)
# select cumulative or instant readout
if cumulative_readout and t > 0:
x = tf.keras.layers.Add(
name='CumulativeReadout_Time_{}'.format(t)
)(presoftmax[:t+1])
else:
x = presoftmax[t]
outputs[t] = tf.keras.layers.Activation(
            'softmax', name='Softmax_Time_{}'.format(t))(x)
# create Keras model and return
model = tf.keras.Model(
inputs=input_tensor,
outputs=outputs,
name='bl_net')
return model
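if __name__ == '__main__':
    # Build sketch only: the 128x128 RGB input shape and the class count are
    # illustrative assumptions, not values mandated by this module.
    input_layer = tf.keras.layers.Input((128, 128, 3))
    model = bl_net(input_layer, classes=10, n_timesteps=8,
                   cumulative_readout=True)
    model.summary()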
| 37.975155
| 80
| 0.567877
|
315abc3a775e1675a9812e40e4c750520421a6e0
| 963
|
py
|
Python
|
bluebrain/repo-patches/packages/random123/package.py
|
BlueBrain/Spack
|
dc328512c70e182f3c24bb0ce64fa3586482bdf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
bluebrain/repo-patches/packages/random123/package.py
|
BlueBrain/Spack
|
dc328512c70e182f3c24bb0ce64fa3586482bdf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2022-03-01T02:26:40.000Z
|
2022-03-15T02:33:38.000Z
|
bluebrain/repo-patches/packages/random123/package.py
|
BlueBrain/Spack
|
dc328512c70e182f3c24bb0ce64fa3586482bdf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
from spack import *
from spack.pkg.builtin.random123 import Random123 as BuiltinRandom123
class Random123(BuiltinRandom123):
__doc__ = BuiltinRandom123.__doc__
url = "https://github.com/DEShawResearch/random123/archive/refs/tags/v1.14.0.tar.gz"
version('1.14.0', sha256='effafd8656b18030b2a5b995cd3650c51a7c45052e6e1c21e48b9fa7a59d926e')
version('1.13.2', sha256='74a1c6bb66b2684f03d3b1008642a2e9141909103cd09f428d2c60bcaa51cb40',
url='https://www.deshawresearch.com/downloads/download_random123.cgi/Random123-1.13.2.tar.gz')
version('1.10', sha256='4afdfba4b941e33e23b5de9b7907b7e3ac326cb4d34b5fa8225edd00b5fe053b',
url='https://www.deshawresearch.com/downloads/download_random123.cgi/Random123-1.10.tar.gz')
version('1.09', sha256='cf6abf623061bcf3d17e5e49bf3f3f0ae400ee89ae2e97c8cb8dcb918b1ebabe',
url='https://www.deshawresearch.com/downloads/download_random123.cgi/Random123-1.09.tar.gz')
| 56.647059
| 106
| 0.777778
|
c71daec41df73f51f6c37338c042ef6aaf7039d0
| 1,414
|
py
|
Python
|
tests/clean_data_test.py
|
chika334/sport-prediction
|
3f62c4be5d803cd7bd7a37ad85625257fa9fb37f
|
[
"MIT"
] | 29
|
2018-09-27T20:17:35.000Z
|
2022-01-25T14:45:25.000Z
|
tests/clean_data_test.py
|
chika334/sport-prediction
|
3f62c4be5d803cd7bd7a37ad85625257fa9fb37f
|
[
"MIT"
] | 1
|
2020-02-17T09:17:47.000Z
|
2020-02-17T09:17:47.000Z
|
tests/clean_data_test.py
|
chika334/sport-prediction
|
3f62c4be5d803cd7bd7a37ad85625257fa9fb37f
|
[
"MIT"
] | 23
|
2018-10-26T07:13:47.000Z
|
2022-03-25T21:11:27.000Z
|
import pytest
import os
import pandas as pd
import requests
from match_history import (
get_current_fixtures,
convert_team_name,
)
from helpers import (
make_directory,
remove_directory,
)
def test_get_current_fixtures_api():
base_url = "http://api.football-data.org/v2/competitions/"
AUTH_TOKEN = "9f2efd00a5604f59a8f1c54860786e31"
headers={"X-Auth-Token": AUTH_TOKEN}
england_area_code = 2072
list_competitions_url = base_url + "?areas=" + str(england_area_code)
req = requests.get(list_competitions_url, headers=headers)
assert req.status_code == 200
def test_convert_team_name_exists():
assert convert_team_name('Manchester United FC') == 'Man United'
assert convert_team_name('Leicester City FC') == 'Leicester'
assert convert_team_name('Tottenham Hotspur FC') == 'Tottenham'
assert convert_team_name('Newcastle United FC') == 'Newcastle'
assert convert_team_name('Chelsea FC') == 'Chelsea'
assert convert_team_name('Huddersfield Town AFC') == 'Huddersfield'
def test_convert_team_name_not_exists():
assert convert_team_name('Brighton') == 'Brighton'
assert convert_team_name('Southampton') == 'Southampton'
assert convert_team_name('Sheffield') == 'Sheffield'
assert convert_team_name('Chelsea') == 'Chelsea'
assert convert_team_name('Liverpool') == 'Liverpool'
assert convert_team_name('Derby') == 'Derby'
| 35.35
| 73
| 0.736917
|
43f173cebda4b12cdfbdeba033ee4dd3ea066694
| 17,391
|
py
|
Python
|
gmocoin/private/dto.py
|
makotookamura/GmoCoin
|
025d3e68364bf52418dbc3445987ff21528db732
|
[
"Apache-2.0"
] | null | null | null |
gmocoin/private/dto.py
|
makotookamura/GmoCoin
|
025d3e68364bf52418dbc3445987ff21528db732
|
[
"Apache-2.0"
] | null | null | null |
gmocoin/private/dto.py
|
makotookamura/GmoCoin
|
025d3e68364bf52418dbc3445987ff21528db732
|
[
"Apache-2.0"
] | null | null | null |
#!python3
from marshmallow import fields
from marshmallow_enum import EnumField
from enum import Enum
from datetime import datetime
from pytz import timezone
from typing import List
from decimal import Decimal
from ..common.dto import BaseSchema, BaseResponse, BaseResponseSchema, \
Symbol, AssetSymbol, SalesSide, OrderType, ExecutionType, SettleType, \
OrderStatus, TimeInForce, MarginCallStatus
class GetMarginData:
"""
    Margin (available trading capacity) information data class.
"""
def __init__(self, actual_profit_loss: Decimal, available_amount: Decimal, margin: Decimal, margin_call_status: MarginCallStatus, margin_ratio: Decimal, profit_loss: Decimal) -> None:
"""
        Constructor.
        Args:
            actual_profit_loss:
                Total mark-to-market valuation
            available_amount:
                Amount available for trading
            margin:
                Margin currently held (locked)
            margin_call_status:
                Margin call status: NORMAL MARGIN_CALL LOSSCUT
            margin_ratio:
                Margin maintenance ratio
            profit_loss:
                Unrealized profit/loss
"""
self.actual_profit_loss = actual_profit_loss
self.available_amount = available_amount
self.margin = margin
self.margin_call_status = margin_call_status
self.margin_ratio = margin_ratio
self.profit_loss = profit_loss
class GetMarginDataSchema(BaseSchema):
"""
    Margin information data schema class.
"""
__model__ = GetMarginData
actual_profit_loss = fields.Decimal(data_key='actualProfitLoss')
available_amount = fields.Decimal(data_key='availableAmount')
margin = fields.Decimal(data_key='margin')
margin_call_status = EnumField(
MarginCallStatus, data_key='marginCallStatus')
margin_ratio = fields.Decimal(data_key='marginRatio')
profit_loss = fields.Decimal(data_key='profitLoss')
class GetMarginRes(BaseResponse):
"""
    Margin information response class.
"""
def __init__(self, status: int, responsetime: datetime, data: GetMarginData) -> None:
"""
        Constructor.
        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
"""
super().__init__(status, responsetime)
self.data = data
class GetMarginResSchema(BaseResponseSchema):
"""
    Margin information response schema class.
"""
__model__ = GetMarginRes
data = fields.Nested(GetMarginDataSchema, data_key='data')
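# A deserialization sketch (all literal values are made up; actual parsing
# behaviour depends on the BaseResponseSchema defined in ..common.dto):
#     _payload = {
#         'status': 0,
#         'responsetime': '2021-01-01T00:00:00.000Z',
#         'data': {'actualProfitLoss': '100000', 'availableAmount': '50000',
#                  'margin': '25000', 'marginCallStatus': 'NORMAL',
#                  'marginRatio': '250.0', 'profitLoss': '0'},
#     }
#     GetMarginResSchema().load(_payload)  # -> GetMarginRes instance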
class GetAssetsData:
"""
    Asset balance data class.
"""
def __init__(self, amount: Decimal, available: Decimal, conversion_rate: Decimal, symbol: AssetSymbol) -> None:
"""
        Constructor.
        Args:
            amount:
                Balance
            available:
                Available amount (balance minus pending withdrawals)
            conversion_rate:
                Conversion rate to JPY
            symbol:
                Symbol name: JPY BTC ETH BCH LTC XRP
"""
self.amount = amount
self.available = available
self.conversion_rate = conversion_rate
self.symbol = symbol
class GetAssetsDataSchema(BaseSchema):
"""
    Asset balance data schema class.
"""
__model__ = GetAssetsData
amount = fields.Decimal(data_key='amount')
available = fields.Decimal(data_key='available')
conversion_rate = fields.Decimal(data_key='conversionRate')
symbol = EnumField(AssetSymbol, data_key='symbol')
class GetAssetsRes(BaseResponse):
"""
    Asset balance response class.
"""
def __init__(self, status: int, responsetime: datetime, data: GetAssetsData) -> None:
"""
        Constructor.
        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
"""
super().__init__(status, responsetime)
self.data = data
class GetAssetsResSchema(BaseResponseSchema):
"""
    Asset balance response schema class.
"""
__model__ = GetAssetsRes
data = fields.Nested(GetAssetsDataSchema, data_key='data', many=True)
class ActiveOrdersPagenation:
"""
    Active orders list paging data class.
"""
def __init__(self, current_page: int, count: int) -> None:
"""
        Constructor.
        Args:
            current_page:
                Sets the current page number.
            count:
                Sets the number of records.
"""
self.current_page = current_page
self.count = count
class ActiveOrdersPagenationSchema(BaseSchema):
"""
    Active orders list paging data schema class.
"""
__model__ = ActiveOrdersPagenation
current_page = fields.Int(data_key='currentPage')
count = fields.Int(data_key='count')
class ActiveOrder:
"""
    Active order class.
"""
def __init__(self, root_order_id: int, order_id: int, symbol: Symbol, side: SalesSide, order_type: OrderType,
execution_type: ExecutionType, settle_type: SettleType, size: Decimal, executed_size: Decimal,
price: Decimal, losscut_price: Decimal, status: OrderStatus, time_in_force: TimeInForce, timestamp: datetime) -> None:
"""
        Constructor.
        Args:
            root_order_id:
                Parent order ID
            order_id:
                Order ID
            symbol:
                Symbol name: BTC ETH BCH LTC XRP BTC_JPY ETH_JPY BCH_JPY LTC_JPY XRP_JPY
            side:
                Buy/sell side: BUY SELL
            order_type:
                Order category: NORMAL LOSSCUT
            execution_type:
                Execution type: MARKET LIMIT STOP
            settle_type:
                Settlement type: OPEN CLOSE
            size:
                Order quantity
            executed_size:
                Executed quantity
            price:
                Order price ("0" for MARKET orders)
            losscut_price:
                Loss-cut rate ("0" for spot trades or when not set)
            status:
                Order status: WAITING ORDERED MODIFYING CANCELLING CANCELED EXECUTED EXPIRED
                Note: WAITING applies to stop (trigger) orders.
            time_in_force:
                Time in force: FAK FAS FOK (SOK for Post-only orders)
            timestamp:
                Order timestamp
"""
self.root_order_id = root_order_id
self.order_id = order_id
self.symbol = symbol
self.side = side
self.order_type = order_type
self.execution_type = execution_type
self.settle_type = settle_type
self.size = size
self.executed_size = executed_size
self.price = price
self.losscut_price = losscut_price
self.status = status
self.time_in_force = time_in_force
self.timestamp = timestamp.astimezone(timezone('Asia/Tokyo'))
class ActiveOrderSchema(BaseSchema):
"""
    Active order schema class.
"""
__model__ = ActiveOrder
root_order_id = fields.Int(data_key='rootOrderId')
order_id = fields.Int(data_key='orderId')
symbol = EnumField(Symbol, data_key='symbol')
side = EnumField(SalesSide, data_key='side')
order_type = EnumField(OrderType, data_key='orderType')
execution_type = EnumField(ExecutionType, data_key='executionType')
settle_type = EnumField(SettleType, data_key='settleType')
size = fields.Decimal(data_key='size')
executed_size = fields.Decimal(data_key='executedSize')
price = fields.Decimal(data_key='price')
losscut_price = fields.Decimal(data_key='losscutPrice')
status = EnumField(OrderStatus, data_key='status')
time_in_force = EnumField(TimeInForce, data_key='timeInForce')
timestamp = fields.DateTime(
format='%Y-%m-%dT%H:%M:%S.%fZ', data_key='timestamp')
class GetActiveOrdersData:
"""
    Active orders list data class.
"""
def __init__(self, pagination: ActiveOrdersPagenation = None, active_orders: List[ActiveOrder] = None) -> None:
"""
        Constructor.
        Args:
            pagination:
                Sets the pagination info.
            active_orders:
                Sets the list of active orders.
"""
self.pagination = pagination
self.active_orders = active_orders
class GetActiveOrdersDataSchema(BaseSchema):
"""
    Schema class for active orders list data.
"""
__model__ = GetActiveOrdersData
pagination = fields.Nested(
ActiveOrdersPagenationSchema, data_key='pagination')
active_orders = fields.Nested(
ActiveOrderSchema, data_key='list', many=True)
class GetActiveOrdersRes(BaseResponse):
"""
    Active orders list response class.
"""
def __init__(self, status: int, responsetime: datetime, data: GetActiveOrdersData) -> None:
"""
        Constructor.
        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
"""
super().__init__(status, responsetime)
self.data = data
class GetActiveOrdersResSchema(BaseResponseSchema):
"""
    Schema class for the active orders list response.
"""
__model__ = GetActiveOrdersRes
data = fields.Nested(GetActiveOrdersDataSchema, data_key='data')
class LatestExecutionsPagenation:
"""
    Pagination data class for the latest executions list.
"""
def __init__(self, current_page: int, count: int) -> None:
"""
        Constructor.
        Args:
            current_page:
                Sets the current page number.
            count:
                Sets the number of records.
"""
self.current_page = current_page
self.count = count
class LatestExecutionsPagenationSchema(BaseSchema):
"""
    Schema class for the latest executions pagination data.
"""
__model__ = LatestExecutionsPagenation
current_page = fields.Int(data_key='currentPage')
count = fields.Int(data_key='count')
class LatestExecution:
"""
    Latest execution class.
"""
def __init__(self, execution_id: int, order_id: int, symbol: Symbol, side: SalesSide, settle_type: SettleType,
size: Decimal, price: Decimal, loss_gain: Decimal, fee: Decimal, timestamp: datetime) -> None:
"""
        Constructor.
        Args:
            execution_id:
                Execution ID.
            order_id:
                Order ID.
            symbol:
                Symbol: BTC ETH BCH LTC XRP BTC_JPY ETH_JPY BCH_JPY LTC_JPY XRP_JPY
            side:
                Order side: BUY SELL
            settle_type:
                Settlement type: OPEN CLOSE
            size:
                Executed quantity.
            price:
                Execution price.
            loss_gain:
                Realized profit and loss.
            fee:
                Trading fee.
                Note: positive for taker trades, negative for maker trades.
            timestamp:
                Execution timestamp.
"""
self.execution_id = execution_id
self.order_id = order_id
self.symbol = symbol
self.side = side
self.settle_type = settle_type
self.size = size
self.price = price
self.loss_gain = loss_gain
self.fee = fee
self.timestamp = timestamp.astimezone(timezone('Asia/Tokyo'))
class LatestExecutionSchema(BaseSchema):
"""
    Schema class for a latest execution.
"""
__model__ = LatestExecution
execution_id = fields.Int(data_key='executionId')
order_id = fields.Int(data_key='orderId')
symbol = EnumField(Symbol, data_key='symbol')
side = EnumField(SalesSide, data_key='side')
settle_type = EnumField(SettleType, data_key='settleType')
size = fields.Decimal(data_key='size')
price = fields.Decimal(data_key='price')
loss_gain = fields.Decimal(data_key='lossGain')
fee = fields.Decimal(data_key='fee')
timestamp = fields.DateTime(
format='%Y-%m-%dT%H:%M:%S.%fZ', data_key='timestamp')
class GetLatestExecutionsData:
"""
    Latest executions list data class.
"""
def __init__(self, pagination: LatestExecutionsPagenation = None, latest_executions: List[LatestExecution] = None) -> None:
"""
        Constructor.
        Args:
            pagination:
                Sets the pagination info.
            latest_executions:
                Sets the list of latest executions.
"""
self.pagination = pagination
self.latest_executions = latest_executions
class GetLatestExecutionsDataSchema(BaseSchema):
"""
    Schema class for latest executions list data.
"""
__model__ = GetLatestExecutionsData
pagination = fields.Nested(
LatestExecutionsPagenationSchema, data_key='pagination')
latest_executions = fields.Nested(
LatestExecutionSchema, data_key='list', many=True)
class GetLatestExecutionsRes(BaseResponse):
"""
    Latest executions list response class.
"""
def __init__(self, status: int, responsetime: datetime, data: GetLatestExecutionsData) -> None:
"""
        Constructor.
        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
"""
super().__init__(status, responsetime)
self.data = data
class GetLatestExecutionsResSchema(BaseResponseSchema):
"""
    Schema class for the latest executions list response.
"""
__model__ = GetLatestExecutionsRes
data = fields.Nested(GetLatestExecutionsDataSchema, data_key='data')
class PositionSummary:
"""
    Position summary class.
"""
def __init__(self, average_position_rate: Decimal, position_loss_gain: Decimal, side: SalesSide,
sum_order_quantity: Decimal, sum_position_quantity: Decimal, symbol: Symbol) -> None:
"""
        Constructor.
        Args:
            average_position_rate:
                Average position rate.
            position_loss_gain:
                Unrealized profit and loss.
            side:
                Order side: BUY SELL
            sum_order_quantity:
                Quantity currently on order.
            sum_position_quantity:
                Position quantity.
            symbol:
                Symbol: BTC_JPY ETH_JPY BCH_JPY LTC_JPY XRP_JPY
"""
self.average_position_rate = average_position_rate
self.position_loss_gain = position_loss_gain
self.side = side
self.sum_order_quantity = sum_order_quantity
self.sum_position_quantity = sum_position_quantity
self.symbol = symbol
class PositionSummarySchema(BaseSchema):
"""
    Schema class for a position summary.
"""
__model__ = PositionSummary
average_position_rate = fields.Decimal(data_key='averagePositionRate')
position_loss_gain = fields.Decimal(data_key='positionLossGain')
side = EnumField(SalesSide, data_key='side')
sum_order_quantity = fields.Decimal(data_key='sumOrderQuantity')
sum_position_quantity = fields.Decimal(data_key='sumPositionQuantity')
symbol = EnumField(Symbol, data_key='symbol')
class GetPositionSummaryData:
"""
    Position summary data class.
"""
def __init__(self, position_summarys: List[PositionSummary] = []) -> None:
"""
        Constructor.
        Args:
            position_summarys:
                Sets the list of position summaries.
"""
self.position_summarys = position_summarys
class GetPositionSummaryDataSchema(BaseSchema):
"""
    Schema class for position summary data.
"""
__model__ = GetPositionSummaryData
position_summarys = fields.Nested(
PositionSummarySchema, data_key='list', many=True, allow_none=True)
class GetPositionSummaryRes(BaseResponse):
"""
    Position summary response class.
"""
    def __init__(self, status: int, responsetime: datetime, data: GetPositionSummaryData) -> None:
"""
        Constructor.
        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
"""
super().__init__(status, responsetime)
self.data = data
class GetPositionSummaryResSchema(BaseResponseSchema):
"""
    Schema class for the position summary response.
"""
__model__ = GetPositionSummaryRes
data = fields.Nested(GetPositionSummaryDataSchema, data_key='data')
class PostOrderRes(BaseResponse):
"""
    New order response class.
"""
def __init__(self, status: int, responsetime: datetime, data: int) -> None:
"""
        Constructor.
        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
"""
super().__init__(status, responsetime)
self.data = data
class PostOrderResSchema(BaseResponseSchema):
"""
    Schema class for the new order response.
"""
__model__ = PostOrderRes
data = fields.Int(data_key='data')
class PostCloseOrderRes(BaseResponse):
"""
    Close order response class.
"""
def __init__(self, status: int, responsetime: datetime, data: int) -> None:
"""
        Constructor.
        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
"""
super().__init__(status, responsetime)
self.data = data
class PostCloseOrderResSchema(BaseResponseSchema):
"""
    Schema class for the close order response.
"""
__model__ = PostCloseOrderRes
data = fields.Int(data_key='data')
class PostCloseBulkOrderRes(BaseResponse):
"""
    Bulk close order response class.
"""
def __init__(self, status: int, responsetime: datetime, data: int) -> None:
"""
        Constructor.
        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
"""
super().__init__(status, responsetime)
self.data = data
class PostCloseBulkOrderResSchema(BaseResponseSchema):
"""
    Schema class for the bulk close order response.
"""
__model__ = PostCloseBulkOrderRes
data = fields.Int(data_key='data')
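# A minimal usage sketch (not part of the original module): shows how one of the
# response schemas above might deserialize a raw GMO Coin API payload into its model
# class. It assumes BaseResponseSchema follows the usual marshmallow pattern of
# building `__model__` instances on load; the payload values are illustrative only.
if __name__ == '__main__':
    _sample_payload = {
        'status': 0,
        'responsetime': '2021-01-01T12:34:56.789Z',
        'data': 123456789,
    }
    _res = PostOrderResSchema().load(_sample_payload)
    print(_res.status, _res.data)  # expected: 0 123456789 (the new order ID)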
| avg_line_length: 26.47032 | max_line_length: 187 | alphanum_fraction: 0.598758 |

| hexsha: 93491026e8ca33247efd2ac9118a0d1120ee7a18 | size: 57635 | ext: py | lang: Python |
| repo_path: web3/contract.py | repo_name: smudgil/web3.py | repo_head_hexsha: c2a39301468cd6db8f910a74273b0729b4d3556f | licenses: ["MIT"] | stars: null | issues: null | forks: null |
"""Interaction with smart contracts over Web3 connector.
"""
import copy
import itertools
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Dict,
Generator,
Iterable,
List,
NoReturn,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
from eth_abi.exceptions import (
DecodingError,
)
from eth_typing import (
Address,
BlockNumber,
ChecksumAddress,
HexStr,
)
from eth_utils import (
add_0x_prefix,
combomethod,
encode_hex,
function_abi_to_4byte_selector,
is_list_like,
is_text,
to_tuple,
)
from eth_utils.toolz import (
compose,
partial,
)
from hexbytes import (
HexBytes,
)
from web3._utils.abi import (
abi_to_signature,
check_if_arguments_can_be_encoded,
fallback_func_abi_exists,
filter_by_type,
get_abi_input_names,
get_abi_input_types,
get_abi_output_types,
get_constructor_abi,
is_array_type,
map_abi_data,
merge_args_and_kwargs,
receive_func_abi_exists,
)
from web3._utils.blocks import (
is_hex_encoded_block_hash,
)
from web3._utils.contracts import (
encode_abi,
find_matching_event_abi,
find_matching_fn_abi,
get_function_info,
prepare_transaction,
)
from web3._utils.datatypes import (
PropertyCheckingFactory,
)
from web3._utils.decorators import (
deprecated_for,
)
from web3._utils.empty import (
empty,
)
from web3._utils.encoding import (
to_4byte_hex,
to_hex,
)
from web3._utils.events import (
EventFilterBuilder,
get_event_data,
is_dynamic_sized_type,
)
from web3._utils.filters import (
LogFilter,
construct_event_filter_params,
)
from web3._utils.function_identifiers import (
FallbackFn,
ReceiveFn,
)
from web3._utils.normalizers import (
BASE_RETURN_NORMALIZERS,
normalize_abi,
normalize_address,
normalize_bytecode,
)
from web3._utils.transactions import (
fill_transaction_defaults,
)
from web3.datastructures import (
AttributeDict,
MutableAttributeDict,
)
from web3.exceptions import (
ABIEventFunctionNotFound,
ABIFunctionNotFound,
BadFunctionCallOutput,
BlockNumberOutofRange,
FallbackNotFound,
InvalidEventABI,
LogTopicError,
MismatchedABI,
NoABIEventsFound,
NoABIFound,
NoABIFunctionsFound,
ValidationError,
)
from web3.logs import (
DISCARD,
IGNORE,
STRICT,
WARN,
EventLogErrorFlags,
)
from web3.types import ( # noqa: F401
ABI,
ABIEvent,
ABIFunction,
BlockIdentifier,
EventData,
FunctionIdentifier,
LogReceipt,
TxParams,
TxReceipt,
)
if TYPE_CHECKING:
from web3 import Web3 # noqa: F401
ACCEPTABLE_EMPTY_STRINGS = ["0x", b"0x", "", b""]
class ContractFunctions:
"""Class containing contract function objects
"""
def __init__(self, abi: ABI, web3: 'Web3', address: Optional[ChecksumAddress] = None) -> None:
self.abi = abi
self.web3 = web3
self.address = address
if self.abi:
self._functions = filter_by_type('function', self.abi)
for func in self._functions:
setattr(
self,
func['name'],
ContractFunction.factory(
func['name'],
web3=self.web3,
contract_abi=self.abi,
address=self.address,
function_identifier=func['name']))
def __iter__(self) -> Generator[str, None, None]:
if not hasattr(self, '_functions') or not self._functions:
return
for func in self._functions:
yield func['name']
def __getattr__(self, function_name: str) -> "ContractFunction":
if self.abi is None:
raise NoABIFound(
"There is no ABI found for this contract.",
)
if '_functions' not in self.__dict__:
raise NoABIFunctionsFound(
"The abi for this contract contains no function definitions. ",
"Are you sure you provided the correct contract abi?"
)
elif function_name not in self.__dict__['_functions']:
raise ABIFunctionNotFound(
"The function '{}' was not found in this contract's abi. ".format(function_name),
"Are you sure you provided the correct contract abi?"
)
else:
return super().__getattribute__(function_name)
def __getitem__(self, function_name: str) -> ABIFunction:
return getattr(self, function_name)
    def __hasattr__(self, function_name: str) -> bool:
        try:
            return function_name in self.__dict__['_functions']
        except ABIFunctionNotFound:
            return False
class ContractEvents:
"""Class containing contract event objects
This is available via:
.. code-block:: python
>>> mycontract.events
<web3.contract.ContractEvents object at 0x108afde10>
To get list of all supported events in the contract ABI.
This allows you to iterate over :class:`ContractEvent` proxy classes.
.. code-block:: python
>>> for e in mycontract.events: print(e)
<class 'web3._utils.datatypes.LogAnonymous'>
...
"""
def __init__(self, abi: ABI, web3: 'Web3', address: Optional[ChecksumAddress] = None) -> None:
if abi:
self.abi = abi
self._events = filter_by_type('event', self.abi)
for event in self._events:
setattr(
self,
event['name'],
ContractEvent.factory(
event['name'],
web3=web3,
contract_abi=self.abi,
address=address,
event_name=event['name']))
def __getattr__(self, event_name: str) -> Type['ContractEvent']:
if '_events' not in self.__dict__:
raise NoABIEventsFound(
"The abi for this contract contains no event definitions. ",
"Are you sure you provided the correct contract abi?"
)
elif event_name not in self.__dict__['_events']:
raise ABIEventFunctionNotFound(
"The event '{}' was not found in this contract's abi. ".format(event_name),
"Are you sure you provided the correct contract abi?"
)
else:
return super().__getattribute__(event_name)
def __getitem__(self, event_name: str) -> Type['ContractEvent']:
return getattr(self, event_name)
def __iter__(self) -> Iterable[Type['ContractEvent']]:
"""Iterate over supported
:return: Iterable of :class:`ContractEvent`
"""
for event in self._events:
yield self[event['name']]
def __hasattr__(self, event_name: str) -> bool:
try:
return event_name in self.__dict__['_events']
except ABIEventFunctionNotFound:
return False
class Contract:
"""Base class for Contract proxy classes.
First you need to create your Contract classes using
:meth:`web3.eth.Eth.contract` that takes compiled Solidity contract
ABI definitions as input. The created class object will be a subclass of
this base class.
After you have your Contract proxy class created you can interact with
smart contracts
* Create a Contract proxy object for an existing deployed smart contract by
its address using :meth:`__init__`
* Deploy a new smart contract using :py:meth:`Contract.constructor.transact()`
"""
# set during class construction
web3: 'Web3' = None
# instance level properties
address: ChecksumAddress = None
# class properties (overridable at instance level)
abi: ABI = None
asm = None
ast = None
bytecode = None
bytecode_runtime = None
clone_bin = None
functions: ContractFunctions = None
caller: 'ContractCaller' = None
#: Instance of :class:`ContractEvents` presenting available Event ABIs
events: ContractEvents = None
dev_doc = None
interface = None
metadata = None
opcodes = None
src_map = None
src_map_runtime = None
user_doc = None
def __init__(self, address: Optional[ChecksumAddress] = None) -> None:
"""Create a new smart contract proxy object.
:param address: Contract address as 0x hex string
"""
if self.web3 is None:
raise AttributeError(
'The `Contract` class has not been initialized. Please use the '
'`web3.contract` interface to create your contract class.'
)
if address:
self.address = normalize_address(self.web3.ens, address)
if not self.address:
raise TypeError("The address argument is required to instantiate a contract.")
self.functions = ContractFunctions(self.abi, self.web3, self.address)
self.caller = ContractCaller(self.abi, self.web3, self.address)
self.events = ContractEvents(self.abi, self.web3, self.address)
self.fallback = Contract.get_fallback_function(self.abi, self.web3, self.address)
self.receive = Contract.get_receive_function(self.abi, self.web3, self.address)
@classmethod
def factory(cls, web3: 'Web3', class_name: Optional[str] = None, **kwargs: Any) -> 'Contract':
kwargs['web3'] = web3
normalizers = {
'abi': normalize_abi,
'address': partial(normalize_address, kwargs['web3'].ens),
'bytecode': normalize_bytecode,
'bytecode_runtime': normalize_bytecode,
}
contract = cast(Contract, PropertyCheckingFactory(
class_name or cls.__name__,
(cls,),
kwargs,
normalizers=normalizers,
))
contract.functions = ContractFunctions(contract.abi, contract.web3)
contract.caller = ContractCaller(contract.abi, contract.web3, contract.address)
contract.events = ContractEvents(contract.abi, contract.web3)
contract.fallback = Contract.get_fallback_function(contract.abi, contract.web3)
contract.receive = Contract.get_receive_function(contract.abi, contract.web3)
return contract
#
# Contract Methods
#
@classmethod
def constructor(cls, *args: Any, **kwargs: Any) -> 'ContractConstructor':
"""
:param args: The contract constructor arguments as positional arguments
:param kwargs: The contract constructor arguments as keyword arguments
:return: a contract constructor object
"""
if cls.bytecode is None:
raise ValueError(
"Cannot call constructor on a contract that does not have 'bytecode' associated "
"with it"
)
return ContractConstructor(cls.web3,
cls.abi,
cls.bytecode,
*args,
**kwargs)
# Public API
#
@combomethod
def encodeABI(cls, fn_name: str, args: Optional[Any] = None,
kwargs: Optional[Any] = None, data: Optional[HexStr] = None) -> HexStr:
"""
Encodes the arguments using the Ethereum ABI for the contract function
        that matches the given name and arguments.
:param data: defaults to function selector
"""
fn_abi, fn_selector, fn_arguments = get_function_info(
fn_name, cls.web3.codec, contract_abi=cls.abi, args=args, kwargs=kwargs,
)
if data is None:
data = fn_selector
return encode_abi(cls.web3, fn_abi, fn_arguments, data)
@combomethod
def all_functions(self) -> List['ContractFunction']:
return find_functions_by_identifier(
self.abi, self.web3, self.address, lambda _: True
)
@combomethod
def get_function_by_signature(self, signature: str) -> 'ContractFunction':
if ' ' in signature:
raise ValueError(
'Function signature should not contain any spaces. '
'Found spaces in input: %s' % signature
)
def callable_check(fn_abi: ABIFunction) -> bool:
return abi_to_signature(fn_abi) == signature
fns = find_functions_by_identifier(self.abi, self.web3, self.address, callable_check)
return get_function_by_identifier(fns, 'signature')
@combomethod
def find_functions_by_name(self, fn_name: str) -> List['ContractFunction']:
def callable_check(fn_abi: ABIFunction) -> bool:
return fn_abi['name'] == fn_name
return find_functions_by_identifier(
self.abi, self.web3, self.address, callable_check
)
@combomethod
def get_function_by_name(self, fn_name: str) -> 'ContractFunction':
fns = self.find_functions_by_name(fn_name)
return get_function_by_identifier(fns, 'name')
@combomethod
def get_function_by_selector(self, selector: Union[bytes, int, HexStr]) -> 'ContractFunction':
def callable_check(fn_abi: ABIFunction) -> bool:
# typed dict cannot be used w/ a normal Dict
# https://github.com/python/mypy/issues/4976
return encode_hex(function_abi_to_4byte_selector(fn_abi)) == to_4byte_hex(selector) # type: ignore # noqa: E501
fns = find_functions_by_identifier(self.abi, self.web3, self.address, callable_check)
return get_function_by_identifier(fns, 'selector')
@combomethod
def decode_function_input(self, data: HexStr) -> Tuple['ContractFunction', Dict[str, Any]]:
# type ignored b/c expects data arg to be HexBytes
data = HexBytes(data) # type: ignore
selector, params = data[:4], data[4:]
func = self.get_function_by_selector(selector)
names = get_abi_input_names(func.abi)
types = get_abi_input_types(func.abi)
decoded = self.web3.codec.decode_abi(types, cast(HexBytes, params))
normalized = map_abi_data(BASE_RETURN_NORMALIZERS, types, decoded)
return func, dict(zip(names, normalized))
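    # Example (sketch, not in the original source): for ERC-20 `transfer` calldata this
    # returns something like (<Function transfer(address,uint256)>, {'_to': '0x..', '_value': 100}),
    # with the argument names taken from whatever the supplied ABI declares.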
@combomethod
def find_functions_by_args(self, *args: Any) -> List['ContractFunction']:
def callable_check(fn_abi: ABIFunction) -> bool:
return check_if_arguments_can_be_encoded(fn_abi, self.web3.codec, args=args, kwargs={})
return find_functions_by_identifier(
self.abi, self.web3, self.address, callable_check
)
@combomethod
def get_function_by_args(self, *args: Any) -> 'ContractFunction':
fns = self.find_functions_by_args(*args)
return get_function_by_identifier(fns, 'args')
#
# Private Helpers
#
_return_data_normalizers: Tuple[Callable[..., Any], ...] = tuple()
@classmethod
def _prepare_transaction(cls,
fn_name: str,
fn_args: Optional[Any] = None,
fn_kwargs: Optional[Any] = None,
transaction: Optional[TxParams] = None) -> TxParams:
return prepare_transaction(
cls.address,
cls.web3,
fn_identifier=fn_name,
contract_abi=cls.abi,
transaction=transaction,
fn_args=fn_args,
fn_kwargs=fn_kwargs,
)
@classmethod
def _find_matching_fn_abi(
cls, fn_identifier: Optional[str] = None, args: Optional[Any] = None,
kwargs: Optional[Any] = None
) -> ABIFunction:
return find_matching_fn_abi(cls.abi,
cls.web3.codec,
fn_identifier=fn_identifier,
args=args,
kwargs=kwargs)
@classmethod
def _find_matching_event_abi(
cls, event_name: Optional[str] = None, argument_names: Optional[Sequence[str]] = None
) -> ABIEvent:
return find_matching_event_abi(
abi=cls.abi,
event_name=event_name,
argument_names=argument_names)
@staticmethod
def get_fallback_function(
abi: ABI, web3: 'Web3', address: Optional[ChecksumAddress] = None
) -> 'ContractFunction':
if abi and fallback_func_abi_exists(abi):
return ContractFunction.factory(
'fallback',
web3=web3,
contract_abi=abi,
address=address,
function_identifier=FallbackFn)()
return cast('ContractFunction', NonExistentFallbackFunction())
@staticmethod
def get_receive_function(
abi: ABI, web3: 'Web3', address: Optional[ChecksumAddress] = None
) -> 'ContractFunction':
if abi and receive_func_abi_exists(abi):
return ContractFunction.factory(
'receive',
web3=web3,
contract_abi=abi,
address=address,
function_identifier=ReceiveFn)()
return cast('ContractFunction', NonExistentReceiveFunction())
@combomethod
def _encode_constructor_data(cls, args: Optional[Any] = None,
kwargs: Optional[Any] = None) -> HexStr:
constructor_abi = get_constructor_abi(cls.abi)
if constructor_abi:
if args is None:
args = tuple()
if kwargs is None:
kwargs = {}
arguments = merge_args_and_kwargs(constructor_abi, args, kwargs)
deploy_data = add_0x_prefix(
encode_abi(cls.web3, constructor_abi, arguments, data=cls.bytecode)
)
else:
if args is not None or kwargs is not None:
msg = "Constructor args were provided, but no constructor function was provided."
raise TypeError(msg)
deploy_data = to_hex(cls.bytecode)
return deploy_data
def mk_collision_prop(fn_name: str) -> Callable[[], None]:
def collision_fn() -> NoReturn:
msg = "Namespace collision for function name {0} with ConciseContract API.".format(fn_name)
raise AttributeError(msg)
collision_fn.__name__ = fn_name
return collision_fn
class ContractConstructor:
"""
Class for contract constructor API.
"""
def __init__(
self, web3: 'Web3', abi: ABI, bytecode: HexStr, *args: Any, **kwargs: Any
) -> None:
self.web3 = web3
self.abi = abi
self.bytecode = bytecode
self.data_in_transaction = self._encode_data_in_transaction(*args, **kwargs)
@combomethod
def _encode_data_in_transaction(self, *args: Any, **kwargs: Any) -> HexStr:
constructor_abi = get_constructor_abi(self.abi)
if constructor_abi:
if not args:
args = tuple()
if not kwargs:
kwargs = {}
arguments = merge_args_and_kwargs(constructor_abi, args, kwargs)
data = add_0x_prefix(
encode_abi(self.web3, constructor_abi, arguments, data=self.bytecode)
)
else:
data = to_hex(self.bytecode)
return data
@combomethod
def estimateGas(
self, transaction: Optional[TxParams] = None,
block_identifier: Optional[BlockIdentifier] = None
) -> int:
if transaction is None:
estimate_gas_transaction: TxParams = {}
else:
estimate_gas_transaction = cast(TxParams, dict(**transaction))
self.check_forbidden_keys_in_transaction(estimate_gas_transaction,
["data", "to"])
if self.web3.eth.default_account is not empty:
# type ignored b/c check prevents an empty default_account
estimate_gas_transaction.setdefault('from', self.web3.eth.default_account) # type: ignore # noqa: E501
estimate_gas_transaction['data'] = self.data_in_transaction
return self.web3.eth.estimateGas(
estimate_gas_transaction, block_identifier=block_identifier
)
@combomethod
def transact(self, transaction: Optional[TxParams] = None) -> HexBytes:
if transaction is None:
transact_transaction: TxParams = {}
else:
transact_transaction = cast(TxParams, dict(**transaction))
self.check_forbidden_keys_in_transaction(transact_transaction,
["data", "to"])
if self.web3.eth.default_account is not empty:
# type ignored b/c check prevents an empty default_account
transact_transaction.setdefault('from', self.web3.eth.default_account) # type: ignore
transact_transaction['data'] = self.data_in_transaction
# TODO: handle asynchronous contract creation
return self.web3.eth.sendTransaction(transact_transaction)
@combomethod
def buildTransaction(self, transaction: Optional[TxParams] = None) -> TxParams:
"""
Build the transaction dictionary without sending
"""
if transaction is None:
built_transaction: TxParams = {}
else:
built_transaction = cast(TxParams, dict(**transaction))
self.check_forbidden_keys_in_transaction(built_transaction,
["data", "to"])
if self.web3.eth.default_account is not empty:
# type ignored b/c check prevents an empty default_account
built_transaction.setdefault('from', self.web3.eth.default_account) # type: ignore
built_transaction['data'] = self.data_in_transaction
built_transaction['to'] = Address(b'')
return fill_transaction_defaults(self.web3, built_transaction)
@staticmethod
def check_forbidden_keys_in_transaction(
transaction: TxParams, forbidden_keys: Optional[Collection[str]] = None
) -> None:
keys_found = set(transaction.keys()) & set(forbidden_keys)
if keys_found:
raise ValueError("Cannot set {} in transaction".format(', '.join(keys_found)))
class ConciseMethod:
ALLOWED_MODIFIERS = {'call', 'estimateGas', 'transact', 'buildTransaction'}
def __init__(
self, function: 'ContractFunction',
normalizers: Optional[Tuple[Callable[..., Any], ...]] = None
) -> None:
self._function = function
self._function._return_data_normalizers = normalizers
def __call__(self, *args: Any, **kwargs: Any) -> 'ContractFunction':
return self.__prepared_function(*args, **kwargs)
def __prepared_function(self, *args: Any, **kwargs: Any) -> 'ContractFunction':
modifier_dict: Dict[Any, Any]
if not kwargs:
modifier, modifier_dict = 'call', {}
elif len(kwargs) == 1:
modifier, modifier_dict = kwargs.popitem()
if modifier not in self.ALLOWED_MODIFIERS:
raise TypeError(
"The only allowed keyword arguments are: %s" % self.ALLOWED_MODIFIERS)
else:
raise TypeError("Use up to one keyword argument, one of: %s" % self.ALLOWED_MODIFIERS)
return getattr(self._function(*args), modifier)(modifier_dict)
class ConciseContract:
"""
An alternative Contract Factory which invokes all methods as `call()`,
unless you add a keyword argument. The keyword argument assigns the prep method.
This call
> contract.withdraw(amount, transact={'from': eth.accounts[1], 'gas': 100000, ...})
is equivalent to this call in the classic contract:
> contract.functions.withdraw(amount).transact({'from': eth.accounts[1], 'gas': 100000, ...})
"""
@deprecated_for(
"contract.caller.<method name> or contract.caller({transaction_dict}).<method name>"
)
def __init__(
self,
classic_contract: Contract,
method_class: Union[Type['ConciseMethod'], Type['ImplicitMethod']] = ConciseMethod
) -> None:
classic_contract._return_data_normalizers += CONCISE_NORMALIZERS
self._classic_contract = classic_contract
self.address = self._classic_contract.address
protected_fn_names = [fn for fn in dir(self) if not fn.endswith('__')]
for fn_name in self._classic_contract.functions:
# Override namespace collisions
if fn_name in protected_fn_names:
_concise_method = cast('ConciseMethod', mk_collision_prop(fn_name))
else:
_classic_method = getattr(
self._classic_contract.functions,
fn_name)
_concise_method = method_class(
_classic_method,
self._classic_contract._return_data_normalizers
)
setattr(self, fn_name, _concise_method)
@classmethod
def factory(cls, *args: Any, **kwargs: Any) -> Contract:
return compose(cls, Contract.factory(*args, **kwargs))
def _none_addr(datatype: str, data: ChecksumAddress) -> Tuple[str, Optional[ChecksumAddress]]:
if datatype == 'address' and int(data, base=16) == 0:
return (datatype, None)
else:
return (datatype, data)
CONCISE_NORMALIZERS: Tuple[Callable[..., Any]] = (
_none_addr,
)
class ImplicitMethod(ConciseMethod):
def __call_by_default(self, args: Any) -> bool:
function_abi = find_matching_fn_abi(self._function.contract_abi,
self._function.web3.codec,
fn_identifier=self._function.function_identifier,
args=args)
return function_abi['constant'] if 'constant' in function_abi.keys() else False
@deprecated_for("classic contract syntax. Ex: contract.functions.withdraw(amount).transact({})")
def __call__(self, *args: Any, **kwargs: Any) -> 'ContractFunction':
# Modifier is not provided and method is not constant/pure do a transaction instead
if not kwargs and not self.__call_by_default(args):
return super().__call__(*args, transact={})
else:
return super().__call__(*args, **kwargs)
class ImplicitContract(ConciseContract):
"""
ImplicitContract class is similar to the ConciseContract class
however it performs a transaction instead of a call if no modifier
is given and the method is not marked 'constant' in the ABI.
The transaction will use the default account to send the transaction.
This call
> contract.withdraw(amount)
is equivalent to this call in the classic contract:
> contract.functions.withdraw(amount).transact({})
"""
def __init__(
self,
classic_contract: Contract,
method_class: Union[Type[ImplicitMethod], Type[ConciseMethod]] = ImplicitMethod
) -> None:
super().__init__(classic_contract, method_class=method_class)
class NonExistentFallbackFunction:
@staticmethod
def _raise_exception() -> NoReturn:
raise FallbackNotFound("No fallback function was found in the contract ABI.")
def __getattr__(self, attr: Any) -> Callable[[], None]:
return self._raise_exception
class NonExistentReceiveFunction:
@staticmethod
def _raise_exception() -> NoReturn:
raise FallbackNotFound("No receive function was found in the contract ABI.")
def __getattr__(self, attr: Any) -> Callable[[], None]:
return self._raise_exception
class ContractFunction:
"""Base class for contract functions
A function accessed via the api contract.functions.myMethod(*args, **kwargs)
is a subclass of this class.
"""
address: ChecksumAddress = None
function_identifier: FunctionIdentifier = None
web3: 'Web3' = None
contract_abi: ABI = None
abi: ABIFunction = None
transaction: TxParams = None
arguments: Tuple[Any, ...] = None
args: Any = None
kwargs: Any = None
def __init__(self, abi: Optional[ABIFunction] = None) -> None:
self.abi = abi
self.fn_name = type(self).__name__
def __call__(self, *args: Any, **kwargs: Any) -> 'ContractFunction':
clone = copy.copy(self)
if args is None:
clone.args = tuple()
else:
clone.args = args
if kwargs is None:
clone.kwargs = {}
else:
clone.kwargs = kwargs
clone._set_function_info()
return clone
def _set_function_info(self) -> None:
if not self.abi:
self.abi = find_matching_fn_abi(
self.contract_abi,
self.web3.codec,
self.function_identifier,
self.args,
self.kwargs
)
if self.function_identifier is FallbackFn:
self.selector = encode_hex(b'')
elif self.function_identifier is ReceiveFn:
self.selector = encode_hex(b'')
elif is_text(self.function_identifier):
# https://github.com/python/mypy/issues/4976
self.selector = encode_hex(function_abi_to_4byte_selector(self.abi)) # type: ignore
else:
raise TypeError("Unsupported function identifier")
self.arguments = merge_args_and_kwargs(self.abi, self.args, self.kwargs)
def call(
self, transaction: Optional[TxParams] = None, block_identifier: BlockIdentifier = 'latest'
) -> Any:
"""
Execute a contract function call using the `eth_call` interface.
This method prepares a ``Caller`` object that exposes the contract
functions and public variables as callable Python functions.
Reading a public ``owner`` address variable example:
.. code-block:: python
ContractFactory = w3.eth.contract(
abi=wallet_contract_definition["abi"]
)
# Not a real contract address
contract = ContractFactory("0x2f70d3d26829e412A602E83FE8EeBF80255AEeA5")
# Read "owner" public variable
addr = contract.functions.owner().call()
:param transaction: Dictionary of transaction info for web3 interface
:return: ``Caller`` object that has contract public functions
and variables exposed as Python methods
"""
if transaction is None:
call_transaction: TxParams = {}
else:
call_transaction = cast(TxParams, dict(**transaction))
if 'data' in call_transaction:
raise ValueError("Cannot set data in call transaction")
if self.address:
call_transaction.setdefault('to', self.address)
if self.web3.eth.default_account is not empty:
# type ignored b/c check prevents an empty default_account
call_transaction.setdefault('from', self.web3.eth.default_account) # type: ignore
if 'to' not in call_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.[methodtype].[method].call()` from"
" a contract factory you "
"must provide a `to` address with the transaction"
)
else:
raise ValueError(
"Please ensure that this contract instance has an address."
)
block_id = parse_block_identifier(self.web3, block_identifier)
return call_contract_function(
self.web3,
self.address,
self._return_data_normalizers,
self.function_identifier,
call_transaction,
block_id,
self.contract_abi,
self.abi,
*self.args,
**self.kwargs
)
def transact(self, transaction: Optional[TxParams] = None) -> HexBytes:
if transaction is None:
transact_transaction: TxParams = {}
else:
transact_transaction = cast(TxParams, dict(**transaction))
if 'data' in transact_transaction:
raise ValueError("Cannot set data in transact transaction")
if self.address is not None:
transact_transaction.setdefault('to', self.address)
if self.web3.eth.default_account is not empty:
# type ignored b/c check prevents an empty default_account
transact_transaction.setdefault('from', self.web3.eth.default_account) # type: ignore
if 'to' not in transact_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.transact` from a contract factory you "
"must provide a `to` address with the transaction"
)
else:
raise ValueError(
"Please ensure that this contract instance has an address."
)
return transact_with_contract_function(
self.address,
self.web3,
self.function_identifier,
transact_transaction,
self.contract_abi,
self.abi,
*self.args,
**self.kwargs
)
def estimateGas(
self, transaction: Optional[TxParams] = None,
block_identifier: Optional[BlockIdentifier] = None
) -> int:
if transaction is None:
estimate_gas_transaction: TxParams = {}
else:
estimate_gas_transaction = cast(TxParams, dict(**transaction))
if 'data' in estimate_gas_transaction:
raise ValueError("Cannot set data in estimateGas transaction")
if 'to' in estimate_gas_transaction:
raise ValueError("Cannot set to in estimateGas transaction")
if self.address:
estimate_gas_transaction.setdefault('to', self.address)
if self.web3.eth.default_account is not empty:
# type ignored b/c check prevents an empty default_account
estimate_gas_transaction.setdefault('from', self.web3.eth.default_account) # type: ignore # noqa: E501
if 'to' not in estimate_gas_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.estimateGas` from a contract factory "
"you must provide a `to` address with the transaction"
)
else:
raise ValueError(
"Please ensure that this contract instance has an address."
)
return estimate_gas_for_function(
self.address,
self.web3,
self.function_identifier,
estimate_gas_transaction,
self.contract_abi,
self.abi,
block_identifier,
*self.args,
**self.kwargs
)
def buildTransaction(self, transaction: Optional[TxParams] = None) -> TxParams:
"""
Build the transaction dictionary without sending
"""
if transaction is None:
built_transaction: TxParams = {}
else:
built_transaction = cast(TxParams, dict(**transaction))
if 'data' in built_transaction:
raise ValueError("Cannot set data in build transaction")
if not self.address and 'to' not in built_transaction:
raise ValueError(
"When using `ContractFunction.buildTransaction` from a contract factory "
"you must provide a `to` address with the transaction"
)
if self.address and 'to' in built_transaction:
raise ValueError("Cannot set to in contract call build transaction")
if self.address:
built_transaction.setdefault('to', self.address)
if 'to' not in built_transaction:
raise ValueError(
"Please ensure that this contract instance has an address."
)
return build_transaction_for_function(
self.address,
self.web3,
self.function_identifier,
built_transaction,
self.contract_abi,
self.abi,
*self.args,
**self.kwargs
)
@combomethod
def _encode_transaction_data(cls) -> HexStr:
return add_0x_prefix(encode_abi(cls.web3, cls.abi, cls.arguments, cls.selector))
_return_data_normalizers: Optional[Tuple[Callable[..., Any], ...]] = tuple()
@classmethod
def factory(cls, class_name: str, **kwargs: Any) -> 'ContractFunction':
return PropertyCheckingFactory(class_name, (cls,), kwargs)(kwargs.get('abi'))
def __repr__(self) -> str:
if self.abi:
_repr = '<Function %s' % abi_to_signature(self.abi)
if self.arguments is not None:
_repr += ' bound to %r' % (self.arguments,)
return _repr + '>'
return '<Function %s>' % self.fn_name
class ContractEvent:
"""Base class for contract events
An event accessed via the api contract.events.myEvents(*args, **kwargs)
is a subclass of this class.
"""
address: ChecksumAddress = None
event_name: str = None
web3: 'Web3' = None
contract_abi: ABI = None
abi: ABIEvent = None
def __init__(self, *argument_names: Tuple[str]) -> None:
if argument_names is None:
# https://github.com/python/mypy/issues/6283
self.argument_names = tuple() # type: ignore
else:
self.argument_names = argument_names
self.abi = self._get_event_abi()
@classmethod
def _get_event_abi(cls) -> ABIEvent:
return find_matching_event_abi(
cls.contract_abi,
event_name=cls.event_name)
@combomethod
def processReceipt(
self, txn_receipt: TxReceipt, errors: EventLogErrorFlags = WARN
) -> Iterable[EventData]:
return self._parse_logs(txn_receipt, errors)
@to_tuple
def _parse_logs(
self, txn_receipt: TxReceipt, errors: EventLogErrorFlags
) -> Iterable[EventData]:
try:
errors.name
except AttributeError:
raise AttributeError(f'Error flag must be one of: {EventLogErrorFlags.flag_options()}')
for log in txn_receipt['logs']:
try:
rich_log = get_event_data(self.web3.codec, self.abi, log)
except (MismatchedABI, LogTopicError, InvalidEventABI, TypeError) as e:
if errors == DISCARD:
continue
elif errors == IGNORE:
# type ignores b/c rich_log set on 1092 conflicts with mutated types
new_log = MutableAttributeDict(log) # type: ignore
new_log['errors'] = e
rich_log = AttributeDict(new_log) # type: ignore
elif errors == STRICT:
raise e
else:
warnings.warn(
f"The log with transaction hash: {log['transactionHash']} and "
f"logIndex: {log['logIndex']} encountered the following error "
f'during processing: {type(e).__name__}({e}). It has been discarded.'
)
continue
yield rich_log
@combomethod
def processLog(self, log: HexStr) -> EventData:
return get_event_data(self.web3.codec, self.abi, log)
@combomethod
def createFilter(
self, *, # PEP 3102
argument_filters: Optional[Dict[str, Any]] = None,
fromBlock: Optional[BlockIdentifier] = None,
toBlock: BlockIdentifier = "latest",
address: Optional[ChecksumAddress] = None,
topics: Optional[Sequence[Any]] = None) -> LogFilter:
"""
Create filter object that tracks logs emitted by this contract event.
:param filter_params: other parameters to limit the events
"""
if fromBlock is None:
raise TypeError("Missing mandatory keyword argument to createFilter: fromBlock")
if argument_filters is None:
argument_filters = dict()
_filters = dict(**argument_filters)
event_abi = self._get_event_abi()
check_for_forbidden_api_filter_arguments(event_abi, _filters)
_, event_filter_params = construct_event_filter_params(
self._get_event_abi(),
self.web3.codec,
contract_address=self.address,
argument_filters=_filters,
fromBlock=fromBlock,
toBlock=toBlock,
address=address,
topics=topics,
)
filter_builder = EventFilterBuilder(event_abi, self.web3.codec)
filter_builder.address = cast(ChecksumAddress, event_filter_params.get('address'))
filter_builder.fromBlock = event_filter_params.get('fromBlock')
filter_builder.toBlock = event_filter_params.get('toBlock')
match_any_vals = {
arg: value for arg, value in _filters.items()
if not is_array_type(filter_builder.args[arg].arg_type) and is_list_like(value)
}
for arg, value in match_any_vals.items():
filter_builder.args[arg].match_any(*value)
match_single_vals = {
arg: value for arg, value in _filters.items()
if not is_array_type(filter_builder.args[arg].arg_type) and not is_list_like(value)
}
for arg, value in match_single_vals.items():
filter_builder.args[arg].match_single(value)
log_filter = filter_builder.deploy(self.web3)
log_filter.log_entry_formatter = get_event_data(self.web3.codec, self._get_event_abi())
log_filter.builder = filter_builder
return log_filter
@combomethod
def build_filter(self) -> EventFilterBuilder:
builder = EventFilterBuilder(
self._get_event_abi(),
self.web3.codec,
formatter=get_event_data(self.web3.codec, self._get_event_abi()))
builder.address = self.address
return builder
@combomethod
def getLogs(self,
argument_filters: Optional[Dict[str, Any]] = None,
fromBlock: Optional[BlockIdentifier] = None,
toBlock: Optional[BlockIdentifier] = None,
blockHash: Optional[HexBytes] = None) -> Iterable[EventData]:
"""Get events for this contract instance using eth_getLogs API.
This is a stateless method, as opposed to createFilter.
It can be safely called against nodes which do not provide
eth_newFilter API, like Infura nodes.
If there are many events,
like ``Transfer`` events for a popular token,
the Ethereum node might be overloaded and timeout
on the underlying JSON-RPC call.
Example - how to get all ERC-20 token transactions
for the latest 10 blocks:
.. code-block:: python
            from_block = max(mycontract.web3.eth.block_number - 10, 1)
            to_block = mycontract.web3.eth.block_number
            events = mycontract.events.Transfer.getLogs(fromBlock=from_block, toBlock=to_block)
for e in events:
print(e["args"]["from"],
e["args"]["to"],
e["args"]["value"])
The returned processed log values will look like:
.. code-block:: python
(
AttributeDict({
'args': AttributeDict({}),
'event': 'LogNoArguments',
'logIndex': 0,
'transactionIndex': 0,
'transactionHash': HexBytes('...'),
'address': '0xF2E246BB76DF876Cef8b38ae84130F4F55De395b',
'blockHash': HexBytes('...'),
'blockNumber': 3
}),
AttributeDict(...),
...
)
See also: :func:`web3.middleware.filter.local_filter_middleware`.
:param argument_filters:
:param fromBlock: block number or "latest", defaults to "latest"
:param toBlock: block number or "latest". Defaults to "latest"
:param blockHash: block hash. blockHash cannot be set at the
same time as fromBlock or toBlock
:yield: Tuple of :class:`AttributeDict` instances
"""
if not self.address:
raise TypeError("This method can be only called on "
"an instated contract with an address")
abi = self._get_event_abi()
if argument_filters is None:
argument_filters = dict()
_filters = dict(**argument_filters)
blkhash_set = blockHash is not None
blknum_set = fromBlock is not None or toBlock is not None
if blkhash_set and blknum_set:
raise ValidationError(
'blockHash cannot be set at the same'
' time as fromBlock or toBlock')
# Construct JSON-RPC raw filter presentation based on human readable Python descriptions
# Namely, convert event names to their keccak signatures
data_filter_set, event_filter_params = construct_event_filter_params(
abi,
self.web3.codec,
contract_address=self.address,
argument_filters=_filters,
fromBlock=fromBlock,
toBlock=toBlock,
address=self.address,
)
if blockHash is not None:
event_filter_params['blockHash'] = blockHash
# Call JSON-RPC API
logs = self.web3.eth.getLogs(event_filter_params)
# Convert raw binary data to Python proxy objects as described by ABI
return tuple(get_event_data(self.web3.codec, abi, entry) for entry in logs)
@classmethod
def factory(cls, class_name: str, **kwargs: Any) -> PropertyCheckingFactory:
return PropertyCheckingFactory(class_name, (cls,), kwargs)
class ContractCaller:
"""
An alternative Contract API.
This call:
> contract.caller({'from': eth.accounts[1], 'gas': 100000, ...}).add(2, 3)
is equivalent to this call in the classic contract:
> contract.functions.add(2, 3).call({'from': eth.accounts[1], 'gas': 100000, ...})
Other options for invoking this class include:
> contract.caller.add(2, 3)
or
> contract.caller().add(2, 3)
or
> contract.caller(transaction={'from': eth.accounts[1], 'gas': 100000, ...}).add(2, 3)
"""
def __init__(self,
abi: ABI,
web3: 'Web3',
address: ChecksumAddress,
transaction: Optional[TxParams] = None,
block_identifier: BlockIdentifier = 'latest') -> None:
self.web3 = web3
self.address = address
self.abi = abi
self._functions = None
if self.abi:
if transaction is None:
transaction = {}
self._functions = filter_by_type('function', self.abi)
for func in self._functions:
fn: ContractFunction = ContractFunction.factory(
func['name'],
web3=self.web3,
contract_abi=self.abi,
address=self.address,
function_identifier=func['name'])
block_id = parse_block_identifier(self.web3, block_identifier)
caller_method = partial(self.call_function,
fn,
transaction=transaction,
block_identifier=block_id)
setattr(self, func['name'], caller_method)
def __getattr__(self, function_name: str) -> Any:
if self.abi is None:
raise NoABIFound(
"There is no ABI found for this contract.",
)
elif not self._functions or len(self._functions) == 0:
raise NoABIFunctionsFound(
"The ABI for this contract contains no function definitions. ",
"Are you sure you provided the correct contract ABI?"
)
elif function_name not in set(fn['name'] for fn in self._functions):
functions_available = ', '.join([fn['name'] for fn in self._functions])
raise ABIFunctionNotFound(
"The function '{}' was not found in this contract's ABI. ".format(function_name),
"Here is a list of all of the function names found: ",
"{}. ".format(functions_available),
"Did you mean to call one of those functions?"
)
else:
return super().__getattribute__(function_name)
    def __hasattr__(self, function_name: str) -> bool:
        try:
            return function_name in set(fn['name'] for fn in self._functions or [])
        except ABIFunctionNotFound:
            return False
def __call__(
self, transaction: Optional[TxParams] = None, block_identifier: BlockIdentifier = 'latest'
) -> 'ContractCaller':
if transaction is None:
transaction = {}
return type(self)(self.abi,
self.web3,
self.address,
transaction=transaction,
block_identifier=block_identifier)
@staticmethod
def call_function(
fn: ContractFunction,
*args: Any,
transaction: Optional[TxParams] = None,
block_identifier: BlockIdentifier = 'latest',
**kwargs: Any
) -> Any:
if transaction is None:
transaction = {}
return fn(*args, **kwargs).call(transaction, block_identifier)
def check_for_forbidden_api_filter_arguments(
event_abi: ABIEvent, _filters: Dict[str, Any]
) -> None:
name_indexed_inputs = {_input['name']: _input for _input in event_abi['inputs']}
for filter_name, filter_value in _filters.items():
_input = name_indexed_inputs[filter_name]
if is_array_type(_input['type']):
raise TypeError(
"createFilter no longer supports array type filter arguments. "
"see the build_filter method for filtering array type filters.")
if is_list_like(filter_value) and is_dynamic_sized_type(_input['type']):
raise TypeError(
"createFilter no longer supports setting filter argument options for dynamic sized "
"types. See the build_filter method for setting filters with the match_any "
"method.")
def call_contract_function(
web3: 'Web3',
address: ChecksumAddress,
normalizers: Tuple[Callable[..., Any], ...],
function_identifier: FunctionIdentifier,
transaction: TxParams,
block_id: Optional[BlockIdentifier] = None,
contract_abi: Optional[ABI] = None,
fn_abi: Optional[ABIFunction] = None,
*args: Any,
**kwargs: Any) -> Any:
"""
Helper function for interacting with a contract function using the
`eth_call` API.
"""
call_transaction = prepare_transaction(
address,
web3,
fn_identifier=function_identifier,
contract_abi=contract_abi,
fn_abi=fn_abi,
transaction=transaction,
fn_args=args,
fn_kwargs=kwargs,
)
if block_id is None:
return_data = web3.eth.call(call_transaction)
else:
return_data = web3.eth.call(call_transaction, block_identifier=block_id)
if fn_abi is None:
fn_abi = find_matching_fn_abi(contract_abi, web3.codec, function_identifier, args, kwargs)
output_types = get_abi_output_types(fn_abi)
try:
output_data = web3.codec.decode_abi(output_types, return_data)
except DecodingError as e:
# Provide a more helpful error message than the one provided by
# eth-abi-utils
is_missing_code_error = (
return_data in ACCEPTABLE_EMPTY_STRINGS
and web3.eth.getCode(address) in ACCEPTABLE_EMPTY_STRINGS)
if is_missing_code_error:
msg = (
"Could not transact with/call contract function, is contract "
"deployed correctly and chain synced?"
)
else:
msg = (
"Could not decode contract function call {} return data {} for "
"output_types {}".format(
function_identifier,
return_data,
output_types
)
)
raise BadFunctionCallOutput(msg) from e
_normalizers = itertools.chain(
BASE_RETURN_NORMALIZERS,
normalizers,
)
normalized_data = map_abi_data(_normalizers, output_types, output_data)
if len(normalized_data) == 1:
return normalized_data[0]
else:
return normalized_data
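# Note (sketch, not in the original source): a function returning a single value is
# unwrapped to that value here, while a function with multiple outputs (for example
# returning (address, uint256)) comes back as a list of the decoded, normalized values.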
def parse_block_identifier(web3: 'Web3', block_identifier: BlockIdentifier) -> BlockIdentifier:
if isinstance(block_identifier, int):
return parse_block_identifier_int(web3, block_identifier)
elif block_identifier in ['latest', 'earliest', 'pending']:
return block_identifier
elif isinstance(block_identifier, bytes) or is_hex_encoded_block_hash(block_identifier):
return web3.eth.get_block(block_identifier)['number']
else:
raise BlockNumberOutofRange
def parse_block_identifier_int(web3: 'Web3', block_identifier_int: int) -> BlockNumber:
if block_identifier_int >= 0:
block_num = block_identifier_int
else:
last_block = web3.eth.get_block('latest')['number']
block_num = last_block + block_identifier_int + 1
if block_num < 0:
raise BlockNumberOutofRange
return BlockNumber(block_num)
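# Example (sketch, not in the original source): if the latest block number is 100,
# parse_block_identifier_int(web3, -1) resolves to block 100 and -2 resolves to 99;
# anything that would land before the genesis block raises BlockNumberOutofRange.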
def transact_with_contract_function(
address: ChecksumAddress,
web3: 'Web3',
function_name: Optional[FunctionIdentifier] = None,
transaction: Optional[TxParams] = None,
contract_abi: Optional[ABI] = None,
fn_abi: Optional[ABIFunction] = None,
*args: Any,
**kwargs: Any) -> HexBytes:
"""
Helper function for interacting with a contract function by sending a
transaction.
"""
transact_transaction = prepare_transaction(
address,
web3,
fn_identifier=function_name,
contract_abi=contract_abi,
transaction=transaction,
fn_abi=fn_abi,
fn_args=args,
fn_kwargs=kwargs,
)
txn_hash = web3.eth.sendTransaction(transact_transaction)
return txn_hash
def estimate_gas_for_function(
address: ChecksumAddress,
web3: 'Web3',
fn_identifier: Optional[FunctionIdentifier] = None,
transaction: Optional[TxParams] = None,
contract_abi: Optional[ABI] = None,
fn_abi: Optional[ABIFunction] = None,
block_identifier: Optional[BlockIdentifier] = None,
*args: Any,
**kwargs: Any) -> int:
"""Estimates gas cost a function call would take.
Don't call this directly, instead use :meth:`Contract.estimateGas`
on your contract instance.
"""
estimate_transaction = prepare_transaction(
address,
web3,
fn_identifier=fn_identifier,
contract_abi=contract_abi,
fn_abi=fn_abi,
transaction=transaction,
fn_args=args,
fn_kwargs=kwargs,
)
return web3.eth.estimateGas(estimate_transaction, block_identifier)
def build_transaction_for_function(
address: ChecksumAddress,
web3: 'Web3',
function_name: Optional[FunctionIdentifier] = None,
transaction: Optional[TxParams] = None,
contract_abi: Optional[ABI] = None,
fn_abi: Optional[ABIFunction] = None,
*args: Any,
**kwargs: Any) -> TxParams:
"""Builds a dictionary with the fields required to make the given transaction
Don't call this directly, instead use :meth:`Contract.buildTransaction`
on your contract instance.
"""
prepared_transaction = prepare_transaction(
address,
web3,
fn_identifier=function_name,
contract_abi=contract_abi,
fn_abi=fn_abi,
transaction=transaction,
fn_args=args,
fn_kwargs=kwargs,
)
prepared_transaction = fill_transaction_defaults(web3, prepared_transaction)
return prepared_transaction
def find_functions_by_identifier(
contract_abi: ABI, web3: 'Web3', address: ChecksumAddress, callable_check: Callable[..., Any]
) -> List[ContractFunction]:
fns_abi = filter_by_type('function', contract_abi)
return [
ContractFunction.factory(
fn_abi['name'],
web3=web3,
contract_abi=contract_abi,
address=address,
function_identifier=fn_abi['name'],
abi=fn_abi
)
for fn_abi in fns_abi
if callable_check(fn_abi)
]
def get_function_by_identifier(
fns: Sequence[ContractFunction], identifier: str
) -> ContractFunction:
if len(fns) > 1:
raise ValueError(
'Found multiple functions with matching {0}. '
'Found: {1!r}'.format(identifier, fns)
)
elif len(fns) == 0:
raise ValueError(
'Could not find any function with matching {0}'.format(identifier)
)
return fns[0]
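# A short usage sketch (not part of the original module): ties the factory, function-call
# and call-execution APIs above together. The node URL, token address and the minimal
# ERC-20 ABI below are placeholders/assumptions, not values taken from this file.
if __name__ == '__main__':
    from web3 import Web3, HTTPProvider

    w3 = Web3(HTTPProvider('http://localhost:8545'))  # hypothetical local node
    erc20_abi = [{
        'name': 'totalSupply', 'type': 'function', 'constant': True,
        'stateMutability': 'view', 'payable': False,
        'inputs': [], 'outputs': [{'name': '', 'type': 'uint256'}],
    }]
    token = w3.eth.contract(  # builds a Contract subclass via Contract.factory
        address='0x0000000000000000000000000000000000000000',  # placeholder address
        abi=erc20_abi,
    )
    # eth_call through ContractFunction.call()
    print(token.functions.totalSupply().call())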
| avg_line_length: 34.265755 | max_line_length: 124 | alphanum_fraction: 0.611347 |

| hexsha: b29395e258c3af02e8cc1e49fd5c4a4d4da64b83 | size: 392 | ext: py | lang: Python |
| repo_path: backend/app/core/jobs/test_job.py | repo_name: yangyuchi/ml-job-scheduler | repo_head_hexsha: bdafbf7fd266751a974d716e8d3ba64d3187fdcd | licenses: ["MIT"] | stars: null | issues: null | forks: null |
import logging
from ._base import JobBase
logging.basicConfig(level=logging.INFO)
class PrintHello(JobBase):
description: str = "Just for Test"
arguments: dict = {
"times": "How many hellos to print (int)",
}
@classmethod
async def run(cls, **kwargs):
args = kwargs["args"]
for i in range(int(args["times"])):
logging.info("Hello!")
| avg_line_length: 23.058824 | max_line_length: 50 | alphanum_fraction: 0.614796 |

| hexsha: 5982f7a1e036078409c78d6e513c56e6bae74b03 | size: 15318 | ext: py | lang: Python |
| repo_path: tensorflow_estimator/python/estimator/canned/parsing_utils.py | repo_name: tirkarthi/estimator | repo_head_hexsha: 5d962124f1c2ad5b2886ada53d5c604257b660b6 | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parsing related helper function to be used in `input_fn`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import six
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import parsing_ops
from tensorflow.python.util.tf_export import estimator_export
@estimator_export('estimator.classifier_parse_example_spec', v1=[])
def classifier_parse_example_spec_v2(feature_columns,
label_key,
label_dtype=tf.dtypes.int64,
label_default=None,
weight_column=None):
"""Generates parsing spec for tf.parse_example to be used with classifiers.
If users keep data in tf.Example format, they need to call tf.parse_example
  with a proper feature spec. There are two main things that this utility helps with:
* Users need to combine parsing spec of features with labels and weights
(if any) since they are all parsed from same tf.Example instance. This
utility combines these specs.
* It is difficult to map expected label by a classifier such as
`DNNClassifier` to corresponding tf.parse_example spec. This utility encodes
it by getting related information from users (key, dtype).
Example output of parsing spec:
```python
# Define features and transformations
feature_b = tf.feature_column.numeric_column(...)
feature_c_bucketized = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("feature_c"), ...)
feature_a_x_feature_c = tf.feature_column.crossed_column(
columns=["feature_a", feature_c_bucketized], ...)
feature_columns = [feature_b, feature_c_bucketized, feature_a_x_feature_c]
parsing_spec = tf.estimator.classifier_parse_example_spec(
feature_columns, label_key='my-label', label_dtype=tf.string)
# For the above example, classifier_parse_example_spec would return the dict:
assert parsing_spec == {
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
"my-label" : parsing_ops.FixedLenFeature([1], dtype=tf.string)
}
```
Example usage with a classifier:
```python
feature_columns = # define features via tf.feature_column
estimator = DNNClassifier(
n_classes=1000,
feature_columns=feature_columns,
weight_column='example-weight',
label_vocabulary=['photos', 'keep', ...],
hidden_units=[256, 64, 16])
# This label configuration tells the classifier the following:
# * weights are retrieved with key 'example-weight'
# * label is string and can be one of the following ['photos', 'keep', ...]
# * integer id for label 'photos' is 0, 'keep' is 1, ...
# Input builders
def input_fn_train(): # Returns a tuple of features and labels.
features = tf.contrib.learn.read_keyed_batch_features(
file_pattern=train_files,
batch_size=batch_size,
# creates parsing configuration for tf.parse_example
features=tf.estimator.classifier_parse_example_spec(
feature_columns,
label_key='my-label',
label_dtype=tf.string,
weight_column='example-weight'),
reader=tf.RecordIOReader)
labels = features.pop('my-label')
return features, labels
estimator.train(input_fn=input_fn_train)
```
Args:
feature_columns: An iterable containing all feature columns. All items
should be instances of classes derived from `FeatureColumn`.
label_key: A string identifying the label. It means tf.Example stores labels
with this key.
label_dtype: A `tf.dtype` identifies the type of labels. By default it is
`tf.int64`. If user defines a `label_vocabulary`, this should be set as
`tf.string`. `tf.float32` labels are only supported for binary
classification.
label_default: used as label if label_key does not exist in given
tf.Example. An example usage: let's say `label_key` is 'clicked' and
tf.Example contains clicked data only for positive examples in following
format `key:clicked, value:1`. This means that if there is no data with
key 'clicked' it should count as negative example by setting
      `label_default=0`. Type of this value should be compatible with
`label_dtype`.
weight_column: A string or a `NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
Returns:
A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
value.
Raises:
ValueError: If label is used in `feature_columns`.
ValueError: If weight_column is used in `feature_columns`.
ValueError: If any of the given `feature_columns` is not a `_FeatureColumn`
instance.
ValueError: If `weight_column` is not a `NumericColumn` instance.
ValueError: if label_key is None.
"""
parsing_spec = fc.make_parse_example_spec_v2(feature_columns)
label_spec = tf.io.FixedLenFeature((1,), label_dtype, label_default)
return _add_label_and_weight_to_parsing_spec(
parsing_spec=parsing_spec,
label_key=label_key,
label_spec=label_spec,
weight_column=weight_column)
@estimator_export('estimator.regressor_parse_example_spec', v1=[])
def regressor_parse_example_spec_v2(feature_columns,
label_key,
label_dtype=tf.dtypes.float32,
label_default=None,
label_dimension=1,
weight_column=None):
"""Generates parsing spec for tf.parse_example to be used with regressors.
If users keep data in tf.Example format, they need to call tf.parse_example
  with a proper feature spec. There are two main things that this utility helps with:
* Users need to combine parsing spec of features with labels and weights
(if any) since they are all parsed from same tf.Example instance. This
utility combines these specs.
  * It is difficult to map the label expected by a regressor such as
    `DNNRegressor` to the corresponding tf.parse_example spec. This utility
    encodes it from the information supplied by the user (key, dtype).
Example output of parsing spec:
```python
# Define features and transformations
feature_b = tf.feature_column.numeric_column(...)
feature_c_bucketized = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("feature_c"), ...)
feature_a_x_feature_c = tf.feature_column.crossed_column(
columns=["feature_a", feature_c_bucketized], ...)
feature_columns = [feature_b, feature_c_bucketized, feature_a_x_feature_c]
parsing_spec = tf.estimator.regressor_parse_example_spec(
feature_columns, label_key='my-label')
# For the above example, regressor_parse_example_spec would return the dict:
assert parsing_spec == {
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
"my-label" : parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
```
Example usage with a regressor:
```python
feature_columns = # define features via tf.feature_column
estimator = DNNRegressor(
hidden_units=[256, 64, 16],
feature_columns=feature_columns,
weight_column='example-weight',
label_dimension=3)
# This label configuration tells the regressor the following:
# * weights are retrieved with key 'example-weight'
# * label is a 3 dimension tensor with float32 dtype.
# Input builders
def input_fn_train(): # Returns a tuple of features and labels.
features = tf.contrib.learn.read_keyed_batch_features(
file_pattern=train_files,
batch_size=batch_size,
# creates parsing configuration for tf.parse_example
        features=tf.estimator.regressor_parse_example_spec(
feature_columns,
label_key='my-label',
label_dimension=3,
weight_column='example-weight'),
reader=tf.RecordIOReader)
labels = features.pop('my-label')
return features, labels
estimator.train(input_fn=input_fn_train)
```
Args:
feature_columns: An iterable containing all feature columns. All items
should be instances of classes derived from `_FeatureColumn`.
label_key: A string identifying the label. It means tf.Example stores labels
with this key.
label_dtype: A `tf.dtype` identifies the type of labels. By default it is
`tf.float32`.
label_default: used as label if label_key does not exist in given
      tf.Example. By default, `label_default` is `None`, which means
`tf.parse_example` will error out if there is any missing label.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
weight_column: A string or a `NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
Returns:
A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
value.
Raises:
ValueError: If label is used in `feature_columns`.
ValueError: If weight_column is used in `feature_columns`.
ValueError: If any of the given `feature_columns` is not a `_FeatureColumn`
instance.
ValueError: If `weight_column` is not a `NumericColumn` instance.
ValueError: if label_key is None.
"""
parsing_spec = fc.make_parse_example_spec_v2(feature_columns)
label_spec = tf.io.FixedLenFeature(
(label_dimension,), label_dtype, label_default)
return _add_label_and_weight_to_parsing_spec(
parsing_spec=parsing_spec,
label_key=label_key,
label_spec=label_spec,
weight_column=weight_column)
def _add_label_and_weight_to_parsing_spec(
parsing_spec, label_key, label_spec, weight_column=None):
"""Adds label and weight spec to given parsing spec.
Args:
parsing_spec: A dict mapping each feature key to a `FixedLenFeature` or
`VarLenFeature` to which label and weight spec are added.
label_key: A string identifying the label. It means tf.Example stores labels
with this key.
label_spec: A `FixedLenFeature`.
weight_column: A string or a `NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
Returns:
A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
value.
"""
if label_key in parsing_spec:
raise ValueError('label should not be used as feature. '
'label_key: {}, features: {}'.format(
label_key, parsing_spec.keys()))
parsing_spec[label_key] = label_spec
if weight_column is None:
return parsing_spec
if isinstance(weight_column, six.string_types):
weight_column = tf.feature_column.numeric_column(weight_column)
if not isinstance(weight_column, fc.NumericColumn):
raise ValueError('weight_column should be an instance of '
'tf.feature_column.numeric_column. '
'Given type: {} value: {}'.format(
type(weight_column), weight_column))
if weight_column.key in parsing_spec:
raise ValueError('weight_column should not be used as feature. '
'weight_column: {}, features: {}'.format(
weight_column.key, parsing_spec.keys()))
parsing_spec.update(weight_column.parse_example_spec)
return parsing_spec
@estimator_export(v1=['estimator.classifier_parse_example_spec'])
def classifier_parse_example_spec(feature_columns,
label_key,
label_dtype=tf.dtypes.int64,
label_default=None,
weight_column=None):
parsing_spec = tf.compat.v1.feature_column.make_parse_example_spec(feature_columns)
label_spec = tf.io.FixedLenFeature((1,), label_dtype, label_default)
return _add_label_and_weight_to_parsing_spec(
parsing_spec=parsing_spec,
label_key=label_key,
label_spec=label_spec,
weight_column=weight_column)
classifier_parse_example_spec.__doc__ = classifier_parse_example_spec_v2.__doc__
@estimator_export(v1=['estimator.regressor_parse_example_spec'])
def regressor_parse_example_spec(feature_columns, # pylint: disable=missing-docstring
label_key,
label_dtype=tf.dtypes.float32,
label_default=None,
label_dimension=1,
weight_column=None):
parsing_spec = tf.compat.v1.feature_column.make_parse_example_spec(feature_columns)
label_spec = tf.io.FixedLenFeature(
(label_dimension,), label_dtype, label_default)
return _add_label_and_weight_to_parsing_spec(
parsing_spec=parsing_spec,
label_key=label_key,
label_spec=label_spec,
weight_column=weight_column)
regressor_parse_example_spec.__doc__ = regressor_parse_example_spec_v2.__doc__
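# --- Hedged, illustrative demo (added by the editor; not part of the original module) ---
# Running the module directly prints the combined parsing spec for a tiny, made-up
# feature set; it assumes a TensorFlow build where tf.feature_column and the estimator
# package are importable.
if __name__ == '__main__':
  example_columns = [tf.feature_column.numeric_column('feature_b'),
                     tf.feature_column.numeric_column('feature_c')]
  example_spec = classifier_parse_example_spec_v2(
      example_columns, label_key='my-label', label_dtype=tf.string,
      weight_column='example-weight')
  for key in sorted(example_spec):
    print(key, example_spec[key])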
| 43.641026
| 86
| 0.705379
|
6a89f7623c746992a53ffd9aad4cce29adf5d424
| 8,500
|
py
|
Python
|
transformers/recommendations/matrixfactorization.py
|
james94/driverlessai-recipes
|
87c35460db59ffda8dc18ad82cb3a9b8291410e4
|
[
"Apache-2.0"
] | null | null | null |
transformers/recommendations/matrixfactorization.py
|
james94/driverlessai-recipes
|
87c35460db59ffda8dc18ad82cb3a9b8291410e4
|
[
"Apache-2.0"
] | null | null | null |
transformers/recommendations/matrixfactorization.py
|
james94/driverlessai-recipes
|
87c35460db59ffda8dc18ad82cb3a9b8291410e4
|
[
"Apache-2.0"
] | null | null | null |
"""Collaborative filtering features using various techniques of Matrix Factorization for recommendations.
Recommended for large data"""
"""
Add the user column name and item column name to recipe_dict in the config so that they
match the column names of your dataset, or use the defaults 'user' and 'item'.
Sample Datasets
# Netflix - https://www.kaggle.com/netflix-inc/netflix-prize-data
recipe_dict = "{'user_col': 'user', 'item_col': 'movie'}"
# MovieLens - https://grouplens.org/datasets/movielens/
recipe_dict = "{'user_col': 'userId', 'item_col': 'movieId'}"
# RPackages - https://www.kaggle.com/c/R/data
recipe_dict = "{'user_col': 'User', 'item_col': 'Package'}"
"""
import datatable as dt
import numpy as np
import pandas as pd
import h2o4gpu
import scipy
from h2oaicore.systemutils import config
from h2oaicore.transformer_utils import CustomTransformer
from sklearn.decomposition import NMF
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.preprocessing import LabelEncoder
class RecH2OMFTransformer(CustomTransformer):
_multiclass = False
_can_use_gpu = True
_mf_type = "h2o4gpu"
def __init__(self, n_components=50, _lambda=0.1, batches=1, max_iter=100, alpha=0.1, **kwargs):
super().__init__(**kwargs)
self.user_col = config.recipe_dict['user_col'] if "user_col" in config.recipe_dict else "user"
self.item_col = config.recipe_dict['item_col'] if "item_col" in config.recipe_dict else "item"
if self.__class__._mf_type == "h2o4gpu":
self._n_components = n_components
self._lambda = _lambda
self._batches = batches
self._max_iter = max_iter
elif self.__class__._mf_type == "nmf":
self._n_components = n_components
self._alpha = alpha
self._max_iter = max_iter
@staticmethod
def do_acceptance_test():
return False
@staticmethod
def get_default_properties():
return dict(col_type="all", min_cols="all", max_cols="all", relative_importance=1, num_default_instances=1)
@staticmethod
def get_parameter_choices():
return {"n_components": [10, 30, 50, 70, 100],
"_lambda": [0.01, 0.05, 0.1],
"batches": [1],
"max_iter": [10, 50, 100, 200],
"alpha": [0.01, 0.05, 0.1]}
def fit_transform(self, X: dt.Frame, y: np.array = None):
if len(np.unique(self.labels)) == 2:
le = LabelEncoder()
self.labels = le.fit_transform(self.labels)
y = np.array(le.transform(y), dtype="float32")
else:
y = np.array(y, dtype="float32")
X = X[:, [self.user_col, self.item_col]]
self.user_le = LabelEncoder()
self.item_le = LabelEncoder()
X[:, self.user_col] = dt.Frame(self.user_le.fit_transform(X[:, self.user_col]))
X[:, self.item_col] = dt.Frame(self.item_le.fit_transform(X[:, self.item_col]))
X_pd = X.to_pandas()
if len(np.unique(self.labels)) == 2:
kfold = StratifiedKFold(n_splits=10)
else:
kfold = KFold(n_splits=10)
preds = np.full(X.nrows, fill_value=np.nan)
for train_index, val_index in kfold.split(X_pd, y):
X_train, y_train = X_pd.iloc[train_index,], y[train_index]
X_val, y_val = X_pd.iloc[val_index,], y[val_index]
X_val2 = X_val[(X_val[self.user_col].isin(np.unique(X_train[self.user_col]))) & (
X_val[self.item_col].isin(np.unique(X_train[self.item_col])))]
y_val2 = y_val[(X_val[self.user_col].isin(np.unique(X_train[self.user_col]))) & (
X_val[self.item_col].isin(np.unique(X_train[self.item_col])))]
X_panel = pd.concat([X_train, X_val2], axis=0)
users, user_indices = np.unique(np.array(X_panel[self.user_col], dtype="int32"), return_inverse=True)
items, item_indices = np.unique(np.array(X_panel[self.item_col], dtype="int32"), return_inverse=True)
X_train_user_item_matrix = scipy.sparse.coo_matrix(
(y_train, (user_indices[:len(X_train)], item_indices[:len(X_train)])), shape=(len(users), len(items)))
X_train_shape = X_train_user_item_matrix.shape
X_val_user_item_matrix = scipy.sparse.coo_matrix(
(np.ones(len(X_val2), dtype="float32"), (user_indices[len(X_train):], item_indices[len(X_train):])),
shape=X_train_shape)
if self.__class__._mf_type == "h2o4gpu":
factorization = h2o4gpu.solvers.FactorizationH2O(self._n_components, self._lambda,
max_iter=self._max_iter)
factorization.fit(X_train_user_item_matrix, X_BATCHES=self._batches, THETA_BATCHES=self._batches)
preds[val_index[(X_val[self.user_col].isin(np.unique(X_train[self.user_col]))) & (
X_val[self.item_col].isin(np.unique(X_train[self.item_col])))]] = factorization.predict(
X_val_user_item_matrix).data
elif self.__class__._mf_type == "nmf":
factorization = NMF(n_components=self._n_components, alpha=self._alpha, max_iter=self._max_iter)
user_matrix = factorization.fit_transform(X_train_user_item_matrix)
item_matrix = factorization.components_.T
val_users = np.take(user_matrix, X_val_user_item_matrix.row, axis=0)
val_items = np.take(item_matrix, X_val_user_item_matrix.col, axis=0)
preds[val_index[(X_val[self.user_col].isin(np.unique(X_train[self.user_col]))) & (
X_val[self.item_col].isin(np.unique(X_train[self.item_col])))]] = np.sum(val_users * val_items,
axis=1)
users, user_indices = np.unique(np.array(X_pd[self.user_col], dtype="int32"), return_inverse=True)
items, item_indices = np.unique(np.array(X_pd[self.item_col], dtype="int32"), return_inverse=True)
        # Final fit uses the full data set; the k-fold loop above only produced the
        # out-of-fold training features, so reusing the last fold's y_train here would
        # misalign the ratings with the full-length index arrays.
        X_train_user_item_matrix = scipy.sparse.coo_matrix(
            (y, (user_indices, item_indices)), shape=(len(users), len(items)))
self.X_train_shape = X_train_user_item_matrix.shape
if self.__class__._mf_type == "h2o4gpu":
self.factorization = h2o4gpu.solvers.FactorizationH2O(self._n_components, self._lambda,
max_iter=self._max_iter)
self.factorization.fit(X_train_user_item_matrix, X_BATCHES=self._batches, THETA_BATCHES=self._batches)
elif self.__class__._mf_type == "nmf":
factorization = NMF(n_components=self._n_components, alpha=self._alpha, max_iter=self._max_iter)
self.user_matrix = factorization.fit_transform(X_train_user_item_matrix)
self.item_matrix = factorization.components_.T
return preds
def transform(self, X: dt.Frame):
X = X[:, [self.user_col, self.item_col]]
preds = np.full(X.nrows, fill_value=np.nan)
X_pd = X.to_pandas()
X_test = X_pd[
(X_pd[self.user_col].isin(self.user_le.classes_)) & (X_pd[self.item_col].isin(self.item_le.classes_))]
X_test[self.user_col] = self.user_le.transform(X_test[self.user_col])
X_test[self.item_col] = self.item_le.transform(X_test[self.item_col])
X_test_user_item_matrix = scipy.sparse.coo_matrix(
(np.ones(len(X_test), dtype="float32"), (X_test[self.user_col], X_test[self.item_col])),
shape=self.X_train_shape)
if self.__class__._mf_type == "h2o4gpu":
preds[(X_pd[self.user_col].isin(self.user_le.classes_)) & (
X_pd[self.item_col].isin(self.item_le.classes_))] = self.factorization.predict(
X_test_user_item_matrix).data
elif self.__class__._mf_type == "nmf":
test_users = np.take(self.user_matrix, X_test_user_item_matrix.row, axis=0)
test_items = np.take(self.item_matrix, X_test_user_item_matrix.col, axis=0)
preds[(X_pd[self.user_col].isin(self.user_le.classes_)) & (
X_pd[self.item_col].isin(self.item_le.classes_))] = np.sum(test_users * test_items, axis=1)
return preds
class RecNMFTransformer(RecH2OMFTransformer):
_can_use_gpu = False
_mf_type = "nmf"
| 46.448087
| 118
| 0.638824
|
dfd131fd1aee88423928b970a9177b87a8652cbc
| 16,522
|
py
|
Python
|
fpga/lib/axi/tb/test_axi_adapter_16_32.py
|
totuwei/corundum
|
e983ad519fb4523d0ffca32f5e436195bcfc945c
|
[
"BSD-2-Clause-FreeBSD"
] | 603
|
2018-07-30T05:31:48.000Z
|
2022-03-31T02:53:20.000Z
|
fpga/lib/axi/tb/test_axi_adapter_16_32.py
|
akira2009999/corundum
|
cdc14769c33186c6d45fcd79b95c70889febff2b
|
[
"BSD-2-Clause-FreeBSD"
] | 78
|
2020-08-20T20:06:33.000Z
|
2022-03-30T23:44:37.000Z
|
fpga/lib/axi/tb/test_axi_adapter_16_32.py
|
akira2009999/corundum
|
cdc14769c33186c6d45fcd79b95c70889febff2b
|
[
"BSD-2-Clause-FreeBSD"
] | 200
|
2018-08-24T14:11:21.000Z
|
2022-03-30T14:40:48.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axi
module = 'axi_adapter'
testbench = 'test_%s_16_32' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/axi_adapter_rd.v")
srcs.append("../rtl/axi_adapter_wr.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
ADDR_WIDTH = 32
S_DATA_WIDTH = 16
    S_STRB_WIDTH = (S_DATA_WIDTH//8)  # integer division keeps the strobe width an int under Python 3
M_DATA_WIDTH = 32
    M_STRB_WIDTH = (M_DATA_WIDTH//8)  # integer division keeps the strobe width an int under Python 3
ID_WIDTH = 8
AWUSER_ENABLE = 0
AWUSER_WIDTH = 1
WUSER_ENABLE = 0
WUSER_WIDTH = 1
BUSER_ENABLE = 0
BUSER_WIDTH = 1
ARUSER_ENABLE = 0
ARUSER_WIDTH = 1
RUSER_ENABLE = 0
RUSER_WIDTH = 1
CONVERT_BURST = 1
CONVERT_NARROW_BURST = 1
FORWARD_ID = 1
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_axi_awid = Signal(intbv(0)[ID_WIDTH:])
s_axi_awaddr = Signal(intbv(0)[ADDR_WIDTH:])
s_axi_awlen = Signal(intbv(0)[8:])
s_axi_awsize = Signal(intbv(0)[3:])
s_axi_awburst = Signal(intbv(0)[2:])
s_axi_awlock = Signal(bool(0))
s_axi_awcache = Signal(intbv(0)[4:])
s_axi_awprot = Signal(intbv(0)[3:])
s_axi_awqos = Signal(intbv(0)[4:])
s_axi_awregion = Signal(intbv(0)[4:])
s_axi_awuser = Signal(intbv(0)[AWUSER_WIDTH:])
s_axi_awvalid = Signal(bool(0))
s_axi_wdata = Signal(intbv(0)[S_DATA_WIDTH:])
s_axi_wstrb = Signal(intbv(0)[S_STRB_WIDTH:])
s_axi_wlast = Signal(bool(0))
s_axi_wuser = Signal(intbv(0)[WUSER_WIDTH:])
s_axi_wvalid = Signal(bool(0))
s_axi_bready = Signal(bool(0))
s_axi_arid = Signal(intbv(0)[ID_WIDTH:])
s_axi_araddr = Signal(intbv(0)[ADDR_WIDTH:])
s_axi_arlen = Signal(intbv(0)[8:])
s_axi_arsize = Signal(intbv(0)[3:])
s_axi_arburst = Signal(intbv(0)[2:])
s_axi_arlock = Signal(bool(0))
s_axi_arcache = Signal(intbv(0)[4:])
s_axi_arprot = Signal(intbv(0)[3:])
s_axi_arqos = Signal(intbv(0)[4:])
s_axi_arregion = Signal(intbv(0)[4:])
s_axi_aruser = Signal(intbv(0)[ARUSER_WIDTH:])
s_axi_arvalid = Signal(bool(0))
s_axi_rready = Signal(bool(0))
m_axi_awready = Signal(bool(0))
m_axi_wready = Signal(bool(0))
m_axi_bid = Signal(intbv(0)[ID_WIDTH:])
m_axi_bresp = Signal(intbv(0)[2:])
m_axi_buser = Signal(intbv(0)[BUSER_WIDTH:])
m_axi_bvalid = Signal(bool(0))
m_axi_arready = Signal(bool(0))
m_axi_rid = Signal(intbv(0)[ID_WIDTH:])
m_axi_rdata = Signal(intbv(0)[M_DATA_WIDTH:])
m_axi_rresp = Signal(intbv(0)[2:])
m_axi_rlast = Signal(bool(0))
m_axi_ruser = Signal(intbv(0)[RUSER_WIDTH:])
m_axi_rvalid = Signal(bool(0))
# Outputs
s_axi_awready = Signal(bool(0))
s_axi_wready = Signal(bool(0))
s_axi_bid = Signal(intbv(0)[ID_WIDTH:])
s_axi_bresp = Signal(intbv(0)[2:])
s_axi_buser = Signal(intbv(0)[BUSER_WIDTH:])
s_axi_bvalid = Signal(bool(0))
s_axi_arready = Signal(bool(0))
s_axi_rid = Signal(intbv(0)[ID_WIDTH:])
s_axi_rdata = Signal(intbv(0)[S_DATA_WIDTH:])
s_axi_rresp = Signal(intbv(0)[2:])
s_axi_rlast = Signal(bool(0))
s_axi_ruser = Signal(intbv(0)[RUSER_WIDTH:])
s_axi_rvalid = Signal(bool(0))
m_axi_awid = Signal(intbv(0)[ID_WIDTH:])
m_axi_awaddr = Signal(intbv(0)[ADDR_WIDTH:])
m_axi_awlen = Signal(intbv(0)[8:])
m_axi_awsize = Signal(intbv(0)[3:])
m_axi_awburst = Signal(intbv(0)[2:])
m_axi_awlock = Signal(bool(0))
m_axi_awcache = Signal(intbv(0)[4:])
m_axi_awprot = Signal(intbv(0)[3:])
m_axi_awqos = Signal(intbv(0)[4:])
m_axi_awregion = Signal(intbv(0)[4:])
m_axi_awuser = Signal(intbv(0)[AWUSER_WIDTH:])
m_axi_awvalid = Signal(bool(0))
m_axi_wdata = Signal(intbv(0)[M_DATA_WIDTH:])
m_axi_wstrb = Signal(intbv(0)[M_STRB_WIDTH:])
m_axi_wlast = Signal(bool(0))
m_axi_wuser = Signal(intbv(0)[WUSER_WIDTH:])
m_axi_wvalid = Signal(bool(0))
m_axi_bready = Signal(bool(0))
m_axi_arid = Signal(intbv(0)[ID_WIDTH:])
m_axi_araddr = Signal(intbv(0)[ADDR_WIDTH:])
m_axi_arlen = Signal(intbv(0)[8:])
m_axi_arsize = Signal(intbv(0)[3:])
m_axi_arburst = Signal(intbv(0)[2:])
m_axi_arlock = Signal(bool(0))
m_axi_arcache = Signal(intbv(0)[4:])
m_axi_arprot = Signal(intbv(0)[3:])
m_axi_arqos = Signal(intbv(0)[4:])
m_axi_arregion = Signal(intbv(0)[4:])
m_axi_aruser = Signal(intbv(0)[ARUSER_WIDTH:])
m_axi_arvalid = Signal(bool(0))
m_axi_rready = Signal(bool(0))
# AXI4 master
axi_master_inst = axi.AXIMaster()
axi_master_pause = Signal(bool(False))
axi_master_logic = axi_master_inst.create_logic(
clk,
rst,
m_axi_awid=s_axi_awid,
m_axi_awaddr=s_axi_awaddr,
m_axi_awlen=s_axi_awlen,
m_axi_awsize=s_axi_awsize,
m_axi_awburst=s_axi_awburst,
m_axi_awlock=s_axi_awlock,
m_axi_awcache=s_axi_awcache,
m_axi_awprot=s_axi_awprot,
m_axi_awqos=s_axi_awqos,
m_axi_awregion=s_axi_awregion,
m_axi_awvalid=s_axi_awvalid,
m_axi_awready=s_axi_awready,
m_axi_wdata=s_axi_wdata,
m_axi_wstrb=s_axi_wstrb,
m_axi_wlast=s_axi_wlast,
m_axi_wvalid=s_axi_wvalid,
m_axi_wready=s_axi_wready,
m_axi_bid=s_axi_bid,
m_axi_bresp=s_axi_bresp,
m_axi_bvalid=s_axi_bvalid,
m_axi_bready=s_axi_bready,
m_axi_arid=s_axi_arid,
m_axi_araddr=s_axi_araddr,
m_axi_arlen=s_axi_arlen,
m_axi_arsize=s_axi_arsize,
m_axi_arburst=s_axi_arburst,
m_axi_arlock=s_axi_arlock,
m_axi_arcache=s_axi_arcache,
m_axi_arprot=s_axi_arprot,
m_axi_arqos=s_axi_arqos,
m_axi_arregion=s_axi_arregion,
m_axi_arvalid=s_axi_arvalid,
m_axi_arready=s_axi_arready,
m_axi_rid=s_axi_rid,
m_axi_rdata=s_axi_rdata,
m_axi_rresp=s_axi_rresp,
m_axi_rlast=s_axi_rlast,
m_axi_rvalid=s_axi_rvalid,
m_axi_rready=s_axi_rready,
pause=axi_master_pause,
name='master'
)
# AXI4 RAM model
axi_ram_inst = axi.AXIRam(2**16)
axi_ram_pause = Signal(bool(False))
axi_ram_port0 = axi_ram_inst.create_port(
clk,
s_axi_awid=m_axi_awid,
s_axi_awaddr=m_axi_awaddr,
s_axi_awlen=m_axi_awlen,
s_axi_awsize=m_axi_awsize,
s_axi_awburst=m_axi_awburst,
s_axi_awlock=m_axi_awlock,
s_axi_awcache=m_axi_awcache,
s_axi_awprot=m_axi_awprot,
s_axi_awvalid=m_axi_awvalid,
s_axi_awready=m_axi_awready,
s_axi_wdata=m_axi_wdata,
s_axi_wstrb=m_axi_wstrb,
s_axi_wlast=m_axi_wlast,
s_axi_wvalid=m_axi_wvalid,
s_axi_wready=m_axi_wready,
s_axi_bid=m_axi_bid,
s_axi_bresp=m_axi_bresp,
s_axi_bvalid=m_axi_bvalid,
s_axi_bready=m_axi_bready,
s_axi_arid=m_axi_arid,
s_axi_araddr=m_axi_araddr,
s_axi_arlen=m_axi_arlen,
s_axi_arsize=m_axi_arsize,
s_axi_arburst=m_axi_arburst,
s_axi_arlock=m_axi_arlock,
s_axi_arcache=m_axi_arcache,
s_axi_arprot=m_axi_arprot,
s_axi_arvalid=m_axi_arvalid,
s_axi_arready=m_axi_arready,
s_axi_rid=m_axi_rid,
s_axi_rdata=m_axi_rdata,
s_axi_rresp=m_axi_rresp,
s_axi_rlast=m_axi_rlast,
s_axi_rvalid=m_axi_rvalid,
s_axi_rready=m_axi_rready,
pause=axi_ram_pause,
name='port0'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_axi_awid=s_axi_awid,
s_axi_awaddr=s_axi_awaddr,
s_axi_awlen=s_axi_awlen,
s_axi_awsize=s_axi_awsize,
s_axi_awburst=s_axi_awburst,
s_axi_awlock=s_axi_awlock,
s_axi_awcache=s_axi_awcache,
s_axi_awprot=s_axi_awprot,
s_axi_awqos=s_axi_awqos,
s_axi_awregion=s_axi_awregion,
s_axi_awuser=s_axi_awuser,
s_axi_awvalid=s_axi_awvalid,
s_axi_awready=s_axi_awready,
s_axi_wdata=s_axi_wdata,
s_axi_wstrb=s_axi_wstrb,
s_axi_wlast=s_axi_wlast,
s_axi_wuser=s_axi_wuser,
s_axi_wvalid=s_axi_wvalid,
s_axi_wready=s_axi_wready,
s_axi_bid=s_axi_bid,
s_axi_bresp=s_axi_bresp,
s_axi_buser=s_axi_buser,
s_axi_bvalid=s_axi_bvalid,
s_axi_bready=s_axi_bready,
s_axi_arid=s_axi_arid,
s_axi_araddr=s_axi_araddr,
s_axi_arlen=s_axi_arlen,
s_axi_arsize=s_axi_arsize,
s_axi_arburst=s_axi_arburst,
s_axi_arlock=s_axi_arlock,
s_axi_arcache=s_axi_arcache,
s_axi_arprot=s_axi_arprot,
s_axi_arqos=s_axi_arqos,
s_axi_arregion=s_axi_arregion,
s_axi_aruser=s_axi_aruser,
s_axi_arvalid=s_axi_arvalid,
s_axi_arready=s_axi_arready,
s_axi_rid=s_axi_rid,
s_axi_rdata=s_axi_rdata,
s_axi_rresp=s_axi_rresp,
s_axi_rlast=s_axi_rlast,
s_axi_ruser=s_axi_ruser,
s_axi_rvalid=s_axi_rvalid,
s_axi_rready=s_axi_rready,
m_axi_awid=m_axi_awid,
m_axi_awaddr=m_axi_awaddr,
m_axi_awlen=m_axi_awlen,
m_axi_awsize=m_axi_awsize,
m_axi_awburst=m_axi_awburst,
m_axi_awlock=m_axi_awlock,
m_axi_awcache=m_axi_awcache,
m_axi_awprot=m_axi_awprot,
m_axi_awqos=m_axi_awqos,
m_axi_awregion=m_axi_awregion,
m_axi_awuser=m_axi_awuser,
m_axi_awvalid=m_axi_awvalid,
m_axi_awready=m_axi_awready,
m_axi_wdata=m_axi_wdata,
m_axi_wstrb=m_axi_wstrb,
m_axi_wlast=m_axi_wlast,
m_axi_wuser=m_axi_wuser,
m_axi_wvalid=m_axi_wvalid,
m_axi_wready=m_axi_wready,
m_axi_bid=m_axi_bid,
m_axi_bresp=m_axi_bresp,
m_axi_buser=m_axi_buser,
m_axi_bvalid=m_axi_bvalid,
m_axi_bready=m_axi_bready,
m_axi_arid=m_axi_arid,
m_axi_araddr=m_axi_araddr,
m_axi_arlen=m_axi_arlen,
m_axi_arsize=m_axi_arsize,
m_axi_arburst=m_axi_arburst,
m_axi_arlock=m_axi_arlock,
m_axi_arcache=m_axi_arcache,
m_axi_arprot=m_axi_arprot,
m_axi_arqos=m_axi_arqos,
m_axi_arregion=m_axi_arregion,
m_axi_aruser=m_axi_aruser,
m_axi_arvalid=m_axi_arvalid,
m_axi_arready=m_axi_arready,
m_axi_rid=m_axi_rid,
m_axi_rdata=m_axi_rdata,
m_axi_rresp=m_axi_rresp,
m_axi_rlast=m_axi_rlast,
m_axi_ruser=m_axi_ruser,
m_axi_rvalid=m_axi_rvalid,
m_axi_rready=m_axi_rready
)
@always(delay(4))
def clkgen():
clk.next = not clk
def wait_normal():
while not axi_master_inst.idle():
yield clk.posedge
def wait_pause_master():
while not axi_master_inst.idle():
axi_master_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
axi_master_pause.next = False
yield clk.posedge
def wait_pause_slave():
while not axi_master_inst.idle():
axi_ram_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
axi_ram_pause.next = False
yield clk.posedge
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: write")
current_test.next = 1
addr = 4
test_data = b'\x11\x22\x33\x44'
axi_master_inst.init_write(addr, test_data)
yield axi_master_inst.wait()
yield clk.posedge
data = axi_ram_inst.read_mem(addr&0xffffff80, 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert axi_ram_inst.read_mem(addr, len(test_data)) == test_data
yield delay(100)
yield clk.posedge
print("test 2: read")
current_test.next = 2
addr = 4
test_data = b'\x11\x22\x33\x44'
axi_ram_inst.write_mem(addr, test_data)
axi_master_inst.init_read(addr, len(test_data))
yield axi_master_inst.wait()
yield clk.posedge
data = axi_master_inst.get_read_data()
assert data[0] == addr
assert data[1] == test_data
yield delay(100)
yield clk.posedge
print("test 3: various writes")
current_test.next = 3
for length in list(range(1,8))+[1024]:
for offset in list(range(4,8))+[4096-4]:
for size, cache in ((1, 0b0011), (1, 0b0000), (0, 0b0011)):
for wait in wait_normal, wait_pause_master, wait_pause_slave:
print("length %d, offset %d, size %d, cache %d"% (length, offset, size, cache))
#addr = 256*(16*offset+length)+offset
addr = offset
test_data = bytearray([x%256 for x in range(length)])
axi_ram_inst.write_mem(addr&0xffffff80, b'\xAA'*(length+256))
axi_master_inst.init_write(addr, test_data, size=size, cache=cache)
yield wait()
yield clk.posedge
data = axi_ram_inst.read_mem(addr&0xffffff80, 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert axi_ram_inst.read_mem(addr, length) == test_data
assert axi_ram_inst.read_mem(addr-1, 1) == b'\xAA'
assert axi_ram_inst.read_mem(addr+length, 1) == b'\xAA'
yield delay(100)
yield clk.posedge
print("test 4: various reads")
current_test.next = 4
for length in list(range(1,8))+[1024]:
for offset in list(range(4,8))+[4096-4]:
for size, cache in ((1, 0b0011), (1, 0b0000), (0, 0b0011)):
for wait in wait_normal, wait_pause_master, wait_pause_slave:
print("length %d, offset %d, size %d, cache %d"% (length, offset, size, cache))
#addr = 256*(16*offset+length)+offset
addr = offset
test_data = bytearray([x%256 for x in range(length)])
axi_ram_inst.write_mem(addr, test_data)
axi_master_inst.init_read(addr, length, size=size, cache=cache)
yield wait()
yield clk.posedge
data = axi_master_inst.get_read_data()
assert data[0] == addr
assert data[1] == test_data
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
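# --- Hedged, illustrative helper (added for exposition; not used by the testbench) ---
# It only demonstrates the byte-lane arithmetic behind 16-to-32 bit upsizing: an
# aligned 2-byte beat at address A drives byte lanes A%4 .. A%4+1 of the 32-bit bus,
# i.e. wstrb 0b0011 when A%4 == 0 and 0b1100 when A%4 == 2. Burst splitting, ID and
# response handling are left to the RTL and are not modelled here.
def _upsized_wstrb(addr, narrow_strb=0b11, narrow_bytes=2, wide_bytes=4):
    lane = addr % wide_bytes
    assert lane % narrow_bytes == 0, "narrow beat must be aligned to its own width"
    return narrow_strb << lane
assert _upsized_wstrb(4) == 0b0011
assert _upsized_wstrb(6) == 0b1100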
| 32.846918
| 103
| 0.634124
|
a67fdda2542a1a0804b65f5d16d9200fcd967d04
| 957
|
py
|
Python
|
test/unit/mock.py
|
xtough/sapcli
|
bdcb54afe387829af251543ef8c17394b5595d4d
|
[
"Apache-2.0"
] | null | null | null |
test/unit/mock.py
|
xtough/sapcli
|
bdcb54afe387829af251543ef8c17394b5595d4d
|
[
"Apache-2.0"
] | null | null | null |
test/unit/mock.py
|
xtough/sapcli
|
bdcb54afe387829af251543ef8c17394b5595d4d
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, NamedTuple
import sap.adt
class Response(NamedTuple):
text: str
status_code: int
headers: Dict
class Request(NamedTuple):
method: str
adt_uri: str
headers: Dict
body: str
params: Dict
def ok_responses():
yield Response(text='', status_code=200, headers={})
class Connection(sap.adt.Connection):
def __init__(self, responses=None, user='ANZEIGER'):
super(Connection, self).__init__('mockhost', 'mockclient', user, 'mockpass')
self.execs = list()
self._resp_iter = ok_responses() if responses is None else iter(responses)
def execute(self, method, adt_uri, params=None, headers=None, body=None):
final_uri = '/' + self.uri + '/' + adt_uri
self.execs.append(Request(method, final_uri, headers, body, params))
return next(self._resp_iter)
def mock_methods(self):
return [(e.method, e.adt_uri) for e in self.execs]
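# --- Hedged usage sketch (added for illustration; the ADT path below is made up) ---
# The mock Connection records every request and replays the canned responses, so a
# unit test can assert on the traffic without a real SAP system:
#
#   conn = Connection(responses=[Response(text='OK', status_code=200, headers={})])
#   conn.execute('GET', 'programs/programs/ztest')
#   assert conn.mock_methods()[0][0] == 'GET'
#   assert conn.execs[0].adt_uri.endswith('programs/programs/ztest')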
| 22.255814
| 84
| 0.660397
|
d422aba8ebdc3f5c081d3073f7479e8ba0f5d229
| 217
|
py
|
Python
|
L1Trigger/L1TGEM/python/me0TriggerDigis_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
L1Trigger/L1TGEM/python/me0TriggerDigis_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
L1Trigger/L1TGEM/python/me0TriggerDigis_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
me0TriggerDigis = cms.EDProducer("ME0TriggerProducer",
ME0PadDigis = cms.InputTag("simMuonME0PadDigis"),
tmbParam = cms.PSet(
verbosity = cms.int32(0)
)
)
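# --- Hedged example (added for illustration; not part of the original cfi) ---
# A common CMSSW pattern is to clone the producer in a cfg and raise the verbosity:
#
#   from L1Trigger.L1TGEM.me0TriggerDigis_cfi import me0TriggerDigis
#   me0TriggerDigisDebug = me0TriggerDigis.clone()
#   me0TriggerDigisDebug.tmbParam.verbosity = 2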
| 24.111111
| 54
| 0.709677
|
2ae1ed0d343e2a2c28e827b9cca77ef3199c150c
| 1,573
|
py
|
Python
|
distill/util.py
|
EthanZhangYC/invariance-equivariance
|
6e369fd6f43c6b217740f7acd9533c298c43d360
|
[
"MIT"
] | 64
|
2021-03-26T12:11:28.000Z
|
2022-03-22T02:19:48.000Z
|
distill/util.py
|
wct5217488/invariance-equivariance
|
6dfadb39a485d0e55c1cd0c8ce0e0f6dfc602dd3
|
[
"MIT"
] | 4
|
2021-03-27T16:14:04.000Z
|
2021-11-29T08:11:33.000Z
|
distill/util.py
|
wct5217488/invariance-equivariance
|
6dfadb39a485d0e55c1cd0c8ce0e0f6dfc602dd3
|
[
"MIT"
] | 8
|
2021-06-09T02:41:37.000Z
|
2022-02-27T02:14:17.000Z
|
from __future__ import print_function
import torch.nn as nn
class Embed(nn.Module):
"""Embedding module"""
def __init__(self, dim_in=1024, dim_out=128):
super(Embed, self).__init__()
self.linear = nn.Linear(dim_in, dim_out)
self.l2norm = Normalize(2)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.linear(x)
x = self.l2norm(x)
return x
class LinearEmbed(nn.Module):
"""Linear Embedding"""
def __init__(self, dim_in=1024, dim_out=128):
super(LinearEmbed, self).__init__()
self.linear = nn.Linear(dim_in, dim_out)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.linear(x)
return x
class MLPEmbed(nn.Module):
"""non-linear embed by MLP"""
def __init__(self, dim_in=1024, dim_out=128):
super(MLPEmbed, self).__init__()
self.linear1 = nn.Linear(dim_in, 2 * dim_out)
self.relu = nn.ReLU(inplace=True)
self.linear2 = nn.Linear(2 * dim_out, dim_out)
self.l2norm = Normalize(2)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.relu(self.linear1(x))
x = self.l2norm(self.linear2(x))
return x
class Normalize(nn.Module):
"""normalization layer"""
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm)
return out
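# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows the expected shapes: MLPEmbed flattens its input, maps it to `dim_out`
# features and L2-normalises each row, so every output row has (approximately) unit norm.
def _demo_embed():
    import torch
    x = torch.randn(4, 1024)
    embed = MLPEmbed(dim_in=1024, dim_out=128)
    out = embed(x)
    print(out.shape, out.norm(dim=1))  # torch.Size([4, 128]) and row norms ~ 1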
if __name__ == '__main__':
pass
| 25.370968
| 74
| 0.589955
|
d4bf00f5a699ff23820f9d68a5b932b8702fa4a1
| 169
|
py
|
Python
|
django_underconstruction/apps.py
|
koenwoortman/django-underconstruction
|
0f00c9b7a8c3cf863c9ac04736fe65724898a847
|
[
"MIT"
] | 1
|
2021-08-10T18:01:38.000Z
|
2021-08-10T18:01:38.000Z
|
django_underconstruction/apps.py
|
koenwoortman/django-underconstruction
|
0f00c9b7a8c3cf863c9ac04736fe65724898a847
|
[
"MIT"
] | null | null | null |
django_underconstruction/apps.py
|
koenwoortman/django-underconstruction
|
0f00c9b7a8c3cf863c9ac04736fe65724898a847
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class DjangoUnderConstructionConfig(AppConfig):
name = 'django_underconstruction'
verbose_name = "Django Under Construction"
| 24.142857
| 47
| 0.798817
|
f7ee4df0fb2f49eed08fecc5043e0186a7446ca8
| 53,769
|
py
|
Python
|
spectramanipulator/user_namespace.py
|
dmadea/Spectra-Manipulator
|
ddc1b27cb4f4691096dfa7b2975df350d2eaf40e
|
[
"MIT"
] | 1
|
2020-07-18T17:46:01.000Z
|
2020-07-18T17:46:01.000Z
|
spectramanipulator/user_namespace.py
|
dmadea/Spectra-Manipulator
|
ddc1b27cb4f4691096dfa7b2975df350d2eaf40e
|
[
"MIT"
] | null | null | null |
spectramanipulator/user_namespace.py
|
dmadea/Spectra-Manipulator
|
ddc1b27cb4f4691096dfa7b2975df350d2eaf40e
|
[
"MIT"
] | null | null | null |
import numpy as np
from typing import Iterable
import os
import matplotlib as mpl
import matplotlib.pyplot as plt # we plot graphs with this library
from matplotlib import cm
from matplotlib.ticker import *
import matplotlib.gridspec as gridspec
from matplotlib import colors as c
# from copy import deepcopy
from PyQt5.QtWidgets import QApplication
from scipy.linalg import lstsq
from spectramanipulator.settings import Settings
from spectramanipulator.spectrum import fi, Spectrum, SpectrumList, group2mat
from scipy.integrate import simps, cumtrapz
from scipy.stats import linregress
from uncertainties import ufloat, unumpy
import functools
# for backward compatibility of smpj files
ItemList = list
WL_LABEL = 'Wavelength / nm'
WN_LABEL = "Wavenumber / $10^{4}$ cm$^{-1}$"
# needed for correctly display tics for symlog scale
class MinorSymLogLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks for a symlog scaling.
"""
def __init__(self, linthresh, nints=10):
"""
Ticks will be placed between the major ticks.
The placement is linear for x between -linthresh and linthresh,
        otherwise it is logarithmic. nints gives the number of
intervals that will be bounded by the minor ticks.
"""
self.linthresh = linthresh
self.nintervals = nints
def __call__(self):
# Return the locations of the ticks
majorlocs = self.axis.get_majorticklocs()
if len(majorlocs) == 1:
return self.raise_if_exceeds(np.array([]))
# add temporary major tick locs at either end of the current range
# to fill in minor tick gaps
dmlower = majorlocs[1] - majorlocs[0] # major tick difference at lower end
        dmupper = majorlocs[-1] - majorlocs[-2]  # major tick difference at upper end
# add temporary major tick location at the lower end
if majorlocs[0] != 0. and ((majorlocs[0] != self.linthresh and dmlower > self.linthresh) or (
dmlower == self.linthresh and majorlocs[0] < 0)):
majorlocs = np.insert(majorlocs, 0, majorlocs[0] * 10.)
else:
majorlocs = np.insert(majorlocs, 0, majorlocs[0] - self.linthresh)
# add temporary major tick location at the upper end
if majorlocs[-1] != 0. and ((np.abs(majorlocs[-1]) != self.linthresh and dmupper > self.linthresh) or (
dmupper == self.linthresh and majorlocs[-1] > 0)):
majorlocs = np.append(majorlocs, majorlocs[-1] * 10.)
else:
majorlocs = np.append(majorlocs, majorlocs[-1] + self.linthresh)
# iterate through minor locs
minorlocs = []
# handle the lowest part
for i in range(1, len(majorlocs)):
majorstep = majorlocs[i] - majorlocs[i - 1]
if abs(majorlocs[i - 1] + majorstep / 2) < self.linthresh:
ndivs = self.nintervals
else:
ndivs = self.nintervals - 1.
minorstep = majorstep / ndivs
locs = np.arange(majorlocs[i - 1], majorlocs[i], minorstep)[1:]
minorlocs.extend(locs)
return self.raise_if_exceeds(np.array(minorlocs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
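# --- Hedged usage sketch (added for illustration) ---
# Typical wiring for a symlog axis; the linthresh value is arbitrary and, on older
# Matplotlib versions, the keyword may be spelled linthreshx/linthreshy instead.
#
#   fig, ax = plt.subplots()
#   ax.set_xscale('symlog', linthresh=100)
#   ax.xaxis.set_minor_locator(MinorSymLogLocator(100))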
def setup_wavenumber_axis(ax, x_label=WN_LABEL,
x_major_locator=None, x_minor_locator=AutoMinorLocator(5), factor=1e3):
secondary_ax = ax.secondary_xaxis('top', functions=(lambda x: factor / x, lambda x: 1 / (factor * x)))
secondary_ax.tick_params(which='major', direction='in')
secondary_ax.tick_params(which='minor', direction='in')
if x_major_locator:
secondary_ax.xaxis.set_major_locator(x_major_locator)
if x_minor_locator:
secondary_ax.xaxis.set_minor_locator(x_minor_locator)
secondary_ax.set_xlabel(x_label)
return secondary_ax
def set_main_axis(ax, x_label=WL_LABEL, y_label="Absorbance", xlim=(None, None), ylim=(None, None),
x_major_locator=None, x_minor_locator=None, y_major_locator=None, y_minor_locator=None):
ax.set_ylabel(y_label)
ax.set_xlabel(x_label)
if xlim[0] is not None:
ax.set_xlim(xlim)
if ylim[0] is not None:
ax.set_ylim(ylim)
if x_major_locator:
ax.xaxis.set_major_locator(x_major_locator)
if x_minor_locator:
ax.xaxis.set_minor_locator(x_minor_locator)
if y_major_locator:
ax.yaxis.set_major_locator(y_major_locator)
if y_minor_locator:
ax.yaxis.set_minor_locator(y_minor_locator)
ax.tick_params(axis='both', which='major', direction='in')
ax.tick_params(axis='both', which='minor', direction='in')
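# --- Hedged usage sketch (added for illustration) ---
# The secondary axis converts wavelength in nm to wavenumber in 10^4 cm^-1:
# wavenumber [cm^-1] = 1e7 / lambda [nm], so with factor=1e3 the mapping factor / x
# gives 1e3 / 500 = 2.0, i.e. 2.0 x 10^4 cm^-1 at 500 nm.
#
#   fig, ax = plt.subplots()
#   set_main_axis(ax, xlim=(230, 600))
#   setup_wavenumber_axis(ax)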
def _transform_func(transform=lambda y: y):
def decorator(fn):
@functools.wraps(fn)
def func(item):
fn_name = fn.__name__
if isinstance(item, Spectrum):
y_data = transform(item.data[:, 1])
return Spectrum.from_xy_values(item.data[:, 0], np.nan_to_num(y_data), name=f'{fn_name}({item.name})')
elif isinstance(item, SpectrumList):
sl = SpectrumList(name=f'{fn_name}({item.name})')
for sp in item:
y_data = transform(sp.data[:, 1])
new_sp = Spectrum.from_xy_values(sp.data[:, 0], np.nan_to_num(y_data), name=f'{fn_name}({sp.name})')
sl.children.append(new_sp)
return sl
else:
return transform(item) # this may not be implemented, but for numbers and ndarrays it will work
return func
return decorator
@_transform_func(lambda y: np.exp(y))
def exp(item):
"""
Calculates the exponential of y values and returns a new Spectrum/SpectrumList.
"""
pass
@_transform_func(lambda y: np.log10(y))
def log10(item):
"""
Calculates the decadic logarithm of y values and returns a new Spectrum/SpectrumList.
"""
pass
@_transform_func(lambda y: np.log(y))
def log(item):
"""
Calculates the natural logarithm of y values and returns a new Spectrum/SpectrumList
"""
pass
@_transform_func(lambda y: -np.log10(-y))
def T2A_LFP(item):
"""
Performs the transmittance to absorbance conversion for nano kinetics,
y_transformed = -log10(-y)."""
pass
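# --- Hedged example (added for illustration; _sqrt_y is not part of the original API) ---
# Any elementwise NumPy transform can be lifted to Spectrum/SpectrumList this way;
# the wrapped body is never executed, only its __name__ is used to name the results.
@_transform_func(lambda y: np.sqrt(y))
def _sqrt_y(item):
    """Calculates the square root of y values and returns a new Spectrum/SpectrumList."""
    pass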
#
# def add_to_list(spectra):
# """
# Copies all spectra and imports them to the Tree Widget.
#
# Parameters
# ----------
# spectra : {:class:`Spectrum`, :class:`SpectrumList`, list, list of lists, SpectrumItemGroup, SpectrumItem}
# The input spectra to be added into Tree Widget.
# """
#
# if UserNamespace.instance is not None:
# UserNamespace.instance.tw.add_to_list(spectra)
# def load_kinetics(spectra):
# """
# Copies all spectra and imports them to the Tree Widget.
#
# Parameters
# ----------
# spectra : {:class:`Spectrum`, :class:`SpectrumList`, list, list of lists, SpectrumItemGroup, SpectrumItem}
# The input spectra to be added into Tree Widget.
# """
#
# if UserNamespace.instance is not None:
# UserNamespace.instance.tw.add_to_list(spectra)
def import_files(filepaths):
"""
Imports the filepaths and add to Tree Widget
Parameters
----------
filepaths : list of strs or str
List of filepaths to import.
"""
if UserNamespace.instance is not None:
UserNamespace.instance.main.tree_widget.import_files(filepaths)
def set_xy_range(x0=None, x1=None, y0=None, y1=None, padding=0):
"""
Changes the x and y ranges of scene in Plot Widget.
Parameters
----------
x0 : {int, float, None}
        New first x value. If None, the old value is kept.
x1 : {int, float, None}
New last x value. If None, old value is kept.
y0 : {int, float, None}
        New first y value. If None, the old value is kept.
y1 : {int, float, None}
New last y value. If None, old value is kept.
padding : {int, float}
        Sets the padding around the chosen rectangle. If 0, no padding will be used.
"""
plot_widget = UserNamespace.instance.main.grpView
x_range, y_range = plot_widget.plotItem.getViewBox().viewRange()
plot_widget.plotItem.getViewBox().setXRange(x_range[0] if x0 is None else x0,
x_range[1] if x1 is None else x1,
padding=padding)
plot_widget.plotItem.getViewBox().setYRange(y_range[0] if y0 is None else y0,
y_range[1] if y1 is None else y1,
padding=padding)
def set_default_HSV_color_scheme():
"""Sets the default values for HSV color scheme."""
Settings.hues = 9
Settings.values = 1
Settings.maxValue = 255
Settings.minValue = 150
Settings.maxHue = 360
Settings.minHue = 0
Settings.sat = 255
Settings.alpha = 255
if Settings.HSV_color_scheme:
redraw_all_spectra()
def set_HSV_color_scheme(active=True, **kwargs):
"""Set the options for HSV color scheme and whether the scheme is active.
Options
-------
================ =================================================================================
*active* (bool) True for setting the scheme active, False for not (default color scheme will be
used).
*hues* (int) The number of hues that will be repeating, default 9.
*values* (int) The number of values/brightnesses that will be repeating, default 1.
*minValue* (int) A minimum value/brightness, this can be <0, 255>, default 150.
*maxValue* (int) A maximum value/brightness, this can be <0, 255>, default 255.
*minHue* (int) A minimum hue, this can be <0, 360>, default 0
*maxHue* (int) A maximum hue, this can be <0, 360>, default 360
*sat* (int) The saturation value, this can be <0, 255>, default 255
*alpha* (int) The transparency value, this can be <0, 255>, default 255
================ =================================================================================
"""
hues = kwargs.get('hues', None)
values = kwargs.get('values', None)
maxValue = kwargs.get('maxValue', None)
minValue = kwargs.get('minValue', None)
maxHue = kwargs.get('maxHue', None)
minHue = kwargs.get('minHue', None)
sat = kwargs.get('sat', None)
alpha = kwargs.get('alpha', None)
Settings.HSV_color_scheme = active
Settings.hues = hues if hues is not None else Settings.hues
Settings.values = values if values is not None else Settings.values
Settings.maxValue = maxValue if maxValue is not None else Settings.maxValue
Settings.minValue = minValue if minValue is not None else Settings.minValue
Settings.maxHue = maxHue if maxHue is not None else Settings.maxHue
Settings.minHue = minHue if minHue is not None else Settings.minHue
Settings.sat = sat if sat is not None else Settings.sat
Settings.alpha = alpha if alpha is not None else Settings.alpha
redraw_all_spectra()
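# --- Hedged usage sketch (added for illustration) ---
# Activate the HSV scheme with 12 repeating hues and a narrower brightness range;
# the keyword names correspond to the options documented above.
#
#   set_HSV_color_scheme(True, hues=12, minValue=120, maxValue=255, sat=230)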
def copy_to_clipboard(array, delimiter='\t', decimal_sep='.', new_line='\n'):
"""Copies the *array* of numbers into clipboard. This can be then pasted to Excel for example.
Parameters
----------
array : {array_like, iterable}
Array of values. Can be 1D or 2D array
delimiter : str
Delimiter between numbers, default tabulator '\\\\t'
decimal_sep : str
Decimal separator, default '.'
new_line : str
New line character, default '\\\\n'
"""
if not isinstance(array, (np.ndarray, Iterable, list, tuple)):
raise ValueError(f"Cannot copy {type(array)} to clipboard.")
try:
text = new_line.join(delimiter.join(str(num).replace('.', decimal_sep) for num in row) for row in array)
    except TypeError:  # the second dimension is not iterable, we probably got a 1D array, so let's copy only that
text = delimiter.join(str(num).replace('.', decimal_sep) for num in array)
cb = QApplication.clipboard()
cb.clear(mode=cb.Clipboard)
cb.setText(text, mode=cb.Clipboard)
def update_view():
"""Updates the Tree Widget."""
if UserNamespace.instance is None:
return
mw = UserNamespace.instance.main
mw.tree_widget.update_view()
mw.tree_widget.setup_info()
def redraw_all_spectra():
"""Redraws all spectra."""
if UserNamespace.instance is None:
return
mw = UserNamespace.instance.main
mw.redraw_all_spectra()
### Calculation of epsilon from concentration-dependent absorption spectra. The names of the spectra must contain
### the real concentrations, and the spectra must be ordered from lowest to highest concentration.
def _get_C(group):
"""Returns parsed names to floats from a group"""
x_vals_temp = []
for sp in group:
try:
x_vals_temp.append(float(sp.name.replace(',', '.').strip()))
except ValueError:
raise ValueError("Names of spectra cannot be parsed to float.")
return np.asarray(x_vals_temp, dtype=np.float64)
# def _get_D(group):
# D = group[0].data[:, 1]
# for i in range(1, len(group)):
# D = np.vstack((D, group[i].data[:, 1]))
# return D
def calc_Eps(group):
wls, c, D = group2mat(group)
# C needs to be changed to column vector
ST = lstsq(c[:, None], D.T)[0]
# add a spectrum to list
return Spectrum.from_xy_values(wls, ST.flatten(), name=group.name + '-epsilon')
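# --- Hedged note on the least-squares step in calc_Eps (added for exposition) ---
# group2mat returns D with one column per concentration; transposed, its rows are the
# measured spectra. Beer-Lambert (unit path length assumed) gives D.T = c @ eps.T with
# c the column vector of concentrations, so the least-squares estimate is
# eps.T = (c.T c)^-1 c.T D.T, which is exactly what lstsq(c[:, None], D.T) computes,
# one wavelength (column) at a time. A quick self-contained check:
#
#   c_chk = np.array([1.0, 2.0, 3.0]); eps_chk = np.array([0.5, 0.2])
#   D_chk = np.outer(c_chk, eps_chk)            # rows = concentrations
#   assert np.allclose(lstsq(c_chk[:, None], D_chk)[0].ravel(), eps_chk)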
def rename_times(group, decimal_places=1):
"""Renames the group that has names in seconds. Changes for minutes for 60s <= time < 1 hour to minutes and
time >= 1 hour to hours."""
parsed_times = []
times = _get_C(group)
for time in times:
unit = ' s'
if time >= 3600:
time /= 3600
unit = ' h'
elif 60 <= time < 3600:
time /= 60
unit = ' min'
time = np.round(time, decimal_places)
parsed_times.append(f'{time}{unit}')
group.set_names(parsed_times)
# def load_kinetics(dir_name, spectra_dir_name='spectra', times_fname='times.txt', blank_spectrum='blank.dx', dt=None,
# b_corr=None, cut=None, corr_to_zero_time=True):
# """Given a directory name that contains folders of individual experiments, it loads all kinetics.
# each experiment folder must contain folder spectra (or defined in spectra_dir_name arg.)
# if blank is given, it will be subtracted from all spectra, times.txt will contain
# times for all spectra, optional baseline correction and cut can be done.
#
# Folder structure:
# [dir_name]
# [exp1_dir]
# [spectra]
# 01.dx (or .csv or .txt)
# 02.dx
# ...
# times.txt (optional)
# blank.dx (optional)
# [exp2_dir]
# ...
# ...
# """
#
# if UserNamespace.instance is None:
# return
#
# if not os.path.isdir(dir_name):
# raise ValueError(f'{dir_name} does not exist!')
#
# for item in os.listdir(dir_name):
# path = os.path.join(dir_name, item)
# if not os.path.isdir(path):
# continue
#
# load_kinetic(path, spectra_dir_name=spectra_dir_name, times_fname=times_fname, blank_spectrum=blank_spectrum,
# dt=dt, b_corr=b_corr, cut=cut, corr_to_zero_time=corr_to_zero_time)
#
#
# def load_kinetic(dir_name, spectra_dir_name='spectra', times_fname='times.txt', blank_spectrum='blank.dx', dt=None,
# b_corr=None, cut=None, corr_to_zero_time=True):
# """Given a directory name, it loads all spectra in dir named "spectra" - func. arg.,
# if blank is given, it will be subtracted from all spectra, times.txt will contain
# times for all spectra, optional baseline correction and cut can be done.
#
# Folder structure:
# [dir_name]
# [spectra]
# 01.dx
# 02.dx
# ...
# times.txt (optional)
# blank.dx (optional)
# """
#
# if UserNamespace.instance is None:
# return
#
# tw = UserNamespace.instance.main.tree_widget
# root = tw.myModel.root # item in IPython console
#
# if not os.path.isdir(dir_name):
# raise ValueError(f'{dir_name} does not exist!')
#
# spectra_path = os.path.join(dir_name, spectra_dir_name)
#
# if not os.path.isdir(spectra_path):
# raise ValueError(f'{spectra_dir_name} does not exist in {dir_name}!')
#
# spectras = [os.path.join(spectra_path, filename) for filename in os.listdir(spectra_path)]
#
# n_items_before = root.__len__()
# tw.import_files(spectras)
# n_spectra = root.__len__() - n_items_before
#
# tw.add_items_to_group(root[n_items_before:], edit=False) # add loaded spectra to group
# root[n_items_before].name = f'raw [{os.path.split(dir_name)[1]}]' # set name of a group
#
# times = np.asarray([dt * i for i in range(n_spectra)]) if dt is not None else None
# # idx_add = 0
# group_idx = n_items_before
# blank_used = False
#
# # load explicit times
# times_fpath = os.path.join(dir_name, times_fname)
# if os.path.isfile(times_fpath):
# tw.import_files(times_fpath)
# # idx_add += 1
# if times is None:
# times = root[-1].data[:, 0].copy()
# if corr_to_zero_time:
# times -= times[0]
#
# # push times variable to the console
# UserNamespace.instance.main.console.push_variables(
# {
# 'times': times
# }
# )
#
# if times is not None:
# root[group_idx].set_names(times)
#
# # load blank spectrum if available
# blank_fpath = os.path.join(dir_name, blank_spectrum)
# if os.path.isfile(blank_fpath):
# last_idx = root.__len__() - 1
# tw.import_files(blank_fpath)
# add_to_list(root[group_idx] - root[last_idx + 1])
# if times is not None:
# root[-1].set_names(times)
# blank_used = True
#
# corr_idx = -1 if blank_used else group_idx
#
# if b_corr is not None:
# root[corr_idx].baseline_correct(*b_corr)
# root[corr_idx].name += 'bcorr'
# if cut is not None:
# root[corr_idx].cut(*cut)
# root[corr_idx].name += 'cut'
#
# # return times
#
def _setup_wavenumber_axis(ax, x_label=WN_LABEL,
x_major_locator=None, x_minor_locator=AutoMinorLocator(5), factor=1e3):
secondary_ax = ax.secondary_xaxis('top', functions=(lambda x: factor / x, lambda x: 1 / (factor * x)))
secondary_ax.tick_params(which='major', direction='in')
secondary_ax.tick_params(which='minor', direction='in')
if x_major_locator:
secondary_ax.xaxis.set_major_locator(x_major_locator)
if x_minor_locator:
secondary_ax.xaxis.set_minor_locator(x_minor_locator)
secondary_ax.set_xlabel(x_label)
return secondary_ax
def _set_main_axis(ax, x_label=WL_LABEL, y_label="Absorbance", xlim=(None, None), ylim=(None, None),
x_major_locator=None, x_minor_locator=None, y_major_locator=None, y_minor_locator=None,
direction='in'):
ax.set_ylabel(y_label)
ax.set_xlabel(x_label)
if xlim[0] is not None:
ax.set_xlim(xlim)
if ylim[0] is not None:
ax.set_ylim(ylim)
if x_major_locator:
ax.xaxis.set_major_locator(x_major_locator)
if x_minor_locator:
ax.xaxis.set_minor_locator(x_minor_locator)
if y_major_locator:
ax.yaxis.set_major_locator(y_major_locator)
if y_minor_locator:
ax.yaxis.set_minor_locator(y_minor_locator)
ax.tick_params(axis='both', which='major', direction=direction)
ax.tick_params(axis='both', which='minor', direction=direction)
def setup_twin_x_axis(ax, y_label="$I_{0,\\mathrm{m}}$ / $10^{-10}$ einstein s$^{-1}$ nm$^{-1}$",
x_label=None, ylim=(None, None), y_major_locator=None, y_minor_locator=None,
keep_zero_aligned=True):
ax2 = ax.twinx()
ax2.tick_params(which='major', direction='in')
ax2.tick_params(which='minor', direction='in')
if y_major_locator:
ax2.yaxis.set_major_locator(y_major_locator)
if y_minor_locator:
ax2.yaxis.set_minor_locator(y_minor_locator)
ax2.set_ylabel(y_label)
if keep_zero_aligned and ylim[0] is None and ylim[1] is not None:
# a = bx/(x-1)
ax1_ylim = ax.get_ylim()
        x = -ax1_ylim[0] / (ax1_ylim[1] - ax1_ylim[0])  # fractional position of zero on ax1, from 0 to 1
a = ylim[1] * x / (x - 1) # calculates the ylim[0] so that zero position is the same for both axes
ax2.set_ylim(a, ylim[1])
elif ylim[0] is not None:
ax2.set_ylim(ylim)
return ax2
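# --- Hedged derivation note for keep_zero_aligned above (added for exposition) ---
# x is the fractional position of y = 0 on the primary axis, x = -y0 / (y1 - y0).
# Requiring zero to sit at the same fraction on the twin axis with limits (a, b)
# means -a / (b - a) = x; solving for a gives a = b*x / (x - 1), which is the
# formula behind the terse "# a = bx/(x-1)" comment in the function body.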
def plot_kinetics(kin_group_items: list, n_rows: int = None, n_cols: int = None, n_spectra=50, linscale=1,
linthresh=100, cmap='jet_r', major_ticks_labels=(100, 1000), emph_t=(0, 200, 1000),
inset_loc=(0.75, 0.1, 0.03, 0.8), colorbar_label='Time / s', lw=0.5, alpha=0.5,
fig_size_one_graph=(5, 4), y_label='Absorbance', x_label=WL_LABEL, x_lim=(230, 600), filepath=None,
dpi=500, transparent=True, LED_sources: list = None):
kin_group_items = kin_group_items if isinstance(kin_group_items, list) else [kin_group_items]
n = len(kin_group_items) # number of EEMs to plot
if LED_sources is not None:
LED_sources = LED_sources if isinstance(LED_sources, list) else [LED_sources]
if len(LED_sources) == 1 and n > 1:
LED_sources = LED_sources * n
assert len(LED_sources) == n, "Number of provided LEDs must be the same as spectra"
else:
LED_sources = [None] * n
if n_rows is None and n_cols is None: # estimate the n_rows and n_cols from the sqrt of number of graphs
sqrt = n ** 0.5
n_rows = int(np.ceil(sqrt))
n_cols = int(sqrt)
elif n_rows is None and n_cols is not None:
n_rows = int(np.ceil(n / n_cols))
elif n_rows is not None and n_cols is None:
n_cols = int(np.ceil(n / n_rows))
# assert n_rows * n_cols >= n # not necessary, if the condition is not valid, fewer plots will be plotted
fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_size_one_graph[0] * n_cols, fig_size_one_graph[1] * n_rows))
axes = axes.flatten() if np.iterable(axes) else [axes]
for ax, group, LED_source in zip(axes, kin_group_items, LED_sources):
t = np.asarray(group.get_names(), dtype=np.float64)
w = group[0].data[:, 0]
_set_main_axis(ax, x_label=x_label, y_label=y_label, xlim=x_lim, x_minor_locator=None, y_minor_locator=None)
_ = _setup_wavenumber_axis(ax)
cmap = cm.get_cmap(cmap)
norm = mpl.colors.SymLogNorm(vmin=t[0], vmax=t[-1], linscale=linscale, linthresh=linthresh, base=10, clip=True)
tsb_idxs = fi(t, emph_t)
ts_real = np.round(t[tsb_idxs])
x_space = np.linspace(0, 1, n_spectra, endpoint=True, dtype=np.float64)
t_idx_space = fi(t, norm.inverse(x_space))
t_idx_space = np.sort(np.asarray(list(set(t_idx_space).union(set(tsb_idxs)))))
for i in t_idx_space:
x_real = norm(t[i])
x_real = 0 if np.ma.is_masked(x_real) else x_real
ax.plot(w, group[i].data[:, 1], color=cmap(x_real),
lw=1.5 if i in tsb_idxs else lw,
alpha=1 if i in tsb_idxs else alpha,
zorder=1 if i in tsb_idxs else 0)
cbaxes = ax.inset_axes(inset_loc)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
cbar = plt.colorbar(sm, cax=cbaxes, orientation='vertical',
format=mpl.ticker.ScalarFormatter(),
label=colorbar_label)
cbaxes.invert_yaxis()
minor_ticks = [10, 20, 30, 40, 50, 60, 70, 80, 90, 200, 300, 400, 500, 600, 700, 800, 900] + list(
np.arange(2e3, t[-1], 1e3))
cbaxes.yaxis.set_ticks(cbar._locate(minor_ticks), minor=True)
major_ticks = np.sort(np.hstack((np.asarray([100, 1000]), ts_real)))
major_ticks_labels = np.sort(np.hstack((np.asarray(major_ticks_labels), ts_real)))
cbaxes.yaxis.set_ticks(cbar._locate(major_ticks), minor=False)
cbaxes.set_yticklabels([(f'{num:0.0f}' if num in major_ticks_labels else "") for num in major_ticks])
for ytick, ytick_label, _t in zip(cbaxes.yaxis.get_major_ticks(), cbaxes.get_yticklabels(), major_ticks):
if _t in ts_real:
color = cmap(norm(_t))
ytick_label.set_color(color)
ytick_label.set_fontweight('bold')
ytick.tick2line.set_color(color)
ytick.tick2line.set_markersize(5)
# ytick.tick2line.set_markeredgewidth(2)
if LED_source is not None:
ax_sec = setup_twin_x_axis(ax, ylim=(None, LED_source.y.max() * 3), y_label="", y_major_locator=FixedLocator([]))
ax_sec.fill(LED_source.x, LED_source.y, facecolor='gray', alpha=0.5)
ax_sec.plot(LED_source.x, LED_source.y, color='black', ls='dotted', lw=1)
plt.tight_layout()
if filepath:
plt.savefig(fname=filepath, transparent=transparent, dpi=dpi)
plt.show()
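# A minimal usage sketch for plot_kinetics (illustrative; assumes a kinetic group has already been
# loaded into the tree widget, e.g. via load_kinetics bound in the console setup below, and that
# `kin` is that group; the LED_sources argument is optional):
#
#     plot_kinetics(kin, n_spectra=40, x_lim=(250, 650), filepath='kinetics.png', dpi=300)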
#
# def plot_kinetic_ax(group_item, n_spectra=50, linscale=1, linthresh=100, cmap='jet_r',
# major_ticks_labels=(100, 1000), emph_t=(0, 200, 1000), inset_loc=(0.75, 0.1, 0.03, 0.8),
# colorbar_label='Time / s', lw=0.5, alpha=0.5, fig_size=(5, 4), y_label='Absorbance', x_label=WL_LABEL,
# x_lim=(230, 600), filepath=None, dpi=500, transparent=True, LED_source_xy=(None, None)):
#
# t = np.asarray(group_item.get_names(), dtype=np.float64)
# w = group_item[0].data[:, 0]
#
# fig, ax1 = plt.subplots(1, 1, figsize=fig_size)
#
# _set_main_axis(ax1, x_label=x_label, y_label=y_label, xlim=x_lim, x_minor_locator=None, y_minor_locator=None)
# _ = _setup_wavenumber_axis(ax1)
#
# cmap = cm.get_cmap(cmap)
# norm = mpl.colors.SymLogNorm(vmin=t[0], vmax=t[-1], linscale=linscale, linthresh=linthresh, base=10, clip=True)
#
# tsb_idxs = fi(t, emph_t)
# ts_real = np.round(t[tsb_idxs])
#
# x_space = np.linspace(0, 1, n_spectra, endpoint=True, dtype=np.float64)
#
# t_idx_space = fi(t, norm.inverse(x_space))
# t_idx_space = np.sort(np.asarray(list(set(t_idx_space).union(set(tsb_idxs)))))
#
# for i in t_idx_space:
# x_real = norm(t[i])
# x_real = 0 if np.ma.is_masked(x_real) else x_real
# ax1.plot(w, group_item[i].data[:, 1], color=cmap(x_real),
# lw=1.5 if i in tsb_idxs else lw,
# alpha=1 if i in tsb_idxs else alpha,
# zorder=1 if i in tsb_idxs else 0)
#
# cbaxes = ax1.inset_axes(inset_loc)
#
# sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# sm.set_array([])
# cbar = plt.colorbar(sm, cax=cbaxes, orientation='vertical',
# format=mpl.ticker.ScalarFormatter(),
# label=colorbar_label)
#
# cbaxes.invert_yaxis()
#
# minor_ticks = [10, 20, 30, 40, 50, 60, 70, 80, 90, 200, 300, 400, 500, 600, 700, 800, 900] + list(
# np.arange(2e3, t[-1], 1e3))
# cbaxes.yaxis.set_ticks(cbar._locate(minor_ticks), minor=True)
#
# major_ticks = np.sort(np.hstack((np.asarray([100, 1000]), ts_real)))
# major_ticks_labels = np.sort(np.hstack((np.asarray(major_ticks_labels), ts_real)))
#
# cbaxes.yaxis.set_ticks(cbar._locate(major_ticks), minor=False)
# cbaxes.set_yticklabels([(f'{num:0.0f}' if num in major_ticks_labels else "") for num in major_ticks])
#
# for ytick, ytick_label, _t in zip(cbaxes.yaxis.get_major_ticks(), cbaxes.get_yticklabels(), major_ticks):
# if _t in ts_real:
# color = cmap(norm(_t))
# ytick_label.set_color(color)
# ytick_label.set_fontweight('bold')
# ytick.tick2line.set_color(color)
# ytick.tick2line.set_markersize(5)
# # ytick.tick2line.set_markeredgewidth(2)
#
# if LED_source_xy[0] is not None and LED_source_xy[1] is not None:
# x_LED, y_LED = LED_source_xy
# ax_sec = setup_twin_x_axis(ax, ylim=(None, y_LED.max() * 3), y_label="", y_major_locator=FixedLocator([]))
# ax_sec.fill(x_LED, y_LED, facecolor='gray', alpha=0.5)
# ax_sec.plot(x_LED, y_LED, color='black', ls='dotted', lw=1)
#
# if filepath:
# ext = os.path.splitext(filepath)[1].lower()[1:]
# plt.savefig(fname=filepath, format=ext, transparent=transparent, dpi=dpi)
#
# plt.show()
def plot_kinetics_no_colorbar(group_item, x_lim=(None, None), y_lim=(None, None), slice_to_plot=slice(0, -1, 5),
x_label='Time / s', y_label='$A$', cmap='jet', darkens_factor_cmap=1, colors=None,
x_major_locator=None, x_minor_locator=None,
y_major_locator=None, y_minor_locator=None,
add_wn_axis=True, lw=1.5, ls='-', plot_zero_line=True,
label_format_fcn=lambda name: name,
legend_loc='best', legend_spacing=0.2, legend_columns=1, legend_column_spacing=2,
legend_entry_prefix='pH = ', legend_entry_postfix='', plot_legend_line=True,
fig_size=(5.5, 4.5),
dpi=500, filepath=None, transparent=True):
fig, ax = plt.subplots(1, 1, figsize=fig_size)
x = group_item[0].data[:, 0]
sel_items = group_item[slice_to_plot]
x_range = (x_lim[0] if x_lim[0] is not None else x[0], x_lim[1] if x_lim[1] is not None else x[-1])
set_main_axis(ax, x_label=x_label, y_label=y_label, xlim=x_range, ylim=y_lim,
x_major_locator=x_major_locator, x_minor_locator=x_minor_locator,
y_major_locator=y_major_locator, y_minor_locator=y_minor_locator)
if add_wn_axis:
_ = setup_wavenumber_axis(ax, x_major_locator=MultipleLocator(0.5))
_cmap = cm.get_cmap(cmap, len(sel_items))
if plot_zero_line:
ax.axhline(0, x_range[0], x_range[1], ls='--', color='black', lw=1)
for i, item in enumerate(sel_items):
if colors is None:
color = np.asarray(c.to_rgb(_cmap(i))) * darkens_factor_cmap
color[color > 1] = 1
else:
color = colors[i % len(colors)]
ax.plot(item.x, item.y, color=color, lw=lw, ls=ls,
label=f'{legend_entry_prefix}{label_format_fcn(item.name)}{legend_entry_postfix}')
l = ax.legend(loc=legend_loc, frameon=False, labelspacing=legend_spacing, ncol=legend_columns,
handlelength=None if plot_legend_line else 0, handletextpad=None if plot_legend_line else 0,
columnspacing=legend_column_spacing)
for i, text in enumerate(l.get_texts()):
# text.set_ha('right')
text.set_color(_cmap(i))
ax.set_axisbelow(False)
ax.yaxis.set_ticks_position('both')
plt.tight_layout()
if filepath:
ext = os.path.splitext(filepath)[1].lower()[1:]
plt.savefig(fname=filepath, format=ext, transparent=transparent, dpi=dpi)
else:
plt.show()
def plot_EEMs(EEM_group_items: list, n_rows: int = None, n_cols: int = None, log_z: bool = False, transform2wavenumber=True,
fig_size_one_graph=(5.5, 4), x_lim=(None, None), y_lim=(None, None), z_lim=(1, None), filepath=None, dpi=500,
transparent=False, show_title=True, cmap='hot_r', z_label='Counts', x_major_locators=(None, None), x_minor_locators=(None, None),
y_major_locators=(None, None), y_minor_locators=(None, None)):
"""This will assume that excitation wavelengths are used as names for individual spectra
x a y lims are in given wavelengths, despite the possible recalculation to wavenumber."""
n = len(EEM_group_items) # number of EEMs to plot
if n_rows is None and n_cols is None: # estimate the n_rows and n_cols from the sqrt of number of graphs
sqrt = n ** 0.5
n_rows = int(np.ceil(sqrt))
n_cols = int(sqrt)
elif n_rows is None and n_cols is not None:
n_rows = int(np.ceil(n / n_cols))
elif n_rows is not None and n_cols is None:
n_cols = int(np.ceil(n / n_rows))
# assert n_rows * n_cols >= n # not necessary, if the condition is not valid, fewer plots will be plotted
fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_size_one_graph[0] * n_cols, fig_size_one_graph[1] * n_rows))
axes = axes.flatten() if np.iterable(axes) else [axes]
t2w = lambda x: 1e3 / x # function that transforms wavelength into 10^4 cm-1
for ax, item in zip(axes, EEM_group_items):
em_wls, ex_wls, mat = group2mat(item) # convert group to matrix and extracts ex. wavelengths
if ex_wls is None:
raise ValueError(f'Excitation wavelengths of {item.name} could not be extracted from spectra names.')
x, y = em_wls, ex_wls
# emission wavelengths limits
xlim0 = x_lim[0] if x_lim[0] is not None else x[0]
xlim1 = x_lim[1] if x_lim[1] is not None else x[-1]
# excitation wavelengths limits
ylim0 = y_lim[0] if y_lim[0] is not None else y[0]
ylim1 = y_lim[1] if y_lim[1] is not None else y[-1]
x_label_down, x_label_top = 'Em. wavelength / nm', 'Em. wavenumber / $10^4$ cm$^{-1}$'
y_label_left, y_label_right = 'Ex. wavelength / nm', 'Ex. wavenumber / $10^4$ cm$^{-1}$'
if transform2wavenumber:
x, y = t2w(x), t2w(y)
xlim0, xlim1 = t2w(xlim0), t2w(xlim1)
ylim0, ylim1 = t2w(ylim0), t2w(ylim1)
# switch the labels
x_label_down, x_label_top = x_label_top, x_label_down
y_label_left, y_label_right = y_label_right, y_label_left
_set_main_axis(ax, xlim=(xlim0, xlim1), ylim=(ylim0, ylim1),
y_label=y_label_left,
x_label=x_label_down, direction='out',
x_major_locator=x_major_locators[0],
y_major_locator=y_major_locators[0],
x_minor_locator=x_minor_locators[0],
y_minor_locator=y_minor_locators[0])
if log_z: # use log of z axis
# mat[mat < 0] = 0
zmin = mat.max() * 1e-3 if z_lim[0] is None else z_lim[0] # 3 orders lower than max as default value
else:
zmin = mat.min() if z_lim[0] is None else z_lim[0] # for linear plot, min as default value
zmax = mat.max() if z_lim[1] is None else z_lim[1]
# add left axis
lambda_ax = ax.secondary_xaxis('top', functions=(t2w, t2w))
lambda_ax.tick_params(which='both', direction='out', zorder=1000)
if x_major_locators[1] is not None:
lambda_ax.xaxis.set_major_locator(x_major_locators[1]) # FixedLocator([500, 600, ...])
if x_minor_locators[1] is not None:
lambda_ax.xaxis.set_minor_locator(x_minor_locators[1])
lambda_ax.set_xlabel(x_label_top)
# add right axis
lambda_ax2 = ax.secondary_yaxis('right', functions=(t2w, t2w))
lambda_ax2.tick_params(which='both', direction='out', zorder=1000)
if y_major_locators[1] is not None:
lambda_ax2.yaxis.set_major_locator(y_major_locators[1]) # MultipleLocator(20)
if y_minor_locators[1] is not None:
lambda_ax2.yaxis.set_minor_locator(y_minor_locators[1]) # AutoMinorLocator(2)
lambda_ax2.set_ylabel(y_label_right)
# norm for z values
norm = mpl.colors.LogNorm(vmin=zmin, vmax=zmax, clip=True) if log_z else mpl.colors.Normalize(vmin=zmin,
vmax=zmax,
clip=True)
_x, _y = np.meshgrid(x, y)
mappable = ax.pcolormesh(_x, _y, mat.T, norm=norm, cmap=cmap, shading='auto')
fig.colorbar(mappable, ax=ax, label=z_label, pad=0.17, format=None if log_z else '%.0e')
if show_title:
ax.set_title(item.name)
# if x_major_formatter:
# ax_data.xaxis.set_major_formatter(x_major_formatter)
# ax_res.xaxis.set_major_formatter(x_major_formatter)
plt.tight_layout()
if filepath:
ext = os.path.splitext(filepath)[1].lower()[1:]
plt.savefig(fname=filepath, format=ext, transparent=transparent, dpi=dpi)
plt.show()
def plot_fit(data_item, fit_item, residuals_item, symlog=False, linscale=1, linthresh=100,
lw_data=0.5, lw_fit=1.5, fig_size_one_graph=(5, 4), y_label='$\\Delta$A', x_label='Time / $\\mu$s',
x_lim=(None, None), t_mul_factor=1, y_lim=(None, None), x_margin=1, y_margin=1.05, filepath=None, dpi=500,
transparent=False, x_major_formatter=ScalarFormatter(), x_major_locator=None, y_major_locator=None,
data_color='red', show_title=True):
plot_fits([data_item], [fit_item], [residuals_item], n_rows=1, n_cols=1, symlog=symlog, linscale=linscale,
linthresh=linthresh, lw_data=lw_data, lw_fit=lw_fit, fig_size_one_graph=fig_size_one_graph,
y_label=y_label, x_label=x_label, x_lim=x_lim, t_mul_factor=t_mul_factor, y_lim=y_lim, x_margin=x_margin,
y_margin=y_margin, filepath=filepath, dpi=dpi, transparent=transparent,
x_major_formatter=x_major_formatter, x_major_locator=x_major_locator, y_major_locator=y_major_locator,
data_color=data_color, show_title=show_title)
def plot_fits(data_group, fit_group, residuals_group, n_rows=None, n_cols=None, symlog=False, linscale=1, linthresh=100,
lw_data=0.5, lw_fit=1.5, fig_size_one_graph=(5, 4), y_label='$\\Delta$A', x_label='Time / $\\mu$s',
x_lim=(None, None), t_mul_factor=1, y_lim=(None, None), x_margin=1, y_margin=1.05, filepath=None, dpi=500,
transparent=False, x_major_formatter=ScalarFormatter(), x_major_locator=None, y_major_locator=None,
data_color='red', show_title=True):
n = len(data_group)
assert n == len(fit_group) == len(residuals_group)
if n_rows is None and n_cols is None: # estimate the n_rows and n_cols from the sqrt of number of graphs
sqrt = n ** 0.5
n_rows = int(np.ceil(sqrt))
n_cols = int(sqrt)
elif n_rows is None and n_cols is not None:
n_rows = int(np.ceil(n / n_cols))
elif n_rows is not None and n_cols is None:
n_cols = int(np.ceil(n / n_rows))
# assert n_rows * n_cols >= n # not necessary, if the condition is not valid, fewer plots will be plotted
fig = plt.figure(figsize=(fig_size_one_graph[0] * n_cols, fig_size_one_graph[1] * n_rows))
outer_grid = gridspec.GridSpec(n_rows, n_cols, wspace=0.25, hspace=0.3)
for og, data, fit, res in zip(outer_grid, data_group, fit_group, residuals_group):
# nice tutorial about gridspec here https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.gridspec.GridSpecFromSubplotSpec.html
# each unit consist of two graphs - data and residuals
inner_grid = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=og, wspace=0.1, hspace=0.1,
height_ratios=(4, 1))
ax_data = fig.add_subplot(inner_grid[0])
ax_res = fig.add_subplot(inner_grid[1])
t_data = data.data[:, 0] * t_mul_factor
_x_lim = list(x_lim)
_y_lim = list(y_lim)
_x_lim[0] = data.data[0, 0] * x_margin * t_mul_factor if _x_lim[0] is None else _x_lim[0]
_x_lim[1] = data.data[-1, 0] * x_margin * t_mul_factor if _x_lim[1] is None else _x_lim[1]
_y_lim[0] = data.data[:, 1].min() * y_margin if _y_lim[0] is None else _y_lim[0]
_y_lim[1] = data.data[:, 1].max() * y_margin if _y_lim[1] is None else _y_lim[1]
_set_main_axis(ax_data, x_label="", y_label=y_label, xlim=_x_lim, ylim=_y_lim, x_major_locator=x_major_locator,
y_major_locator=y_major_locator)
_set_main_axis(ax_res, x_label=x_label, y_label='res.', xlim=_x_lim, x_minor_locator=None, y_minor_locator=None)
# plot zero lines
ax_data.axline((0, 0), slope=0, ls='--', color='black', lw=0.5)
ax_res.axline((0, 0), slope=0, ls='--', color='black', lw=0.5)
ax_data.tick_params(labelbottom=False)
if show_title:
ax_data.set_title(data.name)
ax_data.plot(t_data, data.data[:, 1], lw=lw_data, color=data_color)
ax_data.plot(fit.data[:, 0] * t_mul_factor, fit.data[:, 1], lw=lw_fit, color='black')
ax_res.plot(res.data[:, 0] * t_mul_factor, res.data[:, 1], lw=lw_data, color=data_color)
ax_data.set_axisbelow(False)
ax_res.set_axisbelow(False)
ax_data.yaxis.set_ticks_position('both')
ax_data.xaxis.set_ticks_position('both')
ax_res.yaxis.set_ticks_position('both')
ax_res.xaxis.set_ticks_position('both')
if symlog:
ax_data.set_xscale('symlog', subs=[2, 3, 4, 5, 6, 7, 8, 9], linscale=linscale, linthresh=linthresh)
ax_res.set_xscale('symlog', subs=[2, 3, 4, 5, 6, 7, 8, 9], linscale=linscale, linthresh=linthresh)
ax_data.xaxis.set_minor_locator(MinorSymLogLocator(linthresh))
ax_res.xaxis.set_minor_locator(MinorSymLogLocator(linthresh))
if x_major_formatter:
ax_data.xaxis.set_major_formatter(x_major_formatter)
ax_res.xaxis.set_major_formatter(x_major_formatter)
if filepath:
ext = os.path.splitext(filepath)[1].lower()[1:]
plt.savefig(fname=filepath, format=ext, transparent=transparent, dpi=dpi)
plt.show()
def save_group(group_item, fname='', delimiter='\t', encoding='utf8'):
"""Data will be saved x-axis explicit"""
x, y, mat = group2mat(group_item)
mat = np.vstack((x, mat.T))
buffer = delimiter + delimiter.join(f"{num}" for num in y) + '\n'
buffer += '\n'.join(delimiter.join(f"{num}" for num in row) for row in mat.T)
with open(fname, 'w', encoding=encoding) as f:
f.write(buffer)
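# Illustration of the layout produced by save_group (a sketch; the column separator is the chosen
# delimiter): the first row holds the spectra names (y), the first column holds the shared x axis,
# so the file is "x-axis explicit", e.g.
#
#            0.0     10.0    20.0     <- names of the individual spectra (here: times)
#   300.0    0.51    0.44    0.39
#   301.0    0.50    0.43    0.38
#   ...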
def reaction_QY_relative(sample_items, actinometer_items, QY_act=1, irradiation_spectrum=None,
irradiation_wavelength=None, integration_range=(None, None), V_solution=1,
conc_calc_range=(None, None), samples_times=None, actinometers_times=None,
c_0_samples=1, c_0_acts=1, integration_method='trapz'):
"""
    TODO.....
    Initial spectra of both the sample and the actinometers must be at time = 0 (i.e. before irradiation).
    If conc_calc_range is (None, None), the whole spectrum is used for the calculation of concentration.
    V_solution is the volume of the irradiated solution.
    The integration method is used both for spectra integration and for the cumulative time integration.
    :param sample_items:
    :param actinometer_items:
    :param irradiation_spectrum:
    :param irradiation_wavelength:
    :param integration_range:
    :param samples_times:
    :param actinometers_times:
    :param integration_method: 'trapz' for the trapezoidal rule or 'simps' for Simpson's rule
:return:
"""
# type checking
if sample_items is None or actinometer_items is None:
raise ValueError("Arguments sample_items and actinometer_items must not be None.")
if not isinstance(sample_items, (list, tuple, SpectrumList)) and \
not isinstance(actinometer_items, (list, tuple, SpectrumList)):
raise ValueError("Arguments sample_items and actinometer_items must be type list, tuple or SpectrumList.")
if isinstance(sample_items, (list, tuple)) and not isinstance(sample_items[0], SpectrumList):
raise ValueError("Entries of sample_items must be type of SpectrumList.")
if isinstance(actinometer_items, (list, tuple)) and not isinstance(actinometer_items[0], SpectrumList):
raise ValueError("Entries of actinometer_items must be type of SpectrumList.")
if irradiation_wavelength is None and irradiation_spectrum is None:
raise ValueError("Argument irradiation_spectrum or irradiation_wavelength must be provided.")
if irradiation_spectrum is not None and irradiation_wavelength is not None:
raise ValueError("Only one argument of irradiation_spectrum or irradiation_wavelength must be provided.")
if irradiation_spectrum is None and not isinstance(irradiation_wavelength, (int, float)):
raise ValueError("Argument irradiation_wavelength must be type of int or float.")
if irradiation_wavelength is None and not isinstance(irradiation_spectrum, (Spectrum, np.ndarray, list)):
raise ValueError("Argument irradiation_spectrum must be type of Spectrum, ndarray or list.")
if not isinstance(integration_range, tuple) or len(integration_range) != 2:
raise ValueError("Argument integration_range must be type of tuple and have length of 2.")
samples = [sample_items] if isinstance(sample_items, SpectrumList) else sample_items
acts = [actinometer_items] if isinstance(actinometer_items, SpectrumList) else actinometer_items
if irradiation_spectrum:
irr_sp_x = None
if isinstance(irradiation_spectrum, Spectrum):
irr_sp_x = irradiation_spectrum.data[:, 0]
irr_sp_y = irradiation_spectrum.data[:, 1]
else:
irr_sp_y = np.asarray(irradiation_spectrum)
x0, x1 = integration_range
if x0 is not None and x1 is not None and x0 > x1:
x0, x1 = x1, x0
def abs_photons(data):
start = 0
end = data.shape[0]
start_sp = 0
end_sp = irr_sp_y.shape[0]
if x0 is not None:
start = fi(data[:, 0], x0)
start_sp = fi(irr_sp_y, x0)
if x1 is not None:
end = fi(data[:, 0], x1) + 1
end_sp = fi(irr_sp_y, x1) + 1
if start - end != start_sp - end_sp:
if irr_sp_x is None:
raise ValueError("Irradiation spectrum and data does not have equal dimension.")
irr = np.interp(data[:, 0], irr_sp_x, irr_sp_y) # interpolate to match data if x vals are provided
else:
irr = irr_sp_y[start_sp:end_sp] # slice the irradiation spectrum to match the data
x = data[start:end, 0]
y = data[start:end, 1]
abs_light = (1 - 10 ** -y) * irr # (1 - 10 ** -A) * I_irr
if integration_method == 'trapz':
return np.trapz(abs_light, x) # integrate using trapezoidal rule
else:
return simps(abs_light, x) # integrate using simpsons rule
else: # only irradiation wavelength
def abs_photons(data):
idx = fi(data[:, 0], irradiation_wavelength)
return 1 - 10 ** -data[idx, 1] # 1 - 10 ** -A
_sample_times = [] if samples_times is None else samples_times
_acs_times = [] if actinometers_times is None else actinometers_times
if samples_times is None:
for sample in samples:
_sample_times = np.asarray([float(sp.name) for sp in sample])
if actinometers_times is None:
for act in acts:
_acs_times = np.asarray([float(sp.name) for sp in act])
_c_0_samples = [c_0_samples] * len(samples) if isinstance(c_0_samples, (int, float)) else c_0_samples
_c_0_acts = [c_0_acts] * len(acts) if isinstance(c_0_acts, (int, float)) else c_0_acts
def cumintegrate(y, x, initial=0):
if integration_method == 'trapz':
return cumtrapz(y, x, initial=initial)
else:
# simpsons rule
raise NotImplementedError() # TODO----->
def calc_c(unknown_sp_data, sp_data_c0, c0=1):
"""
        Calculation of concentration by least squares.
:param unknown_sp_data:
:param sp_data_c0:
:param c0:
:return:
"""
assert unknown_sp_data.shape == sp_data_c0.shape
x0, x1 = conc_calc_range
start = 0
end = sp_data_c0.shape[0]
if x0 is not None:
start = fi(sp_data_c0[:, 0], x0)
if x1 is not None:
end = fi(sp_data_c0[:, 0], x1) + 1
# for min || xA - B ||_2^2 for scalar x, x = sum(A*B) / sum(A*A)
a = sp_data_c0[start:end, 1]
b = unknown_sp_data[start:end, 1]
return c0 * (a * b).sum() / (a * a).sum()
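    # Derivation for the scalar least-squares step in calc_c above (comment only): minimizing
    # ||x*a - b||_2^2 over the scalar x gives d/dx sum((x*a_i - b_i)^2) = 2*sum(a_i*(x*a_i - b_i)) = 0,
    # hence x = sum(a_i*b_i) / sum(a_i*a_i). The concentration is then c = c0 * x, assuming the spectra
    # scale linearly with concentration (Beer-Lambert).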
results_sample = SpectrumList(name='Results of samples')
results_act = SpectrumList(name='Results of actinometers')
def calculate_line(sl, times, c0):
# calculate the amount of absorbed photons
qs = np.asarray([abs_photons(sp.data) for sp in sl])
# calculate the time-dependent concentration
c0s = np.asarray([calc_c(sp.data, sl[0].data, c0) for sp in sl])
# Delta n = (c(t=0) - c(t)) * V
d_n_dec = (c0s[0] - c0s) * V_solution
# cumulative integration of light absorbed: int_0^t q(t') dt'
np_abs = cumintegrate(qs, times, initial=0)
return Spectrum.from_xy_values(np_abs, d_n_dec, name=f'Result for {sl.name}')
for sample, s_times, c0_sample in zip(samples, _sample_times, _c_0_samples):
results_sample.children.append(calculate_line(sample, s_times, c0_sample))
for act, act_times, c0_act in zip(acts, _acs_times, _c_0_acts):
results_act.children.append(calculate_line(act, act_times, c0_act))
QYs = []
# calculate for each combination of sample and actinometer kinetics
for res_sample in results_sample:
slope_sam, intercept_sam, r_sam, _, err_sam = linregress(res_sample.x, res_sample.y)
for res_act in results_act:
slope_act, intercept_act, r_act, _, err_act = linregress(res_act.x, res_act.y)
# use uncertainty package to automatically propagate errors
# QY is type of ufloat - uncertainties.core.Variable
QY = QY_act * ufloat(slope_sam, err_sam) / ufloat(slope_act, err_act)
QYs.append(QY)
average_QY = sum(QYs) / len(QYs)
add_to_list(results_sample)
add_to_list(results_act)
def bcorr_1D(item, first_der_tresh=1e-4, second_der_tresh=0.1):
"""Gradient based baseline correction"""
x = item.data[:, 0].copy()
y = item.data[:, 1].copy()
grad1 = np.gradient(y, x) # first central derivative
grad2 = np.gradient(grad1, x) # second central derivative
grad1, grad2 = grad1 / grad1.max(), grad2 / grad2.max()
zero_idxs = np.argwhere(
(grad1 < first_der_tresh) & (grad1 > -first_der_tresh) &
(grad2 < second_der_tresh) & (grad2 >= 0)
)
zero_idxs = zero_idxs.squeeze()
baseline = np.interp(x, x[zero_idxs], y[zero_idxs])
sp = Spectrum.from_xy_values(x, baseline, f'{item.name} baseline')
UserNamespace.instance.add_items_to_list(sp)
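# Sketch of how bcorr_1D picks baseline points (comment only): indices where the normalized first
# derivative is close to zero (|y'| < first_der_tresh) and the normalized second derivative is small
# but non-negative are treated as baseline anchors; the baseline is the linear interpolation through
# those anchors. Usage (illustrative): bcorr_1D(item, first_der_tresh=1e-4, second_der_tresh=0.1)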
class UserNamespace:
instance = None
def __init__(self, main):
self.main = main
UserNamespace.instance = self
self.tw = self.main.tree_widget
# execute first commands
self.main.console.execute_command(
"""
import numpy as np
from spectramanipulator.user_namespace import *
# from spectramanipulator.spectrum import fi, group2mat
import matplotlib.pyplot as plt
%matplotlib inline
# setup important methods
add_to_list = UserNamespace.instance.tw.add_to_list
load_kinetic = UserNamespace.instance.tw.load_kinetic
load_kinetics = UserNamespace.instance.tw.load_kinetics
import_files = UserNamespace.instance.tw.import_files
"""
)
# from IPython.display import display, Math, Latex\n
self.main.console.push_variables(
{
'main': self.main,
'tree_widget': self.main.tree_widget,
'item': self.main.tree_widget.myModel.root
}
)
# def add_items_to_list(self, spectra):
# """
# Copies all spectra and import them to the treewidget
# :param spectra: input parameter can be single spectrum object, or hierarchic list of spectra
# """
#
# # self.main.tree_widget.get
#
# if spectra.__class__ == Spectrum:
# self.main.tree_widget.import_spectra([spectra])
# return
#
# if spectra.__class__.__name__ == 'SpectrumItem':
# self.main.tree_widget.import_spectra([spectra.__copy__()])
# return
#
# if isinstance(spectra, list):
# self.main.tree_widget.import_spectra(spectra)
# return
#
# if spectra.__class__.__name__ == 'SpectrumItemGroup' or spectra.__class__.__name__ == 'SpectrumList':
# l = []
# for sp in spectra:
# new_sp = sp.__copy__()
# new_sp.group_name = spectra.name
# l.append(new_sp)
#
# self.main.tree_widget.import_spectra([l])
# return
| 39.391209
| 143
| 0.626513
|
e1aabe1e15b59ed8b6e70688a2cb18114bd67cc3
| 32,714
|
py
|
Python
|
python/tvm/relay/op/nn/_nn.py
|
DongZhaoYu/incubator-tvm
|
e1b053ac07bf6f52ec56713801de5dd040d7f4ac
|
[
"Apache-2.0"
] | 18
|
2017-08-29T02:26:12.000Z
|
2021-12-14T06:13:33.000Z
|
python/tvm/relay/op/nn/_nn.py
|
DongZhaoYu/incubator-tvm
|
e1b053ac07bf6f52ec56713801de5dd040d7f4ac
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/op/nn/_nn.py
|
DongZhaoYu/incubator-tvm
|
e1b053ac07bf6f52ec56713801de5dd040d7f4ac
|
[
"Apache-2.0"
] | 5
|
2018-03-27T01:02:13.000Z
|
2020-12-29T00:32:31.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration"""
from __future__ import absolute_import
from tvm import topi
from tvm.topi.util import get_const_tuple
from tvm.runtime import convert
from tvm.te.hybrid import script
from .. import op as reg
from .. import strategy
from ..op import OpPattern
from .._tensor import elemwise_shape_func
from ..strategy.generic import is_depthwise_conv2d
# relu
reg.register_broadcast_schedule("nn.relu")
reg.register_pattern("nn.relu", OpPattern.ELEMWISE)
# softmax
reg.register_strategy("nn.softmax", strategy.softmax_strategy)
reg.register_pattern("nn.softmax", OpPattern.OPAQUE)
# log_softmax
reg.register_schedule("nn.log_softmax", strategy.schedule_log_softmax)
reg.register_pattern("nn.log_softmax", OpPattern.OPAQUE)
# dense
reg.register_strategy("nn.dense", strategy.dense_strategy)
reg.register_pattern("nn.dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# fifo_buffer
@reg.register_compute("nn.fifo_buffer")
def compute_fifo_buffer(attrs, inputs, out_type):
return [topi.nn.fifo_buffer(inputs[0], inputs[1], axis=attrs.get_int("axis"))]
reg.register_injective_schedule("nn.fifo_buffer")
reg.register_pattern("nn.fifo_buffer", OpPattern.OPAQUE)
# batch_matmul
reg.register_strategy("nn.batch_matmul", strategy.batch_matmul_strategy)
reg.register_pattern("nn.batch_matmul", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_dense
@reg.register_compute("nn.sparse_dense")
def compute_sparse_dense(attrs, inputs, out_type):
"""Compute definition of sparse_dense"""
return [topi.nn.sparse_dense(inputs[0], inputs[1], inputs[2], inputs[3])]
reg.register_strategy("nn.sparse_dense", strategy.sparse_dense_strategy)
reg.register_pattern("nn.sparse_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.sparse_dense")
def alter_op_layout_sparse_dense(attrs, inputs, tinfos, out_type):
"""Alternate the layout of sparse_dense"""
return topi.nn.sparse_dense_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_compute("nn.internal.sparse_dense_padded")
def compute_sparse_dense_padded(attrs, inputs, out_type):
"""Compute definition of sparse_dense_padded"""
raise NotImplementedError("nn.internal.sparse_dense_padded is only available on cuda")
reg.register_strategy("nn.internal.sparse_dense_padded", strategy.sparse_dense_padded_strategy)
reg.register_pattern("nn.internal.sparse_dense_padded", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_transpose
@reg.register_compute("nn.sparse_transpose")
def compute_sparse_transpose(attrs, inputs, out_type):
"""Compute definition of sparse_transpose"""
return topi.nn.sparse_transpose(inputs[0], inputs[1], inputs[2])
reg.register_schedule("nn.sparse_transpose", strategy.schedule_sparse_transpose)
reg.register_pattern("nn.sparse_transpose", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d
reg.register_strategy("nn.conv1d", strategy.conv1d_strategy)
reg.register_pattern("nn.conv1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d
reg.register_strategy("nn.conv2d", strategy.conv2d_strategy)
reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.conv2d")
def alter_op_layout_conv2d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of conv2d"""
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_legalize("nn.conv2d")
def legalize_conv2d(attrs, inputs, types):
"""Legalize conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d")
def convert_conv2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, weight = inputs
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv2d(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
return relay.nn.conv2d(data, weight, **new_attrs)
elif desired_data_layout == "NHWC":
# Check for depthwise convolution.
data_info, weight_info = tinfos
if is_depthwise_conv2d(
data_info.shape,
attrs["data_layout"],
weight_info.shape,
attrs["kernel_layout"],
attrs["groups"],
):
new_attrs["kernel_layout"] = "HWOI"
else:
new_attrs["kernel_layout"] = "HWIO"
return relay.nn.conv2d(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv2d_transpose
reg.register_strategy("nn.conv2d_transpose", strategy.conv2d_transpose_strategy)
reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.conv2d_transpose")
def legalize_conv2d_transpose(attrs, inputs, types):
"""Legalize conv2d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_transpose_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d_transpose")
def convert_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv2d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, weight = inputs
new_attrs = dict(attrs)
    assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d_transpose's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
elif desired_data_layout == "NHWC":
new_attrs["kernel_layout"] = "HWIO"
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv3d_transpose
reg.register_strategy("nn.conv3d_transpose", strategy.conv3d_transpose_strategy)
reg.register_pattern("nn.conv3d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.conv3d_transpose")
def legalize_conv3d_transpose(attrs, inputs, types):
"""Legalize conv3d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv3d_transpose_legalize(attrs, inputs, types)
# conv3d
reg.register_strategy("nn.conv3d", strategy.conv3d_strategy)
reg.register_pattern("nn.conv3d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.conv3d")
def alter_op_layout_conv3d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of conv3d"""
return topi.nn.conv3d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_convert_op_layout("nn.conv3d")
def convert_conv3d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv3d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, weight = inputs
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv3d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv3d(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCDHW":
new_attrs["kernel_layout"] = "OIDHW"
return relay.nn.conv3d(data, weight, **new_attrs)
elif desired_data_layout == "NDHWC":
new_attrs["kernel_layout"] = "DHWIO"
return relay.nn.conv3d(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported" % desired_data_layout)
# conv3d_winograd related operators
reg.register_strategy(
"nn.contrib_conv3d_winograd_without_weight_transform",
strategy.conv3d_winograd_without_weight_transfrom_strategy,
)
reg.register_pattern(
"nn.contrib_conv3d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
@reg.register_compute("nn.contrib_conv3d_winograd_weight_transform")
def compute_contrib_conv3d_winograd_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv3d_winograd_weight_transform"""
out = topi.nn.conv3d_winograd_weight_transform(inputs[0], attrs.get_int("tile_size"))
return [out]
reg.register_schedule(
"nn.contrib_conv3d_winograd_weight_transform",
strategy.schedule_conv3d_winograd_weight_transform,
)
reg.register_pattern("nn.contrib_conv3d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d_transpose
reg.register_strategy("nn.conv1d_transpose", strategy.conv1d_transpose_strategy)
reg.register_pattern("nn.conv1d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
# bias_add
reg.register_injective_schedule("nn.bias_add")
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)
# max_pool1d
reg.register_schedule("nn.max_pool1d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d
reg.register_schedule("nn.max_pool2d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool3d
reg.register_schedule("nn.max_pool3d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool1d
reg.register_schedule("nn.avg_pool1d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d
reg.register_schedule("nn.avg_pool2d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool3d
reg.register_schedule("nn.avg_pool3d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d_grad
reg.register_schedule("nn.max_pool2d_grad", strategy.schedule_pool_grad)
reg.register_pattern("nn.max_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d_grad
reg.register_schedule("nn.avg_pool2d_grad", strategy.schedule_pool_grad)
reg.register_pattern("nn.avg_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_max_pool2d
reg.register_schedule("nn.global_max_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.global_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_avg_pool2d
reg.register_schedule("nn.global_avg_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool2d
reg.register_schedule("nn.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool2d
reg.register_schedule("nn.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool3d
reg.register_schedule("nn.adaptive_max_pool3d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool3d
reg.register_schedule("nn.adaptive_avg_pool3d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# leaky_relu
reg.register_broadcast_schedule("nn.leaky_relu")
reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
# prelu
reg.register_broadcast_schedule("nn.prelu")
reg.register_pattern("nn.prelu", OpPattern.BROADCAST)
# flatten
reg.register_broadcast_schedule("nn.batch_flatten")
reg.register_pattern("nn.batch_flatten", OpPattern.INJECTIVE)
# lrn
@reg.register_compute("nn.lrn")
def compute_lrn(attrs, inputs, out_dtype):
"""Compute definition of lrn"""
assert len(inputs) == 1
return [topi.nn.lrn(inputs[0], attrs.size, attrs.axis, attrs.alpha, attrs.beta, attrs.bias)]
reg.register_schedule("nn.lrn", strategy.schedule_lrn)
reg.register_pattern("nn.lrn", OpPattern.OPAQUE)
# upsampling
@reg.register_compute("nn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype):
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
align_corners = attrs.align_corners
return [topi.nn.upsampling(inputs[0], scale_h, scale_w, layout, method, align_corners)]
reg.register_injective_schedule("nn.upsampling")
# upsampling3d
@reg.register_compute("nn.upsampling3d")
def compute_upsampling3d(attrs, inputs, out_dtype):
scale_d = attrs.scale_d
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
coordinate_transformation_mode = attrs.coordinate_transformation_mode
return [
topi.nn.upsampling3d(
inputs[0], scale_d, scale_h, scale_w, layout, method, coordinate_transformation_mode
)
]
reg.register_injective_schedule("nn.upsampling3d")
# pad
reg.register_broadcast_schedule("nn.pad")
# mirror_pad
@reg.register_compute("nn.mirror_pad")
def compute_mirror_pad(attrs, inputs, out_dtype):
pad_before, pad_after = list(zip(*attrs.pad_width))
mode = attrs.mode
out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode)
return [out]
reg.register_broadcast_schedule("nn.mirror_pad")
@script
def _mirror_pad_func(data_shape, pad_width):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
out[i] = data_shape[i] + int64(pad_width[i][0]) + int64(pad_width[i][1])
return out
@reg.register_shape_func("nn.mirror_pad", False)
def mirror_pad_func(attrs, inputs, _):
pad_width_tuple = [get_const_tuple(p) for p in attrs.pad_width]
return [_mirror_pad_func(inputs[0], convert(pad_width_tuple))]
# conv2d_winograd related operators
reg.register_strategy(
"nn.contrib_conv2d_winograd_without_weight_transform",
strategy.conv2d_winograd_without_weight_transfrom_strategy,
)
reg.register_pattern(
"nn.contrib_conv2d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
# conv2d_gemm related operators
reg.register_strategy(
"nn.contrib_conv2d_gemm_without_weight_transform",
strategy.conv2d_gemm_without_weight_transform_strategy,
)
reg.register_pattern(
"nn.contrib_conv2d_gemm_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
@reg.register_compute("nn.contrib_conv2d_gemm_weight_transform")
def compute_contrib_conv2d_gemm_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_gemm_weight_transform"""
out = topi.nn.conv2d_gemm_weight_transform(inputs[0], attrs.tile_rows, attrs.tile_cols)
return [out]
reg.register_schedule(
"nn.contrib_conv2d_gemm_weight_transform", strategy.schedule_conv2d_gemm_weight_transform
)
reg.register_pattern("nn.contrib_conv2d_gemm_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_weight_transform")
def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_winograd_weight_transform"""
out = topi.nn.conv2d_winograd_weight_transform(inputs[0], attrs.get_int("tile_size"))
return [out]
reg.register_schedule(
"nn.contrib_conv2d_winograd_weight_transform",
strategy.schedule_conv2d_winograd_weight_transform,
)
reg.register_pattern("nn.contrib_conv2d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_winograd_nnpack_weight_transform"""
convolution_algorithm = attrs.get_int("convolution_algorithm")
out = topi.nn.conv2d_winograd_nnpack_weight_transform(
inputs[0], convolution_algorithm, out_dtype
)
return [out]
reg.register_schedule(
"nn.contrib_conv2d_winograd_nnpack_weight_transform",
strategy.schedule_conv2d_winograd_nnpack_weight_transform,
)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_weight_transform", OpPattern.OPAQUE)
# conv2d_NCHWc
reg.register_strategy("nn.contrib_conv2d_NCHWc", strategy.conv2d_NCHWc_strategy)
reg.register_pattern("nn.contrib_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE)
# depthwise_conv2d_NCHWc
reg.register_strategy("nn.contrib_depthwise_conv2d_NCHWc", strategy.depthwise_conv2d_NCHWc_strategy)
reg.register_pattern("nn.contrib_depthwise_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE)
# deformable_conv2d
reg.register_strategy("nn.deformable_conv2d", strategy.deformable_conv2d_strategy)
reg.register_pattern("nn.deformable_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# bitpack
@reg.register_compute("nn.bitpack")
def compute_bitpack(attrs, inputs, out_dtype):
"""Compute definition for bitpack"""
bits = attrs.bits
pack_axis = attrs.pack_axis
bit_axis = attrs.bit_axis
pack_type = attrs.pack_type
name = attrs.name
out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type, name)
return [out]
reg.register_schedule("nn.bitpack", strategy.schedule_bitpack)
reg.register_pattern("nn.bitpack", OpPattern.INJECTIVE)
# bitserial_conv2d
reg.register_strategy("nn.bitserial_conv2d", strategy.bitserial_conv2d_strategy)
reg.register_pattern("nn.bitserial_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.bitserial_conv2d")
def legalize_bitserial_conv2d(attrs, inputs, types):
"""Legalize bitserial_conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.bitserial_conv2d_legalize(attrs, inputs, types)
# bitserial_dense
reg.register_strategy("nn.bitserial_dense", strategy.bitserial_dense_strategy)
reg.register_pattern("nn.bitserial_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# cross_entropy
@reg.register_compute("nn.cross_entropy")
def compute_cross_entropy(attrs, inputs, out_dtype):
x, y = inputs
return [-topi.sum(topi.log(x) * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy")
reg.register_pattern("nn.cross_entropy", OpPattern.OPAQUE)
# dilate
@reg.register_compute("nn.dilate")
def compute_dilate(attrs, inputs, out_dtype):
return [topi.nn.dilate(inputs[0], attrs.strides, attrs.dilation_value)]
reg.register_broadcast_schedule("nn.dilate")
reg.register_pattern("nn.dilate", OpPattern.INJECTIVE)
# cross_entropy_with_logits
@reg.register_compute("nn.cross_entropy_with_logits")
def compute_cross_entropy_with_logits(attrs, inputs, out_dtype):
x, y = inputs
return [-topi.sum(x * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy_with_logits")
reg.register_pattern("nn.cross_entropy_with_logits", OpPattern.OPAQUE)
# depth_to_space
@reg.register_compute("nn.depth_to_space")
def compute_depth_to_space(attrs, inputs, out_dtype):
block_size = attrs.block_size
layout = attrs.layout
mode = attrs.mode
return [topi.nn.depth_to_space(inputs[0], block_size, layout=layout, mode=mode)]
reg.register_injective_schedule("nn.depth_to_space")
reg.register_pattern("nn.depth_to_space", OpPattern.INJECTIVE)
# space_to_depth
@reg.register_compute("nn.space_to_depth")
def compute_space_to_depth(attrs, inputs, out_dtype):
block_size = attrs.block_size
layout = attrs.layout
return [topi.nn.space_to_depth(inputs[0], block_size, layout=layout)]
reg.register_injective_schedule("nn.space_to_depth")
reg.register_pattern("nn.space_to_depth", OpPattern.INJECTIVE)
# correlation
reg.register_strategy("nn.correlation", strategy.correlation_strategy)
reg.register_pattern("nn.correlation", OpPattern.OUT_ELEMWISE_FUSABLE)
#####################
# Shape functions #
#####################
@script
def _conv_shape_func(dshape, kshape, strides, padding, dilation):
out = output_tensor((dshape.shape[0],), "int64")
out[0] = dshape[0]
out[1] = kshape[0]
for i in const_range(dshape.shape[0] - 2):
dilated_k = (kshape[i + 2] - 1) * dilation[i] + 1
out[i + 2] = (dshape[i + 2] + 2 * padding[i] - dilated_k) // strides[i] + 1
return out
def conv_shape_func(attrs, inputs, _):
"""
    Shape function for conv1d, conv2d and conv3d ops.
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
return [
_conv_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
)
]
reg.register_shape_func("nn.conv1d", False, conv_shape_func)
reg.register_shape_func("nn.conv2d", False, conv_shape_func)
reg.register_shape_func("nn.conv3d", False, conv_shape_func)
@script
def _conv2d_NCHWc_shape_func(dshape, kshape, strides, padding, dilation, oc_bn):
out = output_tensor((dshape.shape[0],), "int64")
ic_chunk = dshape[1]
height = dshape[2]
width = dshape[3]
ic_bn = dshape[4]
kheight = kshape[2]
kwidth = kshape[3]
dilated_kh = (kheight - 1) * dilation[0] + 1
dilated_kw = (kwidth - 1) * dilation[1] + 1
kflatten = int64(1)
for i in const_range(kshape.shape[0]):
kflatten *= kshape[i]
oc = kflatten // (kheight * kwidth * ic_chunk * ic_bn)
oc_chunk = oc // oc_bn
out_height = (height + 2 * padding[0] - dilated_kh) // strides[0] + 1
out_width = (width + 2 * padding[1] - dilated_kw) // strides[1] + 1
out[0] = dshape[0]
out[1] = oc_chunk
out[2] = out_height
out[3] = out_width
out[4] = int64(oc_bn)
return out
@reg.register_shape_func("nn.contrib_conv2d_NCHWc", False)
def conv2d_NCHWc_shape_func(attrs, inputs, _):
"""
Shape function for contrib_conv2d_NCHWc op.
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_layout = attrs.out_layout
oc_bn = int(out_layout[4:-1])
return [
_conv2d_NCHWc_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
convert(oc_bn),
)
]
@script
def _conv2d_transpose_nchw_shape_func(dshape, kshape, strides, padding, dilation, output_padding):
out = output_tensor((dshape.shape[0],), "int64")
kheight = kshape[2]
kwidth = kshape[3]
dilated_kh = (kheight - 1) * dilation[0] + 1
dilated_kw = (kwidth - 1) * dilation[1] + 1
out_height = strides[0] * (dshape[2] - 1) + dilated_kh - 2 * padding[0] + output_padding[0]
out_width = strides[1] * (dshape[3] - 1) + dilated_kw - 2 * padding[1] + output_padding[1]
out[0] = dshape[0]
out[1] = kshape[1]
out[2] = out_height
out[3] = out_width
return out
@reg.register_shape_func("nn.conv2d_transpose", False)
def conv2d_transpose_nchw_shape_func(attrs, inputs, _):
"""
Shape function for conv2d_transpose op.
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
output_padding = get_const_tuple(attrs.output_padding)
return [
_conv2d_transpose_nchw_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
convert(output_padding),
)
]
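# Worked example of the transposed-convolution output size used above (illustrative):
# out = s*(d - 1) + ((k - 1)*dil + 1) - 2*p + output_padding.
def _demo_conv2d_transpose_out_hw(in_hw=(14, 14), k_hw=(4, 4), strides=(2, 2),
                                  padding=(1, 1), dilation=(1, 1), output_padding=(0, 0)):
    """Hypothetical helper mirroring _conv2d_transpose_nchw_shape_func for H and W only."""
    out = []
    for i in range(2):
        dilated_k = (k_hw[i] - 1) * dilation[i] + 1
        out.append(strides[i] * (in_hw[i] - 1) + dilated_k - 2 * padding[i] + output_padding[i])
    return tuple(out)  # (28, 28) for the defaults above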
@script
def _pool2d_shape_func(data_shape, pool_size, strides, padding, height_axis, width_axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
if i == height_axis:
out[i] = (data_shape[i] + padding[0] + padding[2] - pool_size[0]) // strides[0] + 1
elif i == width_axis:
out[i] = (data_shape[i] + padding[1] + padding[3] - pool_size[1]) // strides[1] + 1
else:
out[i] = data_shape[i]
return out
def pool2d_shape_func(attrs, inputs, _):
"""
Shape function for pool2d op.
"""
pool_size = get_const_tuple(attrs.pool_size)
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
layout = attrs.layout
height_axis = layout.index("H")
width_axis = layout.index("W")
if len(padding) == 1:
padding = [padding[0]] * 4
elif len(padding) == 2:
padding = [padding[0], padding[1], padding[0], padding[1]]
return [
_pool2d_shape_func(
inputs[0],
convert(pool_size),
convert(strides),
convert(padding),
convert(height_axis),
convert(width_axis),
)
]
reg.register_shape_func("nn.max_pool2d", False, pool2d_shape_func)
reg.register_shape_func("nn.avg_pool2d", False, pool2d_shape_func)
@script
def _global_pool2d_shape_func(data_shape, height_axis, width_axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
if i == height_axis or i == width_axis:
out[i] = int64(1)
else:
out[i] = data_shape[i]
return out
def global_pool2d_shape_func(attrs, inputs, _):
"""
Shape function for global pool2d op.
"""
layout = attrs.layout
height_axis = width_axis = 1
for i, letter in enumerate(layout):
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
return [_global_pool2d_shape_func(inputs[0], convert(height_axis), convert(width_axis))]
reg.register_shape_func("nn.global_max_pool2d", False, global_pool2d_shape_func)
reg.register_shape_func("nn.global_avg_pool2d", False, global_pool2d_shape_func)
@script
def _batch_flatten_shape_func(data_shape):
out = output_tensor((2,), "int64")
out[0] = data_shape[0]
out[1] = int64(1)
for i in const_range(data_shape.shape[0] - 1):
out[1] *= data_shape[i + 1]
return out
@reg.register_shape_func("nn.batch_flatten", False)
def batch_flatten_shape_func(attrs, inputs, _):
"""
Shape function for batch_flatten op.
"""
return [_batch_flatten_shape_func(inputs[0])]
@script
def _dense_shape_func(data_shape, weight_shape):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0] - 1):
out[i] = data_shape[i]
out[out.shape[0] - 1] = weight_shape[0]
return out
@reg.register_shape_func("nn.dense", False)
def dense_shape_func(attrs, inputs, _):
"""
Shape function for dense op.
"""
ret = [_dense_shape_func(inputs[0], inputs[1])]
return ret
@script
def _batch_matmul_shape_func(data_shape, weight_shape):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0] - 1):
if i == 0:
out[i] = max(data_shape[i], weight_shape[i])
else:
out[i] = data_shape[i]
out[out.shape[0] - 1] = weight_shape[weight_shape.shape[0] - 2]
return out
@reg.register_shape_func("nn.batch_matmul", False)
def batch_matmul_shape_func(attrs, inputs, _):
"""
    Shape function for batch_matmul op.
"""
ret = [_batch_matmul_shape_func(inputs[0], inputs[1])]
return ret
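# Note on the batch_matmul shape above (comment sketch): topi/relay batch_matmul computes
# C[b, m, n] = sum_k A[b, m, k] * B[b, n, k], i.e. the second operand is transposed on its last two
# axes, which is why the last output dim is weight_shape[-2] and the batch dim is broadcast as
# max(data_shape[0], weight_shape[0]).
# Example (illustrative): A with shape (8, 32, 64) and B with shape (8, 16, 64) -> output (8, 32, 16).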
@script
def _pad_shape_func(data_shape, pad_width):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
out[i] = data_shape[i] + pad_width[i][0] + pad_width[i][1]
return out
@reg.register_shape_func("nn.pad", False)
def pad_shape_func(attrs, inputs, _):
"""
Shape function for pad op.
"""
pad_width = []
for pair in attrs.pad_width:
pad_width.append(get_const_tuple(pair))
return [_pad_shape_func(inputs[0], convert(pad_width))]
@script
def _dilate_shape_func(data_shape, strides):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
out[i] = (data_shape[i] - 1) * strides[i] + 1
return out
@reg.register_shape_func("nn.dilate", False)
def dilate_shape_func(attrs, inputs, _):
"""
Shape function for dilate op.
"""
return [_dilate_shape_func(inputs[0], convert(attrs.strides))]
reg.register_shape_func("nn.bias_add", False, elemwise_shape_func)
reg.register_shape_func("nn.softmax", False, elemwise_shape_func)
reg.register_shape_func("nn.relu", False, elemwise_shape_func)
avg_line_length: 31.365292 | max_line_length: 102 | alphanum_fraction: 0.728312

hexsha: 14e4f9f055bb7864f0f1294cb525fa19e87277d7
size: 274929
ext: py
lang: Python
max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: src/azure-cli/azure/cli/command_modules/network/_help.py
max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: scottsauber/azure-cli
max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: 29767d75d850ddc1c24cc85bd46d861b61d77a47
max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: ["MIT"]
max_stars_count / max_issues_count / max_forks_count: null
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['network'] = """
type: group
short-summary: Manage Azure Network resources.
"""
helps['network application-gateway'] = """
type: group
short-summary: Manage application-level routing and load balancing services.
long-summary: To learn more about Application Gateway, visit https://docs.microsoft.com/azure/application-gateway/application-gateway-create-gateway-cli
"""
helps['network application-gateway address-pool'] = """
type: group
short-summary: Manage address pools of an application gateway.
"""
helps['network application-gateway address-pool create'] = """
type: command
short-summary: Create an address pool.
examples:
- name: Create an address pool with two endpoints.
text: |
az network application-gateway address-pool create -g MyResourceGroup \\
--gateway-name MyAppGateway -n MyAddressPool --servers 10.0.0.4 10.0.0.5
"""
helps['network application-gateway address-pool delete'] = """
type: command
short-summary: Delete an address pool.
examples:
- name: Delete an address pool.
text: az network application-gateway address-pool delete -g MyResourceGroup --gateway-name MyAppGateway -n MyAddressPool
"""
helps['network application-gateway address-pool list'] = """
type: command
short-summary: List address pools.
examples:
- name: List address pools.
text: az network application-gateway address-pool list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway address-pool show'] = """
type: command
short-summary: Get the details of an address pool.
examples:
- name: Get the details of an address pool.
text: az network application-gateway address-pool show -g MyResourceGroup --gateway-name MyAppGateway -n MyAddressPool
"""
helps['network application-gateway address-pool update'] = """
type: command
short-summary: Update an address pool.
examples:
- name: Update backend address pool.
text: az network application-gateway address-pool update -g MyResourceGroup --gateway-name MyAppGateway -n MyAddressPool --servers 10.0.0.4 10.0.0.5 10.0.0.6
- name: Add to the backend address pool by using backend server IP address.
text: |
az network application-gateway address-pool update -g MyResourceGroup --gateway-name MyAppGateway -n MyAddressPool \\
--add backendAddresses "{ \"ip_address\": \"10.0.0.13\" }"
"""
helps['network application-gateway auth-cert'] = """
type: group
short-summary: Manage authorization certificates of an application gateway.
"""
helps['network application-gateway auth-cert create'] = """
type: command
short-summary: Create an authorization certificate.
examples:
- name: Create an authorization certificate.
text: |
az network application-gateway auth-cert create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyAuthCert --cert-file /path/to/cert/file
"""
helps['network application-gateway auth-cert delete'] = """
type: command
short-summary: Delete an authorization certificate.
examples:
- name: Delete an authorization certificate.
text: az network application-gateway auth-cert delete -g MyResourceGroup --gateway-name MyAppGateway -n MyAuthCert
"""
helps['network application-gateway auth-cert list'] = """
type: command
short-summary: List authorization certificates.
examples:
- name: List authorization certificates.
text: az network application-gateway auth-cert list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway auth-cert show'] = """
type: command
short-summary: Show an authorization certificate.
examples:
- name: Show an authorization certificate.
text: az network application-gateway auth-cert show -g MyResourceGroup --gateway-name MyAppGateway -n MyAuthCert
- name: View expiry date of an authorization certificate. It is in Base-64 encoded X.509(.CER) format.
text: |
az network application-gateway auth-cert show -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyAuthCert --query data -o tsv | base64 -d | openssl x509 -enddate -noout
"""
helps['network application-gateway auth-cert update'] = """
type: command
short-summary: Update an authorization certificate.
examples:
- name: Update authorization certificates to use a new cert file.
text: az network application-gateway auth-cert update -g MyResourceGroup --gateway-name MyAppGateway -n MyAuthCert --cert-file /path/to/new/cert/file
"""
helps['network application-gateway create'] = """
type: command
short-summary: Create an application gateway.
parameters:
- name: --trusted-client-cert
short-summary: The application gateway trusted client certificate.
long-summary: |
Usage: --trusted-client-certificates name=client1 data=client.cer
name: Required. Name of the trusted client certificate that is unique within an Application Gateway
data: Required. Certificate public data.
Multiple trusted client certificates can be specified by using more than one `--trusted-client-certificates` argument.
- name: --ssl-profile
short-summary: The application gateway ssl profiles.
long-summary: |
Usage: --ssl-profile name=MySslProfile client-auth-configuration=True cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 policy-type=Custom min-protocol-version=TLSv1_0
name: Required. Name of the SSL profile that is unique within an Application Gateway.
policy-name: Name of Ssl Policy.
policy-type: Type of Ssl Policy.
min-protocol-version: Minimum version of Ssl protocol to be supported on application gateway.
cipher-suites: Ssl cipher suites to be enabled in the specified order to application gateway.
disabled-ssl-protocols: Space-separated list of protocols to disable.
trusted-client-certificates: Array of references to application gateway trusted client certificates.
client-auth-configuration: Client authentication configuration of the application gateway resource.
Multiple ssl profiles can be specified by using more than one `--ssl-profile` argument.
examples:
- name: Create an application gateway with VMs as backend servers.
text: |
az network application-gateway create -g MyResourceGroup -n MyAppGateway --capacity 2 --sku Standard_Medium \\
--vnet-name MyVNet --subnet MySubnet --http-settings-cookie-based-affinity Enabled \\
--public-ip-address MyAppGatewayPublicIp --servers 10.0.0.4 10.0.0.5
- name: Create an application gateway. (autogenerated)
text: |
az network application-gateway create --capacity 2 --frontend-port MyFrontendPort --http-settings-cookie-based-affinity Enabled --http-settings-port 80 --http-settings-protocol Http --location westus2 --name MyAppGateway --public-ip-address MyAppGatewayPublicIp --resource-group MyResourceGroup --sku Standard_Small --subnet MySubnet --vnet-name MyVNet
crafted: true
"""
helps['network application-gateway delete'] = """
type: command
short-summary: Delete an application gateway.
examples:
- name: Delete an application gateway.
text: az network application-gateway delete -g MyResourceGroup -n MyAppGateway
"""
helps['network application-gateway private-link'] = """
type: group
short-summary: Manage Private Link of an Application Gateway
"""
helps['network application-gateway private-link add'] = """
type: command
short-summary: Add a new Private Link with a default IP Configuration and associate it with an existing Frontend IP
"""
helps['network application-gateway private-link remove'] = """
type: command
short-summary: Remove a Private Link and clear its association with the Frontend IP. The subnet associated with the Private Link might need to be cleared manually
"""
helps['network application-gateway private-link show'] = """
type: command
short-summary: Show a Private Link
"""
helps['network application-gateway private-link list'] = """
type: command
short-summary: List all the Private Links
"""
helps['network application-gateway private-link wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the corresponding application gateway is met
"""
helps['network application-gateway private-link ip-config'] = """
type: group
short-summary: Manage IP configuration of a Private Link to configure its capability
"""
helps['network application-gateway private-link ip-config add'] = """
type: command
short-summary: Add an IP configuration to a Private Link to scale up its capability
"""
helps['network application-gateway private-link ip-config remove'] = """
type: command
short-summary: Remove an IP configuration from a Private Link to scale down its capability
"""
helps['network application-gateway private-link ip-config show'] = """
type: command
short-summary: Show an IP configuration of a Private Link
"""
helps['network application-gateway private-link ip-config list'] = """
type: command
short-summary: List all the IP configurations of a Private Link
"""
helps['network application-gateway private-link ip-config wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the corresponding application gateway is met
"""
helps['network application-gateway frontend-ip'] = """
type: group
short-summary: Manage frontend IP addresses of an application gateway.
"""
helps['network application-gateway frontend-ip create'] = """
type: command
short-summary: Create a frontend IP address.
examples:
- name: Create a frontend IP address.
text: |
az network application-gateway frontend-ip create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyFrontendIp --public-ip-address MyPublicIpAddress
- name: Create a frontend IP address. (autogenerated)
text: |
az network application-gateway frontend-ip create --gateway-name MyAppGateway --name MyFrontendIp --private-ip-address 10.10.10.50 --resource-group MyResourceGroup --subnet MySubnet --vnet-name MyVnet
crafted: true
"""
helps['network application-gateway frontend-ip delete'] = """
type: command
short-summary: Delete a frontend IP address.
examples:
- name: Delete a frontend IP address.
text: az network application-gateway frontend-ip delete -g MyResourceGroup --gateway-name MyAppGateway -n MyFrontendIp
"""
helps['network application-gateway frontend-ip list'] = """
type: command
short-summary: List frontend IP addresses.
examples:
- name: List frontend IP addresses.
text: az network application-gateway frontend-ip list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway frontend-ip show'] = """
type: command
short-summary: Get the details of a frontend IP address.
examples:
- name: Get the details of a frontend IP address.
text: az network application-gateway frontend-ip show -g MyResourceGroup --gateway-name MyAppGateway -n MyFrontendIp
"""
helps['network application-gateway frontend-ip update'] = """
type: command
short-summary: Update a frontend IP address.
examples:
- name: Update a frontend IP address. (autogenerated)
text: |
az network application-gateway frontend-ip update --gateway-name MyAppGateway --name MyFrontendIp --private-ip-address 10.10.10.50 --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway frontend-port'] = """
type: group
short-summary: Manage frontend ports of an application gateway.
"""
helps['network application-gateway frontend-port create'] = """
type: command
short-summary: Create a frontend port.
examples:
- name: Create a frontend port.
text: |
az network application-gateway frontend-port create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyFrontendPort --port 8080
"""
helps['network application-gateway frontend-port delete'] = """
type: command
short-summary: Delete a frontend port.
examples:
- name: Delete a frontend port.
text: az network application-gateway frontend-port delete -g MyResourceGroup --gateway-name MyAppGateway -n MyFrontendPort
"""
helps['network application-gateway frontend-port list'] = """
type: command
short-summary: List frontend ports.
examples:
- name: List frontend ports.
text: az network application-gateway frontend-port list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway frontend-port show'] = """
type: command
short-summary: Get the details of a frontend port.
examples:
- name: Get the details of a frontend port.
text: az network application-gateway frontend-port show -g MyResourceGroup --gateway-name MyAppGateway -n MyFrontendPort
"""
helps['network application-gateway frontend-port update'] = """
type: command
short-summary: Update a frontend port.
examples:
- name: Update a frontend port to use a different port.
text: |
az network application-gateway frontend-port update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyFrontendPort --port 8081
"""
helps['network application-gateway http-listener'] = """
type: group
short-summary: Manage HTTP listeners of an application gateway.
"""
helps['network application-gateway http-listener create'] = """
type: command
short-summary: Create an HTTP listener.
examples:
- name: Create an HTTP listener.
text: |
az network application-gateway http-listener create -g MyResourceGroup --gateway-name MyAppGateway \\
--frontend-port MyFrontendPort -n MyHttpListener --frontend-ip MyAppGatewayPublicIp
"""
helps['network application-gateway http-listener delete'] = """
type: command
short-summary: Delete an HTTP listener.
examples:
- name: Delete an HTTP listener.
text: az network application-gateway http-listener delete -g MyResourceGroup --gateway-name MyAppGateway -n MyHttpListener
"""
helps['network application-gateway http-listener list'] = """
type: command
short-summary: List HTTP listeners.
examples:
- name: List HTTP listeners.
text: az network application-gateway http-listener list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway http-listener show'] = """
type: command
short-summary: Get the details of an HTTP listener.
examples:
- name: Get the details of an HTTP listener.
text: az network application-gateway http-listener show -g MyResourceGroup --gateway-name MyAppGateway -n MyHttpListener
"""
helps['network application-gateway http-listener update'] = """
type: command
short-summary: Update an HTTP listener.
examples:
- name: Update an HTTP listener to use a different hostname.
text: |
az network application-gateway http-listener update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyHttpListener --host-name www.mynewhost.com
"""
helps['network application-gateway http-settings'] = """
type: group
short-summary: Manage HTTP settings of an application gateway.
"""
helps['network application-gateway http-settings create'] = """
type: command
short-summary: Create HTTP settings.
examples:
- name: Create HTTP settings.
text: |
az network application-gateway http-settings create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyHttpSettings --port 80 --protocol Http --cookie-based-affinity Disabled --timeout 30
- name: Create HTTP settings. (autogenerated)
text: |
az network application-gateway http-settings create --gateway-name MyAppGateway --host-name MyHost --name MyHttpSettings --port 80 --probe MyNewProbe --protocol Http --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway http-settings delete'] = """
type: command
short-summary: Delete HTTP settings.
examples:
- name: Delete HTTP settings.
text: az network application-gateway http-settings delete -g MyResourceGroup --gateway-name MyAppGateway -n MyHttpSettings
"""
helps['network application-gateway http-settings list'] = """
type: command
short-summary: List HTTP settings.
examples:
- name: List HTTP settings.
text: az network application-gateway http-settings list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway http-settings show'] = """
type: command
short-summary: Get the details of a gateway's HTTP settings.
examples:
- name: Get the details of a gateway's HTTP settings.
text: az network application-gateway http-settings show -g MyResourceGroup --gateway-name MyAppGateway -n MyHttpSettings
"""
helps['network application-gateway http-settings update'] = """
type: command
short-summary: Update HTTP settings.
examples:
- name: Update HTTP settings to use a new probe.
text: |
az network application-gateway http-settings update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyHttpSettings --probe MyNewProbe
- name: Update HTTP settings. (autogenerated)
text: |
az network application-gateway http-settings update --enable-probe true --gateway-name MyAppGateway --name MyHttpSettings --probe MyNewProbe --resource-group MyResourceGroup
crafted: true
- name: Update HTTP settings. (autogenerated)
text: |
az network application-gateway http-settings update --gateway-name MyAppGateway --host-name-from-backend-pool true --name MyHttpSettings --port 80 --probe MyNewProbe --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway identity'] = """
type: group
short-summary: Manage the managed service identity of an application gateway.
"""
helps['network application-gateway identity assign'] = """
type: command
short-summary: Assign a managed service identity to an application-gateway
examples:
- name: Assign an identity to the application gateway
text: az network application-gateway identity assign -g MyResourceGroup --gateway-name ag1 --identity /subscriptions/*-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1
"""
helps['network application-gateway identity remove'] = """
type: command
short-summary: Remove the managed service identity of an application-gateway
examples:
- name: Remove an identity from the application gateway
text: az network application-gateway identity remove -g MyResourceGroup --gateway-name ag1
"""
helps['network application-gateway identity show'] = """
type: command
short-summary: Show the managed service identity of an application-gateway
examples:
- name: Show the identity of the application gateway
text: az network application-gateway identity show -g MyResourceGroup --gateway-name ag1
"""
helps['network application-gateway list'] = """
type: command
short-summary: List application gateways.
examples:
- name: List application gateways.
text: az network application-gateway list -g MyResourceGroup
"""
helps['network application-gateway probe'] = """
type: group
short-summary: Manage probes to gather and evaluate information on a gateway.
"""
helps['network application-gateway probe create'] = """
type: command
short-summary: Create a probe.
examples:
- name: Create an application gateway probe.
text: |
az network application-gateway probe create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyProbe --protocol https --host 127.0.0.1 --path /path/to/probe
"""
helps['network application-gateway probe delete'] = """
type: command
short-summary: Delete a probe.
examples:
- name: Delete a probe.
text: az network application-gateway probe delete -g MyResourceGroup --gateway-name MyAppGateway -n MyProbe
- name: Delete a probe. (autogenerated)
text: |
az network application-gateway probe delete --gateway-name MyAppGateway --name MyProbe --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network application-gateway probe list'] = """
type: command
short-summary: List probes.
examples:
- name: List probes.
text: az network application-gateway probe list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway probe show'] = """
type: command
short-summary: Get the details of a probe.
examples:
- name: Get the details of a probe.
text: az network application-gateway probe show -g MyResourceGroup --gateway-name MyAppGateway -n MyProbe
"""
helps['network application-gateway probe update'] = """
type: command
short-summary: Update a probe.
examples:
- name: Update an application gateway probe with a timeout of 60 seconds.
text: |
az network application-gateway probe update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyProbe --timeout 60
- name: Update a probe. (autogenerated)
text: |
az network application-gateway probe update --gateway-name MyAppGateway --host 127.0.0.1 --name MyProbe --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network application-gateway redirect-config'] = """
type: group
short-summary: Manage redirect configurations.
"""
helps['network application-gateway redirect-config create'] = """
type: command
short-summary: Create a redirect configuration.
examples:
- name: Create a redirect configuration to a http-listener called MyBackendListener.
text: |
az network application-gateway redirect-config create -g MyResourceGroup \\
--gateway-name MyAppGateway -n MyRedirectConfig --type Permanent \\
--include-path true --include-query-string true --target-listener MyBackendListener
"""
helps['network application-gateway redirect-config delete'] = """
type: command
short-summary: Delete a redirect configuration.
examples:
- name: Delete a redirect configuration.
text: az network application-gateway redirect-config delete -g MyResourceGroup --gateway-name MyAppGateway -n MyRedirectConfig
"""
helps['network application-gateway redirect-config list'] = """
type: command
short-summary: List redirect configurations.
examples:
- name: List redirect configurations.
text: az network application-gateway redirect-config list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway redirect-config show'] = """
type: command
short-summary: Get the details of a redirect configuration.
examples:
- name: Get the details of a redirect configuration.
text: az network application-gateway redirect-config show -g MyResourceGroup --gateway-name MyAppGateway -n MyRedirectConfig
"""
helps['network application-gateway redirect-config update'] = """
type: command
short-summary: Update a redirect configuration.
examples:
- name: Update a redirect configuration to a different http-listener.
text: |
az network application-gateway redirect-config update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyRedirectConfig --type Permanent --target-listener MyNewBackendListener
- name: Update a redirect configuration. (autogenerated)
text: |
az network application-gateway redirect-config update --gateway-name MyAppGateway --include-path true --include-query-string true --name MyRedirectConfig --resource-group MyResourceGroup --target-listener MyNewBackendListener --type Permanent
crafted: true
"""
helps['network application-gateway rewrite-rule'] = """
short-summary: Manage rewrite rules of an application gateway.
type: group
"""
helps['network application-gateway rewrite-rule condition'] = """
short-summary: Manage rewrite rule conditions of an application gateway.
type: group
"""
helps['network application-gateway rewrite-rule condition create'] = """
short-summary: Create a rewrite rule condition.
type: command
parameters:
- name: --variable
populator-commands:
- az network application-gateway rewrite-rule condition list-server-variables
"""
helps['network application-gateway rewrite-rule condition delete'] = """
short-summary: Delete a rewrite rule condition.
type: command
"""
helps['network application-gateway rewrite-rule condition list'] = """
short-summary: List rewrite rule conditions.
type: command
examples:
- name: List rewrite rule conditions. (autogenerated)
text: |
az network application-gateway rewrite-rule condition list --gateway-name MyGateway --resource-group MyResourceGroup --rule-name MyRule --rule-set-name MyRuleSet
crafted: true
"""
helps['network application-gateway rewrite-rule condition show'] = """
short-summary: Get the details of a rewrite rule condition.
type: command
"""
helps['network application-gateway rewrite-rule condition update'] = """
short-summary: Update a rewrite rule condition.
type: command
parameters:
- name: --variable
populator-commands:
- az network application-gateway rewrite-rule condition list-server-variables
"""
helps['network application-gateway rewrite-rule create'] = """
short-summary: Create a rewrite rule.
type: command
parameters:
- name: --request-headers
populator-commands:
- az network application-gateway rewrite-rule list-request-headers
- name: --response-headers
populator-commands:
- az network application-gateway rewrite-rule list-response-headers
"""
helps['network application-gateway rewrite-rule delete'] = """
short-summary: Delete a rewrite rule.
type: command
examples:
- name: Delete a rewrite rule. (autogenerated)
text: |
az network application-gateway rewrite-rule delete --gateway-name MyGateway --name MyRewriteRule --resource-group MyResourceGroup --rule-set-name MyRuleSet
crafted: true
"""
helps['network application-gateway rewrite-rule list'] = """
short-summary: List rewrite rules.
type: command
examples:
- name: List rewrite rules. (autogenerated)
text: |
az network application-gateway rewrite-rule list --gateway-name MyGateway --resource-group MyResourceGroup --rule-set-name MyRuleSet
crafted: true
"""
helps['network application-gateway rewrite-rule set'] = """
short-summary: Manage rewrite rule sets of an application gateway.
type: group
"""
helps['network application-gateway rewrite-rule set create'] = """
short-summary: Create a rewrite rule set.
type: command
examples:
- name: Create a rewrite rule set. (autogenerated)
text: |
az network application-gateway rewrite-rule set create --gateway-name MyGateway --name MyRewriteRuleSet --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway rewrite-rule set delete'] = """
short-summary: Delete a rewrite rule set.
type: command
"""
helps['network application-gateway rewrite-rule set list'] = """
short-summary: List rewrite rule sets.
type: command
examples:
- name: List rewrite rule sets. (autogenerated)
text: |
az network application-gateway rewrite-rule set list --gateway-name MyGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway rewrite-rule set show'] = """
short-summary: Get the details of a rewrite rule set.
type: command
examples:
- name: Get the details of a rewrite rule set. (autogenerated)
text: |
az network application-gateway rewrite-rule set show --gateway-name MyGateway --name MyRewriteRuleSet --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway rewrite-rule set update'] = """
short-summary: Update a rewrite rule set.
type: command
examples:
- name: Update a rewrite rule set. (autogenerated)
text: |
az network application-gateway rewrite-rule set update --gateway-name MyGateway --name MyRewriteRuleSet --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway rewrite-rule show'] = """
short-summary: Get the details of a rewrite rule.
type: command
examples:
- name: Get the details of a rewrite rule. (autogenerated)
text: |
az network application-gateway rewrite-rule show --gateway-name MyGateway --name MyRewriteRule --resource-group MyResourceGroup --rule-set-name MyRuleSet
crafted: true
"""
helps['network application-gateway rewrite-rule update'] = """
short-summary: Update a rewrite rule.
type: command
parameters:
- name: --request-headers
populator-commands:
- az network application-gateway rewrite-rule list-request-headers
- name: --response-headers
populator-commands:
- az network application-gateway rewrite-rule list-response-headers
examples:
- name: Update a rewrite rule. (autogenerated)
text: |
az network application-gateway rewrite-rule update --gateway-name MyGateway --name MyRewriteRule --remove tags.no_80 --resource-group MyResourceGroup --rule-set-name MyRuleSet
crafted: true
"""
helps['network application-gateway root-cert'] = """
type: group
short-summary: Manage trusted root certificates of an application gateway.
"""
helps['network application-gateway root-cert create'] = """
type: command
short-summary: Upload a trusted root certificate.
examples:
- name: Upload a trusted root certificate. (autogenerated)
text: |
az network application-gateway root-cert create --cert-file /path/to/cert/file --gateway-name MyGateway --name MyTrustedRootCertificate --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway root-cert delete'] = """
type: command
short-summary: Delete a trusted root certificate.
examples:
- name: Delete a trusted root certificate.
text: az network application-gateway root-cert delete -g MyResourceGroup --gateway-name MyAppGateway -n MyRootCert
"""
helps['network application-gateway root-cert list'] = """
type: command
short-summary: List trusted root certificates.
examples:
- name: List trusted root certificates.
text: az network application-gateway root-cert list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway root-cert show'] = """
type: command
short-summary: Get the details of a trusted root certificate.
examples:
- name: Get the details of a trusted root certificate.
text: az network application-gateway root-cert show -g MyResourceGroup --gateway-name MyAppGateway -n MyRootCert
"""
helps['network application-gateway root-cert update'] = """
type: command
short-summary: Update a trusted root certificate.
examples:
- name: Update a trusted root certificate. (autogenerated)
text: |
az network application-gateway root-cert update --cert-file /path/to/cert/file --gateway-name MyGateway --name MyTrustedRootCertificate --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway rule'] = """
type: group
short-summary: Evaluate probe information and define routing rules.
long-summary: >
For more information, visit, https://docs.microsoft.com/azure/application-gateway/application-gateway-customize-waf-rules-cli
"""
helps['network application-gateway rule create'] = """
type: command
short-summary: Create a rule.
long-summary: Rules are executed in the order in which they are created.
examples:
- name: Create a basic rule.
text: |
az network application-gateway rule create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyRule --http-listener MyBackendListener --rule-type Basic --address-pool MyAddressPool --http-settings MyHttpSettings
"""
helps['network application-gateway rule delete'] = """
type: command
short-summary: Delete a rule.
examples:
- name: Delete a rule.
text: az network application-gateway rule delete -g MyResourceGroup --gateway-name MyAppGateway -n MyRule
"""
helps['network application-gateway rule list'] = """
type: command
short-summary: List rules.
examples:
- name: List rules.
text: az network application-gateway rule list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway rule show'] = """
type: command
short-summary: Get the details of a rule.
examples:
- name: Get the details of a rule.
text: az network application-gateway rule show -g MyResourceGroup --gateway-name MyAppGateway -n MyRule
"""
helps['network application-gateway rule update'] = """
type: command
short-summary: Update a rule.
examples:
- name: Update a rule to use a new HTTP listener.
text: |
az network application-gateway rule update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyRule --http-listener MyNewBackendListener
- name: Update a rule. (autogenerated)
text: |
az network application-gateway rule update --address-pool MyAddressPool --gateway-name MyAppGateway --name MyRule --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway show'] = """
type: command
short-summary: Get the details of an application gateway.
examples:
- name: Get the details of an application gateway.
text: az network application-gateway show -g MyResourceGroup -n MyAppGateway
"""
helps['network application-gateway show-backend-health'] = """
type: command
short-summary: Get information on the backend health of an application gateway.
examples:
- name: Show backend health of an application gateway.
text: az network application-gateway show-backend-health -g MyResourceGroup -n MyAppGateway
"""
helps['network application-gateway ssl-cert'] = """
type: group
short-summary: Manage SSL certificates of an application gateway.
long-summary: For more information visit https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-cli
"""
helps['network application-gateway ssl-cert create'] = """
type: command
short-summary: Upload an SSL certificate.
examples:
- name: Upload an SSL certificate via --cert-file and --cert-password.
text: |
az network application-gateway ssl-cert create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MySSLCert --cert-file \\path\\to\\cert\\file --cert-password Abc123
- name: |-
Upload an SSL certificate via --key-vault-secret-id of a KeyVault Secret
with Base64 encoded value of an unencrypted pfx
text: |-
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \\
-out azure-cli-app-tls.crt \\
-keyout azure-cli-app-tls.key \\
-subj "/CN=azure-cli-app"
openssl pkcs12 -export \\
-in azure-cli-app-tls.crt \\
-inkey azure-cli-app-tls.key \\
-passout pass: -out azure-cli-cert.pfx
SecretValue=$(cat azure-cli-cert.pfx | base64)
az keyvault secret set --vault-name MyKeyVault --name MySecret --value ${SecretValue}
az network application-gateway ssl-cert create \\
--resource-group MyResourceGroup \\
--gateway-name MyAppGateway \\
-n MySSLCert \\
--key-vault-secret-id MySecretSecretID
- name: |-
Upload an SSL certificate via --key-vault-secret-id of a KeyVault Certificate
text: |-
az keyvault certificate create \\
--vault-name MyKeyVault \\
--name MyCertificate \\
--policy "$(az keyvault certificate get-default-policy)"
az network application-gateway ssl-cert create \\
--resource-group MyResourceGroup \\
--gateway-name MyAppGateway \\
-n MySSLCert \\
--key-vault-secret-id MyCertificateSecretID
"""
helps['network application-gateway ssl-cert delete'] = """
type: command
short-summary: Delete an SSL certificate.
examples:
- name: Delete an SSL certificate.
text: az network application-gateway ssl-cert delete -g MyResourceGroup --gateway-name MyAppGateway -n MySslCert
"""
helps['network application-gateway ssl-cert list'] = """
type: command
short-summary: List SSL certificates.
examples:
- name: List SSL certificates.
text: az network application-gateway ssl-cert list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway ssl-cert show'] = """
type: command
short-summary: Get the details of an SSL certificate.
examples:
- name: Get the details of an SSL certificate.
text: az network application-gateway ssl-cert show -g MyResourceGroup --gateway-name MyAppGateway -n MySslCert
- name: Display the expiry date of an SSL certificate. The certificate is returned in PKCS7 format, from which the expiration date needs to be retrieved.
text: |
publiccert=`az network application-gateway ssl-cert show -g MyResourceGroup --gateway-name MyAppGateway --name mywebsite.com --query publicCertData -o tsv`
echo "-----BEGIN CERTIFICATE-----" >> public.cert; echo "${publiccert}" >> public.cert; echo "-----END CERTIFICATE-----" >> public.cert
cat public.cert | fold -w 64 | openssl pkcs7 -print_certs | openssl x509 -noout -enddate
"""
helps['network application-gateway ssl-cert update'] = """
type: command
short-summary: Update an SSL certificate.
examples:
- name: Change a gateway SSL certificate and password.
text: |
az network application-gateway ssl-cert update -g MyResourceGroup --gateway-name MyAppGateway -n MySslCert \\
--cert-file \\path\\to\\new\\cert\\file --cert-password Abc123Abc123
"""
helps['network application-gateway ssl-policy'] = """
type: group
short-summary: Manage the SSL policy of an application gateway.
"""
helps['network application-gateway ssl-policy list-options'] = """
type: command
short-summary: Lists available SSL options for configuring SSL policy.
examples:
- name: List available SSL options for configuring SSL policy.
text: az network application-gateway ssl-policy list-options
"""
helps['network application-gateway ssl-policy predefined'] = """
type: group
short-summary: Get information on predefined SSL policies.
"""
helps['network application-gateway ssl-policy predefined list'] = """
type: command
short-summary: Lists all SSL predefined policies for configuring SSL policy.
examples:
- name: Lists all SSL predefined policies for configuring SSL policy.
text: az network application-gateway ssl-policy predefined list
"""
helps['network application-gateway ssl-policy predefined show'] = """
type: command
short-summary: Gets SSL predefined policy with the specified policy name.
examples:
- name: Gets SSL predefined policy with the specified policy name.
text: az network application-gateway ssl-policy predefined show -n AppGwSslPolicy20170401
"""
helps['network application-gateway ssl-policy set'] = """
type: command
short-summary: Update or clear SSL policy settings.
long-summary: To view the predefined policies, use `az network application-gateway ssl-policy predefined list`.
parameters:
- name: --cipher-suites
populator-commands:
- az network application-gateway ssl-policy list-options
- name: --disabled-ssl-protocols
populator-commands:
- az network application-gateway ssl-policy list-options
- name: --min-protocol-version
populator-commands:
- az network application-gateway ssl-policy list-options
examples:
- name: Set a predefined SSL policy.
text: |
az network application-gateway ssl-policy set -g MyResourceGroup --gateway-name MyAppGateway \\
-n AppGwSslPolicy20170401S --policy-type Predefined
- name: Set a custom SSL policy with TLSv1_2 and the cipher suites below.
text: |
az network application-gateway ssl-policy set -g MyResourceGroup --gateway-name MyAppGateway \\
--policy-type Custom --min-protocol-version TLSv1_2 \\
--cipher-suites TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 TLS_RSA_WITH_AES_128_GCM_SHA256
"""
helps['network application-gateway ssl-policy show'] = """
type: command
short-summary: Get the details of a gateway's SSL policy settings.
examples:
- name: Get the details of a gateway's SSL policy settings.
text: az network application-gateway ssl-policy show -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway start'] = """
type: command
short-summary: Start an application gateway.
examples:
- name: Start an application gateway.
text: az network application-gateway start -g MyResourceGroup -n MyAppGateway
"""
helps['network application-gateway stop'] = """
type: command
short-summary: Stop an application gateway.
examples:
- name: Stop an application gateway.
text: az network application-gateway stop -g MyResourceGroup -n MyAppGateway
"""
helps['network application-gateway update'] = """
type: command
short-summary: Update an application gateway.
examples:
- name: Update an application gateway. (autogenerated)
text: |
az network application-gateway update --name MyApplicationGateway --resource-group MyResourceGroup --set useRemoteGateways=true
crafted: true
"""
helps['network application-gateway url-path-map'] = """
type: group
short-summary: Manage URL path maps of an application gateway.
"""
helps['network application-gateway url-path-map create'] = """
type: command
short-summary: Create a URL path map.
long-summary: >
The map must be created with at least one rule, so this command requires the first rule
to be created at the same time the map is created. To learn more,
visit https://docs.microsoft.com/azure/application-gateway/application-gateway-create-url-route-cli
examples:
- name: Create a URL path map with a rule.
text: |
az network application-gateway url-path-map create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyUrlPathMap --rule-name MyUrlPathMapRule1 --paths /mypath1/* --address-pool MyAddressPool \\
--default-address-pool MyAddressPool --http-settings MyHttpSettings --default-http-settings MyHttpSettings
"""
helps['network application-gateway url-path-map delete'] = """
type: command
short-summary: Delete a URL path map.
examples:
- name: Delete a URL path map.
text: az network application-gateway url-path-map delete -g MyResourceGroup --gateway-name MyAppGateway -n MyUrlPathMap
"""
helps['network application-gateway url-path-map list'] = """
type: command
short-summary: List URL path maps.
examples:
- name: List URL path maps.
text: az network application-gateway url-path-map list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway url-path-map rule'] = """
type: group
short-summary: Manage the rules of a URL path map.
"""
helps['network application-gateway url-path-map rule create'] = """
type: command
short-summary: Create a rule for a URL path map.
examples:
- name: Create a rule for a URL path map.
text: |
az network application-gateway url-path-map rule create -g MyResourceGroup \\
--gateway-name MyAppGateway -n MyUrlPathMapRule2 --path-map-name MyUrlPathMap \\
--paths /mypath2/* --address-pool MyAddressPool --http-settings MyHttpSettings
"""
helps['network application-gateway url-path-map rule delete'] = """
type: command
short-summary: Delete a rule of a URL path map.
examples:
- name: Delete a rule of a URL path map.
text: |
az network application-gateway url-path-map rule delete -g MyResourceGroup --gateway-name MyAppGateway \\
--path-map-name MyUrlPathMap -n MyUrlPathMapRule2
"""
helps['network application-gateway url-path-map show'] = """
type: command
short-summary: Get the details of a URL path map.
examples:
- name: Get the details of a URL path map.
text: az network application-gateway url-path-map show -g MyResourceGroup --gateway-name MyAppGateway -n MyUrlPathMap
"""
helps['network application-gateway url-path-map update'] = """
type: command
short-summary: Update a URL path map.
examples:
- name: Update a URL path map to use new default HTTP settings.
text: |
az network application-gateway url-path-map update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyUrlPathMap --default-http-settings MyNewHttpSettings
- name: Update a URL path map. (autogenerated)
text: |
az network application-gateway url-path-map update --default-address-pool MyAddressPool --default-http-settings MyNewHttpSettings --gateway-name MyAppGateway --name MyUrlPathMap --remove tags.no_80 --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-config'] = """
type: group
short-summary: Configure the settings of a web application firewall.
long-summary: >
These commands are only applicable to application gateways with an SKU type of WAF. To learn
more, visit https://docs.microsoft.com/azure/application-gateway/application-gateway-web-application-firewall-cli
"""
helps['network application-gateway waf-config list-rule-sets'] = """
type: command
short-summary: Get information on available WAF rule sets, rule groups, and rule IDs.
parameters:
- name: --group
short-summary: >
List rules for the specified rule group. Use `*` to list rules for all groups.
Omit to suppress listing individual rules.
- name: --type
short-summary: Rule set type to list. Omit to list all types.
- name: --version
short-summary: Rule set version to list. Omit to list all versions.
examples:
- name: List available rule groups in OWASP type rule sets.
text: az network application-gateway waf-config list-rule-sets --type OWASP
- name: List available rules in the OWASP 3.0 rule set.
text: az network application-gateway waf-config list-rule-sets --group '*' --type OWASP --version 3.0
- name: List available rules in the `crs_35_bad_robots` rule group.
text: az network application-gateway waf-config list-rule-sets --group crs_35_bad_robots
- name: List available rules in table format.
text: az network application-gateway waf-config list-rule-sets -o table
"""
helps['network application-gateway waf-config set'] = """
type: command
short-summary: Update the firewall configuration of a web application.
long-summary: >
This command is only applicable to application gateways with an SKU type of WAF. To learn
more, visit https://docs.microsoft.com/azure/application-gateway/application-gateway-web-application-firewall-cli
parameters:
- name: --rule-set-type
short-summary: Rule set type.
populator-commands:
- az network application-gateway waf-config list-rule-sets
- name: --rule-set-version
short-summary: Rule set version.
populator-commands:
- az network application-gateway waf-config list-rule-sets
- name: --disabled-rule-groups
short-summary: Space-separated list of rule groups to disable. To disable individual rules, use `--disabled-rules`.
populator-commands:
- az network application-gateway waf-config list-rule-sets
- name: --disabled-rules
short-summary: Space-separated list of rule IDs to disable.
populator-commands:
- az network application-gateway waf-config list-rule-sets
- name: --exclusion
short-summary: Add an exclusion expression to the WAF check.
long-summary: |
Usage: --exclusion VARIABLE OPERATOR VALUE
Multiple exclusions can be specified by using more than one `--exclusion` argument.
examples:
- name: Configure WAF on an application gateway in detection mode with default values
text: |
az network application-gateway waf-config set -g MyResourceGroup --gateway-name MyAppGateway \\
--enabled true --firewall-mode Detection --rule-set-version 3.0
- name: Disable rules for validation of request body parsing and SQL injection.
text: |
az network application-gateway waf-config set -g MyResourceGroup --gateway-name MyAppGateway \\
--enabled true --rule-set-type OWASP --rule-set-version 3.0 \\
--disabled-rule-groups REQUEST-942-APPLICATION-ATTACK-SQLI \\
--disabled-rules 920130 920140
- name: Configure WAF on an application gateway with exclusions.
text: |
az network application-gateway waf-config set -g MyResourceGroup --gateway-name MyAppGateway \\
--enabled true --firewall-mode Detection --rule-set-version 3.0 \\
--exclusion "RequestHeaderNames StartsWith x-header" \\
--exclusion "RequestArgNames Equals IgnoreThis"
"""
helps['network application-gateway waf-config show'] = """
type: command
short-summary: Get the firewall configuration of a web application.
examples:
- name: Get the firewall configuration of a web application.
text: az network application-gateway waf-config show -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway waf-policy'] = """
type: group
short-summary: Manage application gateway web application firewall (WAF) policies.
"""
helps['network application-gateway waf-policy create'] = """
type: command
short-summary: Create an application gateway WAF policy.
examples:
- name: Create an application gateway WAF policy. (autogenerated)
text: |
az network application-gateway waf-policy create --name MyApplicationGatewayWAFPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy delete'] = """
type: command
short-summary: Delete an application gateway WAF policy.
examples:
- name: Delete an application gateway WAF policy. (autogenerated)
text: |
az network application-gateway waf-policy delete --name MyApplicationGatewayWAFPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy list'] = """
type: command
short-summary: List application gateway WAF policies.
examples:
- name: List application gateway WAF policies. (autogenerated)
text: |
az network application-gateway waf-policy list --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy policy-setting'] = """
type: group
short-summary: Defines contents of a web application firewall global configuration.
"""
helps['network application-gateway waf-policy policy-setting update'] = """
type: command
short-summary: Update properties of a web application firewall global configuration.
examples:
- name: Update properties of a web application firewall global configuration. (autogenerated)
text: |
az network application-gateway waf-policy policy-setting update --mode Prevention --policy-name MyPolicy --resource-group MyResourceGroup --state Disabled
crafted: true
"""
helps['network application-gateway waf-policy policy-setting list'] = """
type: command
short-summary: List properties of a web application firewall global configuration.
examples:
- name: List properties of a web application firewall global configuration. (autogenerated)
text: |
az network application-gateway waf-policy policy-setting list --policy-name MyPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy custom-rule'] = """
type: group
short-summary: Manage application gateway web application firewall (WAF) policy custom rules.
"""
helps['network application-gateway waf-policy custom-rule create'] = """
type: command
short-summary: Create an application gateway WAF policy custom rule.
examples:
- name: Create an application gateway WAF policy custom rule. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule create --action Allow --name MyWafPolicyRule --policy-name MyPolicy --priority 500 --resource-group MyResourceGroup --rule-type MatchRule
crafted: true
"""
helps['network application-gateway waf-policy custom-rule delete'] = """
type: command
short-summary: Delete an application gateway WAF policy custom rule.
examples:
- name: Delete an application gateway WAF policy custom rule. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule delete --name MyWafPolicyRule --policy-name MyPolicy --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network application-gateway waf-policy custom-rule list'] = """
type: command
short-summary: List application gateway WAF policy custom rules.
examples:
- name: List application gateway WAF policy custom rules. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule list --policy-name MyPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy custom-rule match-condition'] = """
type: group
short-summary: Manage application gateway web application firewall (WAF) policy custom rule match conditions.
"""
helps['network application-gateway waf-policy custom-rule match-condition add'] = """
type: command
short-summary: Add a match condition to an application gateway WAF policy custom rule.
"""
helps['network application-gateway waf-policy custom-rule match-condition list'] = """
type: command
short-summary: List application gateway WAF policy custom rule match conditions.
examples:
- name: List application gateway WAF policy custom rule match conditions. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule match-condition list --name MyWAFPolicyRule --policy-name MyPolicy --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network application-gateway waf-policy custom-rule match-condition remove'] = """
type: command
short-summary: Remove a match condition from an application gateway WAF policy custom rule.
"""
helps['network application-gateway waf-policy custom-rule show'] = """
type: command
short-summary: Get the details of an application gateway WAF policy custom rule.
examples:
- name: Get the details of an application gateway WAF policy custom rule. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule show --name MyWAFPolicyRule --policy-name MyPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy custom-rule update'] = """
type: command
short-summary: Update an application gateway WAF policy custom rule.
examples:
- name: Update an application gateway WAF policy custom rule. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule update --name MyWAFPolicyRule --policy-name MyPolicy --resource-group MyResourceGroup --set useRemoteGateways=true
crafted: true
- name: Update an application gateway WAF policy custom rule. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule update --action Allow --name MyWAFPolicyRule --policy-name MyPolicy --priority 500 --resource-group MyResourceGroup --rule-type MatchRule
crafted: true
"""
helps['network application-gateway waf-policy managed-rule'] = """
type: group
short-summary: >
Manage managed rules of a waf-policy.
Visit: https://docs.microsoft.com/en-us/azure/web-application-firewall/afds/afds-overview
"""
helps['network application-gateway waf-policy managed-rule rule-set'] = """
type: group
short-summary: Manage the managed rule sets of a WAF policy's managed rules.
"""
helps['network application-gateway waf-policy managed-rule rule-set add'] = """
type: command
short-summary: >
Add a managed rule set to the WAF policy managed rules. For the available rule sets and rules, please visit:
https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
examples:
- name: Disable an attack protection rule
text: |
az network application-gateway waf-policy managed-rule rule-set add --policy-name MyPolicy -g MyResourceGroup --type OWASP --version 3.1 --group-name REQUEST-921-PROTOCOL-ATTACK --rules 921110
- name: Add managed rule set to the WAF policy managed rules (autogenerated)
text: |
az network application-gateway waf-policy managed-rule rule-set add --policy-name MyPolicy --resource-group MyResourceGroup --type Microsoft_BotManagerRuleSet --version 0.1
crafted: true
"""
helps['network application-gateway waf-policy managed-rule rule-set update'] = """
type: command
short-summary: >
Manage rules of a WAF policy.
If --group-name and --rules are both provided, the specified rules are overridden. If only --group-name is provided, all rules under that rule group are cleared. If neither is provided, the rule set itself is updated and all rules under it are cleared.
For rule set and rules, please visit: https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
examples:
- name: Override rules under rule group REQUEST-921-PROTOCOL-ATTACK
text: |
az network application-gateway waf-policy managed-rule rule-set update --policy-name MyPolicy -g MyResourceGroup --type OWASP --version 3.1 --group-name REQUEST-921-PROTOCOL-ATTACK --rules 921130 921160
- name: Update the OWASP protocol version from 3.1 to 3.0 which will clear the old rules
text: |
az network application-gateway waf-policy managed-rule rule-set update --policy-name MyPolicy -g MyResourceGroup --type OWASP --version 3.0
"""
helps['network application-gateway waf-policy managed-rule rule-set remove'] = """
type: command
short-summary: >
Remove a managed rule group by its name if rule_group_name is specified. Otherwise, remove the whole managed rule set.
examples:
- name: Remove a whole managed rule set when no rule group name is specified.
text: |
az network application-gateway waf-policy managed-rule rule-set remove --policy-name MyPolicy --resource-group MyResourceGroup --type OWASP --version 3.1
"""
helps['network application-gateway waf-policy managed-rule rule-set list'] = """
type: command
short-summary: List all managed rule sets.
examples:
- name: List all managed rule sets. (autogenerated)
text: |
az network application-gateway waf-policy managed-rule rule-set list --policy-name MyPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy managed-rule exclusion'] = """
type: group
short-summary: Manage OWASP CRS exclusions that are applied to the managed rules of a WAF policy.
"""
helps['network application-gateway waf-policy managed-rule exclusion add'] = """
type: command
short-summary: Add an OWASP CRS exclusion rule to the WAF policy managed rules.
"""
helps['network application-gateway waf-policy managed-rule exclusion remove'] = """
type: command
short-summary: Remove an OWASP CRS exclusion rule from the managed rules of a WAF policy.
"""
helps['network application-gateway waf-policy managed-rule exclusion list'] = """
type: command
short-summary: List all OWASP CRS exclusion rules that are applied to the managed rules of a WAF policy.
"""
helps['network application-gateway waf-policy show'] = """
type: command
short-summary: Get the details of an application gateway WAF policy.
examples:
- name: Get the details of an application gateway WAF policy. (autogenerated)
text: |
az network application-gateway waf-policy show --name MyApplicationGatewayWAFPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy update'] = """
type: command
short-summary: Update an application gateway WAF policy.
examples:
- name: Update an application gateway WAF policy. (autogenerated)
text: |
az network application-gateway waf-policy update --add communities='12076:5010' --name MyApplicationGatewayWAFPolicy --resource-group MyResourceGroup
crafted: true
- name: Update an application gateway WAF policy. (autogenerated)
text: |
az network application-gateway waf-policy update --name MyApplicationGatewayWAFPolicy --remove tags.no_80 --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the application gateway WAF policy is met.
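examples:
- name: Pause until the WAF policy is created (illustrative; assumes the policy is identified by --name).
  text: az network application-gateway waf-policy wait -g MyResourceGroup -n MyWafPolicy --created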
"""
helps['network application-gateway wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the application gateway is met.
examples:
- name: Place the CLI in a waiting state until the application gateway is created.
text: az network application-gateway wait -g MyResourceGroup -n MyAppGateway --created
"""
helps['network application-gateway client-cert'] = """
type: group
short-summary: Manage trusted client certificate of application gateway.
"""
helps['network application-gateway client-cert add'] = """
type: command
short-summary: Add trusted client certificate of the application gateway.
examples:
- name: Add trusted client certificate for an existing application gateway.
text: az network application-gateway client-cert add --gateway-name MyAppGateway -g MyResourceGroup --name MyCert --data Cert.cer
"""
helps['network application-gateway client-cert remove'] = """
type: command
short-summary: Remove an existing trusted client certificate of the application gateway.
examples:
- name: Remove a trusted client certificate for an existing application gateway.
text: az network application-gateway client-cert remove --gateway-name MyAppGateway -g MyResourceGroup --name MyCert
"""
helps['network application-gateway client-cert list'] = """
type: command
short-summary: List the existing trusted client certificates of the application gateway.
examples:
- name: List all trusted client certificates of an existing application gateway.
text: az network application-gateway client-cert list --gateway-name MyAppGateway -g MyResourceGroup
"""
helps['network application-gateway ssl-profile'] = """
type: group
short-summary: Manage SSL profiles of an application gateway.
"""
helps['network application-gateway ssl-profile add'] = """
type: command
short-summary: Add an SSL profile to the application gateway.
examples:
- name: Add an SSL profile to an existing application gateway.
text: az network application-gateway ssl-profile add --gateway-name MyAppGateway -g MyResourceGroup --name MySslProfile
"""
helps['network application-gateway ssl-profile remove'] = """
type: command
short-summary: Remove an existing SSL profile from the application gateway.
examples:
- name: Remove an SSL profile from an existing application gateway.
text: az network application-gateway ssl-profile remove --gateway-name MyAppGateway -g MyResourceGroup --name MySslProfile
"""
helps['network application-gateway ssl-profile list'] = """
type: command
short-summary: List the existing SSL profiles of the application gateway.
examples:
- name: List all SSL profiles of an existing application gateway.
text: az network application-gateway ssl-profile list --gateway-name MyAppGateway -g MyResourceGroup
"""
helps['network asg'] = """
type: group
short-summary: Manage application security groups (ASGs).
long-summary: >
    You can configure network security as a natural extension of an application's structure. ASGs allow
    you to group virtual machines and define network security policies based on those groups. You can specify an
    application security group as the source and destination in an NSG security rule. For more information,
    visit https://docs.microsoft.com/azure/virtual-network/create-network-security-group-preview
"""
helps['network asg create'] = """
type: command
short-summary: Create an application security group.
parameters:
- name: --name -n
short-summary: Name of the new application security group resource.
examples:
- name: Create an application security group.
text: az network asg create -g MyResourceGroup -n MyAsg --tags MyWebApp, CostCenter=Marketing
"""
helps['network asg delete'] = """
type: command
short-summary: Delete an application security group.
examples:
- name: Delete an application security group.
text: az network asg delete -g MyResourceGroup -n MyAsg
"""
helps['network asg list'] = """
type: command
short-summary: List all application security groups in a subscription.
examples:
- name: List all application security groups in a subscription.
text: az network asg list
"""
helps['network asg show'] = """
type: command
short-summary: Get details of an application security group.
examples:
- name: Get details of an application security group.
text: az network asg show -g MyResourceGroup -n MyAsg
"""
helps['network asg update'] = """
type: command
short-summary: Update an application security group.
long-summary: >
This command can only be used to update the tags for an application security group.
Name and resource group are immutable and cannot be updated.
examples:
- name: Update an application security group with a modified tag value.
text: az network asg update -g MyResourceGroup -n MyAsg --set tags.CostCenter=MyBusinessGroup
"""
helps['network ddos-protection'] = """
type: group
short-summary: Manage DDoS Protection Plans.
"""
helps['network ddos-protection create'] = """
type: command
short-summary: Create a DDoS protection plan.
parameters:
- name: --vnets
long-summary: >
This parameter can only be used if all the VNets are within the same subscription as
the DDoS protection plan. If this is not the case, set the protection plan on the VNet
directly using the `az network vnet update` command.
examples:
- name: Create a DDoS protection plan.
text: az network ddos-protection create -g MyResourceGroup -n MyDdosPlan
- name: Create a DDoS protection plan. (autogenerated)
text: |
az network ddos-protection create --location westus2 --name MyDdosPlan --resource-group MyResourceGroup
crafted: true
"""
helps['network ddos-protection delete'] = """
type: command
short-summary: Delete a DDoS protection plan.
examples:
- name: Delete a DDoS protection plan.
text: az network ddos-protection delete -g MyResourceGroup -n MyDdosPlan
"""
helps['network ddos-protection list'] = """
type: command
short-summary: List DDoS protection plans.
examples:
- name: List DDoS protection plans
text: az network ddos-protection list
"""
helps['network ddos-protection show'] = """
type: command
short-summary: Show details of a DDoS protection plan.
examples:
- name: Show details of a DDoS protection plan.
text: az network ddos-protection show -g MyResourceGroup -n MyDdosPlan
"""
helps['network ddos-protection update'] = """
type: command
short-summary: Update a DDoS protection plan.
parameters:
- name: --vnets
long-summary: >
This parameter can only be used if all the VNets are within the same subscription as
the DDoS protection plan. If this is not the case, set the protection plan on the VNet
directly using the `az network vnet update` command.
examples:
- name: Add a Vnet to a DDoS protection plan in the same subscription.
text: az network ddos-protection update -g MyResourceGroup -n MyDdosPlan --vnets MyVnet
- name: Update a DDoS protection plan. (autogenerated)
text: |
az network ddos-protection update --name MyDdosPlan --remove tags.no_80 --resource-group MyResourceGroup
crafted: true
"""
helps['network dns'] = """
type: group
short-summary: Manage DNS domains in Azure.
"""
helps['network dns record-set'] = """
type: group
short-summary: Manage DNS records and record sets.
"""
helps['network dns record-set a'] = """
type: group
short-summary: Manage DNS A records.
"""
helps['network dns record-set a add-record'] = """
type: command
short-summary: Add an A record.
examples:
- name: Add an A record.
text: |
az network dns record-set a add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -a MyIpv4Address
"""
helps['network dns record-set a create'] = """
type: command
short-summary: Create an empty A record set.
examples:
- name: Create an empty A record set.
text: az network dns record-set a create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty A record set. (autogenerated)
text: |
az network dns record-set a create --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set a delete'] = """
type: command
short-summary: Delete an A record set and all associated records.
examples:
- name: Delete an A record set and all associated records.
text: az network dns record-set a delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set a list'] = """
type: command
short-summary: List all A record sets in a zone.
examples:
- name: List all A record sets in a zone.
text: az network dns record-set a list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set a remove-record'] = """
type: command
short-summary: Remove an A record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove an A record from its record set.
text: |
az network dns record-set a remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -a MyIpv4Address
"""
helps['network dns record-set a show'] = """
type: command
short-summary: Get the details of an A record set.
examples:
- name: Get the details of an A record set.
text: az network dns record-set a show -g MyResourceGroup -n MyRecordSet -z www.mysite.com
"""
helps['network dns record-set a update'] = """
type: command
short-summary: Update an A record set.
examples:
- name: Update an A record set.
text: |
az network dns record-set a update -g MyResourceGroup -n MyRecordSet \\
-z www.mysite.com --metadata owner=WebTeam
- name: Update an A record set. (autogenerated)
text: |
az network dns record-set a update --name MyRecordSet --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set aaaa'] = """
type: group
short-summary: Manage DNS AAAA records.
"""
helps['network dns record-set aaaa add-record'] = """
type: command
short-summary: Add an AAAA record.
examples:
- name: Add an AAAA record.
text: |
az network dns record-set aaaa add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -a MyIpv6Address
"""
helps['network dns record-set aaaa create'] = """
type: command
short-summary: Create an empty AAAA record set.
examples:
- name: Create an empty AAAA record set.
text: az network dns record-set aaaa create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set aaaa delete'] = """
type: command
short-summary: Delete an AAAA record set and all associated records.
examples:
- name: Delete an AAAA record set and all associated records.
text: az network dns record-set aaaa delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set aaaa list'] = """
type: command
short-summary: List all AAAA record sets in a zone.
examples:
- name: List all AAAA record sets in a zone.
text: az network dns record-set aaaa list -g MyResourceGroup -z www.mysite.com
- name: List all AAAA record sets in a zone. (autogenerated)
text: |
az network dns record-set aaaa list --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set aaaa remove-record'] = """
type: command
short-summary: Remove an AAAA record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove an AAAA record from its record set.
text: |
az network dns record-set aaaa remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -a MyIpv6Address
"""
helps['network dns record-set aaaa show'] = """
type: command
short-summary: Get the details of an AAAA record set.
examples:
- name: Get the details of an AAAA record set.
text: az network dns record-set aaaa show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set aaaa update'] = """
type: command
short-summary: Update an AAAA record set.
examples:
- name: Update an AAAA record set.
text: |
az network dns record-set aaaa update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
"""
helps['network dns record-set caa'] = """
type: group
short-summary: Manage DNS CAA records.
"""
helps['network dns record-set caa add-record'] = """
type: command
short-summary: Add a CAA record.
examples:
- name: Add a CAA record.
text: |
az network dns record-set caa add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --flags 0 --tag "issue" --value "ca.contoso.com"
"""
helps['network dns record-set caa create'] = """
type: command
short-summary: Create an empty CAA record set.
examples:
- name: Create an empty CAA record set.
text: az network dns record-set caa create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty CAA record set. (autogenerated)
text: |
az network dns record-set caa create --name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set caa delete'] = """
type: command
short-summary: Delete a CAA record set and all associated records.
examples:
- name: Delete a CAA record set and all associated records.
text: az network dns record-set caa delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Delete a CAA record set and all associated records. (autogenerated)
text: |
az network dns record-set caa delete --name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set caa list'] = """
type: command
short-summary: List all CAA record sets in a zone.
examples:
- name: List all CAA record sets in a zone.
text: az network dns record-set caa list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set caa remove-record'] = """
type: command
short-summary: Remove a CAA record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove a CAA record from its record set.
text: |
az network dns record-set caa remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --flags 0 --tag "issue" --value "ca.contoso.com"
"""
helps['network dns record-set caa show'] = """
type: command
short-summary: Get the details of a CAA record set.
examples:
- name: Get the details of a CAA record set.
text: az network dns record-set caa show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set caa update'] = """
type: command
short-summary: Update a CAA record set.
examples:
- name: Update a CAA record set.
text: |
az network dns record-set caa update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
"""
helps['network dns record-set cname'] = """
type: group
short-summary: Manage DNS CNAME records.
"""
helps['network dns record-set cname create'] = """
type: command
short-summary: Create an empty CNAME record set.
examples:
- name: Create an empty CNAME record set.
text: az network dns record-set cname create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty CNAME record set. (autogenerated)
text: |
az network dns record-set cname create --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set cname delete'] = """
type: command
short-summary: Delete a CNAME record set and its associated record.
examples:
- name: Delete a CNAME record set and its associated record.
text: az network dns record-set cname delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set cname list'] = """
type: command
short-summary: List the CNAME record set in a zone.
examples:
- name: List the CNAME record set in a zone.
text: az network dns record-set cname list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set cname remove-record'] = """
type: command
short-summary: Remove a CNAME record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove a CNAME record from its record set.
text: |
az network dns record-set cname remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -c www.contoso.com
"""
helps['network dns record-set cname set-record'] = """
type: command
short-summary: Set the value of a CNAME record.
examples:
- name: Set the value of a CNAME record.
text: |
az network dns record-set cname set-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -c www.contoso.com
"""
helps['network dns record-set cname show'] = """
type: command
short-summary: Get the details of a CNAME record set.
examples:
- name: Get the details of a CNAME record set.
text: az network dns record-set cname show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set list'] = """
type: command
short-summary: List all record sets within a DNS zone.
examples:
- name: List all "@" record sets within this zone.
text: az network dns record-set list -g MyResourceGroup -z www.mysite.com --query "[?name=='@']"
"""
helps['network dns record-set mx'] = """
type: group
short-summary: Manage DNS MX records.
"""
helps['network dns record-set mx add-record'] = """
type: command
short-summary: Add an MX record.
examples:
- name: Add an MX record.
text: |
az network dns record-set mx add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -e mail.mysite.com -p 10
"""
helps['network dns record-set mx create'] = """
type: command
short-summary: Create an empty MX record set.
examples:
- name: Create an empty MX record set.
text: az network dns record-set mx create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty MX record set. (autogenerated)
text: |
az network dns record-set mx create --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set mx delete'] = """
type: command
short-summary: Delete an MX record set and all associated records.
examples:
- name: Delete an MX record set and all associated records.
text: az network dns record-set mx delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set mx list'] = """
type: command
short-summary: List all MX record sets in a zone.
examples:
- name: List all MX record sets in a zone.
text: az network dns record-set mx list -g MyResourceGroup -z www.mysite.com
- name: List all MX record sets in a zone (autogenerated)
text: |
az network dns record-set mx list --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set mx remove-record'] = """
type: command
short-summary: Remove an MX record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove an MX record from its record set.
text: |
az network dns record-set mx remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -e mail.mysite.com -p 10
"""
helps['network dns record-set mx show'] = """
type: command
short-summary: Get the details of an MX record set.
examples:
- name: Get the details of an MX record set.
text: az network dns record-set mx show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set mx update'] = """
type: command
short-summary: Update an MX record set.
examples:
- name: Update an MX record set.
text: |
az network dns record-set mx update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
- name: Update an MX record set. (autogenerated)
text: |
az network dns record-set mx update --name MyRecordSet --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ns'] = """
type: group
short-summary: Manage DNS NS records.
"""
helps['network dns record-set ns add-record'] = """
type: command
short-summary: Add an NS record.
examples:
- name: Add an NS record.
text: |
az network dns record-set ns add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -d ns.mysite.com
"""
helps['network dns record-set ns create'] = """
type: command
short-summary: Create an empty NS record set.
examples:
- name: Create an empty NS record set.
text: az network dns record-set ns create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty NS record set. (autogenerated)
text: |
az network dns record-set ns create --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ns delete'] = """
type: command
short-summary: Delete an NS record set and all associated records.
examples:
- name: Delete an NS record set and all associated records.
text: az network dns record-set ns delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Delete an NS record set and all associated records. (autogenerated)
text: |
az network dns record-set ns delete --name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --yes --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ns list'] = """
type: command
short-summary: List all NS record sets in a zone.
examples:
- name: List all NS record sets in a zone.
text: az network dns record-set ns list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set ns remove-record'] = """
type: command
short-summary: Remove an NS record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove an NS record from its record set.
text: |
az network dns record-set ns remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -d ns.mysite.com
- name: Remove an NS record from its record set. (autogenerated)
text: |
az network dns record-set ns remove-record --keep-empty-record-set --nsdname ns.mysite.com --record-set-name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ns show'] = """
type: command
short-summary: Get the details of an NS record set.
examples:
- name: Get the details of an NS record set.
text: az network dns record-set ns show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set ns update'] = """
type: command
short-summary: Update an NS record set.
examples:
- name: Update an NS record set.
text: |
az network dns record-set ns update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
- name: Update an NS record set. (autogenerated)
text: |
az network dns record-set ns update --name MyRecordSet --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ptr'] = """
type: group
short-summary: Manage DNS PTR records.
"""
helps['network dns record-set ptr add-record'] = """
type: command
short-summary: Add a PTR record.
examples:
- name: Add a PTR record.
text: |
az network dns record-set ptr add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -d another.site.com
"""
helps['network dns record-set ptr create'] = """
type: command
short-summary: Create an empty PTR record set.
examples:
- name: Create an empty PTR record set.
text: az network dns record-set ptr create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty PTR record set. (autogenerated)
text: |
az network dns record-set ptr create --name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ptr delete'] = """
type: command
short-summary: Delete a PTR record set and all associated records.
examples:
- name: Delete a PTR record set and all associated records.
text: az network dns record-set ptr delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Delete a PTR record set and all associated records. (autogenerated)
text: |
az network dns record-set ptr delete --name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --yes --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ptr list'] = """
type: command
short-summary: List all PTR record sets in a zone.
examples:
- name: List all PTR record sets in a zone.
text: az network dns record-set ptr list -g MyResourceGroup -z www.mysite.com
- name: List all PTR record sets in a zone. (autogenerated)
text: |
az network dns record-set ptr list --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ptr remove-record'] = """
type: command
short-summary: Remove a PTR record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove a PTR record from its record set.
text: |
az network dns record-set ptr remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -d another.site.com
"""
helps['network dns record-set ptr show'] = """
type: command
short-summary: Get the details of a PTR record set.
examples:
- name: Get the details of a PTR record set.
text: az network dns record-set ptr show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set ptr update'] = """
type: command
short-summary: Update a PTR record set.
examples:
- name: Update a PTR record set.
text: |
az network dns record-set ptr update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
"""
helps['network dns record-set soa'] = """
type: group
short-summary: Manage a DNS SOA record.
"""
helps['network dns record-set soa show'] = """
type: command
short-summary: Get the details of an SOA record.
examples:
- name: Get the details of an SOA record.
text: az network dns record-set soa show -g MyResourceGroup -z www.mysite.com
- name: Get the details of an SOA record (autogenerated)
text: |
az network dns record-set soa show --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set soa update'] = """
type: command
short-summary: Update properties of an SOA record.
examples:
- name: Update properties of an SOA record.
text: |
az network dns record-set soa update -g MyResourceGroup -z www.mysite.com \\
-e myhostmaster.mysite.com
- name: Update properties of an SOA record. (autogenerated)
text: |
az network dns record-set soa update --email myhostmaster.mysite.com --only-show-errors --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set srv'] = """
type: group
short-summary: Manage DNS SRV records.
"""
helps['network dns record-set srv add-record'] = """
type: command
short-summary: Add an SRV record.
examples:
- name: Add an SRV record.
text: |
az network dns record-set srv add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -t webserver.mysite.com -r 8081 -p 10 -w 10
"""
helps['network dns record-set srv create'] = """
type: command
short-summary: Create an empty SRV record set.
examples:
- name: Create an empty SRV record set.
text: |
az network dns record-set srv create -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet
- name: Create an empty SRV record set. (autogenerated)
text: |
az network dns record-set srv create --metadata owner=WebTeam --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set srv delete'] = """
type: command
short-summary: Delete an SRV record set and all associated records.
examples:
- name: Delete an SRV record set and all associated records.
text: az network dns record-set srv delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set srv list'] = """
type: command
short-summary: List all SRV record sets in a zone.
examples:
- name: List all SRV record sets in a zone.
text: az network dns record-set srv list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set srv remove-record'] = """
type: command
short-summary: Remove an SRV record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove an SRV record from its record set.
text: |
az network dns record-set srv remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -t webserver.mysite.com -r 8081 -p 10 -w 10
"""
helps['network dns record-set srv show'] = """
type: command
short-summary: Get the details of an SRV record set.
examples:
- name: Get the details of an SRV record set.
text: az network dns record-set srv show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set srv update'] = """
type: command
short-summary: Update an SRV record set.
examples:
- name: Update an SRV record set.
text: |
az network dns record-set srv update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
"""
helps['network dns record-set txt'] = """
type: group
short-summary: Manage DNS TXT records.
"""
helps['network dns record-set txt add-record'] = """
type: command
short-summary: Add a TXT record.
examples:
- name: Add a TXT record.
text: |
az network dns record-set txt add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -v Owner=WebTeam
"""
helps['network dns record-set txt create'] = """
type: command
short-summary: Create an empty TXT record set.
examples:
- name: Create an empty TXT record set.
text: az network dns record-set txt create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty TXT record set. (autogenerated)
text: |
az network dns record-set txt create --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set txt delete'] = """
type: command
short-summary: Delete a TXT record set and all associated records.
examples:
- name: Delete a TXT record set and all associated records.
text: az network dns record-set txt delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set txt list'] = """
type: command
short-summary: List all TXT record sets in a zone.
examples:
- name: List all TXT record sets in a zone.
text: az network dns record-set txt list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set txt remove-record'] = """
type: command
short-summary: Remove a TXT record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove a TXT record from its record set.
text: |
az network dns record-set txt remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -v Owner=WebTeam
"""
helps['network dns record-set txt show'] = """
type: command
short-summary: Get the details of a TXT record set.
examples:
- name: Get the details of a TXT record set.
text: az network dns record-set txt show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Get the details of a TXT record set. (autogenerated)
text: |
az network dns record-set txt show --name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set txt update'] = """
type: command
short-summary: Update a TXT record set.
examples:
- name: Update a TXT record set.
text: |
az network dns record-set txt update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
- name: Update a TXT record set. (autogenerated)
text: |
az network dns record-set txt update --name MyRecordSet --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup --zone-name www.mysite.com
crafted: true
"""
helps['network dns zone'] = """
type: group
short-summary: Manage DNS zones.
"""
helps['network dns zone create'] = """
type: command
short-summary: Create a DNS zone.
parameters:
- name: --if-none-match
short-summary: Only create a DNS zone if one doesn't exist that matches the given name.
examples:
- name: Create a DNS zone using a fully qualified domain name.
text: >
az network dns zone create -g MyResourceGroup -n www.mysite.com
- name: Create a DNS zone with delegation in the parent within the same subscription and resource group
text: >
az network dns zone create -g MyResourceGroup -n books.mysite.com -p mysite.com
- name: Create a DNS zone with delegation in the parent in different subscription
text: >
az network dns zone create -g MyResourceGroup -n books.mysite.com -p "/subscriptions/**67e2/resourceGroups/OtherRg/providers/Microsoft.Network/dnszones/mysite.com"
"""
helps['network dns zone delete'] = """
type: command
short-summary: Delete a DNS zone and all associated records.
examples:
- name: Delete a DNS zone using a fully qualified domain name.
text: >
az network dns zone delete -g MyResourceGroup -n www.mysite.com
"""
helps['network dns zone export'] = """
type: command
short-summary: Export a DNS zone as a DNS zone file.
examples:
- name: Export a DNS zone as a DNS zone file.
text: >
az network dns zone export -g MyResourceGroup -n www.mysite.com -f mysite_com_zone.txt
"""
helps['network dns zone import'] = """
type: command
short-summary: Create a DNS zone using a DNS zone file.
examples:
- name: Import a local zone file into a DNS zone resource.
text: >
az network dns zone import -g MyResourceGroup -n MyZone -f /path/to/zone/file
"""
helps['network dns zone list'] = """
type: command
short-summary: List DNS zones.
examples:
- name: List DNS zones in a resource group.
text: >
az network dns zone list -g MyResourceGroup
"""
helps['network dns zone show'] = """
type: command
short-summary: Get the parameters of a DNS zone. Does not show DNS records within the zone.
examples:
- name: Get the details of a DNS zone.
text: >
az network dns zone show -g MyResourceGroup -n www.mysite.com
"""
helps['network dns zone update'] = """
type: command
short-summary: Update the properties of a DNS zone. Does not modify DNS records within the zone.
parameters:
- name: --if-match
short-summary: Update only if the resource with the same ETAG exists.
examples:
- name: Update the properties of a DNS zone to change the user-defined value of a previously set tag.
text: >
az network dns zone update -g MyResourceGroup -n www.mysite.com --tags CostCenter=Marketing
- name: Update the properties of a DNS zone. (autogenerated)
text: |
az network dns zone update --name www.mysite.com --remove tags.no_80 --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route'] = """
type: group
short-summary: Manage dedicated private network fiber connections to Azure.
long-summary: >
To learn more about ExpressRoute circuits visit
https://docs.microsoft.com/azure/expressroute/howto-circuit-cli
"""
helps['network express-route auth'] = """
type: group
short-summary: Manage authentication of an ExpressRoute circuit.
long-summary: >
To learn more about ExpressRoute circuit authentication visit
https://docs.microsoft.com/azure/expressroute/howto-linkvnet-cli#connect-a-virtual-network-in-a-different-subscription-to-a-circuit
"""
helps['network express-route auth create'] = """
type: command
short-summary: Create a new link authorization for an ExpressRoute circuit.
examples:
- name: Create a new link authorization for an ExpressRoute circuit.
text: >
az network express-route auth create --circuit-name MyCircuit -g MyResourceGroup -n MyAuthorization
"""
helps['network express-route auth delete'] = """
type: command
short-summary: Delete a link authorization of an ExpressRoute circuit.
examples:
- name: Delete a link authorization of an ExpressRoute circuit.
text: >
az network express-route auth delete --circuit-name MyCircuit -g MyResourceGroup -n MyAuthorization
"""
helps['network express-route auth list'] = """
type: command
short-summary: List link authorizations of an ExpressRoute circuit.
examples:
- name: List link authorizations of an ExpressRoute circuit.
text: >
az network express-route auth list -g MyResourceGroup --circuit-name MyCircuit
- name: List link authorizations of an ExpressRoute circuit. (autogenerated)
text: |
az network express-route auth list --circuit-name MyCircuit --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network express-route auth show'] = """
type: command
short-summary: Get the details of a link authorization of an ExpressRoute circuit.
examples:
- name: Get the details of a link authorization of an ExpressRoute circuit.
text: >
az network express-route auth show -g MyResourceGroup --circuit-name MyCircuit -n MyAuthorization
"""
helps['network express-route create'] = """
type: command
short-summary: Create an ExpressRoute circuit.
parameters:
- name: --bandwidth
populator-commands:
- az network express-route list-service-providers
- name: --peering-location
populator-commands:
- az network express-route list-service-providers
- name: --provider
populator-commands:
- az network express-route list-service-providers
examples:
- name: Create an ExpressRoute circuit.
text: >
az network express-route create --bandwidth 200 -n MyCircuit --peering-location "Silicon Valley" -g MyResourceGroup --provider "Equinix" -l "West US" --sku-family MeteredData --sku-tier Standard
"""
helps['network express-route delete'] = """
type: command
short-summary: Delete an ExpressRoute circuit.
examples:
- name: Delete an ExpressRoute circuit.
text: >
az network express-route delete -n MyCircuit -g MyResourceGroup
- name: Delete an ExpressRoute circuit. (autogenerated)
text: |
az network express-route delete --name MyCircuit --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network express-route gateway'] = """
type: group
short-summary: Manage ExpressRoute gateways.
"""
helps['network express-route gateway connection'] = """
type: group
short-summary: Manage ExpressRoute gateway connections.
"""
helps['network express-route gateway connection create'] = """
type: command
short-summary: Create an ExpressRoute gateway connection.
examples:
- name: Create an ExpressRoute gateway connection.
text: |
az network express-route gateway connection create --gateway-name MyGateway -n MyExpressRouteConnection -g MyResourceGroup --peering /subscriptions/MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/expressRouteCircuits/MyCircuit/peerings/AzurePrivatePeering --associated-route-table /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 --propagated-route-tables /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable2 --labels label1 label2
- name: Create an ExpressRoute gateway connection. (autogenerated)
text: |
az network express-route gateway connection create --gateway-name MyGateway --name MyExpressRouteConnection --peering /subscriptions/MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/expressRouteCircuits/MyCircuit/peerings/AzurePrivatePeering --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway connection delete'] = """
type: command
short-summary: Delete an ExpressRoute gateway connection.
examples:
- name: Delete an ExpressRoute gateway connection. (autogenerated)
text: |
az network express-route gateway connection delete --gateway-name MyGateway --name MyExpressRouteConnection --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway connection list'] = """
type: command
short-summary: List ExpressRoute gateway connections.
examples:
- name: List ExpressRoute gateway connections. (autogenerated)
text: |
az network express-route gateway connection list --gateway-name MyGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway connection show'] = """
type: command
short-summary: Get the details of an ExpressRoute gateway connection.
examples:
- name: Get the details of an ExpressRoute gateway connection. (autogenerated)
text: |
az network express-route gateway connection show --gateway-name MyGateway --name MyExpressRouteConnection --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway connection update'] = """
type: command
short-summary: Update an ExpressRoute gateway connection.
examples:
- name: Update an ExpressRoute gateway connection.
text: |
az network express-route gateway connection update --gateway-name MyGateway -n MyExpressRouteConnection -g MyResourceGroup --peering /subscriptions/MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/expressRouteCircuits/MyCircuit/peerings/AzurePrivatePeering --associated-route-table /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 --propagated-route-tables /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable2 --labels label1 label2
"""
helps['network express-route gateway create'] = """
type: command
short-summary: Create an ExpressRoute gateway.
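examples:
- name: Create an ExpressRoute gateway in a virtual hub (illustrative; assumes an existing virtual hub named MyVirtualHub).
  text: az network express-route gateway create -g MyResourceGroup -n MyExpressRouteGateway --virtual-hub MyVirtualHub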
"""
helps['network express-route gateway delete'] = """
type: command
short-summary: Delete an ExpressRoute gateway.
examples:
- name: Delete an ExpressRoute gateway. (autogenerated)
text: |
az network express-route gateway delete --name MyExpressRouteGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway list'] = """
type: command
short-summary: List ExpressRoute gateways.
examples:
- name: List ExpressRoute gateways. (autogenerated)
text: |
az network express-route gateway list --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway show'] = """
type: command
short-summary: Get the details of an ExpressRoute gateway.
examples:
- name: Get the details of an ExpressRoute gateway. (autogenerated)
text: |
az network express-route gateway show --name MyExpressRouteGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway update'] = """
type: command
short-summary: Update settings of an ExpressRoute gateway.
"""
helps['network express-route get-stats'] = """
type: command
short-summary: Get the statistics of an ExpressRoute circuit.
examples:
- name: Get the statistics of an ExpressRoute circuit.
text: >
az network express-route get-stats -g MyResourceGroup -n MyCircuit
"""
helps['network express-route list'] = """
type: command
short-summary: List all ExpressRoute circuits for the current subscription.
examples:
- name: List all ExpressRoute circuits for the current subscription.
text: >
az network express-route list -g MyResourceGroup
"""
helps['network express-route list-arp-tables'] = """
type: command
short-summary: Show the current Address Resolution Protocol (ARP) table of an ExpressRoute circuit.
examples:
- name: Show the current Address Resolution Protocol (ARP) table of an ExpressRoute circuit.
text: |
az network express-route list-arp-tables -g MyResourceGroup -n MyCircuit \\
--path primary --peering-name AzurePrivatePeering
"""
helps['network express-route list-route-tables'] = """
type: command
short-summary: Show the current routing table of an ExpressRoute circuit peering.
examples:
- name: Show the current routing table of an ExpressRoute circuit peering.
text: |
az network express-route list-route-tables -g MyResourceGroup -n MyCircuit \\
--path primary --peering-name AzurePrivatePeering
"""
helps['network express-route list-service-providers'] = """
type: command
short-summary: List available ExpressRoute service providers.
examples:
- name: List available ExpressRoute service providers.
text: az network express-route list-service-providers
"""
helps['network express-route peering'] = """
type: group
short-summary: Manage ExpressRoute peering of an ExpressRoute circuit.
"""
helps['network express-route peering connection'] = """
type: group
short-summary: Manage ExpressRoute circuit connections.
"""
helps['network express-route peering connection create'] = """
type: command
short-summary: Create connections between two ExpressRoute circuits.
examples:
- name: Create connection between two ExpressRoute circuits with AzurePrivatePeering settings.
text: |
az network express-route peering connection create -g MyResourceGroup --circuit-name \\
MyCircuit --peering-name AzurePrivatePeering -n myConnection --peer-circuit \\
MyOtherCircuit --address-prefix 104.0.0.0/29
"""
helps['network express-route peering connection delete'] = """
type: command
short-summary: Delete an ExpressRoute circuit connection.
examples:
- name: Delete an ExpressRoute circuit connection. (autogenerated)
text: |
az network express-route peering connection delete --circuit-name MyCircuit --name MyPeeringConnection --peering-name MyPeering --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route peering connection show'] = """
type: command
short-summary: Get the details of an ExpressRoute circuit connection.
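examples:
- name: Get the details of an ExpressRoute circuit connection (illustrative resource names).
  text: az network express-route peering connection show -g MyResourceGroup --circuit-name MyCircuit --peering-name AzurePrivatePeering -n MyConnection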
"""
helps['network express-route peering create'] = """
type: command
short-summary: Create peering settings for an ExpressRoute circuit.
examples:
- name: Create Microsoft Peering settings with IPv4 configuration.
text: |
az network express-route peering create -g MyResourceGroup --circuit-name MyCircuit \\
--peering-type MicrosoftPeering --peer-asn 10002 --vlan-id 103 \\
--primary-peer-subnet 101.0.0.0/30 --secondary-peer-subnet 102.0.0.0/30 \\
--advertised-public-prefixes 101.0.0.0/30
- name: Create Azure private peering settings with IPv6 configuration.
  text: |
    az network express-route peering create -g MyResourceGroup --circuit-name MyCircuit \\
    --peering-type AzurePrivatePeering --peer-asn 10002 --vlan-id 103 --ip-version ipv6 \\
    --primary-peer-subnet 2002:db00::/126 --secondary-peer-subnet 2003:db00::/126
- name: Create peering settings for an ExpressRoute circuit. (autogenerated)
text: |
az network express-route peering create --circuit-name MyCircuit --peer-asn 10002 --peering-type AzurePublicPeering --primary-peer-subnet 101.0.0.0/30 --resource-group MyResourceGroup --secondary-peer-subnet 102.0.0.0/30 --shared-key Abc123 --vlan-id 103
crafted: true
"""
helps['network express-route peering delete'] = """
type: command
short-summary: Delete peering settings.
examples:
- name: Delete private peering.
text: >
az network express-route peering delete -g MyResourceGroup --circuit-name MyCircuit -n AzurePrivatePeering
"""
helps['network express-route peering list'] = """
type: command
short-summary: List peering settings of an ExpressRoute circuit.
examples:
- name: List peering settings of an ExpressRoute circuit.
text: >
az network express-route peering list -g MyResourceGroup --circuit-name MyCircuit
"""
helps['network express-route peering peer-connection'] = """
type: group
short-summary: Manage ExpressRoute circuit peer connections.
"""
helps['network express-route peering show'] = """
type: command
short-summary: Get the details of an ExpressRoute peering.
examples:
- name: Get private peering details of an ExpressRoute circuit.
text: >
az network express-route peering show -g MyResourceGroup --circuit-name MyCircuit -n AzurePrivatePeering
"""
helps['network express-route peering update'] = """
type: command
short-summary: Update peering settings of an ExpressRoute circuit.
examples:
- name: Add IPv6 Microsoft Peering settings to existing IPv4 config.
text: |
az network express-route peering update -g MyResourceGroup --circuit-name MyCircuit \\
--ip-version ipv6 --primary-peer-subnet 2002:db00::/126 \\
--secondary-peer-subnet 2003:db00::/126 --advertised-public-prefixes 2002:db00::/126
supported-profiles: latest
- name: Update peering settings of an ExpressRoute circuit. (autogenerated)
text: |
az network express-route peering update --circuit-name MyCircuit --name MyPeering --peer-asn 10002 --primary-peer-subnet 2002:db00::/126 --resource-group MyResourceGroup --secondary-peer-subnet 2003:db00::/126 --shared-key Abc123 --vlan-id 103
crafted: true
"""
helps['network express-route port'] = """
type: group
short-summary: Manage ExpressRoute ports.
"""
helps['network express-route port create'] = """
type: command
short-summary: Create an ExpressRoute port.
examples:
- name: Create an ExpressRoute port. (autogenerated)
text: |
az network express-route port create --bandwidth 200 --encapsulation Dot1Q --location westus2 --name MyExpressRoutePort --peering-location westus --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port delete'] = """
type: command
short-summary: Delete an ExpressRoute port.
examples:
- name: Delete an ExpressRoute port. (autogenerated)
text: |
az network express-route port delete --name MyExpressRoutePort --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port generate-loa'] = """
type: command
short-summary: Generate and download a letter of authorization for the requested ExpressRoute port.
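examples:
- name: Generate a letter of authorization for an ExpressRoute port (illustrative; the customer name is a placeholder).
  text: az network express-route port generate-loa -g MyResourceGroup -n MyExpressRoutePort --customer-name MyCustomerName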
"""
helps['network express-route port link'] = """
type: group
short-summary: View ExpressRoute links.
"""
helps['network express-route port link list'] = """
type: command
short-summary: List ExpressRoute links.
examples:
- name: List ExpressRoute links. (autogenerated)
text: |
az network express-route port link list --port-name MyPort --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port link show'] = """
type: command
short-summary: Get the details of an ExpressRoute link.
examples:
- name: Get the details of an ExpressRoute link. (autogenerated)
text: |
az network express-route port link show --name MyLinkExpressRoutePort --port-name MyPort --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port link update'] = """
type: command
short-summary: Manage MACsec configuration of an ExpressRoute Link.
examples:
- name: Enable MACsec on ExpressRoute Direct ports one at a time.
text: |-
az network express-route port link update \\
--resource-group MyResourceGroup \\
--port-name MyExpressRoutePort \\
--name link1 \\
--macsec-ckn-secret-identifier MacSecCKNSecretID \\
--macsec-cak-secret-identifier MacSecCAKSecretID \\
--macsec-cipher gcm-aes-128
- name: Enable the administrative state of an ExpressRoute Link.
text: |-
az network express-route port link update \\
--resource-group MyResourceGroup \\
--port-name MyExpressRoutePort \\
--name link2 \\
--admin-state Enabled
"""
helps['network express-route port list'] = """
type: command
short-summary: List ExpressRoute ports.
examples:
- name: List ExpressRoute ports. (autogenerated)
text: |
az network express-route port list --resource-group myresourcegroup
crafted: true
"""
helps['network express-route port location'] = """
type: group
short-summary: View ExpressRoute port location information.
"""
helps['network express-route port location list'] = """
type: command
short-summary: List ExpressRoute port locations.
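examples:
- name: List all ExpressRoute port locations.
  text: az network express-route port location list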
"""
helps['network express-route port location show'] = """
type: command
short-summary: Get the details of an ExpressRoute port location.
examples:
- name: Get the details of an ExpressRoute port location. (autogenerated)
text: |
az network express-route port location show --location westus2
crafted: true
"""
helps['network express-route port show'] = """
type: command
short-summary: Get the details of an ExpressRoute port.
examples:
- name: Get the details of an ExpressRoute port. (autogenerated)
text: |
az network express-route port show --name MyExpressRoutePort --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port update'] = """
type: command
short-summary: Update settings of an ExpressRoute port.
examples:
- name: Update settings of an ExpressRoute port (autogenerated)
text: |
az network express-route port update --name MyExpressRoutePort --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port identity'] = """
type: group
short-summary: Manage the managed service identity of an ExpressRoute Port.
"""
helps['network express-route port identity assign'] = """
type: command
short-summary: Assign a managed service identity to an ExpressRoute Port.
examples:
- name: Assign an identity to the ExpressRoute Port
  text: |-
    az network express-route port identity assign \\
    --resource-group MyResourceGroup \\
    --name MyExpressRoutePort \\
    --identity MyUserAssignedManagedServiceIdentity
"""
helps['network express-route port identity remove'] = """
type: command
short-summary: Remove the managed service identity of an ExpressRoute Port.
examples:
- name: Remove an identity of the ExpressRoute Port
text: az network express-route port identity remove -g MyResourceGroup --name MyExpressRoutePort
"""
helps['network express-route port identity show'] = """
type: command
short-summary: Show the managed service identity of an ExpressRoute Port.
examples:
- name: Show an identity of the ExpressRoute Port
text: az network express-route port identity show -g MyResourceGroup --name MyExpressRoutePort
"""
helps['network express-route show'] = """
type: command
short-summary: Get the details of an ExpressRoute circuit.
examples:
- name: Get the details of an ExpressRoute circuit.
text: >
az network express-route show -n MyCircuit -g MyResourceGroup
"""
helps['network express-route update'] = """
type: command
short-summary: Update settings of an ExpressRoute circuit.
examples:
- name: Change the SKU of an ExpressRoute circuit from Standard to Premium.
text: >
az network express-route update -n MyCircuit -g MyResourceGroup --sku-tier Premium
"""
helps['network express-route wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the ExpressRoute is met.
examples:
- name: Pause executing next line of CLI script until the ExpressRoute circuit is successfully provisioned.
text: az network express-route wait -n MyCircuit -g MyResourceGroup --created
"""
helps['network cross-region-lb'] = """
type: group
short-summary: Manage and configure cross-region load balancers.
long-summary: To learn more about Azure Load Balancer visit https://docs.microsoft.com/azure/load-balancer/load-balancer-get-started-internet-arm-cli
"""
helps['network cross-region-lb create'] = """
type: command
short-summary: Create a cross-region load balancer.
examples:
- name: Create a basic load balancer.
text: >
az network cross-region-lb create -g MyResourceGroup -n MyLb
"""
helps['network cross-region-lb update'] = """
type: command
short-summary: Update a cross-region load balancer.
long-summary: >
This command can only be used to update the tags for a load balancer. Name and resource group are immutable and cannot be updated.
examples:
- name: Update the tags of a load balancer.
text: az network cross-region-lb update -g MyResourceGroup -n MyLb --set tags.CostCenter=MyBusinessGroup
"""
helps['network cross-region-lb list'] = """
type: command
short-summary: List load balancers.
examples:
- name: List load balancers.
text: az network cross-region-lb list -g MyResourceGroup
"""
helps['network cross-region-lb wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the cross-region load balancer is met.
examples:
- name: Wait for load balancer to return as created.
text: |
az network cross-region-lb wait -g MyResourceGroup -n MyLB --created
"""
helps['network cross-region-lb address-pool'] = """
type: group
short-summary: Manage address pools of a cross-region load balancer.
"""
helps['network cross-region-lb address-pool create'] = """
type: command
short-summary: Create an address pool.
parameters:
- name: --backend-address
        short-summary: Backend address information for the backend address pool.
long-summary: |
Usage: --backend-address name=addr1 frontend-ip-address=regional_lb_resource_id
name: Required. The name of the backend address.
frontend-ip-address: Required. Resource id of a regional load balancer.
Multiple backend addresses can be specified by using more than one `--backend-address` argument.
- name: --backend-addresses-config-file --config-file
        short-summary: A config file used to set backend addresses. This argument is for experienced users. You may encounter parse errors if the JSON file is invalid.
long-summary: |
Usage: --backend-addresses-config-file @"{config_file.json}"
            An example config file is
[
{
"name": "address1",
"frontendIpAddress": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_lb_address_pool_addresses000001/providers/Microsoft.Network/loadBalancers/regional-lb/frontendIPConfigurations/fe-rlb1"
},
{
"name": "address2",
"frontendIpAddress": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_lb_address_pool_addresses000001/providers/Microsoft.Network/loadBalancers/regional-lb/frontendIPConfigurations/fe-rlb2"
}
]
examples:
- name: Create an address pool.
text: az network cross-region-lb address-pool create -g MyResourceGroup --lb-name MyLb -n MyAddressPool
- name: Create an address pool with several backend addresses using key-value arguments.
text: az network cross-region-lb address-pool create -g MyResourceGroup --lb-name MyLb -n MyAddressPool --backend-address name=addr1 frontend-ip-address=/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_lb_address_pool_addresses000001/providers/Microsoft.Network/loadBalancers/regional-lb/frontendIPConfigurations/fe-rlb1 --backend-address name=addr2 frontend-ip-address=/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_lb_address_pool_addresses000001/providers/Microsoft.Network/loadBalancers/regional-lb/frontendIPConfigurations/fe-rlb2
- name: Create an address pool with several backend addresses using config file
text: az network cross-region-lb address-pool create -g MyResourceGroup --lb-name MyLb -n MyAddressPool --backend-addresses-config-file @config_file.json
"""
helps['network cross-region-lb address-pool delete'] = """
type: command
short-summary: Delete an address pool.
examples:
- name: Delete an address pool.
text: az network cross-region-lb address-pool delete -g MyResourceGroup --lb-name MyLb -n MyAddressPool
"""
helps['network cross-region-lb address-pool address'] = """
type: group
    short-summary: Manage backend addresses of the cross-region load balancer backend address pool.
"""
helps['network cross-region-lb address-pool address add'] = """
type: command
    short-summary: Add one backend address into the load balancer backend address pool.
    examples:
    - name: Add one backend address into the load balancer backend address pool.
text: az network cross-region-lb address-pool address add -g MyResourceGroup --lb-name MyLb --pool-name MyAddressPool -n MyAddress --frontend-ip-address /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_lb_address_pool_addresses000001/providers/Microsoft.Network/loadBalancers/regional-lb/frontendIPConfigurations/fe-rlb2
"""
helps['network cross-region-lb address-pool address remove'] = """
type: command
    short-summary: Remove one backend address from the load balancer backend address pool.
    examples:
    - name: Remove one backend address from the load balancer backend address pool.
text: az network cross-region-lb address-pool address remove -g MyResourceGroup --lb-name MyLb --pool-name MyAddressPool -n MyAddress
"""
helps['network cross-region-lb address-pool address list'] = """
type: command
    short-summary: List all backend addresses of the load balancer backend address pool.
    examples:
    - name: List all backend addresses of the load balancer backend address pool.
text: az network cross-region-lb address-pool address list -g MyResourceGroup --lb-name MyLb --pool-name MyAddressPool
"""
helps['network cross-region-lb frontend-ip'] = """
type: group
short-summary: Manage frontend IP addresses of a cross-region load balancer.
"""
helps['network cross-region-lb frontend-ip create'] = """
type: command
short-summary: Create a frontend IP address.
examples:
- name: Create a frontend ip address for a public load balancer.
text: az network cross-region-lb frontend-ip create -g MyResourceGroup -n MyFrontendIp --lb-name MyLb --public-ip-address MyFrontendIp
"""
helps['network cross-region-lb frontend-ip delete'] = """
type: command
short-summary: Delete a frontend IP address.
examples:
- name: Delete a frontend IP address.
text: az network cross-region-lb frontend-ip delete -g MyResourceGroup --lb-name MyLb -n MyFrontendIp
"""
helps['network cross-region-lb frontend-ip list'] = """
type: command
short-summary: List frontend IP addresses.
examples:
- name: List frontend IP addresses.
text: az network cross-region-lb frontend-ip list -g MyResourceGroup --lb-name MyLb
"""
helps['network cross-region-lb frontend-ip show'] = """
type: command
short-summary: Get the details of a frontend IP address.
examples:
- name: Get the details of a frontend IP address.
text: az network cross-region-lb frontend-ip show -g MyResourceGroup --lb-name MyLb -n MyFrontendIp
"""
helps['network cross-region-lb frontend-ip update'] = """
type: command
short-summary: Update a frontend IP address.
examples:
- name: Update the frontend IP address of a public load balancer.
text: az network cross-region-lb frontend-ip update -g MyResourceGroup --lb-name MyLb -n MyFrontendIp --public-ip-address MyNewPublicIp
"""
helps['network cross-region-lb probe'] = """
type: group
short-summary: Evaluate probe information and define routing rules.
"""
helps['network cross-region-lb probe create'] = """
type: command
short-summary: Create a probe.
examples:
- name: Create a probe on a load balancer over HTTP and port 80.
text: |
az network cross-region-lb probe create -g MyResourceGroup --lb-name MyLb -n MyProbe \\
--protocol http --port 80 --path /
- name: Create a probe on a load balancer over TCP on port 443.
text: |
az network cross-region-lb probe create -g MyResourceGroup --lb-name MyLb -n MyProbe \\
--protocol tcp --port 443
"""
helps['network cross-region-lb probe delete'] = """
type: command
short-summary: Delete a probe.
examples:
- name: Delete a probe.
text: az network cross-region-lb probe delete -g MyResourceGroup --lb-name MyLb -n MyProbe
"""
helps['network cross-region-lb probe list'] = """
type: command
short-summary: List probes.
examples:
- name: List probes.
text: az network cross-region-lb probe list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network cross-region-lb probe show'] = """
type: command
short-summary: Get the details of a probe.
examples:
- name: Get the details of a probe.
text: az network cross-region-lb probe show -g MyResourceGroup --lb-name MyLb -n MyProbe
"""
helps['network cross-region-lb probe update'] = """
type: command
short-summary: Update a probe.
examples:
- name: Update a probe with a different port and interval.
text: az network cross-region-lb probe update -g MyResourceGroup --lb-name MyLb -n MyProbe --port 81 --interval 10
"""
helps['network cross-region-lb rule'] = """
type: group
short-summary: Manage cross-region load balancing rules.
"""
helps['network cross-region-lb rule create'] = """
type: command
short-summary: Create a load balancing rule.
examples:
- name: >
Create a load balancing rule that assigns a front-facing IP configuration and port to an address pool and port.
text: |
az network cross-region-lb rule create -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol Tcp \\
--frontend-ip-name MyFrontEndIp --frontend-port 80 \\
--backend-pool-name MyAddressPool --backend-port 80
- name: >
Create a load balancing rule that assigns a front-facing IP configuration and port to an address pool and port with the floating ip feature.
text: |
az network cross-region-lb rule create -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol Tcp \\
--frontend-ip-name MyFrontEndIp --backend-pool-name MyAddressPool \\
--floating-ip true --frontend-port 80 --backend-port 80
- name: >
Create an HA ports load balancing rule that assigns a frontend IP and port to use all available backend IPs in a pool on the same port.
text: |
az network cross-region-lb rule create -g MyResourceGroup --lb-name MyLb -n MyHAPortsRule \\
--protocol All --frontend-port 0 --backend-port 0 --frontend-ip-name MyFrontendIp \\
--backend-pool-name MyAddressPool
"""
helps['network cross-region-lb rule delete'] = """
type: command
short-summary: Delete a load balancing rule.
examples:
- name: Delete a load balancing rule.
text: az network cross-region-lb rule delete -g MyResourceGroup --lb-name MyLb -n MyLbRule
"""
helps['network cross-region-lb rule list'] = """
type: command
short-summary: List load balancing rules.
examples:
- name: List load balancing rules.
text: az network cross-region-lb rule list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network cross-region-lb rule show'] = """
type: command
short-summary: Get the details of a load balancing rule.
examples:
- name: Get the details of a load balancing rule.
text: az network cross-region-lb rule show -g MyResourceGroup --lb-name MyLb -n MyLbRule
"""
helps['network cross-region-lb rule update'] = """
type: command
short-summary: Update a load balancing rule.
examples:
- name: Update a load balancing rule to change the protocol to UDP.
text: az network cross-region-lb rule update -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol Udp
      - name: Update a load balancing rule to support HA ports.
        text: az network cross-region-lb rule update -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol All --frontend-port 0 --backend-port 0
"""
helps['network lb'] = """
type: group
short-summary: Manage and configure load balancers.
long-summary: |
        [Coming breaking change] In an upcoming release, the default behavior will change: when the SKU is Standard and the region is zone-redundant, the default 'zones' of 'frontendIPConfigurations' will display as 'zones:[1,2,3]' instead of 'zones:null'.
To learn more about Azure Load Balancer visit https://docs.microsoft.com/azure/load-balancer/load-balancer-get-started-internet-arm-cli
"""
helps['network lb wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the load balancer is met.
examples:
- name: Wait for load balancer to return as created.
text: |
az network lb wait -g MyResourceGroup -n MyLB --created
"""
helps['network lb address-pool'] = """
type: group
short-summary: Manage address pools of a load balancer.
"""
helps['network lb address-pool create'] = """
type: command
short-summary: Create an address pool.
parameters:
- name: --backend-address
        short-summary: Backend address information for the backend address pool. If it is used, either --vnet or a subnet is required.
long-summary: |
Usage1: --backend-address name=addr1 ip-address=10.0.0.1 --vnet MyVnet
Usage2: --backend-address name=addr1 ip-address=10.0.0.1 subnet=/subscriptions/000/resourceGroups/MyRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/subnet1
Usage3: --backend-address name=addr1 ip-address=10.0.0.1 subnet=subnet1 --vnet MyVnet
name: Required. The name of the backend address.
            ip-address: Required. IP address within the virtual network.
            subnet: Name or ID of the subnet.
Multiple backend addresses can be specified by using more than one `--backend-address` argument.
- name: --backend-addresses-config-file
        short-summary: A config file used to set backend addresses. This argument is for experienced users. You may encounter parse errors if the JSON file is invalid.
long-summary: |
Usage: --backend-addresses-config-file @"{config_file.json}"
            An example config file is
[
{
"name": "address1",
"virtualNetwork": "clitestvnet",
"ipAddress": "10.0.0.4"
},
{
"name": "address2",
"virtualNetwork": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_lb_address_pool_addresses000001/providers/Microsoft.Network/virtualNetworks/clitestvnet",
"ipAddress": "10.0.0.5"
},
{
"name": "address3",
"subnet": "subnet3",
"ipAddress": "10.0.0.6"
},
{
"name": "address4",
"subnet": "/subscriptions/000/resourceGroups/MyRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/subnet4",
"ipAddress": "10.0.0.7"
}
]
examples:
- name: Create an address pool.
text: az network lb address-pool create -g MyResourceGroup --lb-name MyLb -n MyAddressPool
- name: Create an address pool with several backend addresses using key-value arguments.
text: az network lb address-pool create -g MyResourceGroup --lb-name MyLb -n MyAddressPool --vnet {VnetResourceId} --backend-address name=addr1 ip-address=10.0.0.1 --backend-address name=addr2 ip-address=10.0.0.3
- name: Create an address pool with several backend addresses using config file
text: az network lb address-pool create -g MyResourceGroup --lb-name MyLb -n MyAddressPool --backend-addresses-config-file @config_file.json
"""
helps['network lb address-pool delete'] = """
type: command
short-summary: Delete an address pool.
examples:
- name: Delete an address pool.
text: az network lb address-pool delete -g MyResourceGroup --lb-name MyLb -n MyAddressPool
"""
helps['network lb address-pool list'] = """
type: command
short-summary: List address pools.
examples:
- name: List address pools.
text: az network lb address-pool list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network lb address-pool show'] = """
type: command
short-summary: Get the details of an address pool.
examples:
- name: Get the details of an address pool.
text: az network lb address-pool show -g MyResourceGroup --lb-name MyLb -n MyAddressPool
"""
helps['network lb address-pool address'] = """
type: group
    short-summary: Manage backend addresses of the load balancer backend address pool.
"""
helps['network lb address-pool address add'] = """
type: command
    short-summary: Add one backend address into the load balancer backend address pool.
    examples:
    - name: Add one backend address into the load balancer backend address pool.
text: az network lb address-pool address add -g MyResourceGroup --lb-name MyLb --pool-name MyAddressPool -n MyAddress --vnet MyVnet --ip-address 10.0.0.1
    - name: Add one backend address into the load balancer backend address pool with a subnet.
text: az network lb address-pool address add -g MyResourceGroup --lb-name MyLb --pool-name MyAddressPool -n MyAddress --subnet /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MyRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/subnet2 --ip-address 10.0.0.1
"""
helps['network lb address-pool address remove'] = """
type: command
    short-summary: Remove one backend address from the load balancer backend address pool.
    examples:
    - name: Remove one backend address from the load balancer backend address pool.
text: az network lb address-pool address remove -g MyResourceGroup --lb-name MyLb --pool-name MyAddressPool -n MyAddress
"""
helps['network lb address-pool address list'] = """
type: command
    short-summary: List all backend addresses of the load balancer backend address pool.
    examples:
    - name: List all backend addresses of the load balancer backend address pool.
text: az network lb address-pool address list -g MyResourceGroup --lb-name MyLb --pool-name MyAddressPool
"""
helps['network lb create'] = """
type: command
short-summary: Create a load balancer.
examples:
- name: Create a basic load balancer.
text: >
az network lb create -g MyResourceGroup -n MyLb --sku Basic
    - name: Create a basic load balancer on a specific virtual network and subnet. If a virtual network with the same name is found in the same resource group, the load balancer will use this virtual network. If one is not found, a new one will be created.
text: >
az network lb create -g MyResourceGroup -n MyLb --sku Basic --vnet-name MyVnet --subnet MySubnet
    - name: Create a basic load balancer on a subnet of a pre-existing virtual network. The subnet can be in an arbitrary resource group or subscription by providing the ID of the subnet.
text: >
az network lb create -g MyResourceGroup -n MyLb --sku Basic --subnet {subnetID}
    - name: Create a basic zone-flavored internal load balancer by provisioning a zonal public IP.
text: >
az network lb create -g MyResourceGroup -n MyLb --sku Basic --public-ip-zone 2
- name: >
      Create a standard zone-flavored public-facing load balancer by provisioning a zonal frontend IP configuration and VNet.
text: >
az network lb create -g MyResourceGroup -n MyLb --sku Standard --frontend-ip-zone 1 --vnet-name MyVnet --subnet MySubnet
"""
helps['network lb delete'] = """
type: command
short-summary: Delete a load balancer.
examples:
- name: Delete a load balancer.
text: az network lb delete -g MyResourceGroup -n MyLb
"""
helps['network lb frontend-ip'] = """
type: group
short-summary: Manage frontend IP addresses of a load balancer.
"""
helps['network lb frontend-ip create'] = """
type: command
short-summary: Create a frontend IP address.
examples:
- name: Create a frontend ip address for a public load balancer.
text: az network lb frontend-ip create -g MyResourceGroup -n MyFrontendIp --lb-name MyLb --public-ip-address MyFrontendIp
- name: Create a frontend ip address for an internal load balancer.
text: |
az network lb frontend-ip create -g MyResourceGroup -n MyFrontendIp --lb-name MyLb \\
--private-ip-address 10.10.10.100 --subnet MySubnet --vnet-name MyVnet
"""
helps['network lb frontend-ip delete'] = """
type: command
short-summary: Delete a frontend IP address.
examples:
- name: Delete a frontend IP address.
text: az network lb frontend-ip delete -g MyResourceGroup --lb-name MyLb -n MyFrontendIp
"""
helps['network lb frontend-ip list'] = """
type: command
short-summary: List frontend IP addresses.
examples:
- name: List frontend IP addresses.
text: az network lb frontend-ip list -g MyResourceGroup --lb-name MyLb
"""
helps['network lb frontend-ip show'] = """
type: command
short-summary: Get the details of a frontend IP address.
examples:
- name: Get the details of a frontend IP address.
text: az network lb frontend-ip show -g MyResourceGroup --lb-name MyLb -n MyFrontendIp
- name: Get the details of a frontend IP address (autogenerated)
text: |
az network lb frontend-ip show --lb-name MyLb --name MyFrontendIp --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network lb frontend-ip update'] = """
type: command
short-summary: Update a frontend IP address.
examples:
- name: Update the frontend IP address of a public load balancer.
text: az network lb frontend-ip update -g MyResourceGroup --lb-name MyLb -n MyFrontendIp --public-ip-address MyNewPublicIp
- name: Update the frontend IP address of an internal load balancer.
text: az network lb frontend-ip update -g MyResourceGroup --lb-name MyLb -n MyFrontendIp --private-ip-address 10.10.10.50
- name: Update a frontend IP address. (autogenerated)
text: |
az network lb frontend-ip update --lb-name MyLb --name MyFrontendIp --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup
crafted: true
"""
helps['network lb inbound-nat-pool'] = """
type: group
short-summary: Manage inbound NAT address pools of a load balancer.
"""
helps['network lb inbound-nat-pool create'] = """
type: command
short-summary: Create an inbound NAT address pool.
examples:
- name: Create an inbound NAT address pool.
text: |
az network lb inbound-nat-pool create -g MyResourceGroup --lb-name MyLb \\
-n MyNatPool --protocol Tcp --frontend-port-range-start 80 --frontend-port-range-end 89 \\
--backend-port 80 --frontend-ip-name MyFrontendIp
"""
helps['network lb inbound-nat-pool delete'] = """
type: command
short-summary: Delete an inbound NAT address pool.
examples:
- name: Delete an inbound NAT address pool.
text: az network lb inbound-nat-pool delete -g MyResourceGroup --lb-name MyLb -n MyNatPool
"""
helps['network lb inbound-nat-pool list'] = """
type: command
short-summary: List inbound NAT address pools.
examples:
- name: List inbound NAT address pools.
text: az network lb inbound-nat-pool list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network lb inbound-nat-pool show'] = """
type: command
short-summary: Get the details of an inbound NAT address pool.
examples:
- name: Get the details of an inbound NAT address pool.
text: az network lb inbound-nat-pool show -g MyResourceGroup --lb-name MyLb -n MyNatPool
"""
helps['network lb inbound-nat-pool update'] = """
type: command
short-summary: Update an inbound NAT address pool.
examples:
- name: Update an inbound NAT address pool to a different backend port.
text: |
az network lb inbound-nat-pool update -g MyResourceGroup --lb-name MyLb -n MyNatPool \\
--protocol Tcp --backend-port 8080
- name: Update an inbound NAT address pool. (autogenerated)
text: |
az network lb inbound-nat-pool update --backend-port 8080 --enable-tcp-reset true --frontend-port-range-end 89 --frontend-port-range-start 80 --lb-name MyLb --name MyNatPool --resource-group MyResourceGroup
crafted: true
- name: Update an inbound NAT address pool. (autogenerated)
text: |
az network lb inbound-nat-pool update --enable-tcp-reset true --lb-name MyLb --name MyNatPool --protocol Udp --resource-group MyResourceGroup
crafted: true
- name: Update an inbound NAT address pool. (autogenerated)
text: |
az network lb inbound-nat-pool update --backend-port 8080 --floating-ip true --frontend-port-range-end 89 --frontend-port-range-start 80 --lb-name MyLb --name MyNatPool --protocol Udp --resource-group MyResourceGroup
crafted: true
"""
helps['network lb inbound-nat-rule'] = """
type: group
short-summary: Manage inbound NAT rules of a load balancer.
"""
helps['network lb inbound-nat-rule create'] = """
type: command
short-summary: Create an inbound NAT rule.
examples:
- name: Create a basic inbound NAT rule for port 80.
text: |
az network lb inbound-nat-rule create -g MyResourceGroup --lb-name MyLb -n MyNatRule \\
--protocol Tcp --frontend-port 80 --backend-port 80
- name: Create a basic inbound NAT rule for a specific frontend IP and enable floating IP for NAT Rule.
text: |
az network lb inbound-nat-rule create -g MyResourceGroup --lb-name MyLb -n MyNatRule --protocol Tcp \\
--frontend-port 5432 --backend-port 3389 --frontend-ip-name MyFrontendIp --floating-ip true
"""
helps['network lb inbound-nat-rule delete'] = """
type: command
short-summary: Delete an inbound NAT rule.
examples:
- name: Delete an inbound NAT rule.
text: az network lb inbound-nat-rule delete -g MyResourceGroup --lb-name MyLb -n MyNatRule
"""
helps['network lb inbound-nat-rule list'] = """
type: command
short-summary: List inbound NAT rules.
examples:
- name: List inbound NAT rules.
text: az network lb inbound-nat-rule list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network lb inbound-nat-rule show'] = """
type: command
short-summary: Get the details of an inbound NAT rule.
examples:
- name: Get the details of an inbound NAT rule.
text: az network lb inbound-nat-rule show -g MyResourceGroup --lb-name MyLb -n MyNatRule
"""
helps['network lb inbound-nat-rule update'] = """
type: command
short-summary: Update an inbound NAT rule.
examples:
- name: Update an inbound NAT rule to disable floating IP and modify idle timeout duration.
text: |
az network lb inbound-nat-rule update -g MyResourceGroup --lb-name MyLb -n MyNatRule \\
--floating-ip false --idle-timeout 5
- name: Update an inbound NAT rule. (autogenerated)
text: |
az network lb inbound-nat-rule update --backend-port 3389 --frontend-port 5432 --lb-name MyLb --name MyNatRule --protocol Udp --resource-group MyResourceGroup
crafted: true
- name: Update an inbound NAT rule. (autogenerated)
text: |
az network lb inbound-nat-rule update --lb-name MyLb --name MyNatRule --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup
crafted: true
"""
helps['network lb list'] = """
type: command
short-summary: List load balancers.
examples:
- name: List load balancers.
text: az network lb list -g MyResourceGroup
"""
helps['network lb outbound-rule'] = """
type: group
short-summary: Manage outbound rules of a load balancer.
"""
helps['network lb outbound-rule create'] = """
type: command
short-summary: Create an outbound-rule.
examples:
- name: Create an outbound-rule. (autogenerated)
text: |
az network lb outbound-rule create --address-pool MyAddressPool --frontend-ip-configs myfrontendoutbound --idle-timeout 5 --lb-name MyLb --name MyOutboundRule --outbound-ports 10000 --protocol Udp --resource-group MyResourceGroup
crafted: true
"""
helps['network lb outbound-rule delete'] = """
type: command
short-summary: Delete an outbound-rule.
examples:
- name: Delete an outbound-rule. (autogenerated)
text: |
az network lb outbound-rule delete --lb-name MyLb --name MyOutboundRule --resource-group MyResourceGroup
crafted: true
"""
helps['network lb outbound-rule list'] = """
type: command
short-summary: List outbound rules.
examples:
- name: List outbound rules. (autogenerated)
text: |
az network lb outbound-rule list --lb-name MyLb --resource-group MyResourceGroup
crafted: true
"""
helps['network lb outbound-rule show'] = """
type: command
short-summary: Get the details of an outbound rule.
examples:
- name: Get the details of an outbound rule. (autogenerated)
text: |
az network lb outbound-rule show --lb-name MyLb --name MyOutboundRule --resource-group MyResourceGroup
crafted: true
"""
helps['network lb outbound-rule update'] = """
type: command
short-summary: Update an outbound-rule.
examples:
- name: Update an outbound-rule. (autogenerated)
text: |
az network lb outbound-rule update --lb-name MyLb --name MyOutboundRule --outbound-ports 10000 --resource-group MyResourceGroup
crafted: true
"""
helps['network lb probe'] = """
type: group
short-summary: Evaluate probe information and define routing rules.
"""
helps['network lb probe create'] = """
type: command
short-summary: Create a probe.
examples:
- name: Create a probe on a load balancer over HTTP and port 80.
text: |
az network lb probe create -g MyResourceGroup --lb-name MyLb -n MyProbe \\
--protocol http --port 80 --path /
- name: Create a probe on a load balancer over TCP on port 443.
text: |
az network lb probe create -g MyResourceGroup --lb-name MyLb -n MyProbe \\
--protocol tcp --port 443
"""
helps['network lb probe delete'] = """
type: command
short-summary: Delete a probe.
examples:
- name: Delete a probe.
text: az network lb probe delete -g MyResourceGroup --lb-name MyLb -n MyProbe
"""
helps['network lb probe list'] = """
type: command
short-summary: List probes.
examples:
- name: List probes.
text: az network lb probe list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network lb probe show'] = """
type: command
short-summary: Get the details of a probe.
examples:
- name: Get the details of a probe.
text: az network lb probe show -g MyResourceGroup --lb-name MyLb -n MyProbe
"""
helps['network lb probe update'] = """
type: command
short-summary: Update a probe.
examples:
- name: Update a probe with a different port and interval.
text: az network lb probe update -g MyResourceGroup --lb-name MyLb -n MyProbe --port 81 --interval 10
- name: Update a probe. (autogenerated)
text: |
az network lb probe update --lb-name MyLb --name MyProbe --port 81 --protocol Http --resource-group MyResourceGroup
crafted: true
"""
helps['network lb rule'] = """
type: group
short-summary: Manage load balancing rules.
"""
helps['network lb rule create'] = """
type: command
short-summary: Create a load balancing rule.
examples:
- name: >
Create a load balancing rule that assigns a front-facing IP configuration and port to an address pool and port.
text: |
az network lb rule create -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol Tcp \\
--frontend-ip-name MyFrontEndIp --frontend-port 80 \\
--backend-pool-name MyAddressPool --backend-port 80
- name: >
Create a load balancing rule that assigns a front-facing IP configuration and port to an address pool and port with the floating ip feature.
text: |
az network lb rule create -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol Tcp \\
--frontend-ip-name MyFrontEndIp --backend-pool-name MyAddressPool \\
--floating-ip true --frontend-port 80 --backend-port 80
- name: >
Create an HA ports load balancing rule that assigns a frontend IP and port to use all available backend IPs in a pool on the same port.
text: |
az network lb rule create -g MyResourceGroup --lb-name MyLb -n MyHAPortsRule \\
--protocol All --frontend-port 0 --backend-port 0 --frontend-ip-name MyFrontendIp \\
--backend-pool-name MyAddressPool
"""
helps['network lb rule delete'] = """
type: command
short-summary: Delete a load balancing rule.
examples:
- name: Delete a load balancing rule.
text: az network lb rule delete -g MyResourceGroup --lb-name MyLb -n MyLbRule
"""
helps['network lb rule list'] = """
type: command
short-summary: List load balancing rules.
examples:
- name: List load balancing rules.
text: az network lb rule list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network lb rule show'] = """
type: command
short-summary: Get the details of a load balancing rule.
examples:
- name: Get the details of a load balancing rule.
text: az network lb rule show -g MyResourceGroup --lb-name MyLb -n MyLbRule
"""
helps['network lb rule update'] = """
type: command
short-summary: Update a load balancing rule.
examples:
- name: Update a load balancing rule to change the protocol to UDP.
text: az network lb rule update -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol Udp
      - name: Update a load balancing rule to support HA ports.
        text: az network lb rule update -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol All --frontend-port 0 --backend-port 0
- name: Update a load balancing rule. (autogenerated)
text: |
az network lb rule update --disable-outbound-snat true --lb-name MyLb --name MyLbRule --resource-group MyResourceGroup
crafted: true
- name: Update a load balancing rule. (autogenerated)
text: |
az network lb rule update --idle-timeout 5 --lb-name MyLb --name MyLbRule --resource-group MyResourceGroup
crafted: true
"""
helps['network lb show'] = """
type: command
short-summary: Get the details of a load balancer.
examples:
- name: Get the details of a load balancer.
text: az network lb show -g MyResourceGroup -n MyLb
"""
helps['network lb update'] = """
type: command
short-summary: Update a load balancer.
long-summary: >
This command can only be used to update the tags for a load balancer. Name and resource group are immutable and cannot be updated.
examples:
- name: Update the tags of a load balancer.
text: az network lb update -g MyResourceGroup -n MyLb --set tags.CostCenter=MyBusinessGroup
"""
helps['network list-service-tags'] = """
type: command
    short-summary: List all service tags which belong to different resources
long-summary: >
A service tag represents a group of IP address prefixes to help minimize complexity for security rule creation.
To learn more about list-service-tags, visit https://docs.microsoft.com/en-us/azure/virtual-network/security-overview#service-tags. \\
Note that the location parameter is used as a reference for version (not as a filter based on location).
For example, even if you specify --location eastus2 you will get the list of service tags with prefix details across all regions but limited to the cloud that your subscription belongs to (i.e. Public, US government, China or Germany).
examples:
- name: Gets a list of service tag information resources. (autogenerated)
text: |
az network list-service-tags --location westus2
crafted: true
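    # Illustrative query; assumes the command output exposes a 'values' array whose items carry a 'name' property.
    - name: List service tag information and filter the output to a single service tag by name.
      text: az network list-service-tags --location westus2 --query "values[?name=='Storage']"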
"""
helps['network list-usages'] = """
type: command
short-summary: List the number of network resources in a region that are used against a subscription quota.
examples:
- name: List the provisioned network resources in East US region within a subscription.
text: az network list-usages --location eastus -o table
"""
helps['network local-gateway'] = """
type: group
short-summary: Manage local gateways.
long-summary: >
For more information on local gateways, visit: https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-howto-site-to-site-resource-manager-cli#localnet
"""
helps['network local-gateway create'] = """
type: command
short-summary: Create a local VPN gateway.
examples:
- name: Create a Local Network Gateway to represent your on-premises site.
text: |
az network local-gateway create -g MyResourceGroup -n MyLocalGateway \\
--gateway-ip-address 23.99.221.164 --local-address-prefixes 10.0.0.0/24 20.0.0.0/24
"""
helps['network local-gateway delete'] = """
type: command
short-summary: Delete a local VPN gateway.
long-summary: >
In order to delete a Local Network Gateway, you must first delete ALL Connection objects in Azure
        that are connected to the Gateway, as shown in the two-step example below. After deleting the Gateway, proceed to delete any other resources that are no longer in use.
For more information, follow the order of instructions on this page: https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-delete-vnet-gateway-portal
examples:
    - name: Delete a local network gateway.
text: az network local-gateway delete -g MyResourceGroup -n MyLocalGateway
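    # Illustrative sketch of the deletion order described above; 'MyConnection' is a placeholder connection name.
    - name: Delete the connection that uses the local network gateway first, then delete the gateway itself.
      text: |
        az network vpn-connection delete -g MyResourceGroup -n MyConnection
        az network local-gateway delete -g MyResourceGroup -n MyLocalGateway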
- name: Delete a local VPN gateway. (autogenerated)
text: |
az network local-gateway delete --name MyLocalGateway --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network local-gateway list'] = """
type: command
short-summary: List all local VPN gateways in a resource group.
examples:
- name: List all local VPN gateways in a resource group.
text: az network local-gateway list -g MyResourceGroup
"""
helps['network local-gateway show'] = """
type: command
short-summary: Get the details of a local VPN gateway.
examples:
- name: Get the details of a local VPN gateway.
text: az network local-gateway show -g MyResourceGroup -n MyLocalGateway
"""
helps['network local-gateway update'] = """
type: command
short-summary: Update a local VPN gateway.
examples:
- name: Update a Local Network Gateway provisioned with a 10.0.0.0/24 address prefix with additional prefixes.
text: |
az network local-gateway update -g MyResourceGroup -n MyLocalGateway \\
--local-address-prefixes 10.0.0.0/24 20.0.0.0/24 30.0.0.0/24
- name: Update a local VPN gateway. (autogenerated)
text: |
az network local-gateway update --gateway-ip-address 23.99.221.164 --name MyLocalGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network local-gateway wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the local gateway is met.
examples:
- name: Wait for Local Network Gateway to return as created.
text: |
az network local-gateway wait -g MyResourceGroup -n MyLocalGateway --created
"""
helps['network nic'] = """
type: group
short-summary: Manage network interfaces.
long-summary: >
To learn more about network interfaces in Azure visit https://docs.microsoft.com/azure/virtual-network/virtual-network-network-interface
"""
helps['network nic create'] = """
type: command
short-summary: Create a network interface.
examples:
- name: Create a network interface for a specified subnet on a specified virtual network.
text: >
az network nic create -g MyResourceGroup --vnet-name MyVnet --subnet MySubnet -n MyNic
- name: >
Create a network interface for a specified subnet on a virtual network which allows
IP forwarding subject to a network security group.
text: |
az network nic create -g MyResourceGroup --vnet-name MyVnet --subnet MySubnet -n MyNic \\
--ip-forwarding --network-security-group MyNsg
- name: >
Create a network interface for a specified subnet on a virtual network with network security group and application security groups.
text: |
az network nic create -g MyResourceGroup --vnet-name MyVnet --subnet MySubnet -n MyNic \\
--network-security-group MyNsg --application-security-groups Web App
"""
helps['network nic delete'] = """
type: command
short-summary: Delete a network interface.
examples:
- name: Delete a network interface.
text: >
az network nic delete -g MyResourceGroup -n MyNic
"""
helps['network nic ip-config'] = """
type: group
short-summary: Manage IP configurations of a network interface.
"""
helps['network nic ip-config address-pool'] = """
type: group
short-summary: Manage address pools in an IP configuration.
"""
helps['network nic ip-config address-pool add'] = """
type: command
short-summary: Add an address pool to an IP configuration.
examples:
- name: Add an address pool to an IP configuration.
text: |
az network nic ip-config address-pool add -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --address-pool MyAddressPool
- name: Add an address pool to an IP configuration. (autogenerated)
text: |
az network nic ip-config address-pool add --address-pool MyAddressPool --ip-config-name MyIpConfig --lb-name MyLb --nic-name MyNic --resource-group MyResourceGroup
crafted: true
"""
helps['network nic ip-config address-pool remove'] = """
type: command
short-summary: Remove an address pool of an IP configuration.
examples:
- name: Remove an address pool of an IP configuration.
text: |
az network nic ip-config address-pool remove -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --address-pool MyAddressPool
- name: Remove an address pool of an IP configuration. (autogenerated)
text: |
az network nic ip-config address-pool remove --address-pool MyAddressPool --ip-config-name MyIpConfig --lb-name MyLb --nic-name MyNic --resource-group MyResourceGroup
crafted: true
"""
helps['network nic ip-config create'] = """
type: command
short-summary: Create an IP configuration.
long-summary: >
You must have the Microsoft.Network/AllowMultipleIpConfigurationsPerNic feature enabled for your subscription.
Only one configuration may be designated as the primary IP configuration per NIC, using the `--make-primary` flag.
examples:
- name: Create a primary IP configuration for a NIC.
text: az network nic ip-config create -g MyResourceGroup -n MyIpConfig --nic-name MyNic --make-primary
- name: Create an IP configuration. (autogenerated)
text: |
az network nic ip-config create --name MyIpConfig --nic-name MyNic --private-ip-address 10.0.0.9 --resource-group MyResourceGroup
crafted: true
"""
helps['network nic ip-config delete'] = """
type: command
short-summary: Delete an IP configuration.
long-summary: A NIC must have at least one IP configuration.
examples:
- name: Delete an IP configuration.
text: az network nic ip-config delete -g MyResourceGroup -n MyIpConfig --nic-name MyNic
"""
helps['network nic ip-config inbound-nat-rule'] = """
type: group
short-summary: Manage inbound NAT rules of an IP configuration.
"""
helps['network nic ip-config inbound-nat-rule add'] = """
type: command
short-summary: Add an inbound NAT rule to an IP configuration.
examples:
- name: Add an inbound NAT rule to an IP configuration.
text: |
az network nic ip-config inbound-nat-rule add -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --inbound-nat-rule MyNatRule
- name: Add an inbound NAT rule to an IP configuration. (autogenerated)
text: |
az network nic ip-config inbound-nat-rule add --inbound-nat-rule MyNatRule --ip-config-name MyIpConfig --lb-name MyLb --nic-name MyNic --resource-group MyResourceGroup
crafted: true
"""
helps['network nic ip-config inbound-nat-rule remove'] = """
type: command
short-summary: Remove an inbound NAT rule of an IP configuration.
examples:
- name: Remove an inbound NAT rule of an IP configuration.
text: |
az network nic ip-config inbound-nat-rule remove -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --inbound-nat-rule MyNatRule
- name: Remove an inbound NAT rule of an IP configuration. (autogenerated)
text: |
az network nic ip-config inbound-nat-rule remove --inbound-nat-rule MyNatRule --ip-config-name MyIpConfig --lb-name MyLb --nic-name MyNic --resource-group MyResourceGroup
crafted: true
"""
helps['network nic ip-config list'] = """
type: command
short-summary: List the IP configurations of a NIC.
examples:
- name: List the IP configurations of a NIC.
text: az network nic ip-config list -g MyResourceGroup --nic-name MyNic
"""
helps['network nic ip-config show'] = """
type: command
short-summary: Show the details of an IP configuration.
examples:
- name: Show the details of an IP configuration of a NIC.
text: az network nic ip-config show -g MyResourceGroup -n MyIpConfig --nic-name MyNic
"""
helps['network nic ip-config update'] = """
type: command
short-summary: Update an IP configuration.
examples:
- name: Update a NIC to use a new private IP address.
text: |
az network nic ip-config update -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --private-ip-address 10.0.0.9
- name: Make an IP configuration the default for the supplied NIC.
text: |
az network nic ip-config update -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --make-primary
- name: Update an IP configuration. (autogenerated)
text: |
az network nic ip-config update --name MyIpConfig --nic-name MyNic --public-ip-address MyAppGatewayPublicIp --resource-group MyResourceGroup
crafted: true
"""
helps['network nic list'] = """
type: command
short-summary: List network interfaces.
long-summary: >
To list network interfaces attached to VMs in VM scale sets use 'az vmss nic list' or 'az vmss nic list-vm-nics'.
examples:
- name: List all NICs by internal DNS suffix.
text: >
az network nic list --query "[?dnsSettings.internalDomainNameSuffix=`{dnsSuffix}`]"
"""
helps['network nic list-effective-nsg'] = """
type: command
short-summary: List all effective network security groups applied to a network interface.
long-summary: >
To learn more about troubleshooting using effective security rules visit https://docs.microsoft.com/azure/virtual-network/virtual-network-nsg-troubleshoot-portal
examples:
- name: List the effective security groups associated with a NIC.
text: az network nic list-effective-nsg -g MyResourceGroup -n MyNic
"""
helps['network nic show'] = """
type: command
short-summary: Get the details of a network interface.
examples:
- name: Get the internal domain name suffix of a NIC.
text: az network nic show -g MyResourceGroup -n MyNic --query "dnsSettings.internalDomainNameSuffix"
"""
helps['network nic show-effective-route-table'] = """
type: command
short-summary: Show the effective route table applied to a network interface.
long-summary: >
To learn more about troubleshooting using the effective route tables visit
https://docs.microsoft.com/azure/virtual-network/virtual-network-routes-troubleshoot-portal#using-effective-routes-to-troubleshoot-vm-traffic-flow
examples:
- name: Show the effective routes applied to a network interface.
text: az network nic show-effective-route-table -g MyResourceGroup -n MyNic
"""
helps['network nic update'] = """
type: command
short-summary: Update a network interface.
examples:
- name: Update a network interface to use a different network security group.
text: az network nic update -g MyResourceGroup -n MyNic --network-security-group MyNewNsg
- name: Update a network interface. (autogenerated)
text: |
az network nic update --accelerated-networking true --name MyNic --resource-group MyResourceGroup
crafted: true
"""
helps['network nic wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the network interface is met.
examples:
- name: Pause CLI until the network interface is created.
text: az network nic wait -g MyResourceGroup -n MyNic --created
- name: Place the CLI in a waiting state until a condition of the network interface is met. (autogenerated)
text: |
az network nic wait --deleted --name MyNic --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network nsg'] = """
type: group
short-summary: Manage Azure Network Security Groups (NSGs).
long-summary: >
You can control network traffic to resources in a virtual network using a network security group.
A network security group contains a list of security rules that allow or deny inbound or
outbound network traffic based on source or destination IP addresses, Application Security
Groups, ports, and protocols. For more information visit https://docs.microsoft.com/azure/virtual-network/virtual-networks-create-nsg-arm-cli
"""
helps['network nsg create'] = """
type: command
short-summary: Create a network security group.
examples:
- name: Create an NSG in a resource group within a region with tags.
text: az network nsg create -g MyResourceGroup -n MyNsg --tags super_secure no_80 no_22
"""
helps['network nsg delete'] = """
type: command
short-summary: Delete a network security group.
examples:
- name: Delete an NSG in a resource group.
text: az network nsg delete -g MyResourceGroup -n MyNsg
"""
helps['network nsg list'] = """
type: command
short-summary: List network security groups.
examples:
- name: List all NSGs in the 'westus' region.
text: az network nsg list --query "[?location=='westus']"
"""
helps['network nsg rule'] = """
type: group
short-summary: Manage network security group rules.
"""
helps['network nsg rule create'] = """
type: command
short-summary: Create a network security group rule.
examples:
- name: Create a basic "Allow" NSG rule with the highest priority.
text: >
az network nsg rule create -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule --priority 100
- name: Create a "Deny" rule over TCP for a specific IP address range with the lowest priority.
text: |
az network nsg rule create -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule --priority 4096 \\
          --source-address-prefixes 208.130.28.0/24 --source-port-ranges 80 \\
--destination-address-prefixes '*' --destination-port-ranges 80 8080 --access Deny \\
--protocol Tcp --description "Deny from specific IP address ranges on 80 and 8080."
- name: Create a security rule using service tags. For more details visit https://aka.ms/servicetags
text: |
az network nsg rule create -g MyResourceGroup --nsg-name MyNsg -n MyNsgRuleWithTags \\
--priority 400 --source-address-prefixes VirtualNetwork --destination-address-prefixes Storage \\
--destination-port-ranges '*' --direction Outbound --access Allow --protocol Tcp --description "Allow VirtualNetwork to Storage."
    - name: Create a security rule using application security groups. For more details visit https://aka.ms/applicationsecuritygroups
text: |
az network nsg rule create -g MyResourceGroup --nsg-name MyNsg -n MyNsgRuleWithAsg \\
--priority 500 --source-address-prefixes Internet --destination-port-ranges 80 8080 \\
--destination-asgs Web --access Allow --protocol Tcp --description "Allow Internet to Web ASG on ports 80,8080."
"""
helps['network nsg rule delete'] = """
type: command
short-summary: Delete a network security group rule.
examples:
- name: Delete a network security group rule.
text: az network nsg rule delete -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule
"""
helps['network nsg rule list'] = """
type: command
short-summary: List all rules in a network security group.
examples:
- name: List all rules in a network security group.
text: az network nsg rule list -g MyResourceGroup --nsg-name MyNsg
"""
helps['network nsg rule show'] = """
type: command
short-summary: Get the details of a network security group rule.
examples:
- name: Get the details of a network security group rule.
text: az network nsg rule show -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule
"""
helps['network nsg rule update'] = """
type: command
short-summary: Update a network security group rule.
examples:
- name: Update an NSG rule with a new wildcard destination address prefix.
text: az network nsg rule update -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule --destination-address-prefix '*'
- name: Update a network security group rule. (autogenerated)
text: |
        az network nsg rule update --name MyNsgRule --nsg-name MyNsg --resource-group MyResourceGroup --source-address-prefixes 208.130.28.0/24
crafted: true
"""
helps['network nsg show'] = """
type: command
short-summary: Get information about a network security group.
examples:
- name: Get basic information about an NSG.
text: az network nsg show -g MyResourceGroup -n MyNsg
- name: Get the default security rules of an NSG and format the output as a table.
text: az network nsg show -g MyResourceGroup -n MyNsg --query "defaultSecurityRules[]" -o table
- name: Get all default NSG rules with "Allow" access and format the output as a table.
text: az network nsg show -g MyResourceGroup -n MyNsg --query "defaultSecurityRules[?access=='Allow']" -o table
"""
helps['network nsg update'] = """
type: command
short-summary: Update a network security group.
long-summary: >
This command can only be used to update the tags of an NSG. Name and resource group are immutable and cannot be updated.
examples:
- name: Remove a tag of an NSG.
text: az network nsg update -g MyResourceGroup -n MyNsg --remove tags.no_80
- name: Update a network security group. (autogenerated)
text: |
az network nsg update --name MyNsg --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup
crafted: true
"""
helps['network private-endpoint'] = """
type: group
short-summary: Manage private endpoints.
"""
helps['network private-endpoint create'] = """
type: command
short-summary: Create a private endpoint.
examples:
- name: Create a private endpoint.
      text: az network private-endpoint create -g MyResourceGroup -n MyPE --vnet-name MyVnetName --subnet MySubnet --private-connection-resource-id "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MyResourceGroup/providers/Microsoft.Network/privateLinkServices/MyPLS" --connection-name MyConnectionName -l centralus
"""
helps['network private-endpoint delete'] = """
type: command
short-summary: Delete a private endpoint.
examples:
- name: Delete a private endpoint. (autogenerated)
text: |
az network private-endpoint delete --name MyPrivateEndpoint --resource-group MyResourceGroup
crafted: true
"""
helps['network private-endpoint list'] = """
type: command
short-summary: List private endpoints.
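    examples:
    # Illustrative invocations; 'MyResourceGroup' is a placeholder and the resource group filter is optional.
    - name: List all private endpoints in the current subscription.
      text: az network private-endpoint list
    - name: List private endpoints within a resource group.
      text: az network private-endpoint list -g MyResourceGroup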
"""
helps['network private-endpoint show'] = """
type: command
short-summary: Get the details of a private endpoint.
examples:
- name: Get the details of a private endpoint (autogenerated)
text: |
az network private-endpoint show --name MyPrivateEndpoint --resource-group MyResourceGroup
crafted: true
"""
helps['network private-endpoint update'] = """
type: command
short-summary: Update a private endpoint.
examples:
- name: Update a private endpoint.
text: az network private-endpoint update -g MyResourceGroup -n MyPE --request-message "test" --tags mytag=hello
- name: Update a private endpoint. (autogenerated)
text: |
az network private-endpoint update --name MyPE --resource-group MyResourceGroup --set useRemoteGateways=true
crafted: true
"""
helps['network private-endpoint dns-zone-group'] = """
type: group
short-summary: Manage private endpoint dns zone group.
"""
helps['network private-endpoint dns-zone-group create'] = """
type: command
short-summary: Create a private endpoint dns zone group.
examples:
- name: Create a private endpoint dns zone group.
text: az network private-endpoint dns-zone-group create --endpoint-name MyPE -g MyRG -n MyZoneGroup --zone-name Zone1 --private-dns-zone PrivateDNSZone1
"""
helps['network private-endpoint dns-zone-group add'] = """
type: command
short-summary: Add a private endpoint dns zone into a dns zone group.
examples:
- name: Add a private endpoint dns zone group.
text: az network private-endpoint dns-zone-group add --endpoint-name MyPE -g MyRG -n MyZoneGroup --zone-name Zone1 --private-dns-zone PrivateDNSZone1
"""
helps['network private-endpoint dns-zone-group remove'] = """
type: command
    short-summary: Remove a private endpoint dns zone from a dns zone group.
examples:
- name: Remove a private endpoint dns zone group.
text: az network private-endpoint dns-zone-group remove --endpoint-name MyPE -g MyRG -n MyZoneGroup --zone-name Zone1
"""
helps['network private-endpoint dns-zone-group delete'] = """
type: command
short-summary: Delete a private endpoint dns zone group.
examples:
- name: Delete a private endpoint dns zone group. (autogenerated)
text: |
az network private-endpoint dns-zone-group delete --endpoint-name MyEndpoint --name MyPrivateDnsZoneGroup --resource-group MyResourceGroup
crafted: true
"""
helps['network private-endpoint dns-zone-group list'] = """
type: command
short-summary: List all private endpoint dns zone groups.
examples:
- name: List all private endpoint dns zone groups. (autogenerated)
text: |
az network private-endpoint dns-zone-group list --endpoint-name MyEndpoint --resource-group MyResourceGroup
crafted: true
"""
helps['network private-endpoint dns-zone-group show'] = """
type: command
short-summary: Show a private endpoint dns zone group.
examples:
- name: Show a private endpoint dns zone group. (autogenerated)
text: |
az network private-endpoint dns-zone-group show --endpoint-name MyEndpoint --name MyPrivateDnsZoneGroup --resource-group MyResourceGroup
crafted: true
"""
helps['network private-link-service'] = """
type: group
short-summary: Manage private link services.
"""
helps['network private-link-service connection'] = """
type: group
short-summary: Manage private link service endpoint connections.
"""
helps['network private-link-service connection delete'] = """
type: command
short-summary: Delete a private link service endpoint connection.
examples:
- name: Delete a private link service endpoint connection. (autogenerated)
text: |
az network private-link-service connection delete --name MyPrivateEndpointConnection --resource-group MyResourceGroup --service-name MyService
crafted: true
"""
helps['network private-link-service connection update'] = """
type: command
short-summary: Update a private link service endpoint connection.
long-summary: >
To update the connection status, the name of the connection should be provided.
Please obtain this name by running 'az network private-link-service show -g MyResourceGroup -n MyPLSName'.
        The connection name is listed under the 'privateEndpointConnections' field (see the lookup example below).
examples:
    - name: Update the endpoint connection status of a private link service
text: az network private-link-service connection update -g MyResourceGroup -n MyEndpointName.f072a430-2d82-4470-ab30-d23fcfee58d1 --service-name MyPLSName --connection-status Rejected
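    # Illustrative lookup; assumes connection names are exposed under 'privateEndpointConnections' in the show output.
    - name: Look up the connection name required for the update.
      text: az network private-link-service show -g MyResourceGroup -n MyPLSName --query "privateEndpointConnections[].name"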
"""
helps['network private-link-service create'] = """
type: command
short-summary: Create a private link service.
examples:
- name: Create a private link service
text: az network private-link-service create -g MyResourceGroup -n MyPLSName --vnet-name MyVnetName --subnet MySubnet --lb-name MyLBName --lb-frontend-ip-configs LoadBalancerFrontEnd -l centralus
"""
helps['network private-link-service delete'] = """
type: command
short-summary: Delete a private link service.
examples:
- name: Delete a private link service. (autogenerated)
text: |
az network private-link-service delete --name MyPrivateLinkService --resource-group MyResourceGroup
crafted: true
"""
helps['network private-link-service list'] = """
type: command
short-summary: List private link services.
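    examples:
    # Illustrative invocation; 'MyResourceGroup' is a placeholder resource group name.
    - name: List private link services in a resource group.
      text: az network private-link-service list -g MyResourceGroup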
"""
helps['network private-link-service show'] = """
type: command
short-summary: Get the details of a private link service.
examples:
- name: Get the details of a private link service. (autogenerated)
text: |
az network private-link-service show --name MyPrivateLinkService --resource-group MyResourceGroup
crafted: true
"""
helps['network private-link-service update'] = """
type: command
short-summary: Update a private link service.
examples:
- name: Update a private link service
text: az network private-link-service update -g MyResourceGroup -n MyPLSName --visibility SubId1 SubId2 --auto-approval SubId1 SubId2
"""
helps['network private-endpoint-connection'] = """
type: group
short-summary: Manage private endpoint connections.
"""
helps['network private-endpoint-connection approve'] = """
type: command
short-summary: Approve a private endpoint connection.
examples:
- name: Approve a private endpoint connection for a storage account.
text: az network private-endpoint-connection approve -g MyResourceGroup -n MyPrivateEndpoint --resource-name MySA --type Microsoft.Storage/storageAccounts --description "Approved"
- name: Approve a private endpoint connection for a keyvault.
text: az network private-endpoint-connection approve -g MyResourceGroup -n MyPrivateEndpoint --resource-name MyKV --type Microsoft.Keyvault/vaults --description "Approved"
- name: Approve a private endpoint connection for an ACR.
text: az network private-endpoint-connection approve --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002/privateEndpointConnections/testreg000002.6e6bf72bc59d41cc89c698d4cc5ee79d --description "Approved"
"""
helps['network private-endpoint-connection reject'] = """
type: command
short-summary: Reject a private endpoint connection.
examples:
- name: Reject a private endpoint connection for a storage account.
text: az network private-endpoint-connection reject -g MyResourceGroup -n MyPrivateEndpoint --resource-name MySA --type Microsoft.Storage/storageAccounts --description "Rejected"
- name: Reject a private endpoint connection for a keyvault.
text: az network private-endpoint-connection reject -g MyResourceGroup -n MyPrivateEndpoint --resource-name MyKV --type Microsoft.Keyvault/vaults --description "Rejected"
- name: Reject a private endpoint connection for an ACR.
text: az network private-endpoint-connection reject --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002/privateEndpointConnections/testreg000002.6e6bf72bc59d41cc89c698d4cc5ee79d --description "Rejected"
"""
helps['network private-endpoint-connection delete'] = """
type: command
short-summary: Delete a private endpoint connection.
examples:
- name: Delete a private endpoint connection for a storage account.
text: az network private-endpoint-connection delete -g MyResourceGroup -n MyPrivateEndpoint --resource-name MySA --type Microsoft.Storage/storageAccounts
- name: Delete a private endpoint connection for a keyvault.
text: az network private-endpoint-connection delete -g MyResourceGroup -n MyPrivateEndpoint --resource-name MyKV --type Microsoft.Keyvault/vaults
- name: Delete a private endpoint connection for an ACR.
text: az network private-endpoint-connection delete --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002/privateEndpointConnections/testreg000002.6e6bf72bc59d41cc89c698d4cc5ee79d
"""
helps['network private-endpoint-connection show'] = """
type: command
short-summary: Show a private endpoint connection.
examples:
- name: Show a private endpoint connection for a storage account.
text: az network private-endpoint-connection show -g MyResourceGroup -n MyPrivateEndpoint --resource-name MySA --type Microsoft.Storage/storageAccounts
- name: Show a private endpoint connection for a keyvault.
text: az network private-endpoint-connection show -g MyResourceGroup -n MyPrivateEndpoint --resource-name MyKV --type Microsoft.Keyvault/vaults
- name: Show a private endpoint connection for an ACR.
text: az network private-endpoint-connection show --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002/privateEndpointConnections/testreg000002.6e6bf72bc59d41cc89c698d4cc5ee79d
"""
helps['network private-endpoint-connection list'] = """
type: command
short-summary: List all private endpoint connections.
examples:
- name: List all private endpoint connections for a storage account.
text: az network private-endpoint-connection list -g MyResourceGroup -n MySA --type Microsoft.Storage/storageAccounts
- name: List all private endpoint connections for a keyvault.
text: az network private-endpoint-connection list -g MyResourceGroup -n MyKV --type Microsoft.Keyvault/vaults
- name: List all private endpoint connections for an ACR.
text: az network private-endpoint-connection list --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002
"""
helps['network private-link-resource'] = """
type: group
short-summary: Manage private link resources.
"""
helps['network private-link-resource list'] = """
type: command
short-summary: List all private link resources.
examples:
- name: List all private link resources for a storage account.
text: az network private-link-resource list -g MyResourceGroup -n MySA --type Microsoft.Storage/storageAccounts
- name: List all private link resources for a keyvault.
text: az network private-link-resource list -g MyResourceGroup -n MyKV --type Microsoft.Keyvault/vaults
- name: List all private link resources for an ACR.
text: az network private-link-resource list --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002
"""
helps['network profile'] = """
type: group
short-summary: Manage network profiles.
long-summary: >
To create a network profile, see the create command for the relevant resource. Currently,
only Azure Container Instances are supported.
"""
helps['network profile delete'] = """
type: command
short-summary: Delete a network profile.
examples:
- name: Delete a network profile. (autogenerated)
text: |
az network profile delete --name MyNetworkProfile --resource-group MyResourceGroup
crafted: true
"""
helps['network profile list'] = """
type: command
short-summary: List network profiles.
examples:
- name: List network profiles (autogenerated)
text: |
az network profile list --resource-group MyResourceGroup
crafted: true
"""
helps['network profile show'] = """
type: command
short-summary: Get the details of a network profile.
examples:
- name: Get the details of a network profile. (autogenerated)
text: |
az network profile show --name MyNetworkProfile --resource-group MyResourceGroup
crafted: true
"""
helps['network public-ip'] = """
type: group
short-summary: Manage public IP addresses.
long-summary: >
To learn more about public IP addresses visit https://docs.microsoft.com/azure/virtual-network/virtual-network-public-ip-address
"""
helps['network public-ip create'] = """
type: command
short-summary: Create a public IP address.
long-summary: >
    [Coming breaking change] In an upcoming release, the default behavior will change when the SKU is Standard and no zone is provided: zones will default to [], meaning the Standard public IP has no zones. To create a zone-redundant public IP address, specify all the zones in the region, for example --zone 1 2 3.
examples:
- name: Create a basic public IP resource.
text: az network public-ip create -g MyResourceGroup -n MyIp
- name: Create a static public IP resource for a DNS name label.
text: az network public-ip create -g MyResourceGroup -n MyIp --dns-name MyLabel --allocation-method Static
- name: Create a public IP resource in an availability zone in the current resource group region.
text: az network public-ip create -g MyResourceGroup -n MyIp --zone 2
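  - name: Create a zone-redundant Standard public IP resource by listing every zone in the region (illustrative; assumes the region has three availability zones).
    text: az network public-ip create -g MyResourceGroup -n MyIp --sku Standard --zone 1 2 3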
"""
helps['network public-ip delete'] = """
type: command
short-summary: Delete a public IP address.
examples:
- name: Delete a public IP address.
text: az network public-ip delete -g MyResourceGroup -n MyIp
"""
helps['network public-ip list'] = """
type: command
short-summary: List public IP addresses.
examples:
- name: List all public IPs in a subscription.
text: az network public-ip list
- name: List all public IPs in a resource group.
text: az network public-ip list -g MyResourceGroup
- name: List all public IPs of a domain name label.
text: az network public-ip list -g MyResourceGroup --query "[?dnsSettings.domainNameLabel=='MyLabel']"
"""
helps['network public-ip prefix'] = """
type: group
short-summary: Manage public IP prefix resources.
"""
helps['network public-ip prefix create'] = """
type: command
short-summary: Create a public IP prefix resource.
examples:
- name: Create a public IP prefix resource. (autogenerated)
text: |
az network public-ip prefix create --length 28 --location westus2 --name MyPublicIPPrefix --resource-group MyResourceGroup
crafted: true
"""
helps['network public-ip prefix delete'] = """
type: command
short-summary: Delete a public IP prefix resource.
examples:
- name: Delete a public IP prefix resource. (autogenerated)
text: |
az network public-ip prefix delete --name MyPublicIPPrefix --resource-group MyResourceGroup
crafted: true
"""
helps['network public-ip prefix list'] = """
type: command
short-summary: List public IP prefix resources.
"""
helps['network public-ip prefix show'] = """
type: command
short-summary: Get the details of a public IP prefix resource.
examples:
- name: Get the details of a public IP prefix resource. (autogenerated)
text: |
az network public-ip prefix show --name MyPublicIPPrefix --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network public-ip prefix update'] = """
type: command
short-summary: Update a public IP prefix resource.
examples:
- name: Update a public IP prefix resource. (autogenerated)
text: |
az network public-ip prefix update --name MyPublicIPPrefix --resource-group MyResourceGroup --set useRemoteGateways=true
crafted: true
"""
helps['network public-ip show'] = """
type: command
short-summary: Get the details of a public IP address.
examples:
- name: Get information about a public IP resource.
text: az network public-ip show -g MyResourceGroup -n MyIp
- name: Get the FQDN and IP address of a public IP resource.
text: >
az network public-ip show -g MyResourceGroup -n MyIp --query "{fqdn: dnsSettings.fqdn, address: ipAddress}"
"""
helps['network public-ip update'] = """
type: command
short-summary: Update a public IP address.
examples:
- name: Update a public IP resource with a DNS name label and static allocation.
text: az network public-ip update -g MyResourceGroup -n MyIp --dns-name MyLabel --allocation-method Static
"""
helps['network route-filter'] = """
type: group
short-summary: Manage route filters.
long-summary: >
To learn more about route filters with Microsoft peering with ExpressRoute, visit https://docs.microsoft.com/azure/expressroute/how-to-routefilter-cli
"""
helps['network route-filter create'] = """
type: command
short-summary: Create a route filter.
examples:
- name: Create a route filter.
text: az network route-filter create -g MyResourceGroup -n MyRouteFilter
- name: Create a route filter. (autogenerated)
text: |
az network route-filter create --location westus2 --name MyRouteFilter --resource-group MyResourceGroup
crafted: true
"""
helps['network route-filter delete'] = """
type: command
short-summary: Delete a route filter.
examples:
- name: Delete a route filter.
text: az network route-filter delete -g MyResourceGroup -n MyRouteFilter
"""
helps['network route-filter list'] = """
type: command
short-summary: List route filters.
examples:
- name: List route filters in a resource group.
text: az network route-filter list -g MyResourceGroup
"""
helps['network route-filter rule'] = """
type: group
short-summary: Manage rules in a route filter.
long-summary: >
To learn more about route filters with Microsoft peering with ExpressRoute, visit https://docs.microsoft.com/azure/expressroute/how-to-routefilter-cli
"""
helps['network route-filter rule create'] = """
type: command
short-summary: Create a rule in a route filter.
parameters:
- name: --communities
    short-summary: Space-separated list of Border Gateway Protocol (BGP) community values to filter on.
populator-commands:
- az network route-filter rule list-service-communities
examples:
- name: Create a rule in a route filter to allow Dynamics 365.
text: |
az network route-filter rule create -g MyResourceGroup --filter-name MyRouteFilter \\
-n MyRouteFilterRule --communities 12076:5040 --access Allow
"""
helps['network route-filter rule delete'] = """
type: command
short-summary: Delete a rule from a route filter.
examples:
- name: Delete a rule from a route filter.
text: az network route-filter rule delete -g MyResourceGroup --filter-name MyRouteFilter -n MyRouteFilterRule
"""
helps['network route-filter rule list'] = """
type: command
short-summary: List rules in a route filter.
examples:
- name: List rules in a route filter.
text: az network route-filter rule list -g MyResourceGroup --filter-name MyRouteFilter
"""
helps['network route-filter rule list-service-communities'] = """
type: command
short-summary: Get all the available BGP service communities.
examples:
  - name: Get all the available BGP service communities.
text: az network route-filter rule list-service-communities -o table
- name: Get the community value for Exchange.
text: |
az network route-filter rule list-service-communities \\
--query '[].bgpCommunities[?communityName==`Exchange`].[communityValue][][]' -o tsv
"""
helps['network route-filter rule show'] = """
type: command
short-summary: Get the details of a rule in a route filter.
examples:
- name: Get the details of a rule in a route filter.
text: az network route-filter rule show -g MyResourceGroup --filter-name MyRouteFilter -n MyRouteFilterRule
"""
helps['network route-filter rule update'] = """
type: command
short-summary: Update a rule in a route filter.
examples:
- name: Update a rule in a route filter to add Exchange to rule list.
text: |
az network route-filter rule update -g MyResourceGroup --filter-name MyRouteFilter \\
-n MyRouteFilterRule --add communities='12076:5010'
"""
helps['network route-filter show'] = """
type: command
short-summary: Get the details of a route filter.
examples:
- name: Get the details of a route filter.
text: az network route-filter show -g MyResourceGroup -n MyRouteFilter
- name: Get the details of a route filter. (autogenerated)
text: |
az network route-filter show --expand peerings --name MyRouteFilter --resource-group MyResourceGroup
crafted: true
"""
helps['network route-filter update'] = """
type: command
short-summary: Update a route filter.
long-summary: >
This command can only be used to update the tags for a route filter. Name and resource group are immutable and cannot be updated.
examples:
- name: Update the tags on a route filter.
text: az network route-filter update -g MyResourceGroup -n MyRouteFilter --set tags.CostCenter=MyBusinessGroup
"""
helps['network route-table'] = """
type: group
short-summary: Manage route tables.
"""
helps['network route-table create'] = """
type: command
short-summary: Create a route table.
examples:
- name: Create a route table.
text: az network route-table create -g MyResourceGroup -n MyRouteTable
"""
helps['network route-table delete'] = """
type: command
short-summary: Delete a route table.
examples:
- name: Delete a route table.
text: az network route-table delete -g MyResourceGroup -n MyRouteTable
"""
helps['network route-table list'] = """
type: command
short-summary: List route tables.
examples:
  - name: List all route tables in a resource group.
text: az network route-table list -g MyResourceGroup
"""
helps['network route-table route'] = """
type: group
short-summary: Manage routes in a route table.
"""
helps['network route-table route create'] = """
type: command
short-summary: Create a route in a route table.
examples:
- name: Create a route that forces all inbound traffic to a Network Virtual Appliance.
text: |
az network route-table route create -g MyResourceGroup --route-table-name MyRouteTable -n MyRoute \\
--next-hop-type VirtualAppliance --address-prefix 10.0.0.0/16 --next-hop-ip-address 10.0.100.4
"""
helps['network route-table route delete'] = """
type: command
short-summary: Delete a route from a route table.
examples:
- name: Delete a route from a route table.
text: az network route-table route delete -g MyResourceGroup --route-table-name MyRouteTable -n MyRoute
"""
helps['network route-table route list'] = """
type: command
short-summary: List routes in a route table.
examples:
- name: List routes in a route table.
text: az network route-table route list -g MyResourceGroup --route-table-name MyRouteTable
"""
helps['network route-table route show'] = """
type: command
short-summary: Get the details of a route in a route table.
examples:
- name: Get the details of a route in a route table.
text: az network route-table route show -g MyResourceGroup --route-table-name MyRouteTable -n MyRoute -o table
"""
helps['network route-table route update'] = """
type: command
short-summary: Update a route in a route table.
examples:
  - name: Update a route in a route table to change the next hop IP address.
    text: az network route-table route update -g MyResourceGroup --route-table-name MyRouteTable -n MyRoute --next-hop-ip-address 10.0.100.5
- name: Update a route in a route table. (autogenerated)
text: |
az network route-table route update --address-prefix 10.0.0.0/16 --name MyRoute --next-hop-ip-address 10.0.100.5 --next-hop-type VirtualNetworkGateway --resource-group MyResourceGroup --route-table-name MyRouteTable
crafted: true
"""
helps['network route-table show'] = """
type: command
short-summary: Get the details of a route table.
examples:
- name: Get the details of a route table.
text: az network route-table show -g MyResourceGroup -n MyRouteTable
"""
helps['network route-table update'] = """
type: command
short-summary: Update a route table.
examples:
  - name: Update a route table to disable BGP route propagation.
text: az network route-table update -g MyResourceGroup -n MyRouteTable --disable-bgp-route-propagation true
"""
helps['network service-endpoint'] = """
type: group
short-summary: Manage policies related to service endpoints.
"""
helps['network service-endpoint policy'] = """
type: group
short-summary: Manage service endpoint policies.
"""
helps['network service-endpoint policy create'] = """
type: command
short-summary: Create a service endpoint policy.
examples:
- name: Create a service endpoint policy. (autogenerated)
text: |
az network service-endpoint policy create --name MyServiceEndpointPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network service-endpoint policy delete'] = """
type: command
short-summary: Delete a service endpoint policy.
"""
helps['network service-endpoint policy list'] = """
type: command
short-summary: List service endpoint policies.
examples:
- name: List service endpoint policies. (autogenerated)
text: |
az network service-endpoint policy list --resource-group MyResourceGroup
crafted: true
"""
helps['network service-endpoint policy show'] = """
type: command
short-summary: Get the details of a service endpoint policy.
examples:
- name: Get the details of a service endpoint policy. (autogenerated)
text: |
az network service-endpoint policy show --name MyServiceEndpointPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network service-endpoint policy update'] = """
type: command
short-summary: Update a service endpoint policy.
"""
helps['network service-endpoint policy-definition'] = """
type: group
short-summary: Manage service endpoint policy definitions.
"""
helps['network service-endpoint policy-definition create'] = """
type: command
short-summary: Create a service endpoint policy definition.
parameters:
- name: --service
populator-commands:
- az network service-endpoint list
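examples:
  - name: Create a service endpoint policy definition (illustrative; assumes MyStorageAccountId is the resource ID of the storage account to allow).
    text: |
        az network service-endpoint policy-definition create -g MyResourceGroup --policy-name MyPolicy \\
        -n MyPolicyDefinition --service Microsoft.Storage --service-resources MyStorageAccountId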
"""
helps['network service-endpoint policy-definition delete'] = """
type: command
short-summary: Delete a service endpoint policy definition.
examples:
- name: Delete a service endpoint policy definition (autogenerated)
text: |
az network service-endpoint policy-definition delete --name myserviceendpointpolicydefinition --policy-name mypolicy --resource-group myresourcegroup
crafted: true
"""
helps['network service-endpoint policy-definition list'] = """
type: command
short-summary: List service endpoint policy definitions.
examples:
- name: List service endpoint policy definitions. (autogenerated)
text: |
az network service-endpoint policy-definition list --policy-name MyPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network service-endpoint policy-definition show'] = """
type: command
short-summary: Get the details of a service endpoint policy definition.
examples:
- name: Get the details of a service endpoint policy definition. (autogenerated)
text: |
az network service-endpoint policy-definition show --name myserviceendpointpolicydefinition --policy-name mypolicy --resource-group myresourcegroup
crafted: true
"""
helps['network service-endpoint policy-definition update'] = """
type: command
short-summary: Update a service endpoint policy definition.
examples:
- name: Update a service endpoint policy definition. (autogenerated)
text: |
az network service-endpoint policy-definition update --add communities='12076:5010' --name MyServiceEndpointPolicyDefinition --policy-name MyPolicy --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network traffic-manager'] = """
type: group
short-summary: Manage the routing of incoming traffic.
"""
helps['network traffic-manager endpoint'] = """
type: group
short-summary: Manage Azure Traffic Manager end points.
"""
helps['network traffic-manager endpoint create'] = """
type: command
short-summary: Create a traffic manager endpoint.
parameters:
- name: --geo-mapping
populator-commands:
- az network traffic-manager endpoint show-geographic-hierarchy
examples:
- name: Create an endpoint for a performance profile to point to an Azure Web App endpoint.
text: |
az network traffic-manager endpoint create -g MyResourceGroup --profile-name MyTmProfile \\
-n MyEndpoint --type azureEndpoints --target-resource-id $MyWebApp1Id --endpoint-status enabled
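  - name: Create an external endpoint for a geographic profile (illustrative; assumes the profile uses the Geographic routing method and that the region codes come from 'az network traffic-manager endpoint show-geographic-hierarchy').
    text: |
        az network traffic-manager endpoint create -g MyResourceGroup --profile-name MyTmProfile \\
        -n MyEndpoint --type externalEndpoints --target webserver.example.com --geo-mapping GEO-NA GEO-EU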
"""
helps['network traffic-manager endpoint delete'] = """
type: command
short-summary: Delete a traffic manager endpoint.
examples:
- name: Delete a traffic manager endpoint.
    text: az network traffic-manager endpoint delete -g MyResourceGroup --profile-name MyTmProfile -n MyEndpoint --type azureEndpoints
- name: Delete a traffic manager endpoint. (autogenerated)
text: |
az network traffic-manager endpoint delete --name MyEndpoint --profile-name MyTmProfile --resource-group MyResourceGroup --subscription MySubscription --type azureEndpoints
crafted: true
"""
helps['network traffic-manager endpoint list'] = """
type: command
short-summary: List traffic manager endpoints.
examples:
- name: List traffic manager endpoints.
text: az network traffic-manager endpoint list -g MyResourceGroup --profile-name MyTmProfile
"""
helps['network traffic-manager endpoint show'] = """
type: command
short-summary: Get the details of a traffic manager endpoint.
examples:
- name: Get the details of a traffic manager endpoint.
text: |
az network traffic-manager endpoint show -g MyResourceGroup \\
--profile-name MyTmProfile -n MyEndpoint --type azureEndpoints
"""
helps['network traffic-manager endpoint show-geographic-hierarchy'] = """
type: command
short-summary: Get the default geographic hierarchy used by the geographic traffic routing method.
examples:
- name: Get the default geographic hierarchy used by the geographic traffic routing method.
text: az network traffic-manager endpoint show-geographic-hierarchy
"""
helps['network traffic-manager endpoint update'] = """
type: command
short-summary: Update a traffic manager endpoint.
examples:
- name: Update a traffic manager endpoint to change its weight.
    text: az network traffic-manager endpoint update -g MyResourceGroup --profile-name MyTmProfile -n MyEndpoint --weight 20 --type azureEndpoints
- name: Update a traffic manager endpoint. (autogenerated)
text: |
az network traffic-manager endpoint update --name MyEndpoint --profile-name MyTmProfile --resource-group MyResourceGroup --target webserver.mysite.com --type azureEndpoints
crafted: true
- name: Update a traffic manager endpoint. (autogenerated)
text: |
az network traffic-manager endpoint update --endpoint-status Enabled --name MyEndpoint --profile-name MyTmProfile --resource-group MyResourceGroup --type azureEndpoints
crafted: true
"""
helps['network traffic-manager profile'] = """
type: group
short-summary: Manage Azure Traffic Manager profiles.
"""
helps['network traffic-manager profile check-dns'] = """
type: command
short-summary: Check the availability of a relative DNS name.
long-summary: This checks for the availability of DNS prefixes for trafficmanager.net.
examples:
- name: Check the availability of 'mywebapp.trafficmanager.net' in Azure.
text: az network traffic-manager profile check-dns -n mywebapp
"""
helps['network traffic-manager profile create'] = """
type: command
short-summary: Create a traffic manager profile.
examples:
- name: Create a traffic manager profile with performance routing.
text: |
az network traffic-manager profile create -g MyResourceGroup -n MyTmProfile --routing-method Performance \\
--unique-dns-name mywebapp --ttl 30 --protocol HTTP --port 80 --path "/"
"""
helps['network traffic-manager profile delete'] = """
type: command
short-summary: Delete a traffic manager profile.
examples:
- name: Delete a traffic manager profile.
text: az network traffic-manager profile delete -g MyResourceGroup -n MyTmProfile
- name: Delete a traffic manager profile. (autogenerated)
text: |
az network traffic-manager profile delete --name MyTmProfile --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network traffic-manager profile list'] = """
type: command
short-summary: List traffic manager profiles.
examples:
- name: List traffic manager profiles.
text: az network traffic-manager profile list -g MyResourceGroup
"""
helps['network traffic-manager profile show'] = """
type: command
short-summary: Get the details of a traffic manager profile.
examples:
- name: Get the details of a traffic manager profile.
text: az network traffic-manager profile show -g MyResourceGroup -n MyTmProfile
"""
helps['network traffic-manager profile update'] = """
type: command
short-summary: Update a traffic manager profile.
examples:
- name: Update a traffic manager profile to change the TTL to 300.
text: az network traffic-manager profile update -g MyResourceGroup -n MyTmProfile --ttl 300
- name: Update a traffic manager profile. (autogenerated)
text: |
az network traffic-manager profile update --name MyTmProfile --resource-group MyResourceGroup --status Enabled
crafted: true
"""
helps['network vnet'] = """
type: group
short-summary: Manage Azure Virtual Networks.
long-summary: To learn more about Virtual Networks visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-network
"""
helps['network vnet check-ip-address'] = """
type: command
short-summary: Check if a private IP address is available for use within a virtual network.
examples:
- name: Check whether 10.0.0.4 is available within MyVnet.
text: az network vnet check-ip-address -g MyResourceGroup -n MyVnet --ip-address 10.0.0.4
"""
helps['network vnet create'] = """
type: command
short-summary: Create a virtual network.
long-summary: >
You may also create a subnet at the same time by specifying a subnet name and (optionally) an address prefix.
To learn about how to create a virtual network visit https://docs.microsoft.com/azure/virtual-network/manage-virtual-network#create-a-virtual-network
examples:
- name: Create a virtual network.
text: az network vnet create -g MyResourceGroup -n MyVnet
- name: Create a virtual network with a specific address prefix and one subnet.
text: |
az network vnet create -g MyResourceGroup -n MyVnet --address-prefix 10.0.0.0/16 \\
--subnet-name MySubnet --subnet-prefix 10.0.0.0/24
- name: Create a virtual network. (autogenerated)
text: |
az network vnet create --address-prefixes 10.0.0.0/16 --name MyVirtualNetwork --resource-group MyResourceGroup --subnet-name MyAseSubnet --subnet-prefixes 10.0.0.0/24
crafted: true
"""
helps['network vnet delete'] = """
type: command
short-summary: Delete a virtual network.
examples:
- name: Delete a virtual network.
text: az network vnet delete -g MyResourceGroup -n myVNet
"""
helps['network vnet list'] = """
type: command
short-summary: List virtual networks.
examples:
- name: List all virtual networks in a subscription.
text: az network vnet list
- name: List all virtual networks in a resource group.
text: az network vnet list -g MyResourceGroup
- name: List virtual networks in a subscription which specify a certain address prefix.
text: az network vnet list --query "[?contains(addressSpace.addressPrefixes, '10.0.0.0/16')]"
"""
helps['network vnet list-endpoint-services'] = """
type: command
short-summary: List which services support VNET service tunneling in a given region.
long-summary: To learn more about service endpoints visit https://docs.microsoft.com/azure/virtual-network/virtual-network-service-endpoints-configure#azure-cli
examples:
- name: List the endpoint services available for use in the West US region.
text: az network vnet list-endpoint-services -l westus -o table
"""
helps['network vnet peering'] = """
type: group
short-summary: Manage peering connections between Azure Virtual Networks.
long-summary: To learn more about virtual network peering visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-peering
"""
helps['network vnet peering create'] = """
type: command
short-summary: Create a virtual network peering connection.
long-summary: >
To successfully peer two virtual networks this command must be called twice with
the values for --vnet-name and --remote-vnet reversed.
examples:
- name: Create a peering connection between two virtual networks.
text: |
az network vnet peering create -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1 \\
--remote-vnet MyVnet2Id --allow-vnet-access
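  - name: Create the matching peering in the opposite direction to complete the link (illustrative; assumes MyVnet1Id is the resource ID of MyVnet1).
    text: |
        az network vnet peering create -g MyResourceGroup -n MyVnet2ToMyVnet1 --vnet-name MyVnet2 \\
        --remote-vnet MyVnet1Id --allow-vnet-access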
"""
helps['network vnet peering delete'] = """
type: command
short-summary: Delete a peering.
examples:
- name: Delete a virtual network peering connection.
text: az network vnet peering delete -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1
"""
helps['network vnet peering list'] = """
type: command
short-summary: List peerings.
examples:
- name: List all peerings of a specified virtual network.
text: az network vnet peering list -g MyResourceGroup --vnet-name MyVnet1
"""
helps['network vnet peering show'] = """
type: command
short-summary: Show details of a peering.
examples:
- name: Show all details of the specified virtual network peering.
text: az network vnet peering show -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1
"""
helps['network vnet peering update'] = """
type: command
short-summary: Update a peering.
examples:
- name: Change forwarded traffic configuration of a virtual network peering.
text: >
az network vnet peering update -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1 --set allowForwardedTraffic=true
- name: Change virtual network access of a virtual network peering.
text: >
az network vnet peering update -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1 --set allowVirtualNetworkAccess=true
- name: Change gateway transit property configuration of a virtual network peering.
text: >
az network vnet peering update -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1 --set allowGatewayTransit=true
- name: Use remote gateways in virtual network peering.
text: >
az network vnet peering update -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1 --set useRemoteGateways=true
"""
helps['network vnet show'] = """
type: command
short-summary: Get the details of a virtual network.
examples:
- name: Get details for MyVNet.
text: az network vnet show -g MyResourceGroup -n MyVNet
"""
helps['network vnet list-available-ips'] = """
type: command
short-summary: List some available IPs in the VNet.
examples:
  - name: List some available IPs in the VNet.
text: az network vnet list-available-ips -g MyResourceGroup -n MyVNet
"""
helps['network vnet subnet'] = """
type: group
short-summary: Manage subnets in an Azure Virtual Network.
long-summary: To learn more about subnets visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-subnet
"""
helps['network vnet subnet create'] = """
type: command
short-summary: Create a subnet and associate an existing NSG and route table.
parameters:
- name: --service-endpoints
short-summary: Space-separated list of services allowed private access to this subnet.
populator-commands:
- az network vnet list-endpoint-services
- name: --nat-gateway
    short-summary: Attach a NAT gateway to the subnet.
examples:
- name: Create new subnet attached to an NSG with a custom route table.
text: |
az network vnet subnet create -g MyResourceGroup --vnet-name MyVnet -n MySubnet \\
--address-prefixes 10.0.0.0/24 --network-security-group MyNsg --route-table MyRouteTable
- name: Create new subnet attached to a NAT gateway.
text: az network vnet subnet create -n MySubnet --vnet-name MyVnet -g MyResourceGroup --nat-gateway MyNatGateway --address-prefixes "10.0.0.0/21"
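  - name: Create a new subnet that allows private access to the Azure Storage service endpoint (illustrative; other service names can be listed with 'az network vnet list-endpoint-services').
    text: az network vnet subnet create -g MyResourceGroup --vnet-name MyVnet -n MySubnet --address-prefixes 10.0.1.0/24 --service-endpoints Microsoft.Storage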
"""
helps['network vnet subnet delete'] = """
type: command
short-summary: Delete a subnet.
examples:
- name: Delete a subnet.
text: az network vnet subnet delete -g MyResourceGroup -n MySubnet
- name: Delete a subnet. (autogenerated)
text: |
az network vnet subnet delete --name MySubnet --resource-group MyResourceGroup --vnet-name MyVnet
crafted: true
"""
helps['network vnet subnet list'] = """
type: command
short-summary: List the subnets in a virtual network.
examples:
- name: List the subnets in a virtual network.
text: az network vnet subnet list -g MyResourceGroup --vnet-name MyVNet
"""
helps['network vnet subnet list-available-delegations'] = """
type: command
short-summary: List the services available for subnet delegation.
examples:
- name: Retrieve the service names for available delegations in the West US region.
text: az network vnet subnet list-available-delegations -l westus --query [].serviceName
- name: List the services available for subnet delegation. (autogenerated)
text: |
az network vnet subnet list-available-delegations --resource-group MyResourceGroup
crafted: true
"""
helps['network vnet subnet show'] = """
type: command
short-summary: Show details of a subnet.
examples:
- name: Show the details of a subnet associated with a virtual network.
text: az network vnet subnet show -g MyResourceGroup -n MySubnet --vnet-name MyVNet
"""
helps['network vnet subnet update'] = """
type: command
short-summary: Update a subnet.
parameters:
- name: --service-endpoints
short-summary: Space-separated list of services allowed private access to this subnet.
populator-commands:
- az network vnet list-endpoint-services
- name: --nat-gateway
    short-summary: Attach a NAT gateway to the subnet.
examples:
- name: Associate a network security group to a subnet.
text: az network vnet subnet update -g MyResourceGroup -n MySubnet --vnet-name MyVNet --network-security-group MyNsg
- name: Update subnet with NAT gateway.
text: az network vnet subnet update -n MySubnet --vnet-name MyVnet -g MyResourceGroup --nat-gateway MyNatGateway --address-prefixes "10.0.0.0/21"
- name: Disable the private endpoint network policies
text: az network vnet subnet update -n MySubnet --vnet-name MyVnet -g MyResourceGroup --disable-private-endpoint-network-policies
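  - name: Allow private access to the Azure Storage service endpoint from an existing subnet (illustrative).
    text: az network vnet subnet update -g MyResourceGroup --vnet-name MyVnet -n MySubnet --service-endpoints Microsoft.Storage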
"""
helps['network vnet update'] = """
type: command
short-summary: Update a virtual network.
examples:
- name: Update a virtual network with the IP address of a DNS server.
text: az network vnet update -g MyResourceGroup -n MyVNet --dns-servers 10.2.0.8
- name: Update a virtual network. (autogenerated)
text: |
az network vnet update --address-prefixes 40.1.0.0/24 --name MyVNet --resource-group MyResourceGroup
crafted: true
"""
helps['network vnet-gateway'] = """
type: group
short-summary: Use an Azure Virtual Network Gateway to establish secure, cross-premises connectivity.
long-summary: >
To learn more about Azure Virtual Network Gateways, visit https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-howto-site-to-site-resource-manager-cli
"""
helps['network vnet-gateway create'] = """
type: command
short-summary: Create a virtual network gateway.
examples:
- name: Create a basic virtual network gateway for site-to-site connectivity.
text: |
az network vnet-gateway create -g MyResourceGroup -n MyVnetGateway --public-ip-address MyGatewayIp \\
--vnet MyVnet --gateway-type Vpn --sku VpnGw1 --vpn-type RouteBased --no-wait
- name: >
Create a basic virtual network gateway that provides point-to-site connectivity with a RADIUS secret that matches what is configured on a RADIUS server.
text: |
az network vnet-gateway create -g MyResourceGroup -n MyVnetGateway --public-ip-address MyGatewayIp \\
--vnet MyVnet --gateway-type Vpn --sku VpnGw1 --vpn-type RouteBased --address-prefixes 40.1.0.0/24 \\
--client-protocol IkeV2 SSTP --radius-secret 111_aaa --radius-server 30.1.1.15 --vpn-gateway-generation Generation1
- name: >
Create a basic virtual network gateway with multi authentication
text: |
az network vnet-gateway create -g MyResourceGroup -n MyVnetGateway --public-ip-address MyGatewayIp --vnet MyVnet --gateway-type Vpn --sku VpnGw1 --vpn-type RouteBased --address-prefixes 40.1.0.0/24 --client-protocol OpenVPN --radius-secret 111_aaa --radius-server 30.1.1.15 --aad-issuer https://sts.windows.net/00000-000000-00000-0000-000/ --aad-tenant https://login.microsoftonline.com/000 --aad-audience 0000-000 --root-cert-name root-cert --root-cert-data "root-cert.cer" --vpn-auth-type AAD Certificate Radius
- name: Create a virtual network gateway. (autogenerated)
text: |
az network vnet-gateway create --gateway-type Vpn --location westus2 --name MyVnetGateway --no-wait --public-ip-addresses myVGPublicIPAddress --resource-group MyResourceGroup --sku Basic --vnet MyVnet --vpn-type PolicyBased
crafted: true
"""
helps['network vnet-gateway delete'] = """
type: command
short-summary: Delete a virtual network gateway.
long-summary: >
In order to delete a Virtual Network Gateway, you must first delete ALL Connection objects in Azure that are
connected to the Gateway. After deleting the Gateway, proceed to delete other resources now not in use.
For more information, follow the order of instructions on this page:
https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-delete-vnet-gateway-portal
examples:
- name: Delete a virtual network gateway.
text: az network vnet-gateway delete -g MyResourceGroup -n MyVnetGateway
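  - name: List and remove the connections attached to a gateway before deleting it (illustrative; assumes the connections live in the same resource group).
    text: |
        az network vpn-connection list -g MyResourceGroup --query "[].name" -o tsv
        az network vpn-connection delete -g MyResourceGroup -n MyConnection
        az network vnet-gateway delete -g MyResourceGroup -n MyVnetGateway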
"""
helps['network vnet-gateway ipsec-policy'] = """
type: group
short-summary: Manage virtual network gateway IPSec policies.
"""
helps['network vnet-gateway ipsec-policy add'] = """
type: command
short-summary: Add a virtual network gateway IPSec policy.
long-summary: Set all IPsec policies of a virtual network gateway. If you want to set any IPsec policy, you must set them all.
examples:
- name: Add specified IPsec policies to a gateway instead of relying on defaults.
text: |
az network vnet-gateway ipsec-policy add -g MyResourceGroup --gateway-name MyGateway \\
--dh-group DHGroup14 --ike-encryption AES256 --ike-integrity SHA384 --ipsec-encryption DES3 \\
--ipsec-integrity GCMAES256 --pfs-group PFS2048 --sa-lifetime 27000 --sa-max-size 102400000
"""
helps['network vnet-gateway ipsec-policy clear'] = """
type: command
short-summary: Delete all IPsec policies on a virtual network gateway.
examples:
- name: Remove all previously specified IPsec policies from a gateway.
text: az network vnet-gateway ipsec-policy clear -g MyResourceGroup --gateway-name MyConnection
"""
helps['network vnet-gateway ipsec-policy list'] = """
type: command
short-summary: List IPSec policies associated with a virtual network gateway.
examples:
- name: List the IPsec policies set on a gateway.
text: az network vnet-gateway ipsec-policy list -g MyResourceGroup --gateway-name MyConnection
"""
helps['network vnet-gateway list'] = """
type: command
short-summary: List virtual network gateways.
examples:
- name: List virtual network gateways in a resource group.
text: az network vnet-gateway list -g MyResourceGroup
"""
helps['network vnet-gateway list-advertised-routes'] = """
type: command
short-summary: List the routes of a virtual network gateway advertised to the specified peer.
examples:
- name: List the routes of a virtual network gateway advertised to the specified peer.
text: az network vnet-gateway list-advertised-routes -g MyResourceGroup -n MyVnetGateway --peer 23.10.10.9
"""
helps['network vnet-gateway list-bgp-peer-status'] = """
type: command
short-summary: Retrieve the status of BGP peers.
examples:
- name: Retrieve the status of a BGP peer.
text: az network vnet-gateway list-bgp-peer-status -g MyResourceGroup -n MyVnetGateway --peer 23.10.10.9
"""
helps['network vnet-gateway list-learned-routes'] = """
type: command
short-summary: Retrieve a list of routes the virtual network gateway has learned, including routes learned from BGP peers.
examples:
- name: Retrieve a list of learned routes.
text: az network vnet-gateway list-learned-routes -g MyResourceGroup -n MyVnetGateway
"""
helps['network vnet-gateway reset'] = """
type: command
short-summary: Reset a virtual network gateway.
examples:
- name: Reset a virtual network gateway.
text: az network vnet-gateway reset -g MyResourceGroup -n MyVnetGateway
- name: Reset a virtual network gateway with Active-Active feature enabled.
text: az network vnet-gateway reset -g MyResourceGroup -n MyVnetGateway --gateway-vip MyGatewayIP
"""
helps['network vnet-gateway revoked-cert'] = """
type: group
short-summary: Manage revoked certificates in a virtual network gateway.
long-summary: Prevent machines using this certificate from accessing Azure through this gateway.
"""
helps['network vnet-gateway revoked-cert create'] = """
type: command
short-summary: Revoke a certificate.
examples:
- name: Revoke a certificate.
text: |
az network vnet-gateway revoked-cert create -g MyResourceGroup -n MyRootCertificate \\
--gateway-name MyVnetGateway --thumbprint abc123
"""
helps['network vnet-gateway revoked-cert delete'] = """
type: command
short-summary: Delete a revoked certificate.
examples:
- name: Delete a revoked certificate.
text: az network vnet-gateway revoked-cert delete -g MyResourceGroup -n MyRootCertificate --gateway-name MyVnetGateway
- name: Delete a revoked certificate. (autogenerated)
text: |
az network vnet-gateway revoked-cert delete --gateway-name MyVnetGateway --name MyRootCertificate --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network vnet-gateway root-cert'] = """
type: group
short-summary: Manage root certificates of a virtual network gateway.
"""
helps['network vnet-gateway root-cert create'] = """
type: command
short-summary: Upload a root certificate.
examples:
- name: Add a Root Certificate to the list of certs allowed to connect to this Gateway.
text: |
az network vnet-gateway root-cert create -g MyResourceGroup -n MyRootCertificate \\
--gateway-name MyVnetGateway --public-cert-data MyCertificateData
"""
helps['network vnet-gateway root-cert delete'] = """
type: command
short-summary: Delete a root certificate.
examples:
- name: Remove a certificate from the list of Root Certificates whose children are allowed to access this Gateway.
text: az network vnet-gateway root-cert delete -g MyResourceGroup -n MyRootCertificate --gateway-name MyVnetGateway
- name: Delete a root certificate. (autogenerated)
text: |
az network vnet-gateway root-cert delete --gateway-name MyVnetGateway --name MyRootCertificate --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network vnet-gateway show'] = """
type: command
short-summary: Get the details of a virtual network gateway.
examples:
- name: Get the details of a virtual network gateway.
text: az network vnet-gateway show -g MyResourceGroup -n MyVnetGateway
"""
helps['network vnet-gateway update'] = """
type: command
short-summary: Update a virtual network gateway.
examples:
- name: Change the SKU of a virtual network gateway.
text: az network vnet-gateway update -g MyResourceGroup -n MyVnetGateway --sku VpnGw2
- name: Update a virtual network gateway. (autogenerated)
text: |
az network vnet-gateway update --address-prefixes 40.1.0.0/24 --client-protocol IkeV2 --name MyVnetGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network vnet-gateway vpn-client'] = """
type: group
short-summary: Download a VPN client configuration required to connect to Azure via point-to-site.
"""
helps['network vnet-gateway vpn-client generate'] = """
type: command
short-summary: Generate VPN client configuration.
long-summary: The command outputs a URL to a zip file for the generated VPN client configuration.
examples:
- name: Create the VPN client configuration for RADIUS with EAP-MSCHAV2 authentication.
text: az network vnet-gateway vpn-client generate -g MyResourceGroup -n MyVnetGateway --authentication-method EAPMSCHAPv2
- name: Create the VPN client configuration for AMD64 architecture.
text: az network vnet-gateway vpn-client generate -g MyResourceGroup -n MyVnetGateway --processor-architecture Amd64
- name: Generate VPN client configuration. (autogenerated)
text: |
az network vnet-gateway vpn-client generate --name MyVnetGateway --processor-architecture Amd64 --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network vnet-gateway vpn-client show-url'] = """
type: command
short-summary: Retrieve a pre-generated VPN client configuration.
long-summary: The profile needs to be generated first using the 'vpn-client generate' command.
examples:
- name: Get the pre-generated point-to-site VPN client of the virtual network gateway.
text: az network vnet-gateway vpn-client show-url -g MyResourceGroup -n MyVnetGateway
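  - name: Generate the point-to-site VPN client profile and then retrieve its download URL (illustrative).
    text: |
        az network vnet-gateway vpn-client generate -g MyResourceGroup -n MyVnetGateway
        az network vnet-gateway vpn-client show-url -g MyResourceGroup -n MyVnetGateway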
"""
helps['network vnet-gateway wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the virtual network gateway is met.
examples:
- name: Pause CLI until the virtual network gateway is created.
text: az network vnet-gateway wait -g MyResourceGroup -n MyVnetGateway --created
- name: Place the CLI in a waiting state until a condition of the virtual network gateway is met. (autogenerated)
text: |
az network vnet-gateway wait --name MyVnetGateway --resource-group MyResourceGroup --updated
crafted: true
"""
helps['network vnet-gateway aad'] = """
type: group
short-summary: Manage AAD (Azure Active Directory) authentication of a virtual network gateway.
"""
helps['network vnet-gateway aad assign'] = """
type: command
short-summary: Assign/Update AAD (Azure Active Directory) authentication to a virtual network gateway.
examples:
- name: Assign AAD authentication to a virtual network gateway
text: |-
az network vnet-gateway aad assign \\
--resource-group MyResourceGroup \\
--gateway-name MyVnetGateway \\
--tenant MyAADTenantURI \\
--audience MyAADAudienceId \\
--issuer MyAADIssuerURI
"""
helps['network vnet-gateway aad show'] = """
type: command
short-summary: Show AAD (Azure Active Directory) authentication of a virtual network gateway.
examples:
- name: Show AAD information
text: az network vnet-gateway aad show --resource-group MyResourceGroup --gateway-name MyVnetGateway
"""
helps['network vnet-gateway aad remove'] = """
type: command
short-summary: Remove AAD (Azure Active Directory) authentication from a virtual network gateway.
examples:
- name: Remove AAD information
text: az network vnet-gateway aad remove --resource-group MyResourceGroup --gateway-name MyVnetGateway
"""
helps['network vpn-connection'] = """
type: group
short-summary: Manage VPN connections.
long-summary: >
For more information on site-to-site connections,
visit https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-howto-site-to-site-resource-manager-cli.
For more information on Vnet-to-Vnet connections, visit https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-howto-vnet-vnet-cli
"""
helps['network vpn-connection create'] = """
type: command
short-summary: Create a VPN connection.
long-summary: The VPN Gateway and Local Network Gateway must be provisioned before creating the connection between them.
parameters:
- name: --vnet-gateway1
short-summary: Name or ID of the source virtual network gateway.
- name: --vnet-gateway2
short-summary: Name or ID of the destination virtual network gateway to connect to using a 'Vnet2Vnet' connection.
- name: --local-gateway2
short-summary: Name or ID of the destination local network gateway to connect to using an 'IPSec' connection.
- name: --express-route-circuit2
short-summary: Name or ID of the destination ExpressRoute to connect to using an 'ExpressRoute' connection.
- name: --authorization-key
short-summary: The authorization key for the VPN connection.
- name: --enable-bgp
short-summary: Enable BGP for this VPN connection.
- name: --validate
short-summary: Display and validate the ARM template but do not create any resources.
examples:
- name: >
Create a site-to-site connection between an Azure virtual network and an on-premises local network gateway.
text: >
az network vpn-connection create -g MyResourceGroup -n MyConnection --vnet-gateway1 MyVnetGateway --local-gateway2 MyLocalGateway --shared-key Abc123
- name: Create a VPN connection. (autogenerated)
text: |
az network vpn-connection create --location westus2 --name MyConnection --resource-group MyResourceGroup --shared-key Abc123 --vnet-gateway1 MyVnetGateway --vnet-gateway2 /subscriptions/{subscriptionID}/resourceGroups/TestBGPRG1/providers/Microsoft.Network/virtualNetworkGateways/VNet1GW
crafted: true
- name: Create a VPN connection. (autogenerated)
text: |
az network vpn-connection create --local-gateway2 MyLocalGateway --location westus2 --name MyConnection --resource-group MyResourceGroup --shared-key Abc123 --vnet-gateway1 MyVnetGateway
crafted: true
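  - name: Create an ExpressRoute connection (illustrative; assumes MyVnetGateway is an ExpressRoute-type gateway and MyCircuitId is the circuit's resource ID).
    text: az network vpn-connection create -g MyResourceGroup -n MyERConnection --vnet-gateway1 MyVnetGateway --express-route-circuit2 MyCircuitId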
"""
helps['network vpn-connection delete'] = """
type: command
short-summary: Delete a VPN connection.
examples:
- name: Delete a VPN connection.
text: az network vpn-connection delete -g MyResourceGroup -n MyConnection
"""
helps['network vpn-connection ipsec-policy'] = """
type: group
short-summary: Manage VPN connection IPSec policies.
"""
helps['network vpn-connection ipsec-policy add'] = """
type: command
short-summary: Add a VPN connection IPSec policy.
long-summary: Set all IPsec policies of a VPN connection. If you want to set any IPsec policy, you must set them all.
examples:
- name: Add specified IPsec policies to a connection instead of relying on defaults.
text: |
az network vpn-connection ipsec-policy add -g MyResourceGroup --connection-name MyConnection \\
--dh-group DHGroup14 --ike-encryption AES256 --ike-integrity SHA384 --ipsec-encryption DES3 \\
--ipsec-integrity GCMAES256 --pfs-group PFS2048 --sa-lifetime 27000 --sa-max-size 102400000
"""
helps['network vpn-connection ipsec-policy clear'] = """
type: command
short-summary: Delete all IPsec policies on a VPN connection.
examples:
- name: Remove all previously specified IPsec policies from a connection.
text: az network vpn-connection ipsec-policy clear -g MyResourceGroup --connection-name MyConnection
"""
helps['network vpn-connection ipsec-policy list'] = """
type: command
short-summary: List IPSec policies associated with a VPN connection.
examples:
- name: List the IPsec policies set on a connection.
text: az network vpn-connection ipsec-policy list -g MyResourceGroup --connection-name MyConnection
"""
helps['network vpn-connection list'] = """
type: command
short-summary: List all VPN connections in a resource group.
examples:
- name: List all VPN connections in a resource group.
text: az network vpn-connection list -g MyResourceGroup
"""
helps['network vpn-connection shared-key'] = """
type: group
short-summary: Manage VPN shared keys.
"""
helps['network vpn-connection shared-key reset'] = """
type: command
short-summary: Reset a VPN connection shared key.
examples:
- name: Reset the shared key on a connection.
text: az network vpn-connection shared-key reset -g MyResourceGroup --connection-name MyConnection --key-length 128
- name: Reset a VPN connection shared key. (autogenerated)
text: |
az network vpn-connection shared-key reset --connection-name MyConnection --key-length 128 --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network vpn-connection shared-key show'] = """
type: command
short-summary: Retrieve a VPN connection shared key.
examples:
- name: View the shared key of a connection.
text: az network vpn-connection shared-key show -g MyResourceGroup --connection-name MyConnection
- name: Retrieve a VPN connection shared key. (autogenerated)
text: |
az network vpn-connection shared-key show --connection-name MyConnection --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network vpn-connection shared-key update'] = """
type: command
short-summary: Update a VPN connection shared key.
examples:
- name: Change the shared key for the connection to "Abc123".
text: az network vpn-connection shared-key update -g MyResourceGroup --connection-name MyConnection --value Abc123
- name: Update a VPN connection shared key. (autogenerated)
text: |
az network vpn-connection shared-key update --connection-name MyConnection --resource-group MyResourceGroup --subscription MySubscription --value Abc123
crafted: true
"""
helps['network vpn-connection show'] = """
type: command
short-summary: Get the details of a VPN connection.
examples:
- name: View the details of a VPN connection.
text: az network vpn-connection show -g MyResourceGroup -n MyConnection
"""
helps['network vpn-connection update'] = """
type: command
short-summary: Update a VPN connection.
examples:
- name: Add BGP to an existing connection.
text: az network vpn-connection update -g MyResourceGroup -n MyConnection --enable-bgp True
- name: Update a VPN connection. (autogenerated)
text: |
az network vpn-connection update --name MyConnection --resource-group MyResourceGroup --use-policy-based-traffic-selectors true
crafted: true
"""
helps['network vrouter'] = """
type: group
short-summary: Manage the virtual router. This feature supports both VirtualHub and VirtualRouter. Since VirtualRouter is deprecated, we recommend creating a VirtualRouter with --hosted-subnet instead.
"""
helps['network vrouter create'] = """
type: command
short-summary: Create a virtual router.
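examples:
  - name: Create a virtual router on a dedicated subnet (illustrative; assumes MySubnetId is the resource ID of that subnet, used with the recommended --hosted-subnet option).
    text: az network vrouter create -g MyResourceGroup -n MyVirtualRouter --hosted-subnet MySubnetId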
"""
helps['network vrouter update'] = """
type: command
short-summary: Update a virtual router.
examples:
- name: Update a virtual router. (autogenerated)
text: |
az network vrouter update --name myvirtualrouter --resource-group myresourcegroup --tags super_secure no_80 no_22
crafted: true
"""
helps['network vrouter show'] = """
type: command
short-summary: Show a virtual router.
"""
helps['network vrouter list'] = """
type: command
short-summary: List all virtual routers under a subscription or a resource group.
"""
helps['network vrouter delete'] = """
type: command
short-summary: Delete a virtual router under a resource group.
"""
helps['network vrouter peering'] = """
type: group
short-summary: Manage the virtual router peering.
"""
helps['network vrouter peering create'] = """
type: command
short-summary: Create a virtual router peering.
"""
helps['network vrouter peering update'] = """
type: command
short-summary: Update a virtual router peering.
"""
helps['network vrouter peering list'] = """
type: command
short-summary: List all virtual router peerings under a resource group.
"""
helps['network vrouter peering show'] = """
type: command
short-summary: Show a virtual router peering.
"""
helps['network vrouter peering delete'] = """
type: command
short-summary: Delete a virtual router peering.
"""
helps['network routeserver'] = """
type: group
short-summary: Manage the route server.
"""
helps['network routeserver create'] = """
type: command
short-summary: Create a route server.
examples:
- name: Create a route server.
text: |
az network routeserver create --resource-group myresourcegroup --name myrouteserver --hosted-subnet my_subnet_id
"""
helps['network routeserver update'] = """
type: command
short-summary: Update a route server.
examples:
- name: Update a route server.
text: |
az network routeserver update --name myrouteserver --resource-group myresourcegroup --tags super_secure no_80 no_22
crafted: true
"""
helps['network routeserver show'] = """
type: command
short-summary: Show a route server.
"""
helps['network routeserver list'] = """
type: command
short-summary: List all route servers under a subscription or a resource group.
"""
helps['network routeserver delete'] = """
type: command
short-summary: Delete a route server under a resource group.
"""
helps['network routeserver peering'] = """
type: group
short-summary: Manage the route server peering.
"""
helps['network routeserver peering create'] = """
type: command
short-summary: Create a route server peering.
"""
helps['network routeserver peering update'] = """
type: command
short-summary: Update a route server peering.
"""
helps['network routeserver peering list'] = """
type: command
short-summary: List all route server peerings under a resource group.
"""
helps['network routeserver peering show'] = """
type: command
short-summary: Show a route server peering.
"""
helps['network routeserver peering delete'] = """
type: command
short-summary: Delete a route server peering.
"""
helps['network routeserver peering list-learned-routes'] = """
type: command
short-summary: List all routes that the route server BGP connection has learned.
"""
helps['network routeserver peering list-advertised-routes'] = """
type: command
short-summary: List all routes that the route server BGP connection is advertising to the specified peer.
"""
helps['network watcher'] = """
type: group
short-summary: Manage the Azure Network Watcher.
long-summary: >
Network Watcher assists with monitoring and diagnosing conditions at a network scenario level. To learn more visit https://docs.microsoft.com/azure/network-watcher/
"""
helps['network watcher configure'] = """
type: command
short-summary: Configure the Network Watcher service for different regions.
parameters:
- name: --enabled
short-summary: Enabled status of Network Watcher in the specified regions.
- name: --locations -l
short-summary: Space-separated list of locations to configure.
- name: --resource-group -g
short-summary: Name of resource group. Required when enabling new regions.
long-summary: >
When a previously disabled region is enabled to use Network Watcher, a
Network Watcher resource will be created in this resource group.
examples:
- name: Configure Network Watcher for the West US region.
text: az network watcher configure -g NetworkWatcherRG -l westus --enabled true
"""
helps['network watcher connection-monitor'] = """
type: group
short-summary: Manage connection monitoring between an Azure Virtual Machine and any IP resource.
long-summary: >
Connection monitor can be used to monitor network connectivity between an Azure virtual machine and an IP address.
The IP address can be assigned to another Azure resource or a resource on the Internet or on-premises. To learn
more visit https://aka.ms/connectionmonitordoc
"""
helps['network watcher connection-monitor create'] = """
type: command
short-summary: Create a connection monitor.
long-summary: |
This extension allows the creation of both V1 and V2 versions of a connection monitor.
A V1 connection monitor supports a single source and destination endpoint, configured with the usual V1 argument groups.
A V2 connection monitor supports multiple endpoints and several test protocols, configured with the V2 argument groups.
parameters:
- name: --source-resource
short-summary: >
Currently only Virtual Machines are supported.
- name: --dest-resource
short-summary: >
Currently only Virtual Machines are supported.
examples:
- name: Create a connection monitor for a virtual machine.
text: |
az network watcher connection-monitor create -g MyResourceGroup -n MyConnectionMonitorName \\
--source-resource MyVM
- name: Create a V2 connection monitor
text: >
az network watcher connection-monitor create
--name MyV2ConnectionMonitor
--endpoint-source-name "vm01"
--endpoint-source-resource-id MyVM01ResourceID
--endpoint-dest-name bing
--endpoint-dest-address bing.com
--test-config-name TCPTestConfig
--protocol Tcp
--tcp-port 2048
- name: Create a connection monitor. (autogenerated)
text: |
az network watcher connection-monitor create --endpoint-dest-address bing.com --endpoint-dest-name bing --endpoint-source-name "vm01" --endpoint-source-resource-id MyVM01ResourceID --location westus2 --name MyConnectionMonitorName --protocol Tcp --tcp-port 2048 --test-config-name TCPTestConfig
crafted: true
"""
helps['network watcher connection-monitor delete'] = """
type: command
short-summary: Delete a connection monitor for the given region.
examples:
- name: Delete a connection monitor for the given region.
text: az network watcher connection-monitor delete -l westus -n MyConnectionMonitorName
"""
helps['network watcher connection-monitor list'] = """
type: command
short-summary: List connection monitors for the given region.
examples:
- name: List a connection monitor for the given region.
text: az network watcher connection-monitor list -l westus
- name: List connection monitors for the given region. (autogenerated)
text: |
az network watcher connection-monitor list --location westus --subscription MySubscription
crafted: true
"""
helps['network watcher connection-monitor query'] = """
type: command
short-summary: Query a snapshot of the most recent connection state of a connection monitor.
examples:
- name: Query a connection monitor for the given region.
text: az network watcher connection-monitor query -l westus -n MyConnectionMonitorName
"""
helps['network watcher connection-monitor show'] = """
type: command
short-summary: Show a connection monitor by name.
examples:
- name: Show a connection monitor for the given name.
text: az network watcher connection-monitor show -l westus -n MyConnectionMonitorName
"""
helps['network watcher connection-monitor start'] = """
type: command
short-summary: Start the specified connection monitor.
examples:
- name: Start the specified connection monitor.
text: az network watcher connection-monitor start -l westus -n MyConnectionMonitorName
"""
helps['network watcher connection-monitor stop'] = """
type: command
short-summary: Stop the specified connection monitor.
examples:
- name: Stop the specified connection monitor.
text: az network watcher connection-monitor stop -l westus -n MyConnectionMonitorName
"""
helps['network watcher connection-monitor endpoint'] = """
type: group
short-summary: Manage the endpoints of a connection monitor
"""
helps['network watcher connection-monitor endpoint add'] = """
type: command
short-summary: Add an endpoint to a connection monitor
examples:
- name: Add an external address as a destination endpoint
text: >
az network watcher connection-monitor endpoint add
--connection-monitor MyConnectionMonitor
--location westus
--name MyExternalEndpoint
--address "bing.com"
--dest-test-groups DefaultTestGroup
--type ExternalAddress
- name: Add an Azure VM as a source endpoint
text: >
az network watcher connection-monitor endpoint add
--connection-monitor MyConnectionMonitor
--location westus
--name MyVMEndpoint
--resource-id MyVMResourceID
--source-test-groups DefaultTestGroup
--type AzureVM
- name: Add a Subnet as a source endpoint with addresses excluded
text: >
az network watcher connection-monitor endpoint add
--connection-monitor MyConnectionMonitor
--location westus
--name MySubnetEndpoint
--resource-id MySubnetID
--source-test-groups DefaultTestGroup
--type AzureSubnet
--address-exclude 10.0.0.25 10.0.0.30
--coverage-level BelowAverage
"""
helps['network watcher connection-monitor endpoint remove'] = """
type: command
short-summary: Remove an endpoint from a connection monitor
examples:
- name: Remove endpoint from all test groups of a connection monitor
text: >
az network watcher connection-monitor endpoint remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyEndpoint
- name: Remove endpoint from two test groups of a connection monitor
text: >
az network watcher connection-monitor endpoint remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyEndpoint
--test-groups DefaultTestGroup HealthCheckTestGroup
"""
helps['network watcher connection-monitor endpoint show'] = """
type: command
short-summary: Show an endpoint from a connection monitor
examples:
- name: Show an endpoint from a connection monitor. (autogenerated)
text: |
az network watcher connection-monitor endpoint show --connection-monitor MyConnectionMonitor --location westus2 --name myconnectionmonitorendpoint --subscription MySubscription
crafted: true
"""
helps['network watcher connection-monitor endpoint list'] = """
type: command
short-summary: List all endpoints from a connection monitor
examples:
- name: List all endpoints from a connection monitor. (autogenerated)
text: |
az network watcher connection-monitor endpoint list --connection-monitor MyConnectionMonitor --location westus2
crafted: true
"""
helps['network watcher connection-monitor test-configuration'] = """
type: group
short-summary: Manage the test configurations of a connection monitor
"""
helps['network watcher connection-monitor test-configuration add'] = """
type: command
short-summary: Add a test configuration to a connection monitor
examples:
- name: Add a test configuration with HTTP supported
text: >
az network watcher connection-monitor test-configuration add
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestConfiguration
--test-groups DefaultTestGroup
--protocol Http
--http-request-header name=Host value=bing.com
--http-request-header name=UserAgent value=Edge
- name: Add a test configuration with TCP supported
text: >
az network watcher connection-monitor test-configuration add
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestConfiguration
--test-groups TCPTestGroup DefaultTestGroup
--protocol Tcp
--tcp-port 4096
"""
helps['network watcher connection-monitor test-configuration remove'] = """
type: command
short-summary: Remove a test configuration from a connection monitor
examples:
- name: Remove a test configuration from all test groups of a connection monitor
text: >
az network watcher connection-monitor test-configuration remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyTCPTestConfiguration
- name: Remove a test configuration from two test groups of a connection monitor
text: >
az network watcher connection-monitor test-configuration remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestConfiguration
--test-groups HTTPTestGroup DefaultTestGroup
"""
helps['network watcher connection-monitor test-configuration show'] = """
type: command
short-summary: Show a test configuration from a connection monitor
examples:
- name: Show a test configuration from a connection monitor. (autogenerated)
text: |
az network watcher connection-monitor test-configuration show --connection-monitor MyConnectionMonitor --location westus2 --name MyConnectionMonitorTestConfiguration
crafted: true
"""
helps['network watcher connection-monitor test-configuration list'] = """
type: command
short-summary: List all test configurations of a connection monitor
examples:
- name: List all test configurations of a connection monitor. (autogenerated)
text: |
az network watcher connection-monitor test-configuration list --connection-monitor MyConnectionMonitor --location westus2
crafted: true
"""
helps['network watcher connection-monitor test-group'] = """
type: group
short-summary: Manage a test group of a connection monitor
"""
helps['network watcher connection-monitor test-group add'] = """
type: command
short-summary: Add a test group, along with newly added or existing endpoints and test configurations, to a connection monitor
examples:
- name: Add a test group along with existing endpoint and test configuration via their names
text: >
az network watcher connection-monitor test-group add
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestGroup
--endpoint-source-name MySourceEndpoint
--endpoint-dest-name MyDestinationEndpoint
--test-config-name MyTestConfiguration
- name: Add a test group along with a newly added source endpoint and an existing test configuration referenced by name
text: >
az network watcher connection-monitor test-group add
--connection-monitor MyConnectionMonitor
--location westus
--name MyAccessibilityTestGroup
--endpoint-source-name MySourceEndpoint
--endpoint-source-resource-id MyLogAnalysisWorkspaceID
--endpoint-dest-name MyExistingDestinationEndpoint
--test-config-name MyExistingTestConfiguration
- name: Add a test group along with new-added endpoints and test configuration
text: >
az network watcher connection-monitor test-group add
--connection-monitor MyConnectionMonitor
--location westus
--name MyAccessibilityTestGroup
--endpoint-source-name MySourceEndpoint
--endpoint-source-resource-id MyVMResourceID
--endpoint-dest-name bing
--endpoint-dest-address bing.com
--test-config-name MyNewTestConfiguration
--protocol Tcp
--tcp-port 4096
"""
helps['network watcher connection-monitor test-group remove'] = """
type: command
short-summary: Remove test group from a connection monitor
examples:
- name: Remove test group from a connection monitor. (autogenerated)
text: |
az network watcher connection-monitor test-group remove --connection-monitor MyConnectionMonitor --location westus2 --name MyConnectionMonitorTestGroup
crafted: true
"""
helps['network watcher connection-monitor test-group show'] = """
type: command
short-summary: Show a test group of a connection monitor
examples:
- name: Show a test group of a connection monitor. (autogenerated)
text: |
az network watcher connection-monitor test-group show --connection-monitor MyConnectionMonitor --location westus2 --name MyConnectionMonitorTestGroup --subscription MySubscription
crafted: true
"""
helps['network watcher connection-monitor test-group list'] = """
type: command
short-summary: List all test groups of a connection monitor
examples:
- name: List all test groups of a connection monitor. (autogenerated)
text: |
az network watcher connection-monitor test-group list --connection-monitor MyConnectionMonitor --location westus2
crafted: true
"""
helps['network watcher connection-monitor output'] = """
type: group
short-summary: Manage the outputs of a connection monitor
"""
helps['network watcher connection-monitor output add'] = """
type: command
short-summary: Add an output to a connection monitor
"""
helps['network watcher connection-monitor output remove'] = """
type: command
short-summary: Remove all outputs from a connection monitor
"""
helps['network watcher connection-monitor output list'] = """
type: command
short-summary: List all outputs of a connection monitor
"""
helps['network watcher flow-log'] = """
type: group
short-summary: Manage network security group flow logging.
long-summary: >
For more information about configuring flow logs visit https://docs.microsoft.com/azure/network-watcher/network-watcher-nsg-flow-logging-cli
"""
helps['network watcher flow-log configure'] = """
type: command
short-summary: Configure flow logging on a network security group.
parameters:
- name: --nsg
short-summary: Name or ID of the Network Security Group to target.
- name: --enabled
short-summary: Enable logging.
- name: --retention
short-summary: Number of days to retain logs.
- name: --storage-account
short-summary: Name or ID of the storage account in which to save the flow logs.
examples:
- name: Enable NSG flow logs.
text: az network watcher flow-log configure -g MyResourceGroup --enabled true --nsg MyNsg --storage-account MyStorageAccount
- name: Disable NSG flow logs.
text: az network watcher flow-log configure -g MyResourceGroup --enabled false --nsg MyNsg
"""
helps['network watcher flow-log create'] = """
type: command
short-summary: Create a flow log on a network security group.
examples:
- name: Create a flow log with Network Security Group name
text: >
az network watcher flow-log create
--location westus
--resource-group MyResourceGroup
--name MyFlowLog
--nsg MyNetworkSecurityGroupName
--storage-account account
- name: Create a flow log with Network Security Group ID (could be in other resource group)
text: >
az network watcher flow-log create
--location westus
--name MyFlowLog
--nsg MyNetworkSecurityGroupID
--storage-account account
"""
helps['network watcher flow-log list'] = """
type: command
short-summary: List all flow log resources for the specified Network Watcher.
examples:
- name: List all flow log resources for the specified Network Watcher. (autogenerated)
text: |
az network watcher flow-log list --location westus2
crafted: true
"""
helps['network watcher flow-log delete'] = """
type: command
short-summary: Delete the specified flow log resource.
examples:
- name: Delete the specified flow log resource. (autogenerated)
text: |
az network watcher flow-log delete --location westus2 --name MyFlowLogger
crafted: true
"""
helps['network watcher flow-log show'] = """
type: command
short-summary: Get the flow log configuration of a network security group.
examples:
- name: Show NSG flow logs. (Deprecated)
text: az network watcher flow-log show -g MyResourceGroup --nsg MyNsg
- name: Show NSG flow logs in the Azure Resource Manager format.
text: az network watcher flow-log show --location MyNetworkWatcher --name MyFlowLog
"""
helps['network watcher flow-log update'] = """
type: command
short-summary: Update the flow log configuration of a network security group.
examples:
- name: Update storage account with name to let resource group identify the storage account and network watcher
text: >
az network watcher flow-log update
--location westus
--resource-group MyResourceGroup
--name MyFlowLog
--storage-account accountname
- name: Update storage account with ID to let location identify the network watcher
text: >
az network watcher flow-log update
--location westus
--resource-group MyResourceGroup
--name MyFlowLog
--storage-account accountid
- name: Update Network Security Group on another resource group
text: >
az network watcher flow-log update
--location westus
--resource-group MyAnotherResourceGroup
--name MyFlowLog
--nsg MyNSG
- name: Update Workspace on another resource group
text: >
az network watcher flow-log update
--location westus
--resource-group MyAnotherResourceGroup
--name MyFlowLog
--workspace MyAnotherLogAnalyticWorkspace
"""
helps['network watcher list'] = """
type: command
short-summary: List Network Watchers.
examples:
- name: List all Network Watchers in a subscription.
text: az network watcher list
"""
helps['network watcher packet-capture'] = """
type: group
short-summary: Manage packet capture sessions on VMs.
long-summary: >
These commands require both that Azure Network Watcher is enabled for the VM's region and that the AzureNetworkWatcherExtension is enabled on the VM.
For more information visit https://docs.microsoft.com/azure/network-watcher/network-watcher-packet-capture-manage-cli
"""
helps['network watcher packet-capture create'] = """
type: command
short-summary: Create and start a packet capture session.
parameters:
- name: --capture-limit
short-summary: The maximum size in bytes of the capture output.
- name: --capture-size
short-summary: Number of bytes captured per packet. Excess bytes are truncated.
- name: --time-limit
short-summary: Maximum duration of the capture session in seconds.
- name: --storage-account
short-summary: Name or ID of a storage account to save the packet capture to.
- name: --storage-path
short-summary: Fully qualified URI of an existing storage container in which to store the capture file.
long-summary: >
If not specified, the container 'network-watcher-logs' will be
created if it does not exist and the capture file will be stored there.
- name: --file-path
short-summary: >
Local path on the targeted VM at which to save the packet capture. For Linux VMs, the
path must start with /var/captures.
- name: --vm
short-summary: Name or ID of the VM to target.
- name: --filters
short-summary: JSON encoded list of packet filters. Use `@{path}` to load from file.
examples:
- name: Create a packet capture session on a VM.
text: az network watcher packet-capture create -g MyResourceGroup -n MyPacketCaptureName --vm MyVm --storage-account MyStorageAccount
- name: Create a packet capture session on a VM with optional filters for protocols, local IP address and remote IP address ranges and ports.
text: |
az network watcher packet-capture create -g MyResourceGroup -n MyPacketCaptureName --vm MyVm \\
--storage-account MyStorageAccount --filters '[ \\
{ \\
"protocol":"TCP", \\
"remoteIPAddress":"1.1.1.1-255.255.255", \\
"localIPAddress":"10.0.0.3", \\
"remotePort":"20" \\
}, \\
{ \\
"protocol":"TCP", \\
"remoteIPAddress":"1.1.1.1-255.255.255", \\
"localIPAddress":"10.0.0.3", \\
"remotePort":"80" \\
}, \\
{ \\
"protocol":"TCP", \\
"remoteIPAddress":"1.1.1.1-255.255.255", \\
"localIPAddress":"10.0.0.3", \\
"remotePort":"443" \\
}, \\
{ \\
"protocol":"UDP" \\
}]'
"""
helps['network watcher packet-capture delete'] = """
type: command
short-summary: Delete a packet capture session.
examples:
- name: Delete a packet capture session. This only deletes the session and not the capture file.
text: az network watcher packet-capture delete -n packetCaptureName -l westcentralus
- name: Delete a packet capture session. (autogenerated)
text: |
az network watcher packet-capture delete --location westcentralus --name packetCaptureName --subscription MySubscription
crafted: true
"""
helps['network watcher packet-capture list'] = """
type: command
short-summary: List all packet capture sessions within a resource group.
examples:
- name: List all packet capture sessions within a region.
text: az network watcher packet-capture list -l westus
- name: List all packet capture sessions within a resource group (autogenerated)
text: |
az network watcher packet-capture list --location westus --subscription MySubscription
crafted: true
"""
helps['network watcher packet-capture show'] = """
type: command
short-summary: Show details of a packet capture session.
examples:
- name: Show a packet capture session.
text: az network watcher packet-capture show -l westus -n MyPacketCapture
"""
helps['network watcher packet-capture show-status'] = """
type: command
short-summary: Show the status of a packet capture session.
examples:
- name: Show the status of a packet capture session.
text: az network watcher packet-capture show-status -l westus -n MyPacketCapture
"""
helps['network watcher packet-capture stop'] = """
type: command
short-summary: Stop a running packet capture session.
examples:
- name: Stop a running packet capture session.
text: az network watcher packet-capture stop -l westus -n MyPacketCapture
"""
helps['network watcher run-configuration-diagnostic'] = """
type: command
short-summary: Run a configuration diagnostic on a target resource.
long-summary: >
Requires that Network Watcher is enabled for the region in which the target is located.
examples:
- name: Run configuration diagnostic on a VM with a single query.
text: |
az network watcher run-configuration-diagnostic --resource {VM_ID}
--direction Inbound --protocol TCP --source 12.11.12.14 --destination 10.1.1.4 --port 12100
- name: Run configuration diagnostic on a VM with multiple queries.
text: |
az network watcher run-configuration-diagnostic --resource {VM_ID}
--queries '[
{
"direction": "Inbound", "protocol": "TCP", "source": "12.11.12.14",
"destination": "10.1.1.4", "destinationPort": "12100"
},
{
"direction": "Inbound", "protocol": "TCP", "source": "12.11.12.0/32",
"destination": "10.1.1.4", "destinationPort": "12100"
},
{
"direction": "Outbound", "protocol": "TCP", "source": "12.11.12.14",
"destination": "10.1.1.4", "destinationPort": "12100"
}]'
"""
helps['network watcher show-next-hop'] = """
type: command
short-summary: Get information on the 'next hop' of a VM.
long-summary: >
Requires that Network Watcher is enabled for the region in which the VM is located.
For more information about show-next-hop visit https://docs.microsoft.com/azure/network-watcher/network-watcher-check-next-hop-cli
examples:
- name: Get the next hop from a VM's assigned IP address to a destination at 10.1.0.4.
text: az network watcher show-next-hop -g MyResourceGroup --vm MyVm --source-ip 10.0.0.4 --dest-ip 10.1.0.4
"""
helps['network watcher show-security-group-view'] = """
type: command
short-summary: Get detailed security information on a VM for the currently configured network security group.
long-summary: >
For more information on using security group view visit https://docs.microsoft.com/azure/network-watcher/network-watcher-security-group-view-cli
examples:
- name: Get the network security group information for the specified VM.
text: az network watcher show-security-group-view -g MyResourceGroup --vm MyVm
"""
helps['network watcher show-topology'] = """
type: command
short-summary: Get the network topology of a resource group, virtual network or subnet.
long-summary: For more information about using network topology visit https://docs.microsoft.com/azure/network-watcher/network-watcher-topology-cli
parameters:
- name: --resource-group -g
short-summary: The name of the target resource group to perform topology on.
- name: --location -l
short-summary: Location. Defaults to the location of the target resource group.
long-summary: >
Topology information is only shown for resources within the target
resource group that are within the specified region.
examples:
- name: Use show-topology to get the topology of resources within a resource group.
text: az network watcher show-topology -g MyResourceGroup
"""
helps['network watcher test-connectivity'] = """
type: command
short-summary: Test if a connection can be established between a Virtual Machine and a given endpoint.
long-summary: >
To check connectivity between two VMs in different regions, use the VM IDs instead of the VM names for the source and destination resource arguments.
To register for this feature or see additional examples visit https://docs.microsoft.com/azure/network-watcher/network-watcher-connectivity-cli
parameters:
- name: --source-resource
short-summary: Name or ID of the resource from which to originate traffic.
long-summary: Currently only Virtual Machines are supported.
- name: --source-port
short-summary: Port number from which to originate traffic.
- name: --dest-resource
short-summary: Name or ID of the resource to receive traffic.
long-summary: Currently only Virtual Machines are supported.
- name: --dest-port
short-summary: Port number on which to receive traffic.
- name: --dest-address
short-summary: The IP address or URI at which to receive traffic.
examples:
- name: Check connectivity between two virtual machines in the same resource group over port 80.
text: az network watcher test-connectivity -g MyResourceGroup --source-resource MyVmName1 --dest-resource MyVmName2 --dest-port 80
- name: Check connectivity between two virtual machines in the same subscription in two different resource groups over port 80.
text: az network watcher test-connectivity --source-resource MyVmId1 --dest-resource MyVmId2 --dest-port 80
"""
helps['network watcher test-ip-flow'] = """
type: command
short-summary: Test IP flow to/from a VM given the currently configured network security group rules.
long-summary: >
Requires that Network Watcher is enabled for the region in which the VM is located.
For more information visit https://docs.microsoft.com/azure/network-watcher/network-watcher-check-ip-flow-verify-cli
parameters:
- name: --local
short-summary: >
The private IPv4 address of the VM's NIC and the port of the packet, in
X.X.X.X:PORT format. `*` can be used for port when direction is outbound.
- name: --remote
short-summary: >
The IPv4 address and port for the remote side of the packet, in
X.X.X.X:PORT format. `*` can be used for port when the direction is inbound.
- name: --direction
short-summary: Direction of the packet relative to the VM.
- name: --protocol
short-summary: Protocol to test.
examples:
- name: Run test-ip-flow verify to test logical connectivity from a VM to the specified destination IPv4 address and port.
text: |
az network watcher test-ip-flow -g MyResourceGroup --direction Outbound \\
--protocol TCP --local 10.0.0.4:* --remote 10.1.0.4:80 --vm MyVm
"""
helps['network watcher troubleshooting'] = """
type: group
short-summary: Manage Network Watcher troubleshooting sessions.
long-summary: >
For more information on configuring troubleshooting visit https://docs.microsoft.com/azure/network-watcher/network-watcher-troubleshoot-manage-cli
"""
helps['network watcher troubleshooting show'] = """
type: command
short-summary: Get the results of the last troubleshooting operation.
examples:
- name: Show the results or status of a troubleshooting operation for a Vnet Gateway.
text: az network watcher troubleshooting show -g MyResourceGroup --resource MyVnetGateway --resource-type vnetGateway
"""
helps['network watcher troubleshooting start'] = """
type: command
short-summary: Troubleshoot issues with VPN connections or gateway connectivity.
parameters:
- name: --resource-type -t
short-summary: The type of target resource to troubleshoot, if resource ID is not specified.
- name: --storage-account
short-summary: Name or ID of the storage account in which to store the troubleshooting results.
- name: --storage-path
short-summary: Fully qualified URI to the storage blob container in which to store the troubleshooting results.
examples:
- name: Start a troubleshooting operation on a VPN Connection.
text: |
az network watcher troubleshooting start -g MyResourceGroup --resource MyVPNConnection \\
--resource-type vpnConnection --storage-account MyStorageAccount \\
--storage-path https://{storageAccountName}.blob.core.windows.net/{containerName}
"""
helps['network list-service-aliases'] = """
type: command
short-summary: List available service aliases in the region which can be used for Service Endpoint Policies.
examples:
- name: List available service aliases in the region which can be used for Service Endpoint Policies. (autogenerated)
text: |
az network list-service-aliases --location westus2
crafted: true
"""
helps['network bastion'] = """
type: group
short-summary: Manage Azure Bastion hosts.
"""
helps['network bastion create'] = """
type: command
short-summary: Create an Azure Bastion host machine.
examples:
- name: Create an Azure Bastion host machine. (autogenerated)
text: |
az network bastion create --location westus2 --name MyBastionHost --public-ip-address MyPublicIpAddress --resource-group MyResourceGroup --vnet-name MyVnet
crafted: true
"""
helps['network bastion delete'] = """
type: command
short-summary: Delete an Azure Bastion host machine.
examples:
- name: Delete an Azure Bastion host machine. (autogenerated)
text: |
az network bastion delete --name MyBastionHost --resource-group MyResourceGroup
crafted: true
"""
helps['network bastion list'] = """
type: command
short-summary: List all Azure Bastion host machines.
"""
helps['network bastion show'] = """
type: command
short-summary: Show an Azure Bastion host machine.
examples:
- name: Show an Azure Bastion host machine. (autogenerated)
text: |
az network bastion show --name MyBastionHost --resource-group MyResourceGroup
crafted: true
"""
helps['network security-partner-provider'] = """
type: group
short-summary: Manage Azure security partner providers.
"""
helps['network security-partner-provider create'] = """
type: command
short-summary: Create an Azure security partner provider.
"""
helps['network security-partner-provider update'] = """
type: command
short-summary: Update an Azure security partner provider.
"""
helps['network security-partner-provider delete'] = """
type: command
short-summary: Delete an Azure security partner provider.
"""
helps['network security-partner-provider list'] = """
type: command
short-summary: List all Azure security partner providers.
"""
helps['network security-partner-provider show'] = """
type: command
short-summary: Show an Azure security partner provider.
"""
helps['network virtual-appliance'] = """
type: group
short-summary: Manage Azure Network Virtual Appliance.
"""
helps['network virtual-appliance create'] = """
type: command
short-summary: Create an Azure network virtual appliance.
examples:
- name: Create an Azure network virtual appliance.
text: |
az network virtual-appliance create -n MyName -g MyRG --vhub {vhubID} --vendor "barracudasdwanrelease" --scale-unit 2 -v latest --asn 10000 --init-config "echo $hello" --boot-blobs {blobUrl1} {blobUrl2} --cloud-blobs {blobUrl3} {blobUrl4}
"""
helps['network virtual-appliance update'] = """
type: command
short-summary: Update an Azure network virtual appliance.
examples:
- name: Update an Azure network virtual appliance.
text: |
az network virtual-appliance update -n MyName -g MyRG --asn 20000 --init-config "echo $hello"
"""
helps['network virtual-appliance show'] = """
type: command
short-summary: Show the details of an Azure network virtual appliance.
"""
helps['network virtual-appliance list'] = """
type: command
short-summary: List all Azure network virtual appliances.
"""
helps['network virtual-appliance delete'] = """
type: command
short-summary: Delete an Azure network virtual appliance.
"""
helps['network virtual-appliance site'] = """
type: group
short-summary: Manage Azure Network Virtual Appliance Site.
"""
helps['network virtual-appliance site create'] = """
type: command
short-summary: Create an Azure network virtual appliance site.
examples:
- name: Create an Azure network virtual appliance site.
text: |
az network virtual-appliance site create -n MyName -g MyRG --appliance-name MyAppliance --address-prefix 10.0.0.0/24 --allow --default --optimize
"""
helps['network virtual-appliance site update'] = """
type: command
short-summary: Update an Azure network virtual appliance site.
examples:
- name: Update an Azure network virtual appliance site.
text: |
az network virtual-appliance site update -n MyName -g MyRG --appliance-name MyAppliance --address-prefix 10.0.0.0/24 --allow false --default false --optimize false
"""
helps['network virtual-appliance site show'] = """
type: command
short-summary: Show the details of an Azure network virtual appliance site.
"""
helps['network virtual-appliance site list'] = """
type: command
short-summary: List all Azure network virtual appliance sites.
"""
helps['network virtual-appliance site delete'] = """
type: command
short-summary: Delete an Azure network virtual appliance site.
"""
helps['network virtual-appliance sku'] = """
type: group
short-summary: Manage Azure Network Virtual Appliance SKUs.
"""
helps['network virtual-appliance sku show'] = """
type: command
short-summary: Show the details of an Azure network virtual appliance SKU.
"""
helps['network virtual-appliance sku list'] = """
type: command
short-summary: List all Azure network virtual appliance SKUs.
"""
| 39.775608 | 693 | 0.731902 |

384d99cf190d4399c4b3f9b52aa7bbfcaa4b2fe5 | 52,703 | py | Python | bcdi/preprocessing/bcdi_utils.py | ccechatelier/bcdi | cbe3b7960414b03f8e98336c3fcd7b367de441ca | ["CECILL-B"] | null | null | null |
bcdi/preprocessing/bcdi_utils.py | ccechatelier/bcdi | cbe3b7960414b03f8e98336c3fcd7b367de441ca | ["CECILL-B"] | null | null | null |
bcdi/preprocessing/bcdi_utils.py | ccechatelier/bcdi | cbe3b7960414b03f8e98336c3fcd7b367de441ca | ["CECILL-B"] | null | null | null |
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE
# authors:
# Jerome Carnis, jerome.carnis@esrf.fr
"""Functions related to BCDI data preprocessing, before phase retrieval."""
try:
import hdf5plugin # for P10, should be imported before h5py or PyTables
except ModuleNotFoundError:
pass
import matplotlib.pyplot as plt
from numbers import Real
import numpy as np
import pathlib
from scipy.interpolate import interp1d
from scipy.ndimage.measurements import center_of_mass
from typing import no_type_check, Optional, Tuple
import xrayutilities as xu
from ..experiment import diffractometer as diff
from ..graph import graph_utils as gu
from ..utils import utilities as util
from ..utils import validation as valid
def center_fft(
data,
mask,
detector,
frames_logical,
centering="max",
fft_option="crop_asymmetric_ZYX",
**kwargs,
):
"""
Center and crop/pad the dataset depending on user parameters.
:param data: the 3D data array
:param mask: the 3D mask array
:param detector: an instance of the class Detector
:param frames_logical: array whose initial length equals the number of measured frames.
In case of padding the length changes. A frame whose index is set to 1 means
that it is used, 0 means not used, -1 means padded (added) frame.
:param centering: centering option, 'max' or 'com'. It will be overridden if the
kwarg 'fix_bragg' is provided.
:param fft_option:
- 'crop_sym_ZYX': crop the array for FFT requirements, Bragg peak centered
- 'crop_asym_ZYX': crop the array for FFT requirements without centering the
Bragg peak
- 'pad_sym_Z_crop_sym_YX': crop detector images (Bragg peak centered) and pad
the rocking angle based on 'pad_size' (Bragg peak centered)
- 'pad_sym_Z_crop_asym_YX': pad rocking angle based on 'pad_size'
(Bragg peak centered) and crop detector (Bragg peak non-centered)
- 'pad_asym_Z_crop_sym_YX': crop detector images (Bragg peak centered),
pad the rocking angle without centering the Bragg peak
- 'pad_asym_Z_crop_asym_YX': pad rocking angle and crop detector without centering
the Bragg peak
- 'pad_sym_Z': keep detector size and pad/center the rocking angle based on
'pad_size', Bragg peak centered
- 'pad_asym_Z': keep detector size and pad the rocking angle without centering
the Bragg peak
- 'pad_sym_ZYX': pad all dimensions based on 'pad_size', Bragg peak centered
- 'pad_asym_ZYX': pad all dimensions based on 'pad_size' without centering
the Bragg peak
- 'skip': keep the full dataset or crop it to the size defined by fix_size
:param kwargs:
- 'fix_bragg' = user-defined position in pixels of the Bragg peak
[z_bragg, y_bragg, x_bragg]
- 'fix_size' = user defined output array size
[zstart, zstop, ystart, ystop, xstart, xstop]
- 'pad_size' = user defined output array size [nbz, nby, nbx]
- 'q_values' = [qx, qz, qy], each component being a 1D array
:return:
- updated data, mask (and q_values if provided, [] otherwise)
- pad_width = [z0, z1, y0, y1, x0, x1] number of pixels added at each end of the
original data
- updated frames_logical
"""
valid.valid_ndarray(arrays=(data, mask), ndim=3)
# check and load kwargs
valid.valid_kwargs(
kwargs=kwargs,
allowed_kwargs={"fix_bragg", "fix_size", "pad_size", "q_values"},
name="kwargs",
)
fix_bragg = kwargs.get("fix_bragg")
fix_size = kwargs.get("fix_size")
pad_size = kwargs.get("pad_size", [])
q_values = kwargs.get("q_values", [])
if q_values: # len(q_values) != 0
qx = q_values[0] # axis=0, z downstream, qx in reciprocal space
qz = q_values[1] # axis=1, y vertical, qz in reciprocal space
qy = q_values[2] # axis=2, x outboard, qy in reciprocal space
else:
qx = []
qy = []
qz = []
if centering == "max":
z0, y0, x0 = np.unravel_index(abs(data).argmax(), data.shape)
if q_values:
print(f"Max at (qx, qz, qy): {qx[z0]:.5f}, {qz[y0]:.5f}, {qy[x0]:.5f}")
else:
print("Max at pixel (Z, Y, X): ", z0, y0, x0)
elif centering == "com":
z0, y0, x0 = center_of_mass(data)
if q_values:
print(
"Center of mass at (qx, qz, qy): "
f"{qx[z0]:.5f}, {qz[y0]:.5f}, {qy[x0]:.5f}"
)
else:
print("Center of mass at pixel (Z, Y, X): ", z0, y0, x0)
else:
raise ValueError("Incorrect value for 'centering' parameter")
if fix_bragg:
if len(fix_bragg) != 3:
raise ValueError("fix_bragg should be a list of 3 integers")
z0, y0, x0 = fix_bragg
print(
"Peak intensity position defined by user on the full detector: ", z0, y0, x0
)
y0 = (y0 - detector.roi[0]) / detector.binning[1]
x0 = (x0 - detector.roi[2]) / detector.binning[2]
print(
"Peak intensity position with detector ROI and binning in detector plane: ",
z0,
y0,
x0,
)
iz0, iy0, ix0 = int(round(z0)), int(round(y0)), int(round(x0))
print(f"Data peak value = {data[iz0, iy0, ix0]:.1f}")
# Max symmetrical box around center of mass
nbz, nby, nbx = np.shape(data)
max_nz = abs(2 * min(iz0, nbz - iz0))
max_ny = 2 * min(iy0, nby - iy0)
max_nx = abs(2 * min(ix0, nbx - ix0))
if fft_option != "skip":
print("Max symmetrical box (qx, qz, qy): ", max_nz, max_ny, max_nx)
if any(val == 0 for val in (max_nz, max_ny, max_nx)):
print(
"Empty images or presence of hotpixel at the border,"
' defaulting fft_option to "skip"!'
)
fft_option = "skip"
# Crop/pad data to fulfill FFT size and user requirements
if fft_option == "crop_sym_ZYX":
# crop rocking angle and detector, Bragg peak centered
nz1, ny1, nx1 = util.smaller_primes(
(max_nz, max_ny, max_nx), maxprime=7, required_dividers=(2,)
)
pad_width = np.zeros(6, dtype=int)
data = data[
iz0 - nz1 // 2 : iz0 + nz1 // 2,
iy0 - ny1 // 2 : iy0 + ny1 // 2,
ix0 - nx1 // 2 : ix0 + nx1 // 2,
]
mask = mask[
iz0 - nz1 // 2 : iz0 + nz1 // 2,
iy0 - ny1 // 2 : iy0 + ny1 // 2,
ix0 - nx1 // 2 : ix0 + nx1 // 2,
]
print("FFT box (qx, qz, qy): ", data.shape)
if (iz0 - nz1 // 2) > 0: # if 0, the first frame is used
frames_logical[0 : iz0 - nz1 // 2] = 0
if (iz0 + nz1 // 2) < nbz: # if nbz, the last frame is used
frames_logical[iz0 + nz1 // 2 :] = 0
if len(q_values) != 0:
qx = qx[iz0 - nz1 // 2 : iz0 + nz1 // 2]
qy = qy[ix0 - nx1 // 2 : ix0 + nx1 // 2]
qz = qz[iy0 - ny1 // 2 : iy0 + ny1 // 2]
elif fft_option == "crop_asym_ZYX":
# crop rocking angle and detector without centering the Bragg peak
nz1, ny1, nx1 = util.smaller_primes(
(nbz, nby, nbx), maxprime=7, required_dividers=(2,)
)
pad_width = np.zeros(6, dtype=int)
data = data[
nbz // 2 - nz1 // 2 : nbz // 2 + nz1 // 2,
nby // 2 - ny1 // 2 : nby // 2 + ny1 // 2,
nbx // 2 - nx1 // 2 : nbx // 2 + nx1 // 2,
]
mask = mask[
nbz // 2 - nz1 // 2 : nbz // 2 + nz1 // 2,
nby // 2 - ny1 // 2 : nby // 2 + ny1 // 2,
nbx // 2 - nx1 // 2 : nbx // 2 + nx1 // 2,
]
print("FFT box (qx, qz, qy): ", data.shape)
if (nbz // 2 - nz1 // 2) > 0: # if 0, the first frame is used
frames_logical[0 : nbz // 2 - nz1 // 2] = 0
if (nbz // 2 + nz1 // 2) < nbz: # if nbz, the last frame is used
frames_logical[nbz // 2 + nz1 // 2 :] = 0
if len(q_values) != 0:
qx = qx[nbz // 2 - nz1 // 2 : nbz // 2 + nz1 // 2]
qy = qy[nbx // 2 - nx1 // 2 : nbx // 2 + nx1 // 2]
qz = qz[nby // 2 - ny1 // 2 : nby // 2 + ny1 // 2]
elif fft_option == "pad_sym_Z_crop_sym_YX":
# pad rocking angle based on 'pad_size' (Bragg peak centered)
# and crop detector (Bragg peak centered)
if len(pad_size) != 3:
raise ValueError("pad_size should be a list of three elements")
if pad_size[0] != util.higher_primes(
pad_size[0], maxprime=7, required_dividers=(2,)
):
raise ValueError(pad_size[0], "does not meet FFT requirements")
ny1, nx1 = util.smaller_primes(
(max_ny, max_nx), maxprime=7, required_dividers=(2,)
)
data = data[:, iy0 - ny1 // 2 : iy0 + ny1 // 2, ix0 - nx1 // 2 : ix0 + nx1 // 2]
mask = mask[:, iy0 - ny1 // 2 : iy0 + ny1 // 2, ix0 - nx1 // 2 : ix0 + nx1 // 2]
pad_width = np.array(
[
int(min(pad_size[0] / 2 - iz0, pad_size[0] - nbz)),
int(min(pad_size[0] / 2 - nbz + iz0, pad_size[0] - nbz)),
0,
0,
0,
0,
],
dtype=int,
)
data = zero_pad(data, padding_width=pad_width, mask_flag=False)
mask = zero_pad(
mask, padding_width=pad_width, mask_flag=True
) # mask padded pixels
print("FFT box (qx, qz, qy): ", data.shape)
temp_frames = -1 * np.ones(data.shape[0])
temp_frames[pad_width[0] : pad_width[0] + nbz] = frames_logical
frames_logical = temp_frames
if len(q_values) != 0:
dqx = qx[1] - qx[0]
qx0 = qx[0] - pad_width[0] * dqx
qx = qx0 + np.arange(pad_size[0]) * dqx
qy = qy[ix0 - nx1 // 2 : ix0 + nx1 // 2]
qz = qz[iy0 - ny1 // 2 : iy0 + ny1 // 2]
elif fft_option == "pad_sym_Z_crop_asym_YX":
# pad rocking angle based on 'pad_size' (Bragg peak centered)
# and crop detector (Bragg peak non-centered)
if len(pad_size) != 3:
raise ValueError("pad_size should be a list of three elements")
print("pad_size for 1st axis before binning: ", pad_size[0])
if pad_size[0] != util.higher_primes(
pad_size[0], maxprime=7, required_dividers=(2,)
):
raise ValueError(pad_size[0], "does not meet FFT requirements")
ny1, nx1 = util.smaller_primes(
(max_ny, max_nx), maxprime=7, required_dividers=(2,)
)
data = data[
:,
nby // 2 - ny1 // 2 : nby // 2 + ny1 // 2,
nbx // 2 - nx1 // 2 : nbx // 2 + nx1 // 2,
]
mask = mask[
:,
nby // 2 - ny1 // 2 : nby // 2 + ny1 // 2,
nbx // 2 - nx1 // 2 : nbx // 2 + nx1 // 2,
]
pad_width = np.array(
[
int(min(pad_size[0] / 2 - iz0, pad_size[0] - nbz)),
int(min(pad_size[0] / 2 - nbz + iz0, pad_size[0] - nbz)),
0,
0,
0,
0,
],
dtype=int,
)
data = zero_pad(data, padding_width=pad_width, mask_flag=False)
mask = zero_pad(
mask, padding_width=pad_width, mask_flag=True
) # mask padded pixels
print("FFT box (qx, qz, qy): ", data.shape)
temp_frames = -1 * np.ones(data.shape[0])
temp_frames[pad_width[0] : pad_width[0] + nbz] = frames_logical
frames_logical = temp_frames
if len(q_values) != 0:
dqx = qx[1] - qx[0]
qx0 = qx[0] - pad_width[0] * dqx
qx = qx0 + np.arange(pad_size[0]) * dqx
qy = qy[nbx // 2 - nx1 // 2 : nbx // 2 + nx1 // 2]
qz = qz[nby // 2 - ny1 // 2 : nby // 2 + ny1 // 2]
elif fft_option == "pad_asym_Z_crop_sym_YX":
# pad rocking angle without centering the Bragg peak
# and crop detector (Bragg peak centered)
ny1, nx1 = util.smaller_primes(
(max_ny, max_nx), maxprime=7, required_dividers=(2,)
)
nz1 = util.higher_primes(nbz, maxprime=7, required_dividers=(2,))
data = data[:, iy0 - ny1 // 2 : iy0 + ny1 // 2, ix0 - nx1 // 2 : ix0 + nx1 // 2]
mask = mask[:, iy0 - ny1 // 2 : iy0 + ny1 // 2, ix0 - nx1 // 2 : ix0 + nx1 // 2]
pad_width = np.array(
[
int((nz1 - nbz + ((nz1 - nbz) % 2)) / 2),
int((nz1 - nbz + 1) / 2 - ((nz1 - nbz) % 2)),
0,
0,
0,
0,
],
dtype=int,
)
data = zero_pad(data, padding_width=pad_width, mask_flag=False)
mask = zero_pad(
mask, padding_width=pad_width, mask_flag=True
) # mask padded pixels
print("FFT box (qx, qz, qy): ", data.shape)
temp_frames = -1 * np.ones(data.shape[0])
temp_frames[pad_width[0] : pad_width[0] + nbz] = frames_logical
frames_logical = temp_frames
if len(q_values) != 0:
dqx = qx[1] - qx[0]
qx0 = qx[0] - pad_width[0] * dqx
qx = qx0 + np.arange(nz1) * dqx
qy = qy[ix0 - nx1 // 2 : ix0 + nx1 // 2]
qz = qz[iy0 - ny1 // 2 : iy0 + ny1 // 2]
elif fft_option == "pad_asym_Z_crop_asym_YX":
# pad rocking angle and crop detector without centering the Bragg peak
ny1, nx1 = util.smaller_primes((nby, nbx), maxprime=7, required_dividers=(2,))
nz1 = util.higher_primes(nbz, maxprime=7, required_dividers=(2,))
data = data[
:,
nby // 2 - ny1 // 2 : nby // 2 + ny1 // 2,
nbx // 2 - nx1 // 2 : nbx // 2 + nx1 // 2,
]
mask = mask[
:,
nby // 2 - ny1 // 2 : nby // 2 + ny1 // 2,
nbx // 2 - nx1 // 2 : nbx // 2 + nx1 // 2,
]
pad_width = np.array(
[
int((nz1 - nbz + ((nz1 - nbz) % 2)) / 2),
int((nz1 - nbz + 1) / 2 - ((nz1 - nbz) % 2)),
0,
0,
0,
0,
],
dtype=int,
)
data = zero_pad(data, padding_width=pad_width, mask_flag=False)
mask = zero_pad(
mask, padding_width=pad_width, mask_flag=True
) # mask padded pixels
print("FFT box (qx, qz, qy): ", data.shape)
temp_frames = -1 * np.ones(data.shape[0])
temp_frames[pad_width[0] : pad_width[0] + nbz] = frames_logical
frames_logical = temp_frames
if len(q_values) != 0:
dqx = qx[1] - qx[0]
qx0 = qx[0] - pad_width[0] * dqx
qx = qx0 + np.arange(nz1) * dqx
qy = qy[nbx // 2 - nx1 // 2 : nbx // 2 + nx1 // 2]
qz = qz[nby // 2 - ny1 // 2 : nby // 2 + ny1 // 2]
elif fft_option == "pad_sym_Z":
# pad rocking angle based on 'pad_size' (Bragg peak centered)
# and keep detector size
if len(pad_size) != 3:
raise ValueError("pad_size should be a list of three elements")
print("pad_size for 1st axis before binning: ", pad_size[0])
if pad_size[0] != util.higher_primes(
pad_size[0], maxprime=7, required_dividers=(2,)
):
raise ValueError(pad_size[0], "does not meet FFT requirements")
pad_width = np.array(
[
int(min(pad_size[0] / 2 - iz0, pad_size[0] - nbz)),
int(min(pad_size[0] / 2 - nbz + iz0, pad_size[0] - nbz)),
0,
0,
0,
0,
],
dtype=int,
)
data = zero_pad(data, padding_width=pad_width, mask_flag=False)
mask = zero_pad(
mask, padding_width=pad_width, mask_flag=True
) # mask padded pixels
print("FFT box (qx, qz, qy): ", data.shape)
temp_frames = -1 * np.ones(data.shape[0])
temp_frames[pad_width[0] : pad_width[0] + nbz] = frames_logical
frames_logical = temp_frames
if len(q_values) != 0:
dqx = qx[1] - qx[0]
qx0 = qx[0] - pad_width[0] * dqx
qx = qx0 + np.arange(pad_size[0]) * dqx
elif fft_option == "pad_asym_Z":
# pad rocking angle without centering the Bragg peak, keep detector size
nz1 = util.higher_primes(nbz, maxprime=7, required_dividers=(2,))
pad_width = np.array(
[
int((nz1 - nbz + ((nz1 - nbz) % 2)) / 2),
int((nz1 - nbz + 1) / 2 - ((nz1 - nbz) % 2)),
0,
0,
0,
0,
],
dtype=int,
)
data = zero_pad(data, padding_width=pad_width, mask_flag=False)
mask = zero_pad(
mask, padding_width=pad_width, mask_flag=True
) # mask padded pixels
print("FFT box (qx, qz, qy): ", data.shape)
temp_frames = -1 * np.ones(data.shape[0])
temp_frames[pad_width[0] : pad_width[0] + nbz] = frames_logical
frames_logical = temp_frames
if len(q_values) != 0:
dqx = qx[1] - qx[0]
qx0 = qx[0] - pad_width[0] * dqx
qx = qx0 + np.arange(nz1) * dqx
elif fft_option == "pad_sym_ZYX":
# pad both dimensions based on 'pad_size' (Bragg peak centered)
if len(pad_size) != 3:
raise ValueError("pad_size should be a list of 3 integers")
print("pad_size: ", pad_size)
print(
"The 1st axis (stacking dimension) is padded before binning,"
" detector plane after binning."
)
if pad_size[0] != util.higher_primes(
pad_size[0], maxprime=7, required_dividers=(2,)
):
raise ValueError(pad_size[0], "does not meet FFT requirements")
if pad_size[1] != util.higher_primes(
pad_size[1], maxprime=7, required_dividers=(2,)
):
raise ValueError(pad_size[1], "does not meet FFT requirements")
if pad_size[2] != util.higher_primes(
pad_size[2], maxprime=7, required_dividers=(2,)
):
raise ValueError(pad_size[2], "does not meet FFT requirements")
pad_width = [
int(min(pad_size[0] / 2 - iz0, pad_size[0] - nbz)),
int(min(pad_size[0] / 2 - nbz + iz0, pad_size[0] - nbz)),
int(min(pad_size[1] / 2 - iy0, pad_size[1] - nby)),
int(min(pad_size[1] / 2 - nby + iy0, pad_size[1] - nby)),
int(min(pad_size[2] / 2 - ix0, pad_size[2] - nbx)),
int(min(pad_size[2] / 2 - nbx + ix0, pad_size[2] - nbx)),
]
pad_width = np.array(
list((map(lambda value: max(value, 0), pad_width))), dtype=int
) # remove negative numbers
data = zero_pad(data, padding_width=pad_width, mask_flag=False)
mask = zero_pad(
mask, padding_width=pad_width, mask_flag=True
) # mask padded pixels
print("FFT box (qx, qz, qy): ", data.shape)
temp_frames = -1 * np.ones(data.shape[0])
temp_frames[pad_width[0] : pad_width[0] + nbz] = frames_logical
frames_logical = temp_frames
if len(q_values) != 0:
dqx = qx[1] - qx[0]
dqy = qy[1] - qy[0]
dqz = qz[1] - qz[0]
qx0 = qx[0] - pad_width[0] * dqx
qy0 = qy[0] - pad_width[2] * dqy
qz0 = qz[0] - pad_width[1] * dqz
qx = qx0 + np.arange(pad_size[0]) * dqx
qy = qy0 + np.arange(pad_size[2]) * dqy
qz = qz0 + np.arange(pad_size[1]) * dqz
elif fft_option == "pad_asym_ZYX":
# pad both dimensions without centering the Bragg peak
nz1, ny1, nx1 = [
util.higher_primes(nbz, maxprime=7, required_dividers=(2,)),
util.higher_primes(nby, maxprime=7, required_dividers=(2,)),
util.higher_primes(nbx, maxprime=7, required_dividers=(2,)),
]
pad_width = np.array(
[
int((nz1 - nbz + ((nz1 - nbz) % 2)) / 2),
int((nz1 - nbz + 1) / 2 - ((nz1 - nbz) % 2)),
int((ny1 - nby + ((ny1 - nby) % 2)) / 2),
int((ny1 - nby + 1) / 2 - ((ny1 - nby) % 2)),
int((nx1 - nbx + ((nx1 - nbx) % 2)) / 2),
int((nx1 - nbx + 1) / 2 - ((nx1 - nbx) % 2)),
]
)
data = zero_pad(data, padding_width=pad_width, mask_flag=False)
mask = zero_pad(
mask, padding_width=pad_width, mask_flag=True
) # mask padded pixels
temp_frames = -1 * np.ones(data.shape[0])
temp_frames[pad_width[0] : pad_width[0] + nbz] = frames_logical
frames_logical = temp_frames
if len(q_values) != 0:
dqx = qx[1] - qx[0]
dqy = qy[1] - qy[0]
dqz = qz[1] - qz[0]
qx0 = qx[0] - pad_width[0] * dqx
qy0 = qy[0] - pad_width[2] * dqy
qz0 = qz[0] - pad_width[1] * dqz
qx = qx0 + np.arange(nz1) * dqx
qy = qy0 + np.arange(nx1) * dqy
qz = qz0 + np.arange(ny1) * dqz
elif fft_option == "skip":
# keep the full dataset or use 'fix_size' parameter
pad_width = np.zeros(
6, dtype=int
) # do nothing or crop the data, starting_frame should be 0
if fix_size:
if len(fix_size) != 6:
raise ValueError("fix_bragg should be a list of 3 integers")
# take binning into account
fix_size[2] = int(fix_size[2] // detector.binning[1])
fix_size[3] = int(fix_size[3] // detector.binning[1])
fix_size[4] = int(fix_size[4] // detector.binning[2])
fix_size[5] = int(fix_size[5] // detector.binning[2])
# size of output array defined
nbz, nby, nbx = np.shape(data)
z_span = fix_size[1] - fix_size[0]
y_span = fix_size[3] - fix_size[2]
x_span = fix_size[5] - fix_size[4]
if (
z_span > nbz
or y_span > nby
or x_span > nbx
or fix_size[1] > nbz
or fix_size[3] > nby
or fix_size[5] > nbx
):
raise ValueError("Predefined fix_size uncorrect")
data = data[
fix_size[0] : fix_size[1],
fix_size[2] : fix_size[3],
fix_size[4] : fix_size[5],
]
mask = mask[
fix_size[0] : fix_size[1],
fix_size[2] : fix_size[3],
fix_size[4] : fix_size[5],
]
if fix_size[0] > 0: # if 0, the first frame is used
frames_logical[0 : fix_size[0]] = 0
if fix_size[1] < nbz: # if nbz, the last frame is used
frames_logical[fix_size[1] :] = 0
if len(q_values) != 0:
qx = qx[fix_size[0] : fix_size[1]]
qy = qy[fix_size[4] : fix_size[5]]
qz = qz[fix_size[2] : fix_size[3]]
else:
raise ValueError("Incorrect value for 'fft_option'")
if len(q_values) != 0:
q_values = list(q_values)
q_values[0] = qx
q_values[1] = qz
q_values[2] = qy
return data, mask, pad_width, q_values, frames_logical
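# --------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original module).
# Assuming `data` and `mask` are 3D numpy arrays already binned in the detector frame,
# `detector` is a Detector instance with `roi` and `binning` set, and every measured
# frame should initially be flagged as used, a minimal call could look like:
#
#     frames_logical = np.ones(data.shape[0])
#     data, mask, pad_width, q_values, frames_logical = center_fft(
#         data=data,
#         mask=mask,
#         detector=detector,
#         frames_logical=frames_logical,
#         centering="max",
#         fft_option="crop_sym_ZYX",
#     )
#
# As documented above, `pad_width` lists the pixels added at each end as
# [z0, z1, y0, y1, x0, x1], and `q_values` is [] when no 'q_values' kwarg is provided.
# --------------------------------------------------------------------------------------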
@no_type_check # https://github.com/python/mypy/issues/6697
def find_bragg(
data: np.ndarray,
peak_method: str,
roi: Optional[Tuple[int, int, int, int]] = None,
binning: Optional[Tuple[int, ...]] = None,
) -> Tuple[int, ...]:
"""
Find the Bragg peak position in data based on the centering method.
It compensates for a region of interest (ROI) in the detector and for any binning that was applied.
:param data: 2D or 3D array. If complex, Bragg peak position is calculated for
abs(array)
:param peak_method: 'max', 'com' or 'maxcom'. For 'maxcom', it uses method 'max'
for the first axis and 'com' for the other axes.
:param roi: tuple of integers of length 4, region of interest used to generate data
from the full sized detector.
:param binning: tuple of integers of length data.ndim, binning applied to the data
in each dimension.
:return: the Bragg peak position in the unbinned, full size detector as a tuple of
data.ndim elements
"""
# check parameters
valid.valid_ndarray(arrays=data, ndim=(2, 3))
valid.valid_container(
roi,
container_types=(tuple, list, np.ndarray),
item_types=int,
length=4,
allow_none=True,
name="roi",
)
valid.valid_container(
binning,
container_types=(tuple, list, np.ndarray),
item_types=int,
length=data.ndim,
allow_none=True,
name="binning",
)
if peak_method not in {"max", "com", "maxcom"}:
raise ValueError("peak_method should be 'max', 'com' or 'maxcom'")
print(f"\nFinding Bragg peak position: input data shape {data.shape}")
print(f"Binning: {binning}")
print(f"Roi: {roi}")
if peak_method == "max":
position = np.unravel_index(abs(data).argmax(), data.shape)
print(f"Max at: {position}, Max = {int(data[position])}")
elif peak_method == "com":
position = center_of_mass(data)
position = tuple(map(lambda x: int(np.rint(x)), position))
print(f"Center of mass at: {position}, COM = {int(data[position])}")
else: # 'maxcom'
valid.valid_ndarray(arrays=data, ndim=3)
position = list(np.unravel_index(abs(data).argmax(), data.shape))
position[1:] = center_of_mass(data[position[0], :, :])
position = tuple(map(lambda x: int(np.rint(x)), position))
print(f"MaxCom at (z, y, x): {position}, COM = {int(data[position])}")
# unbin
if binning is not None:
position = [a * b for a, b in zip(position, binning)]
# add the offset due to the region of interest
# the roi is defined as [y_start, y_stop, x_start, x_stop]
if roi is not None:
position = list(position)
position[-1] = position[-1] + roi[2]
position[-2] = position[-2] + roi[0]
print(f"Bragg peak (full unbinned roi) at: {position}")
return tuple(position)
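# --------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original module).
# Assuming a 3D detector stack that was cropped with a (y_start, y_stop, x_start, x_stop)
# ROI and binned by (1, 2, 2), the Bragg peak position in the full unbinned detector
# could be recovered with:
#
#     bragg_position = find_bragg(
#         data=data,
#         peak_method="maxcom",
#         roi=(0, 512, 0, 512),   # placeholder ROI values
#         binning=(1, 2, 2),
#     )
#
# 'maxcom' takes the maximum along the rocking axis and the center of mass within that
# detector frame, as described in the docstring above.
# --------------------------------------------------------------------------------------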
def grid_bcdi_labframe(
data,
mask,
detector,
setup,
align_q=False,
reference_axis=(0, 1, 0),
debugging=False,
**kwargs,
):
"""
Interpolate BCDI reciprocal space data using a linearized transformation matrix.
The resulting (qx, qy, qz) are in the laboratory frame (qx downstream,
qz vertical up, qy outboard).
:param data: the 3D data, already binned in the detector frame
:param mask: the corresponding 3D mask
:param detector: an instance of the class Detector
:param setup: instance of the Class experiment_utils.Setup()
:param align_q: boolean, if True the data will be rotated such that q is along
reference_axis, and q values will be calculated in the pseudo crystal frame.
:param reference_axis: 3D vector along which q will be aligned, expressed in an
orthonormal frame x y z
:param debugging: set to True to see plots
:param kwargs:
- 'fill_value': tuple of two real numbers, fill values to use for pixels outside
of the interpolation range. The first value is for the data, the second for the
mask. Default is (0, 0)
:return:
- the data interpolated in the laboratory frame
- the mask interpolated in the laboratory frame
- a tuple of three 1D vectors of q values (qx, qz, qy)
- a numpy array of shape (3, 3): transformation matrix from the detector
frame to the laboratory/crystal frame
"""
valid.valid_ndarray(arrays=(data, mask), ndim=3)
# check and load kwargs
valid.valid_kwargs(
kwargs=kwargs,
allowed_kwargs={"fill_value", "reference_axis"},
name="kwargs",
)
fill_value = kwargs.get("fill_value", (0, 0))
valid.valid_container(
fill_value,
container_types=(tuple, list, np.ndarray),
length=2,
item_types=Real,
name="fill_value",
)
# check some parameters
if setup.rocking_angle == "energy":
raise NotImplementedError(
"Geometric transformation not yet implemented for energy scans"
)
valid.valid_item(align_q, allowed_types=bool, name="align_q")
valid.valid_container(
reference_axis,
container_types=(tuple, list, np.ndarray),
length=3,
item_types=Real,
name="reference_axis",
)
reference_axis = np.array(reference_axis)
# grid the data
print(
"Gridding the data using the linearized matrix,"
" the result will be in the laboratory frame"
)
string = "linmat_reciprocal_space_"
(interp_data, interp_mask), q_values, transfer_matrix = setup.ortho_reciprocal(
arrays=(data, mask),
verbose=True,
debugging=debugging,
fill_value=fill_value,
align_q=align_q,
reference_axis=reference_axis,
scale=("log", "linear"),
title=("data", "mask"),
)
qx, qz, qy = q_values
# check for Nan
interp_mask[np.isnan(interp_data)] = 1
interp_data[np.isnan(interp_data)] = 0
interp_mask[np.isnan(interp_mask)] = 1
# set the mask as an array of integers, 0 or 1
interp_mask[np.nonzero(interp_mask)] = 1
interp_mask = interp_mask.astype(int)
# apply the mask to the data
interp_data[np.nonzero(interp_mask)] = 0
# save plots of the gridded data
final_binning = (
detector.preprocessing_binning[0] * detector.binning[0],
detector.preprocessing_binning[1] * detector.binning[1],
detector.preprocessing_binning[2] * detector.binning[2],
)
numz, numy, numx = interp_data.shape
plot_comment = (
f"_{numz}_{numy}_{numx}_"
f"{final_binning[0]}_{final_binning[1]}_{final_binning[2]}.png"
)
max_z = interp_data.sum(axis=0).max()
fig, _, _ = gu.contour_slices(
interp_data,
(qx, qz, qy),
sum_frames=True,
title="Regridded data",
levels=np.linspace(0, np.ceil(np.log10(max_z)), 150, endpoint=True),
plot_colorbar=True,
scale="log",
is_orthogonal=True,
reciprocal_space=True,
)
fig.savefig(detector.savedir + string + "sum" + plot_comment)
plt.close(fig)
fig, _, _ = gu.contour_slices(
interp_data,
(qx, qz, qy),
sum_frames=False,
title="Regridded data",
levels=np.linspace(0, np.ceil(np.log10(interp_data.max())), 150, endpoint=True),
plot_colorbar=True,
scale="log",
is_orthogonal=True,
reciprocal_space=True,
)
fig.savefig(detector.savedir + string + "central" + plot_comment)
plt.close(fig)
fig, _, _ = gu.multislices_plot(
interp_data,
sum_frames=True,
scale="log",
plot_colorbar=True,
vmin=0,
title="Regridded data",
is_orthogonal=True,
reciprocal_space=True,
)
fig.savefig(detector.savedir + string + "sum_pix" + plot_comment)
plt.close(fig)
fig, _, _ = gu.multislices_plot(
interp_data,
sum_frames=False,
scale="log",
plot_colorbar=True,
vmin=0,
title="Regridded data",
is_orthogonal=True,
reciprocal_space=True,
)
fig.savefig(detector.savedir + string + "central_pix" + plot_comment)
plt.close(fig)
if debugging:
gu.multislices_plot(
interp_mask,
sum_frames=False,
scale="linear",
plot_colorbar=True,
vmin=0,
title="Regridded mask",
is_orthogonal=True,
reciprocal_space=True,
)
return interp_data, interp_mask, q_values, transfer_matrix
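# Example (illustrative usage sketch, not part of the original module): "detector"
# and "setup" are assumed to be pre-configured instances of the Detector and Setup
# classes referenced in the docstring, and the input file name is hypothetical.
#
#     data = np.load("stacked_detector_frames.npy")  # 3D data in the detector frame
#     mask = np.zeros_like(data)
#     ortho_data, ortho_mask, (qx, qz, qy), transfer_matrix = grid_bcdi_labframe(
#         data=data,
#         mask=mask,
#         detector=detector,
#         setup=setup,
#         align_q=True,
#         reference_axis=(0, 1, 0),
#         fill_value=(0, 0),
#     )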
def grid_bcdi_xrayutil(
data,
mask,
scan_number,
logfile,
detector,
setup,
frames_logical,
hxrd,
debugging=False,
):
"""
Interpolate BCDI reciprocal space data using xrayutilities package.
The resulting (qx, qy, qz) are in the crystal frame (qz vertical).
:param data: the 3D data, already binned in the detector frame
:param mask: the corresponding 3D mask
:param scan_number: the scan number to load
:param logfile: file containing the information about the scan and image numbers
(specfile, .fio...)
:param detector: an instance of the class Detector
:param setup: instance of the Class experiment_utils.Setup()
:param frames_logical: array of initial length the number of measured frames.
In case of padding the length changes. A frame whose index is set to 1 means
that it is used, 0 means not used, -1 means padded (added) frame.
:param hxrd: an initialized xrayutilities HXRD object used for the orthogonalization
of the dataset
:param debugging: set to True to see plots
:return: the data and mask interpolated in the crystal frame, q values
(downstream, vertical up, outboard). q values are in inverse angstroms.
"""
valid.valid_ndarray(arrays=(data, mask), ndim=3)
numz, numy, numx = data.shape
print(
"Gridding the data using xrayutilities package,"
" the result will be in the crystal frame"
)
string = "xrutil_reciprocal_space_"
if setup.filtered_data:
print(
"Trying to orthogonalize a filtered data,"
" the corresponding detector ROI should be provided\n"
"otherwise q values will be wrong."
)
qx, qz, qy, frames_logical = setup.calc_qvalues_xrutils(
logfile=logfile,
hxrd=hxrd,
nb_frames=numz,
scan_number=scan_number,
frames_logical=frames_logical,
)
maxbins = []
for dim in (qx, qy, qz):
maxstep = max((abs(np.diff(dim, axis=j)).max() for j in range(3)))
maxbins.append(int(abs(dim.max() - dim.min()) / maxstep))
print(f"Maximum number of bins based on the sampling in q: {maxbins}")
maxbins = util.smaller_primes(maxbins, maxprime=7, required_dividers=(2,))
print(f"Maximum number of bins based on the shape requirements for FFT: {maxbins}")
# only rectangular cuboidal voxels are supported in xrayutilities FuzzyGridder3D
gridder = xu.FuzzyGridder3D(*maxbins)
#
    # define the width of data points (rectangular datapoints; xrayutilities uses
    # half of these values, but artefacts sometimes appear when doing so)
wx = (qx.max() - qx.min()) / maxbins[0]
wz = (qz.max() - qz.min()) / maxbins[1]
wy = (qy.max() - qy.min()) / maxbins[2]
# convert mask to rectangular grid in reciprocal space
gridder(
qx, qz, qy, mask, width=(wx, wz, wy)
) # qx downstream, qz vertical up, qy outboard
interp_mask = np.copy(gridder.data)
# convert data to rectangular grid in reciprocal space
gridder(
qx, qz, qy, data, width=(wx, wz, wy)
) # qx downstream, qz vertical up, qy outboard
interp_data = gridder.data
qx, qz, qy = [
gridder.xaxis,
gridder.yaxis,
gridder.zaxis,
] # downstream, vertical up, outboard
# q values are 1D arrays
# check for Nan
interp_mask[np.isnan(interp_data)] = 1
interp_data[np.isnan(interp_data)] = 0
interp_mask[np.isnan(interp_mask)] = 1
interp_mask = interp_mask.astype(int)
# apply the mask to the data
interp_data[np.nonzero(interp_mask)] = 0
# plot the gridded data
final_binning = (
detector.preprocessing_binning[0] * detector.binning[0],
detector.preprocessing_binning[1] * detector.binning[1],
detector.preprocessing_binning[2] * detector.binning[2],
)
numz, numy, numx = interp_data.shape
plot_comment = (
f"_{numz}_{numy}_{numx}"
f"_{final_binning[0]}_{final_binning[1]}_{final_binning[2]}.png"
)
max_z = interp_data.sum(axis=0).max()
fig, _, _ = gu.contour_slices(
interp_data,
(qx, qz, qy),
sum_frames=True,
title="Regridded data",
levels=np.linspace(0, np.ceil(np.log10(max_z)), 150, endpoint=True),
plot_colorbar=True,
scale="log",
is_orthogonal=True,
reciprocal_space=True,
)
fig.savefig(detector.savedir + string + "sum" + plot_comment)
plt.close(fig)
fig, _, _ = gu.contour_slices(
interp_data,
(qx, qz, qy),
sum_frames=False,
title="Regridded data",
levels=np.linspace(0, np.ceil(np.log10(interp_data.max())), 150, endpoint=True),
plot_colorbar=True,
scale="log",
is_orthogonal=True,
reciprocal_space=True,
)
fig.savefig(detector.savedir + string + "central" + plot_comment)
plt.close(fig)
fig, _, _ = gu.multislices_plot(
interp_data,
sum_frames=True,
scale="log",
plot_colorbar=True,
vmin=0,
title="Regridded data",
is_orthogonal=True,
reciprocal_space=True,
)
fig.savefig(detector.savedir + string + "sum_pix" + plot_comment)
plt.close(fig)
fig, _, _ = gu.multislices_plot(
interp_data,
sum_frames=False,
scale="log",
plot_colorbar=True,
vmin=0,
title="Regridded data",
is_orthogonal=True,
reciprocal_space=True,
)
fig.savefig(detector.savedir + string + "central_pix" + plot_comment)
plt.close(fig)
if debugging:
gu.multislices_plot(
interp_mask,
sum_frames=False,
scale="linear",
plot_colorbar=True,
vmin=0,
title="Regridded mask",
is_orthogonal=True,
reciprocal_space=True,
)
return interp_data, interp_mask, (qx, qz, qy), frames_logical
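# Example (illustrative usage sketch): the 3D "data" and "mask" arrays together with
# "logfile", "detector", "setup" and "hxrd" are assumed to be pre-built objects, with
# "hxrd" an initialized xrayutilities HXRD instance as required by the docstring;
# the scan number is arbitrary.
#
#     ortho_data, ortho_mask, (qx, qz, qy), frames_logical = grid_bcdi_xrayutil(
#         data=data,
#         mask=mask,
#         scan_number=1,
#         logfile=logfile,
#         detector=detector,
#         setup=setup,
#         frames_logical=np.ones(data.shape[0]),
#         hxrd=hxrd,
#     )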
def load_bcdi_data(
scan_number,
detector,
setup,
bin_during_loading=False,
flatfield=None,
hotpixels=None,
background=None,
normalize="skip",
debugging=False,
**kwargs,
):
"""
Load Bragg CDI data, apply optional threshold, normalization and binning.
:param scan_number: the scan number to load
:param detector: an instance of the class Detector
:param setup: an instance of the class Setup
:param bin_during_loading: True to bin the data during loading (faster)
:param flatfield: the 2D flatfield array
:param hotpixels: the 2D hotpixels array. 1 for a hotpixel, 0 for normal pixels.
    :param background: the 2D background array to subtract from the data
:param normalize: 'monitor' to return the default monitor values, 'sum_roi' to
return a monitor based on the integrated intensity in the region of interest
defined by detector.sum_roi, 'skip' to do nothing
:param debugging: set to True to see plots
:param kwargs:
- 'photon_threshold': float, photon threshold to apply before binning
- 'frames_pattern': 1D array of int, of length data.shape[0]. If
frames_pattern is 0 at index, the frame at data[index] will be skipped,
       if 1 the frame will be added to the stack.
:return:
- the 3D data and mask arrays
- frames_logical: array of initial length the number of measured frames.
In case of padding the length changes. A frame whose index is set to 1 means
that it is used, 0 means not used, -1 means padded (added) frame.
- the monitor values used for the intensity normalization
"""
# check and load kwargs
valid.valid_kwargs(
kwargs=kwargs,
allowed_kwargs={"photon_threshold", "frames_pattern"},
name="kwargs",
)
photon_threshold = kwargs.get("photon_threshold", 0)
valid.valid_item(
photon_threshold,
allowed_types=Real,
min_included=0,
name="photon_threshold",
)
frames_pattern = kwargs.get("frames_pattern")
valid.valid_1d_array(
frames_pattern, allow_none=True, allowed_values={0, 1}, name="frames_pattern"
)
rawdata, rawmask, monitor, frames_logical = setup.diffractometer.load_check_dataset(
scan_number=scan_number,
detector=detector,
setup=setup,
frames_pattern=frames_pattern,
bin_during_loading=bin_during_loading,
flatfield=flatfield,
hotpixels=hotpixels,
background=background,
normalize=normalize,
debugging=debugging,
)
#####################################################
# apply an optional photon threshold before binning #
#####################################################
if photon_threshold != 0:
rawmask[rawdata < photon_threshold] = 1
rawdata[rawdata < photon_threshold] = 0
print("Applying photon threshold before binning: < ", photon_threshold)
####################################################################################
# bin data and mask in the detector plane if not already done during loading #
# binning in the stacking dimension is done at the very end of the data processing #
####################################################################################
if not bin_during_loading and (
(detector.binning[1] != 1) or (detector.binning[2] != 1)
):
print(
"Binning the data: detector vertical axis by",
detector.binning[1],
", detector horizontal axis by",
detector.binning[2],
)
rawdata = util.bin_data(
rawdata, (1, detector.binning[1], detector.binning[2]), debugging=False
)
rawmask = util.bin_data(
rawmask, (1, detector.binning[1], detector.binning[2]), debugging=False
)
rawmask[np.nonzero(rawmask)] = 1
################################################
# pad the data to the shape defined by the ROI #
################################################
rawdata, rawmask = util.pad_from_roi(
arrays=(rawdata, rawmask),
roi=detector.roi,
binning=detector.binning[1:],
pad_value=(0, 1),
)
return rawdata, rawmask, frames_logical, monitor
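# Example (illustrative usage sketch): "detector" and "setup" are assumed to be
# pre-configured Detector/Setup instances; the scan number and threshold are arbitrary.
#
#     rawdata, rawmask, frames_logical, monitor = load_bcdi_data(
#         scan_number=42,
#         detector=detector,
#         setup=setup,
#         bin_during_loading=True,
#         normalize="monitor",
#         photon_threshold=1,
#     )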
def reload_bcdi_data(
data,
mask,
scan_number,
detector,
setup,
normalize=False,
debugging=False,
**kwargs,
):
"""
Reload BCDI data, apply optional threshold, normalization and binning.
:param data: the 3D data array
:param mask: the 3D mask array
:param scan_number: the scan number to load
:param detector: an instance of the class Detector
:param setup: an instance of the class Setup
:param normalize: set to True to normalize by the default monitor of the beamline
:param debugging: set to True to see plots
    :param kwargs:
- 'photon_threshold' = float, photon threshold to apply before binning
:return:
- the updated 3D data and mask arrays
- the monitor values used for the intensity normalization
"""
valid.valid_ndarray(arrays=(data, mask), ndim=3)
# check and load kwargs
valid.valid_kwargs(
kwargs=kwargs,
allowed_kwargs={"photon_threshold"},
name="kwargs",
)
photon_threshold = kwargs.get("photon_threshold", 0)
valid.valid_item(
photon_threshold,
allowed_types=Real,
min_included=0,
name="photon_threshold",
)
normalize_method = "monitor" if normalize else "skip"
nbz, nby, nbx = data.shape
frames_logical = np.ones(nbz)
print(
(data < 0).sum(), " negative data points masked"
) # can happen when subtracting a background
mask[data < 0] = 1
data[data < 0] = 0
# normalize by the incident X-ray beam intensity
if normalize_method == "skip":
print("Skip intensity normalization")
monitor = []
else: # use the default monitor of the beamline
monitor = setup.diffractometer.read_monitor(
scan_number=scan_number,
setup=setup,
)
print("Intensity normalization using " + normalize_method)
data, monitor = diff.normalize_dataset(
array=data,
monitor=monitor,
norm_to_min=True,
savedir=detector.savedir,
debugging=True,
)
# pad the data to the shape defined by the ROI
if (
detector.roi[1] - detector.roi[0] > nby
or detector.roi[3] - detector.roi[2] > nbx
):
start = (np.nan, min(0, detector.roi[0]), min(0, detector.roi[2]))
print("Paddind the data to the shape defined by the ROI")
data = util.crop_pad(
array=data,
pad_start=start,
output_shape=(
data.shape[0],
detector.roi[1] - detector.roi[0],
detector.roi[3] - detector.roi[2],
),
)
mask = util.crop_pad(
array=mask,
pad_value=1,
pad_start=start,
output_shape=(
mask.shape[0],
detector.roi[1] - detector.roi[0],
detector.roi[3] - detector.roi[2],
),
)
# apply optional photon threshold before binning
if photon_threshold != 0:
mask[data < photon_threshold] = 1
data[data < photon_threshold] = 0
print("Applying photon threshold before binning: < ", photon_threshold)
# bin data and mask in the detector plane if needed
# binning in the stacking dimension is done at the very end of the data processing
if (detector.binning[1] != 1) or (detector.binning[2] != 1):
print(
"Binning the data: detector vertical axis by",
detector.binning[1],
", detector horizontal axis by",
detector.binning[2],
)
data = util.bin_data(
data, (1, detector.binning[1], detector.binning[2]), debugging=debugging
)
mask = util.bin_data(
mask, (1, detector.binning[1], detector.binning[2]), debugging=debugging
)
mask[np.nonzero(mask)] = 1
return data, mask, frames_logical, monitor
def show_rocking_curve(
data,
roi_center,
integration_roi=None,
tilt_values=None,
savedir=None,
):
"""
Calculate the integrated intensity along a rocking curve and plot it.
The data is expected to be stacked, the first axis corresponding to the rocking
angle and axes 1 and 2 to the detector plane (vertical, horizontal).
:param data: the stacked rocking curve data
:param roi_center: the position of the center of the region of interest. Most often
this will be the position of the Bragg peak.
:param integration_roi: the region of interest where to integrate the intensity
:param tilt_values: the angular values along the rocking curve
:param savedir: path to the saving directory
:return: a dictionary containing the output metadata
"""
# check parameters
valid.valid_ndarray(data, ndim=3, name="data")
nb_frames = data.shape[0]
valid.valid_container(
roi_center,
container_types=(tuple, list, np.ndarray),
length=3,
item_types=Real,
name="roi_center",
)
valid.valid_container(
integration_roi,
container_types=(tuple, list, np.ndarray),
length=2,
item_types=int,
allow_none=True,
name="integration_roi",
)
if integration_roi is None:
integration_roi = (data.shape[1], data.shape[2])
elif integration_roi[0] > data.shape[1] or integration_roi[1] > data.shape[2]:
        print(
            "integration_roi larger than the frame size, using the full frame instead"
        )
integration_roi = (data.shape[1], data.shape[2])
valid.valid_container(
tilt_values,
container_types=(tuple, list, np.ndarray),
length=nb_frames,
item_types=Real,
allow_none=True,
name="tilt_values",
)
if tilt_values is None:
tilt_values = np.arange(nb_frames)
x_label = "Frame number"
else:
x_label = "Rocking angle (deg)"
valid.valid_container(savedir, container_types=str, allow_none=True, name="savedir")
if savedir is not None:
pathlib.Path(savedir).mkdir(parents=True, exist_ok=True)
rocking_curve = data[
:,
np.clip(roi_center[1] - integration_roi[0] // 2, 0, data.shape[1]) : np.clip(
roi_center[1] + integration_roi[0] // 2, 0, data.shape[1]
),
np.clip(roi_center[2] - integration_roi[1] // 2, 0, data.shape[2]) : np.clip(
roi_center[2] + integration_roi[1] // 2, 0, data.shape[2]
),
].sum(axis=(1, 2))
interpolation = interp1d(tilt_values, rocking_curve, kind="cubic")
interp_points = 5 * nb_frames
interp_tilt = np.linspace(tilt_values.min(), tilt_values.max(), interp_points)
interp_curve = interpolation(interp_tilt)
interp_fwhm = (
len(np.argwhere(interp_curve >= interp_curve.max() / 2))
* (tilt_values.max() - tilt_values.min())
/ (interp_points - 1)
)
print("FWHM by interpolation", str("{:.3f}".format(interp_fwhm)), "deg")
plt.ion()
fig, (ax0, ax1) = plt.subplots(2, 1, sharex="col", figsize=(10, 5))
ax0.plot(tilt_values, rocking_curve, ".")
ax0.plot(interp_tilt, interp_curve)
ax0.axvline(tilt_values[roi_center[0]], color="r", alpha=0.7, linewidth=1)
ax0.set_ylabel("Integrated intensity")
ax0.legend(("data", "interpolation"))
ax0.set_title(f"Rocking curve in a {integration_roi[0]}x{integration_roi[1]} roi")
ax1.plot(tilt_values, np.log10(rocking_curve), ".")
ax1.plot(interp_tilt, np.log10(interp_curve))
ax1.axvline(tilt_values[roi_center[0]], color="r", alpha=0.7, linewidth=1)
ax1.set_xlabel(x_label)
ax1.set_ylabel("Log(integrated intensity)")
ax0.legend(("data", "interpolation"))
plt.pause(0.1)
fig.savefig(savedir + "rocking_curve.png")
plt.close(fig)
fig, _ = plt.subplots(1, 1, figsize=(10, 5))
plt.imshow(np.log10(abs(data[roi_center[0], :, :])), vmin=0, vmax=5)
plt.scatter(
roi_center[2], roi_center[1], color="r", marker="1", alpha=0.7, linewidth=1
)
plt.title(f"Slice at frame {roi_center[0]}")
plt.colorbar()
plt.pause(0.1)
fig.savefig(savedir + "central_slice.png")
plt.close(fig)
plt.ioff()
metadata = {
"tilt_values": tilt_values,
"rocking_curve": rocking_curve,
"interp_tilt_values": interp_tilt,
"interp_rocking_curve": interp_curve,
"interp_fwhm": interp_fwhm,
"COM_rocking_curve": tilt_values[roi_center[0]],
"detector_data_COM": data[roi_center[0], :, :],
}
return metadata
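# Example (illustrative sketch with synthetic data; the ROI center is simply taken at
# the argmax of the stack and the tilt values are made up):
#
#     stack = np.random.poisson(5, size=(51, 128, 128)).astype(float)
#     center = np.unravel_index(stack.argmax(), stack.shape)
#     metadata = show_rocking_curve(
#         stack,
#         roi_center=center,
#         integration_roi=(20, 20),
#         tilt_values=np.linspace(-0.5, 0.5, 51),
#         savedir="./",
#     )
#     print(metadata["interp_fwhm"])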
def zero_pad(array, padding_width=np.zeros(6), mask_flag=False, debugging=False):
"""
Pad obj with zeros.
:param array: 3D array to be padded
    :param padding_width: number of zero pixels to pad on each side
:param mask_flag: set to True to pad with 1, False to pad with 0
:type mask_flag: bool
:param debugging: set to True to see plots
:type debugging: bool
:return: obj padded with zeros
"""
valid.valid_ndarray(arrays=array, ndim=3)
nbz, nby, nbx = array.shape
if debugging:
gu.multislices_plot(
array=array,
sum_frames=False,
plot_colorbar=True,
vmin=0,
vmax=1,
title="Array before padding",
)
if mask_flag:
newobj = np.ones(
(
nbz + padding_width[0] + padding_width[1],
nby + padding_width[2] + padding_width[3],
nbx + padding_width[4] + padding_width[5],
)
)
else:
newobj = np.zeros(
(
nbz + padding_width[0] + padding_width[1],
nby + padding_width[2] + padding_width[3],
nbx + padding_width[4] + padding_width[5],
)
)
newobj[
padding_width[0] : padding_width[0] + nbz,
padding_width[2] : padding_width[2] + nby,
padding_width[4] : padding_width[4] + nbx,
] = array
if debugging:
gu.multislices_plot(
array=newobj,
sum_frames=False,
plot_colorbar=True,
vmin=0,
vmax=1,
title="Array after padding",
)
return newobj
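# Example (illustrative): pad a (4, 4, 4) volume with two zero-valued planes on each
# side of every axis; padding_width is ordered (z0, z1, y0, y1, x0, x1).
#
#     volume = np.ones((4, 4, 4))
#     padded = zero_pad(volume, padding_width=np.array([2, 2, 2, 2, 2, 2]))
#     assert padded.shape == (8, 8, 8)
#     assert padded[2:6, 2:6, 2:6].sum() == volume.sum()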
| 35.803668
| 88
| 0.57399
|
bb061209ecd5aa421f83c5ddd950a05989e1c880
| 208
|
py
|
Python
|
trigger_webapp/trigger_app/apps.py
|
ADACS-Australia/mwa_trigger
|
335752efda6251d61f40179fed9a19cf4e58ba85
|
[
"MIT"
] | null | null | null |
trigger_webapp/trigger_app/apps.py
|
ADACS-Australia/mwa_trigger
|
335752efda6251d61f40179fed9a19cf4e58ba85
|
[
"MIT"
] | 9
|
2022-01-31T05:56:48.000Z
|
2022-03-16T08:03:59.000Z
|
trigger_webapp/trigger_app/apps.py
|
ADACS-Australia/mwa_trigger
|
335752efda6251d61f40179fed9a19cf4e58ba85
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class TriggerAppConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'trigger_app'
def ready(self):
import trigger_app.signals
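# Standard Django wiring (shown for context, not part of the original file): the app
# becomes active once it is listed in settings.py, after which ready() runs at
# startup and the signal receivers defined in trigger_app/signals.py are connected.
#
#     INSTALLED_APPS = [
#         # ...
#         "trigger_app",
#     ]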
| 26
| 56
| 0.740385
|
ac66a0d691935af40cd71aa4b9f0d213457f9571
| 6,822
|
py
|
Python
|
pydm/data_plugins/__init__.py
|
ronpandolfi/pydm
|
0a4ed7626bfd32d8d0c7b04be1359d89ee00f1fe
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
pydm/data_plugins/__init__.py
|
ronpandolfi/pydm
|
0a4ed7626bfd32d8d0c7b04be1359d89ee00f1fe
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
pydm/data_plugins/__init__.py
|
ronpandolfi/pydm
|
0a4ed7626bfd32d8d0c7b04be1359d89ee00f1fe
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
"""
Loads all the data plugins found under the locations given by the
PYDM_DATA_PLUGINS_PATH environment variable (and their subfolders), importing
files that follow the *_plugin.py naming pattern and contain classes that
inherit from the pydm.data_plugins.PyDMPlugin class.
"""
import os
import sys
import inspect
import logging
import imp
import uuid
from collections import deque
from contextlib import contextmanager
from qtpy.QtWidgets import QApplication
from .plugin import PyDMPlugin
from ..utilities import protocol_and_address
from .. import config
logger = logging.getLogger(__name__)
plugin_modules = {}
__read_only = False
global __CONNECTION_QUEUE__
__CONNECTION_QUEUE__ = None
global __DEFER_CONNECTIONS__
__DEFER_CONNECTIONS__ = False
@contextmanager
def connection_queue(defer_connections=False):
global __CONNECTION_QUEUE__
global __DEFER_CONNECTIONS__
if __CONNECTION_QUEUE__ is None:
__CONNECTION_QUEUE__ = deque()
__DEFER_CONNECTIONS__ = defer_connections
yield
if __DEFER_CONNECTIONS__:
return
establish_queued_connections()
def establish_queued_connections():
global __DEFER_CONNECTIONS__
global __CONNECTION_QUEUE__
if __CONNECTION_QUEUE__ is None:
return
try:
while (__CONNECTION_QUEUE__ is not None and
len(__CONNECTION_QUEUE__) > 0):
channel = __CONNECTION_QUEUE__.popleft()
establish_connection_immediately(channel)
QApplication.instance().processEvents()
except IndexError:
pass
finally:
__CONNECTION_QUEUE__ = None
__DEFER_CONNECTIONS__ = False
def establish_connection(channel):
global __CONNECTION_QUEUE__
if __CONNECTION_QUEUE__ is not None:
__CONNECTION_QUEUE__.append(channel)
else:
establish_connection_immediately(channel)
def establish_connection_immediately(channel):
plugin = plugin_for_address(channel.address)
plugin.add_connection(channel)
def plugin_for_address(address):
"""
Find the correct PyDMPlugin for a channel
"""
# Check for a configured protocol
protocol, addr = protocol_and_address(address)
# Use default protocol
if protocol is None and config.DEFAULT_PROTOCOL is not None:
logger.debug("Using default protocol %s for %s",
config.DEFAULT_PROTOCOL, address)
# If no protocol was specified, and the default protocol
# environment variable is specified, try to use that instead.
protocol = config.DEFAULT_PROTOCOL
# Load proper plugin module
if protocol:
try:
return plugin_modules[str(protocol)]
except KeyError as exc:
logger.exception("Could not find protocol for %r", address)
# Catch all in case of improper plugin specification
logger.error("Channel {addr} did not specify a valid protocol "
"and no default protocol is defined. This channel "
"will receive no data. To specify a default protocol, "
"set the PYDM_DEFAULT_PROTOCOL environment variable."
"".format(addr=address))
return None
def add_plugin(plugin):
"""
Add a PyDM plugin to the global registry of protocol vs. plugins
Parameters
----------
plugin: PyDMPlugin
The class of plugin to instantiate
"""
# Warn users if we are overwriting a protocol which already has a plugin
if plugin.protocol in plugin_modules:
logger.warning("Replacing %s plugin with %s for use with protocol %s",
plugin, plugin_modules[plugin.protocol],
plugin.protocol)
plugin_modules[plugin.protocol] = plugin()
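# Example (a minimal sketch, not from the PyDM sources): registering a plugin for a
# made-up "demo" protocol. Only the "protocol" attribute already used in this module
# is relied upon here.
#
#     class DemoPlugin(PyDMPlugin):
#         protocol = "demo"
#
#     add_plugin(DemoPlugin)
#     assert plugin_for_address("demo://some/channel") is plugin_modules["demo"]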
def load_plugins_from_path(locations, token):
"""
Load plugins from file locations that match a specific token
Parameters
----------
locations: list
List of file locations
token : str
Phrase that must match the end of the filename for it to be checked for
PyDMPlugins
Returns
-------
plugins: dict
        Dictionary of plugins added from this folder
"""
added_plugins = dict()
for loc in locations:
for root, _, files in os.walk(loc):
if root.split(os.path.sep)[-1].startswith("__"):
continue
logger.debug("Looking for PyDM Data Plugins at: %s", root)
for name in files:
if name.endswith(token):
try:
logger.debug("Trying to load %s...", name)
sys.path.append(root)
temp_name = str(uuid.uuid4())
module = imp.load_source(temp_name,
os.path.join(root, name))
except Exception as e:
logger.exception("Unable to import plugin file %s."
"This plugin will be skipped."
"The exception raised was: %s",
name, e)
continue
classes = [obj for name, obj in inspect.getmembers(module)
if (inspect.isclass(obj)
and issubclass(obj, PyDMPlugin)
and obj is not PyDMPlugin)]
# De-duplicate classes.
classes = list(set(classes))
for plugin in classes:
if plugin.protocol is not None:
# Add to global plugin list
add_plugin(plugin)
# Add to return dictionary of added plugins
added_plugins[plugin.protocol] = plugin
return added_plugins
def is_read_only():
"""
Check whether or not the app is running with the read only flag set.
Returns
-------
bool
True if read only. False otherwise.
"""
return __read_only
def set_read_only(read_only):
"""
Set the read only flag for the data plugins.
Parameters
----------
read_only : bool
"""
global __read_only
__read_only = read_only
if read_only:
logger.info("Running PyDM in Read Only mode.")
# Load the data plugins from PYDM_DATA_PLUGINS_PATH
logger.debug("*"*80)
logger.debug("* Loading PyDM Data Plugins")
logger.debug("*"*80)
DATA_PLUGIN_TOKEN = "_plugin.py"
path = os.getenv("PYDM_DATA_PLUGINS_PATH", None)
if path is None:
locations = []
else:
locations = path.split(os.pathsep)
# Ensure that we first visit the local data_plugins location
plugin_dir = os.path.dirname(os.path.realpath(__file__))
locations.insert(0, plugin_dir)
load_plugins_from_path(locations, DATA_PLUGIN_TOKEN)
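# Example (illustrative): additional plugin directories can be supplied through the
# environment before this module is imported; any file ending in "_plugin.py" that
# defines a PyDMPlugin subclass with a non-None protocol is registered automatically.
# The directory path below is hypothetical.
#
#     import os
#     os.environ["PYDM_DATA_PLUGINS_PATH"] = "/opt/my_pydm_plugins"
#     import pydm.data_plugins  # discovery runs at import time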
| 32.179245
| 79
| 0.624304
|
3e011cc6f5f243b28e4a6f52e471d18d85b15f95
| 1,013
|
py
|
Python
|
good_spot/filter/migrations/0021_auto_20180329_1459.py
|
jasmine92122/NightClubBackend
|
7f59129b78baaba0e0c25de2b493033b858f1b00
|
[
"MIT"
] | null | null | null |
good_spot/filter/migrations/0021_auto_20180329_1459.py
|
jasmine92122/NightClubBackend
|
7f59129b78baaba0e0c25de2b493033b858f1b00
|
[
"MIT"
] | 5
|
2020-02-12T03:13:11.000Z
|
2022-01-13T01:41:14.000Z
|
good_spot/filter/migrations/0021_auto_20180329_1459.py
|
jasmine92122/NightClubBackend
|
7f59129b78baaba0e0c25de2b493033b858f1b00
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-29 14:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('filter', '0020_auto_20180329_1446'),
]
operations = [
migrations.AddField(
model_name='placetextfilterfield',
name='value_en',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='placetextfilterfield',
name='value_fr',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='placetextfilterfield',
name='value_ru',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='placetextfilterfield',
name='value_uk',
field=models.CharField(max_length=255, null=True),
),
]
| 28.138889
| 62
| 0.595262
|
d6f00bd6b91641b097b243d150a96c81159c0a2a
| 1,012
|
py
|
Python
|
mockthink/test/unit/test_rtime.py
|
scivey/mockthink
|
d8c3ec9228cfa4db4d8a108f60f1b3824e05ef28
|
[
"MIT"
] | 6
|
2016-02-25T03:51:27.000Z
|
2019-10-26T23:20:25.000Z
|
mockthink/test/unit/test_rtime.py
|
alexa-infra/mockthink
|
d8c3ec9228cfa4db4d8a108f60f1b3824e05ef28
|
[
"MIT"
] | 3
|
2016-04-13T18:34:23.000Z
|
2021-01-16T13:35:49.000Z
|
mockthink/test/unit/test_rtime.py
|
deadscivey/mockthink
|
d8c3ec9228cfa4db4d8a108f60f1b3824e05ef28
|
[
"MIT"
] | 7
|
2016-04-12T20:44:32.000Z
|
2021-01-15T07:53:03.000Z
|
import unittest
import datetime
import rethinkdb
from mockthink.test.common import assertEqual
from ... import rtime
class TestRTime(unittest.TestCase):
def test_to_date(self):
timezone = rethinkdb.make_timezone('00:00')
dt = datetime.datetime(2014, 6, 3, 12, 5, 36, tzinfo=timezone)
as_date = rtime.to_date(dt)
assertEqual(2014, as_date.year)
assertEqual(6, as_date.month)
assertEqual(3, as_date.day)
assertEqual(0, as_date.hour)
assertEqual(0, as_date.minute)
assertEqual(0, as_date.second)
assertEqual(timezone, as_date.tzinfo)
def test_time_of_day_seconds(self):
dt = datetime.datetime(2014, 1, 1, 2, 10, 30)
assertEqual(7830, rtime.time_of_day_seconds(dt))
def test_make_time(self):
dt = rtime.make_time(2014, 3, 2)
assertEqual(2014, dt.year)
assertEqual(3, dt.month)
assertEqual(2, dt.day)
self.assertTrue(isinstance(dt.tzinfo, rethinkdb.ast.RqlTzinfo))
| 32.645161
| 71
| 0.667984
|
4fd874799ecdde1145b0c4ca103892a483c1a06c
| 7,894
|
py
|
Python
|
AhrsManager.py
|
UM-LoCoLab/NeuroLocoMiddleware
|
0dfedeed8d6d8a41518b357b33ee92324b5029c3
|
[
"MIT"
] | null | null | null |
AhrsManager.py
|
UM-LoCoLab/NeuroLocoMiddleware
|
0dfedeed8d6d8a41518b357b33ee92324b5029c3
|
[
"MIT"
] | null | null | null |
AhrsManager.py
|
UM-LoCoLab/NeuroLocoMiddleware
|
0dfedeed8d6d8a41518b357b33ee92324b5029c3
|
[
"MIT"
] | null | null | null |
from SoftRealtimeLoop import SoftRealtimeLoop
import time
import csv
import sys
import numpy as np
from os.path import realpath
sys.path.append(r'/usr/share/python3-mscl/') # Path of the MSCL library
import traceback
import mscl
class AhrsManager():
def __init__(self, csv_file_name=None, dt=0.01, port="/dev/ttyACM0"):
self.port = realpath(port) # dereference symlinks
self.save_csv = not (csv_file_name is None)
self.csv_file_name = csv_file_name
self.csv_file = None
self.csv_writer = None
self.prevTime = 0.0
self.R = np.eye(3)
self.init_R = None
self.R_prime = None
self.dt = dt
self.xd = np.zeros((3,1))
self.x = np.zeros((3,1))
self.xd_forget = 0.0
self.x_forget = 0.0
self.acc_bias = np.zeros((3,1))
self.lp_xdd = 0.0
def __enter__(self):
if self.save_csv:
with open(self.csv_file_name,'w') as fd:
writer = csv.writer(fd)
writer.writerow(["pi_time",
"r00", "r01", "r02",
"r10", "r11", "r12",
"r20", "r21", "r22"])
self.csv_file = open(self.csv_file_name,'a').__enter__()
self.csv_writer = csv.writer(self.csv_file)
self.connection = mscl.Connection.Serial(self.port, 921600)
self.node = mscl.InertialNode(self.connection)
# self.node.setToIdle()
# self.deltaTime = 0
# self.sampleRate = mscl.SampleRate(1,500)
#Resume node for streaming
# self.node.resume()
#if the node supports AHRS/IMU
if self.node.features().supportsCategory(mscl.MipTypes.CLASS_AHRS_IMU):
self.node.enableDataStream(mscl.MipTypes.CLASS_AHRS_IMU)
#if the self.node supports Estimation Filter
if self.node.features().supportsCategory(mscl.MipTypes.CLASS_ESTFILTER):
self.node.enableDataStream(mscl.MipTypes.CLASS_ESTFILTER)
#if the self.node supports GNSS
if self.node.features().supportsCategory(mscl.MipTypes.CLASS_GNSS):
self.node.enableDataStream(mscl.MipTypes.CLASS_GNSS)
        # Clean the internal circular buffer (non-blocking read of any pending packets)
# self.packets = self.node.getDataPackets(0)
packets = self.node.getDataPackets(0)
return self
def __exit__(self, etype, value, tb):
""" Closes the file properly """
if self.save_csv:
self.csv_file.__exit__(etype, value, tb)
self.node.setToIdle()
if not (etype is None):
traceback.print_exception(etype, value, tb)
def get_sagittal_angle(self):
# this is the X-Y plane angle on the device, relative to initial position.
return 180/np.pi*np.arctan2(self.R_prime[1,0],self.R_prime[0,0])
return "%.2f degrees"%(180/np.pi*np.arctan2(self.R_prime[1,0],self.R_prime[0,0]))
return "\n%.2f, %.2f, %.2f\n%.2f, %.2f, %.2f\n%.2f, %.2f, %.2f\n\n"%(
self.R_prime[0,0], self.R_prime[0,1], self.R_prime[0,2],
self.R_prime[1,0], self.R_prime[1,1], self.R_prime[1,2],
self.R_prime[2,0], self.R_prime[2,1], self.R_prime[2,2],
)
def update(self):
t0=time.time()
microstrainData = self.readIMUnode(timeout=0)# 0ms
# print([microstrainDatum.keys() for microstrainDatum in microstrainData ])
for datum in microstrainData:
if 'orientMatrix' in datum.keys():
self.R = datum['orientMatrix']
if self.init_R is None:
self.init_R = np.array(self.R)
self.R_prime = self.R@self.init_R.T
if 'deltaVelX' in datum.keys():
self.xdd = self.R_prime.T@np.array([[datum['deltaVelX'], datum['deltaVelY'], datum['deltaVelZ']]]).T*9.81/self.dt
self.lp_xdd += 0.4*(self.xdd-self.lp_xdd)
self.xd += (self.xdd - self.xd_forget * self.xd + self.acc_bias) * self.dt
if np.linalg.norm(self.xdd - self.lp_xdd)<1e-1:
self.xd*=0
self.acc_bias = -self.lp_xdd
self.x += (self.xd - self.x_forget * self.x)* self.dt
# self.R = self.readIMUnode()['orientMatrix']
# self.R= np.eye(3)
dur = time.time()-t0
if self.save_csv:
self.csv_writer.writerow([time.time()
, self.R[0,0], self.R[0,1], self.R[0,2]
, self.R[1,0], self.R[1,1], self.R[1,2]
, self.R[2,0], self.R[2,1], self.R[2,2]
])
#print(self.R[0,0], self.R[1,1], self.R[2,2])
return 1
def start_cal(self):
self.t0=time.time()
self.xd = np.zeros((3,1))
self.x = np.zeros((3,1))
print('start cal')
def stop_cal(self):
cal_time = time.time()-self.t0
self.acc_bias = -self.xd/cal_time
self.xd = np.zeros((3,1))
self.x = np.zeros((3,1))
self.xd_forget = .01
self.x_forget = .01
print('stop cal', self.acc_bias.T)
def readIMUnode(self, timeout = 0):
packets = self.node.getDataPackets(timeout)
microstrainData = []
# print("found", len(packets), "packets")
for packet in packets:
microstrainDatum = dict()
for dataPoint in packet.data():
# print(dataPoint.channelName())
if dataPoint.storedAs() == 0:
microstrainDatum[dataPoint.channelName()] = dataPoint.as_float()
elif dataPoint.storedAs() == 3:
# print(dir(dataPoint))
# ts = dataPoint.as_Timestamp()
microstrainDatum[dataPoint.channelName()] = None
elif dataPoint.storedAs() == 1:
# print(dir(dataPoint))
ts = dataPoint.as_double()
microstrainDatum[dataPoint.channelName()] = ts
elif dataPoint.storedAs() == 9:
mat = dataPoint.as_Matrix()
npmat = np.array([[mat.as_floatAt(i,j) for j in range(3)] for i in range(3)])
microstrainDatum[dataPoint.channelName()] = npmat
else:
print("no solution for datapoint stored as", dataPoint.storedAs(), dataPoint.channelName())
microstrainDatum[dataPoint.channelName()] = None
microstrainData.append(microstrainDatum)
return microstrainData
def get_data(self):
init_time = time.perf_counter_ns()
#get the data in first packet from the node, with a timeout of 500 milliseconds
data = self.readIMUnode(timeout = 500)
delta_time = (time.perf_counter_ns() - init_time)*1e-6
self.grav_x = data['grav_x'][0]/9.81
self.grav_y = data['grav_y'][0]/9.81
self.grav_z = data['grav_z'][0]/9.81
return data, delta_time
def get_euler_angles(self):
self.roll_usedef = -np.arccos(np.dot( np.array([0,1,0]) , np.array([self.grav_x,self.grav_y,self.grav_z]) )) + np.pi/2
self.pitch_usedef = np.arctan2(self.grav_y, self.grav_x)
eulerAngles = (self.roll_usedef,self.pitch_usedef,self.yaw_usedef)
# eulerAngles = extractEulerAngles(R_update)
return eulerAngles
def main():
with AhrsManager(csv_file_name="test_ahrs.csv", port="/dev/ttyAhrsB") as am:
cal=False
for i,t in enumerate(SoftRealtimeLoop(dt=1.0/200, report=True)):
am.update()
# if t<.01:
# am.start_cal()
# if t>1.5 and not cal:
# cal=True
# am.stop_cal()
if i%20==0: print(am.x)
# print(am.get_sagittal_angle())
if __name__ == '__main__':
main()
| 37.770335
| 129
| 0.563973
|
5b7e364586e9f17917fc724464967c711cdd7fc6
| 10,288
|
py
|
Python
|
geoist/magmod/tests/dipole_coords.py
|
CHEN-Zhaohui/geoist
|
06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b
|
[
"MIT"
] | null | null | null |
geoist/magmod/tests/dipole_coords.py
|
CHEN-Zhaohui/geoist
|
06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b
|
[
"MIT"
] | null | null | null |
geoist/magmod/tests/dipole_coords.py
|
CHEN-Zhaohui/geoist
|
06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------------
#
# Magnetic Dipole Coordinates - tests
#
# Author: Steve Shi Chen <chenshi80@gmail.com>
#
# Original Author: Martin Paces <martin.paces@eox.at>
#-------------------------------------------------------------------------------
# Copyright (C) 2019 Geoist team
#
#-------------------------------------------------------------------------------
# pylint: disable=missing-docstring,invalid-name,no-name-in-module
from unittest import TestCase, main
from itertools import product
from math import pi
from numpy import array, asarray, zeros, linspace, meshgrid, sin, cos, dot
from numpy.random import random
from numpy.testing import assert_allclose
from magmod._pymm import (
convert, vrot_sph2cart, vrot_cart2sph,
GEOCENTRIC_SPHERICAL, GEOCENTRIC_CARTESIAN, GEODETIC_ABOVE_WGS84,
)
from magmod.dipole_coords import (
get_dipole_rotation_matrix,
convert_to_dipole,
vrot_from_dipole,
)
DEG2RAD = pi/180.0
class TestDipoleRotationMatrix(TestCase):
@staticmethod
def reference_rotation_matrix(latitude, longitude):
sin_lat, cos_lat = sin(DEG2RAD*latitude), cos(DEG2RAD*latitude)
sin_lon, cos_lon = sin(DEG2RAD*longitude), cos(DEG2RAD*longitude)
matrix = dot(
# rotate around azimuth axis by -longitude
array([
[cos_lon, -sin_lon, 0],
[sin_lon, cos_lon, 0],
[0, 0, 1],
]),
            # rotate around elevation axis by (90 deg - latitude)
array([
[sin_lat, 0, cos_lat],
[0, 1, 0],
[-cos_lat, 0, sin_lat],
])
)
return matrix
@staticmethod
def eval_rotation_matrix(latitude, longitude):
return get_dipole_rotation_matrix(latitude, longitude)
def test_rotation_matrix(self):
coords = [
(lat, lon) for lat, lon
in product(range(-90, 91, 5), range(-180, 181, 10))
]
for lat, lon in coords:
matrix = self.eval_rotation_matrix(lat, lon)
assert_allclose(
matrix,
self.reference_rotation_matrix(lat, lon),
atol=1e-14
)
assert_allclose(
dot(matrix.transpose(), matrix),
[(1, 0, 0), (0, 1, 0), (0, 0, 1)],
atol=1e-14
)
class TestConvertToDipoleCoordinates(TestCase):
@staticmethod
def reference_convert_to_dipole(coords, latitude, longitude):
rotation_matrix = get_dipole_rotation_matrix(latitude, longitude)
coords = convert(coords, GEOCENTRIC_SPHERICAL, GEOCENTRIC_CARTESIAN)
coords = dot(coords, rotation_matrix)
return coords
@staticmethod
def eval_convert_to_dipole(coords, latitude, longitude):
# to avoid pole longitude ambiguity compare Cartesian coordinates
return convert(
convert_to_dipole(coords, latitude, longitude),
GEOCENTRIC_SPHERICAL, GEOCENTRIC_CARTESIAN
)
@property
def coordinates(self):
return array([
(lat, lon, 6371.2*(1.0 + random())) for lat, lon
in product(range(-90, 91, 5), range(-180, 181, 10))
])
def test_convert_to_dipole(self):
north_pole_coords = [
(lat, lon) for lat, lon
in product(range(-90, 91, 10), range(-180, 181, 20))
]
for lat, lon in north_pole_coords:
coords = self.coordinates
assert_allclose(
self.eval_convert_to_dipole(coords, lat, lon),
self.reference_convert_to_dipole(coords, lat, lon),
atol=1e-8
)
def test_convert_to_dipole_sanity_check(self):
assert_allclose(
self.eval_convert_to_dipole([
(80, -170, 1.0),
(-80, 10, 1.0),
(-10, -170, 1.0),
(10, 10, 1.0),
(0, -80, 1.0),
(0, 100, 1.0),
], 80, -170),
[
(0, 0, 1),
(0, 0, -1),
(1, 0, 0),
(-1, 0, 0),
(0, 1, 0),
(0, -1, 0),
],
atol=1e-12
)
class VRotFromDipoleMixIn(object):
target_coords_type = None
shape = (37, 37)
@property
def vectors(self):
return 2.0*random(self.shape + (3,)) - 1.0
@property
def coords(self):
coords = zeros(self.shape + (3,))
coords[..., 1], coords[..., 0] = meshgrid(
linspace(-180, 180, self.shape[1]),
linspace(-90, 90, self.shape[0])
)
coords[..., 2] = 6371.2
return coords
@classmethod
def reference_vrot_from_dipole(cls, vectors, coords, latitude, longitude):
coords = asarray(coords)
rotation_matrix = get_dipole_rotation_matrix(latitude, longitude)
coords_dipole = convert_to_dipole(coords, latitude, longitude)
lat_dipole = coords_dipole[..., 0]
lon_dipole = coords_dipole[..., 1]
vectors = vrot_sph2cart(vectors, lat_dipole, lon_dipole)
vectors = dot(vectors, rotation_matrix.transpose())
if cls.target_coords_type != GEOCENTRIC_CARTESIAN:
coords_out = convert(
coords, GEOCENTRIC_SPHERICAL, cls.target_coords_type
)
lat_out = coords_out[..., 0]
lon_out = coords_out[..., 1]
vectors = vrot_cart2sph(vectors, lat_out, lon_out)
return vectors
@classmethod
def eval_vrot_from_dipole(cls, vectors, coords, latitude, longitude):
coords = asarray(coords)
coords_dipole = convert_to_dipole(coords, latitude, longitude)
lat_dipole = coords_dipole[..., 0]
lon_dipole = coords_dipole[..., 1]
if cls.target_coords_type != GEOCENTRIC_CARTESIAN:
coords_out = convert(
coords, GEOCENTRIC_SPHERICAL, cls.target_coords_type
)
lat_out = coords_out[..., 0]
lon_out = coords_out[..., 1]
else:
lat_out, lon_out = None, None
return vrot_from_dipole(
vectors, latitude, longitude, lat_dipole, lon_dipole,
lat_out, lon_out, cls.target_coords_type
)
def test_vrot_dipole2spherical(self):
north_pole_coords = [
(lat, lon) for lat, lon
in product(range(-90, 91, 10), range(-180, 181, 20))
]
for lat, lon in north_pole_coords:
coords = self.coords
vects = self.vectors
assert_allclose(
self.eval_vrot_from_dipole(vects, coords, lat, lon),
self.reference_vrot_from_dipole(vects, coords, lat, lon),
atol=1e-12
)
class TestVRotDipoleToCartesian(TestCase, VRotFromDipoleMixIn):
target_coords_type = GEOCENTRIC_CARTESIAN
def test_vrot_dipole_to_cartesian_sanity_check(self):
lat_ngp, lon_ngp = 80, -170
vectors = array([
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
])
sin10, cos10 = sin(DEG2RAD*10), cos(DEG2RAD*10)
input_output_pairs = [
((-10, -170), (0, 0), [
(-sin10*cos10, -sin10**2, cos10),
(sin10, -cos10, 0),
(-cos10**2, -sin10*cos10, -sin10),
]),
((10, 10), (0, 180), [
(-sin10*cos10, -sin10**2, cos10),
(-sin10, cos10, 0),
(cos10**2, sin10*cos10, sin10),
]),
((0, -80), (0, 90), [
(-sin10*cos10, -sin10**2, cos10),
(cos10**2, sin10*cos10, sin10),
(sin10, -cos10, 0),
]),
((0, 100), (0, -90), [
(-sin10*cos10, -sin10**2, cos10),
(-cos10**2, -sin10*cos10, -sin10),
(-sin10, cos10, 0),
]),
((80, -170), (90, 0), [
(cos10**2, sin10*cos10, sin10),
(sin10, -cos10, 0),
(-sin10*cos10, -sin10**2, cos10),
]),
((-80, 10), (-90, 0), [
(-cos10**2, -sin10*cos10, -sin10),
(sin10, -cos10, 0),
(sin10*cos10, sin10**2, -cos10),
]),
]
for (lat_sph, lon_sph), (lat_dip, lon_dip), expected in input_output_pairs:
assert_allclose(
vrot_from_dipole(
vectors, lat_ngp, lon_ngp, lat_dip, lon_dip,
lat_sph, lon_sph, self.target_coords_type,
), expected, atol=1e-12
)
class VRotFromDipoleToSphericalMixIn(object):
target_coords_type = GEOCENTRIC_SPHERICAL
def test_vrot_dipole_to_spherical_sanity_check(self):
lat_ngp, lon_ngp = 80, -170
vectors = array([
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
])
sin10, cos10 = sin(DEG2RAD*10), cos(DEG2RAD*10)
input_output_pairs = [
((-10, -170), (0, 0), [(1, 0, 0), (0, 1, 0), (0, 0, 1),]),
((10, 10), (0, 180), [(1, 0, 0), (0, 1, 0), (0, 0, 1),]),
(
(0, -80), (0, 90),
[(cos10, -sin10, 0), (sin10, cos10, 0), (0, 0, 1)]
),
(
(0, 100), (0, -90),
[(cos10, sin10, 0), (-sin10, cos10, 0), (0, 0, 1)]
),
((80, -170), (90, 0), [(1, 0, 0), (0, 1, 0), (0, 0, 1),]),
((-80, 10), (-90, 0), [(-1, 0, 0), (0, -1, 0), (0, 0, 1),]),
]
for (lat_sph, lon_sph), (lat_dip, lon_dip), expected in input_output_pairs:
assert_allclose(
vrot_from_dipole(
vectors, lat_ngp, lon_ngp, lat_dip, lon_dip,
lat_sph, lon_sph, self.target_coords_type,
), expected, atol=1e-12
)
class TestVRotDipoleToSpherical(TestCase, VRotFromDipoleToSphericalMixIn):
target_coords_type = GEOCENTRIC_SPHERICAL
class TestVRotDipoleToWGS84(TestCase, VRotFromDipoleToSphericalMixIn):
target_coords_type = GEODETIC_ABOVE_WGS84
if __name__ == "__main__":
main()
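# Quick sanity check (illustrative, not part of the original test suite): with the
# geomagnetic north pole placed at latitude 90 deg, longitude 0 deg, the dipole frame
# coincides with the geocentric frame and the rotation matrix reduces to the identity.
#
#     from numpy import eye
#     assert_allclose(get_dipole_rotation_matrix(90.0, 0.0), eye(3), atol=1e-14)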
| 33.511401
| 83
| 0.51701
|
d30d00c8af2158d9250b963be20cba1b9f6b6c35
| 1,712
|
py
|
Python
|
venv/lib/python3.6/site-packages/xero_python/accounting/models/request_empty.py
|
6enno/FarmXero
|
881b1e6648e927631b276e66a4c5287e4de2cbc1
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/xero_python/accounting/models/request_empty.py
|
6enno/FarmXero
|
881b1e6648e927631b276e66a4c5287e4de2cbc1
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/xero_python/accounting/models/request_empty.py
|
6enno/FarmXero
|
881b1e6648e927631b276e66a4c5287e4de2cbc1
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class RequestEmpty(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"status": "str"}
attribute_map = {"status": "Status"}
def __init__(self, status=None): # noqa: E501
"""RequestEmpty - a model defined in OpenAPI""" # noqa: E501
self._status = None
self.discriminator = None
if status is not None:
self.status = status
@property
def status(self):
"""Gets the status of this RequestEmpty. # noqa: E501
Need at least one field to create an empty JSON payload # noqa: E501
:return: The status of this RequestEmpty. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this RequestEmpty.
Need at least one field to create an empty JSON payload # noqa: E501
:param status: The status of this RequestEmpty. # noqa: E501
:type: str
"""
self._status = status
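    # Example (illustrative): the model only carries an optional status string, which
    # serves as the minimal non-empty JSON payload mentioned in the docstrings.
    #
    #     payload = RequestEmpty(status="OK")
    #     assert payload.status == "OK"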
| 25.552239
| 124
| 0.613318
|
ea16f064ee70077956c59a501e7dae311dadb774
| 247,402
|
py
|
Python
|
nPYc/objects/_targetedDataset.py
|
ghaggart/nPYc-Toolbox
|
d0160b476581fbd695f3f5f0303048466ed95864
|
[
"MIT"
] | 14
|
2018-01-23T23:10:40.000Z
|
2022-02-03T15:15:52.000Z
|
nPYc/objects/_targetedDataset.py
|
ghaggart/nPYc-Toolbox
|
d0160b476581fbd695f3f5f0303048466ed95864
|
[
"MIT"
] | 76
|
2018-01-24T17:37:25.000Z
|
2022-03-23T14:12:54.000Z
|
nPYc/objects/_targetedDataset.py
|
ghaggart/nPYc-Toolbox
|
d0160b476581fbd695f3f5f0303048466ed95864
|
[
"MIT"
] | 11
|
2018-01-25T11:35:47.000Z
|
2022-03-07T15:04:02.000Z
|
"""
Module for the import and manipulation of quantified targeted MS data sets.
"""
import copy
import os
import re
from datetime import datetime
import numpy
import pandas
import collections
import warnings
from .._toolboxPath import toolboxPath
from ._dataset import Dataset
from ..utilities import normalisation, rsd
from ..enumerations import VariableType, AssayRole, SampleType, QuantificationType, CalibrationMethod, AnalyticalPlatform
class TargetedDataset(Dataset):
"""
TargetedDataset(dataPath, fileType='TargetLynx', sop='Generic', \*\*kwargs)
:py:class:`~TargetedDataset` extends :py:class:`Dataset` to represent quantitative datasets, where compounds are already identified, the exactitude of the quantification can be established, units are known and calibration curve or internal standards are employed.
    The :py:class:`~TargetedDataset` class includes methods to apply limits of quantification (LLOQ and ULOQ), merge multiple analytical batches, and report the accuracy and precision of each measurement.
In addition to the structure of :py:class:`~Dataset`, :py:class:`~TargetedDataset` requires the following attributes:
* :py:attr:`~TargetedDataset.expectedConcentration`:
A :math:`n` × :math:`m` pandas dataframe of expected concentrations (matching the :py:attr:`~Dataset.intensityData` dimension), with column names matching :py:attr:`~TargetedDataset.featureMetadata[‘Feature Name’]`
* :py:attr:`~TargetedDataset.calibration`:
A dictionary containing pandas dataframe describing calibration samples:
* :py:attr:`~TargetedDataset.calibration['calibIntensityData']`:
A :math:`r` x :math:`m` numpy matrix of measurements. Features must match features in :py:attr:`~TargetedDataset.intensityData`
* :py:attr:`~TargetedDataset.calibration['calibSampleMetadata']`:
A :math:`r` x :math:`m` pandas dataframe of calibration sample identifiers and metadata
* :py:attr:`~TargetedDataset.calibration['calibFeatureMetadata']`:
A :math:`m` × :math:`q` pandas dataframe of feature identifiers and metadata
* :py:attr:`~TargetedDataset.calibration['calibExpectedConcentration']`:
A :math:`r` × :math:`m` pandas dataframe of calibration samples expected concentrations
* :py:attr:`~TargetedDataset.Attributes` must contain the following (can be loaded from a method specific JSON on import):
* ``methodName``:
A (str) name of the method
* ``externalID``:
A list of external ID, each external ID must also be present in *Attributes* as a list of identifier (for that external ID) for each feature. For example, if ``externalID=['PubChem ID']``, ``Attributes['PubChem ID']=['ID1','ID2','','ID75']``
* :py:attr:`~TargetedDataset.featureMetadata` expects the following columns:
* ``quantificationType``:
A :py:class:`~nPYc.enumerations.QuantificationType` enum specifying the exactitude of the quantification procedure employed.
* ``calibrationMethod``:
A :py:class:`~nPYc.enumerations.CalibrationMethod` enum specifying the calibration method employed.
* ``Unit``:
            A (str) unit corresponding to the feature measurement value.
* ``LLOQ``:
The lowest limit of quantification, used to filter concentrations < LLOQ
* ``ULOQ``:
The upper limit of quantification, used to filter concentrations > ULOQ
* externalID:
All externalIDs listed in :py:attr:`~TargetedDataset.Attributes['externalID']` must be present as their own column
Currently targeted assay results processed using **TargetLynx** or **Bruker quantification results** can be imported.
    To create an import for any other form of semi-quantitative or quantitative results, the procedure is as follows:
* Create a new ``fileType == 'myMethod'`` entry in :py:meth:`~TargetedDataset.__init__`
* Define functions to populate all expected dataframes (using file readers, JSON,...)
    * Separate calibration samples from study samples (store in :py:attr:`~TargetedDataset.calibration`). *If none exist, initialise empty dataframes with the correct number of columns and column names.*
* Execute pre-processing steps if required (note: all feature values should be expressed in the unit listed in :py:attr:`~TargetedDataset.featureMetadata['Unit']`)
* Apply limits of quantification using :py:meth:`~TargetedDataset._applyLimitsOfQuantification`. (This function does not apply limits of quantification to features marked as :py:class:`~nPYc.enumerations.QuantificationType` == QuantificationType.Monitored for compounds monitored for relative information.)
    The resulting :py:class:`~TargetedDataset` created must satisfy the criteria for *BasicTargetedDataset*, which can be checked with :py:meth:`~TargetedDataset.validateObject` (listing the minimum requirements for all class methods).
* ``fileType == 'TargetLynx'`` to import data processed using **TargetLynx**
TargetLynx import operates on ``xml`` files exported *via* the 'File -> Export -> XML' TargetLynx menu option. Import requires a ``calibration_report.csv`` providing lower and upper limits of quantification (LLOQ, ULOQ) with the ``calibrationReportPath`` keyword argument.
        Targeted data measurements as well as calibration report information are read and mapped with pre-defined SOPs. All measurements are converted to pre-defined units, and measurements below the lowest limit of quantification or above the upper limit of quantification are replaced. Once the import is finished, only analysed samples are returned (no calibration samples) and only features mapped onto the pre-defined SOP and sufficiently described.
Instructions to created new ``TargetLynx`` SOP can be found on the :doc:`generation of targeted SOPs <configuration/targetedSOPs>` page.
Example: ``TargetedDataset(datapath, fileType='TargetLynx', sop='OxylipinMS', calibrationReportPath=calibrationReportPath, sampleTypeToProcess=['Study Sample','QC'], noiseFilled=False, onlyLLOQ=False, responseReference=None)``
* ``sop``
Currently implemented are `'OxylipinMS'` and `'AminoAcidMS'`
`AminoAcidMS`: Gray N. `et al`. Human Plasma and Serum via Precolumn Derivatization with 6‑Aminoquinolyl‑N‑hydroxysuccinimidyl Carbamate: Application to Acetaminophen-Induced Liver Failure. `Analytical Chemistry`, 2017, 89, 2478−87.
`OxylipinMS`: Wolfer AM. `et al.` Development and Validation of a High-Throughput Ultrahigh-Performance Liquid Chromatography-Mass Spectrometry Approach for Screening of Oxylipins and Their Precursors. `Analytical Chemistry`, 2015, 87 (23),11721–31
* ``calibrationReportPath``
Path to the calibration report `csv` following the provided report template.
The following columns are required (leave an empty value to reject a compound):
* Compound
The compound name, identical to the one employed in the SOP `json` file.
* TargetLynx ID
The compound TargetLynx ID, identical to the one employed in the SOP `json` file.
* LLOQ
Lowest limit of quantification concentration, in the same unit as indicated in TargetLynx.
* ULOQ
Upper limit of quantification concentration, in the same unit as indicated in TargetLynx.
The following columns are expected by :py:meth:`~TargetedDataset._targetLynxApplyLimitsOfQuantificationNoiseFilled`:
* Noise (area)
Area integrated in a blank sample at the same retention time as the compound of interest (if left empty noise concentration calculation cannot take place).
* a
:math:`a` coefficient in the calibration equation (if left empty noise concentration calculation cannot take place).
* b
:math:`b` coefficient in the calibration equation (if left empty noise concentration calculation cannot take place).
The following columns are recommended but not expected:
* Cpd Info
Additional information relating to the compound (can be left empty).
* r
:math:`r` goodness of fit measure for the calibration equation (can be left empty).
* r2
:math:`r^2` goodness of fit measure for the calibration equation (can be left empty).
* ``sampleTypeToProcess``
List of *['Study Sample','Blank','QC','Other']* for the sample types to process as defined in MassLynx. Only samples in 'sampleTypeToProcess' are returned. Calibrants should not be processed and are not returned. Most uses should only require `'Study Sample'` as quality controls are identified based on sample names by subsequent functions. `Default value is '['Study Sample','QC']'`.
* ``noiseFilled``
If True values <LLOQ will be replaced by a concentration equivalent to the noise level in a blank. If False <LLOQ is replaced by :math:`-inf`. `Default value is 'False'`
* ``onlyLLOQ``
If True only correct <LLOQ, if False correct <LLOQ and >ULOQ. `Default value is 'False'`.
* ``responseReference``
If noiseFilled=True the noise concentration needs to be calculated. Provide the 'Sample File Name' of a reference sample to use in order to establish the response to use, or list of samples to use (one per feature). If None, the middle of the calibration will be employed. `Default value is 'None'`.
* ``keepPeakInfo``
If keepPeakInfo=True (default `False`) adds the :py:attr:`peakInfo` dictionary to the :py:class:`~TargetedDataset.calibration`. :py:attr:`peakInfo` contains the `peakResponse`, `peakArea`, `peakConcentrationDeviation`, `peakIntegrationFlag` and `peakRT`.
* ``keepExcluded``
If keepExcluded=True (default `False`), import exclusions (:py:attr:`excludedImportSampleMetadata`, :py:attr:`excludedImportFeatureMetadata`, :py:attr:`excludedImportIntensityData` and :py:attr:`excludedImportExpectedConcentration`) are kept in the object.
* ``keepIS``
If keepIS=True (default `False`), features marked as Internal Standards (IS) are retained.
* ``fileType = 'Bruker Quantification'`` to import Bruker quantification results
* ``nmrRawDataPath``
Path to the parent folder where all result files are stored. All subfolders will be parsed and the ``.xml`` results files matching the ``fileNamePattern`` imported.
* ``fileNamePattern``
Regex to recognise the result data xml files
* ``pdata``
To select the right pdata folders (default 1)
Two forms of Bruker quantification results are supported and selected using the ``sop`` option: *BrukerQuant-UR* and *Bruker BI-LISA*
* ``sop = 'BrukerQuant-UR'``
Example: ``TargetedDataset(nmrRawDataPath, fileType='Bruker Quantification', sop='BrukerQuant-UR', fileNamePattern='.*?urine_quant_report_b\.xml$', unit='mmol/mol Crea')``
* ``unit``
If features are duplicated with different units, ``unit`` limits the import to features matching said unit. (In case of duplication and no ``unit``, all available units will be listed)
* ``sop = 'BrukerBI-LISA'``
Example: ``TargetedDataset(nmrRawDataPath, fileType='Bruker Quantification', sop='BrukerBI-LISA', fileNamePattern='.*?results\.xml$')``
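As a minimal end-to-end sketch (file paths are placeholders; the SOP must match the assay)::

    dataset = TargetedDataset('/path/to/targetlynx_export.xml', fileType='TargetLynx', sop='OxylipinMS',
                              calibrationReportPath='/path/to/calibration_report.csv')
    print(dataset.noSamples, dataset.noFeatures)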
"""
def __init__(self, datapath, fileType='TargetLynx', sop='Generic', **kwargs):
"""
Initialisation and pre-processing of input data (load files, match data with calibration and SOP, and apply limits of quantification).
"""
super().__init__(sop=sop, **kwargs)
self.filePath, fileName = os.path.split(datapath)
self.fileName, fileExtension = os.path.splitext(fileName)
self.name = self.fileName
# Load files and match data, calibration report and SOP, then apply the limits of quantification
if fileType == 'TargetLynx':
# Read files, filter calibration samples, filter IS, applyLLOQ, clean object
self._loadTargetLynxDataset(datapath, **kwargs)
# Finalise object
self.VariableType = VariableType.Discrete
self.AnalyticalPlatform = AnalyticalPlatform.MS
self.initialiseMasks()
elif fileType == 'Bruker Quantification':
# Read files, clean object
self._loadBrukerXMLDataset(datapath, **kwargs)
# Finalise object
self.VariableType = VariableType.Discrete
self.AnalyticalPlatform = AnalyticalPlatform.NMR
self.initialiseMasks()
elif fileType == 'empty':
# Build empty object for testing
pass
else:
raise NotImplementedError
# Check the final object is valid and log
if fileType != 'empty':
validDataset = self.validateObject(verbose=False, raiseError=False, raiseWarning=False)
if not validDataset['BasicTargetedDataset']:
raise ValueError('Import Error: The imported dataset does not satisfy the Basic TargetedDataset definition')
self.Attributes['Log'].append([datetime.now(),
'%s instance initiated, with %d samples, %d features, from %s'
% (self.__class__.__name__, self.noSamples, self.noFeatures, datapath)])
# Check later
if 'Metadata Available' not in self.sampleMetadata:
self.sampleMetadata['Metadata Available'] = False
@property
def rsdSP(self):
"""
Returns percentage :term:`relative standard deviations<RSD>` for each feature in the dataset, calculated on samples with the Assay Role :py:attr:`~nPYc.enumerations.AssayRole.PrecisionReference` and Sample Type :py:attr:`~nPYc.enumerations.SampleType.StudyPool` in :py:attr:`~Dataset.sampleMetadata`.
Implemented as a backup to :py:meth:`accuracyPrecision` when no expected concentrations are known.
:return: Vector of feature RSDs
:rtype: numpy.ndarray
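Illustrative use, assuming 'AssayRole' and 'SampleType' have already been populated and taking an arbitrary 30% threshold::

    noisyFeatures = dataset.featureMetadata.loc[dataset.rsdSP > 30, 'Feature Name']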
"""
# Check we have Study Reference samples defined
if not ('AssayRole' in self.sampleMetadata.keys() and 'SampleType' in self.sampleMetadata.keys()):
raise ValueError('Assay Roles and Sample Types must be defined to calculate RSDs.')
if not sum(self.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference) > 1:
raise ValueError('More than one precision reference is required to calculate RSDs.')
mask = numpy.logical_and(self.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference,
self.sampleMetadata['SampleType'].values == SampleType.StudyPool)
return rsd(self._intensityData[mask & self.sampleMask])
@property
def rsdSS(self):
"""
Returns percentage :term:`relative standard deviations<RSD>` for each feature in the dataset, calculated on samples with the Assay Role :py:attr:`~nPYc.enumerations.AssayRole.Assay` and Sample Type :py:attr:`~nPYc.enumerations.SampleType.StudySample` in :py:attr:`~Dataset.sampleMetadata`.
:return: Vector of feature RSDs
:rtype: numpy.ndarray
"""
# Check we have Study Reference samples defined
if not ('AssayRole' in self.sampleMetadata.keys() and 'SampleType' in self.sampleMetadata.keys()):
raise ValueError('Assay Roles and Sample Types must be defined to calculate RSDs.')
if not sum(self.sampleMetadata['AssayRole'].values == AssayRole.Assay) > 1:
raise ValueError('More than one assay sample is required to calculate RSDs.')
mask = numpy.logical_and(self.sampleMetadata['AssayRole'].values == AssayRole.Assay,
self.sampleMetadata['SampleType'].values == SampleType.StudySample)
return rsd(self._intensityData[mask & self.sampleMask])
def _loadTargetLynxDataset(self, datapath, calibrationReportPath, keepIS=False, noiseFilled=False, keepPeakInfo=False, keepExcluded=False, **kwargs):
"""
Initialise object from peak-picked and calibrated TargetLynx data. Filter calibration samples, filter IS.
Targeted data measurements as well as calibration report information are read and mapped with pre-defined SOPs. All units are converted to pre-defined units and measurements inferior to the lowest limits of quantification or superior to the upper limits of quantification are replaced. Once the import is finished, only analysed samples are returned (no calibration samples) and only features mapped onto the pre-defined SOP and sufficiently described.
* TargetLynx
TargetLynx import operates on xml files exported *via* the 'File -> Export -> XML' menu option. Import requires a calibration_report.csv providing lower and upper limits of quantification (LLOQ, ULOQ) with the ``calibrationReportPath`` keyword argument.
Example: ``TargetedDataset(datapath, fileType='TargetLynx', sop='OxylipinMS', calibrationReportPath=calibrationReportPath, sampleTypeToProcess=['Study Sample','QC'], noiseFilled=False, onlyLLOQ=False, responseReference=None)``
* ``datapath``
Path to the TargetLynx exported `xml` file.
* ``calibrationReportPath``
Path to the calibration report `csv` following the provided report template (leave an empty value in the predefined columns to reject a compound).
* ``sampleTypeToProcess``
List of ['Study Sample','Blank','QC','Other'] for the sample types to process as defined in MassLynx. Only samples in 'sampleTypeToProcess' are returned. Calibrants should not be processed and are not returned. Most uses should only require `'Study Sample'` as quality controls are identified based on sample names by subsequent functions. `Default value is '['Study Sample','QC']'`.
* ``noiseFilled``
If True, values <LLOQ will be replaced by a concentration equivalent to the noise level in a blank. If False, values <LLOQ are replaced by :math:`-inf`. `Default value is 'False'`.
* ``onlyLLOQ``
If True, only correct values <LLOQ; if False, correct both <LLOQ and >ULOQ. `Default value is 'False'`.
* ``responseReference``
If noiseFilled=True the noise concentration needs to be calculated. Provide the 'Sample File Name' of a reference sample to establish the response, or a list of samples (one per feature). If None, the middle of the calibration will be employed. `Default value is 'None'`.
* ``keepIS``
If keepIS=True (default `False`), features marked as Internal Standards (IS) are retained.
* ``keepPeakInfo``
If keepPeakInfo=True (default `False`) adds the :py:attr:`peakInfo` dictionary to the :py:class:`TargetedDataset` and to :py:attr:`calibration`. :py:attr:`peakInfo` contains the `peakResponse`, `peakArea`, `peakConcentrationDeviation`, `peakIntegrationFlag` and `peakRT`.
* ``keepExcluded``
If keepExcluded=True (default `False`), import exclusions (:py:attr:`excludedImportSampleMetadata`, :py:attr:`excludedImportFeatureMetadata`, :py:attr:`excludedImportIntensityData` and :py:attr:`excludedImportExpectedConcentration`) are kept in the object.
:param datapath: Path to the TargetLynx exported xml file
:type datapath: str
:param calibrationReportPath: Path to the calibration report csv file
:type calibrationReportPath: str
:param keepIS: If keepIS=True (default `False`), features marked as Internal Standards (IS) are retained.
:type keepIS: bool
:param noiseFilled: If noiseFilled=True (default `False`), values <LLOQ are replaced by the noise concentration
:type noiseFilled: bool
:param keepPeakInfo: If keepPeakInfo=True (default `False`), the :py:attr:`peakInfo` dictionary is added to the object and to :py:attr:`calibration`.
:type keepPeakInfo: bool
:param keepExcluded: If keepExcluded=True (default `False`), import exclusions (:py:attr:`excludedImportSampleMetadata`, :py:attr:`excludedImportFeatureMetadata`, :py:attr:`excludedImportIntensityData` and :py:attr:`excludedImportExpectedConcentration`) are kept in the object.
:type keepExcluded: bool
:param kwargs: Additional parameters such as `sampleTypeToProcess`, `onlyLLOQ` or `responseReference`
:return: None
"""
# Load TargetLynx output file
self._readTargetLynxDataset(datapath, calibrationReportPath, **kwargs)
# Filter calibration samples
self._filterTargetLynxSamples(**kwargs)
# Filter IS features (default remove them)
if keepIS:
print('IS features are kept for processing:', sum(self.featureMetadata['IS'].values), 'IS features,', sum(~self.featureMetadata['IS'].values), 'other features.')
print('-----')
self.Attributes['Log'].append([datetime.now(), 'IS features kept for processing (%d samples). %d IS, %d other features.' % (self.noSamples, sum(self.featureMetadata['IS'].values), sum(~self.featureMetadata['IS'].values))])
else:
self._filterTargetLynxIS(**kwargs)
# Apply limits of quantification
if noiseFilled:
self._targetLynxApplyLimitsOfQuantificationNoiseFilled(**kwargs)
else:
self._applyLimitsOfQuantification(**kwargs)
# Remove peakInfo (default remove)
if keepPeakInfo:
self.Attributes['Log'].append([datetime.now(), 'TargetLynx peakInfo kept.'])
else:
delattr(self, 'peakInfo')
del self.calibration['calibPeakInfo']
# Remove import exclusions as they are not useful after import
if keepExcluded:
self.Attributes['Log'].append([datetime.now(), 'Features and Samples excluded during import have been kept.'])
else:
delattr(self, 'sampleMetadataExcluded')
delattr(self, 'featureMetadataExcluded')
delattr(self, 'intensityDataExcluded')
delattr(self, 'expectedConcentrationExcluded')
delattr(self, 'excludedFlag')
# clear **kwargs that have been copied to Attributes
for i in list(kwargs.keys()):
try:
del self.Attributes[i]
except KeyError:
pass
for j in ['keepIS','noiseFilled','keepPeakInfo','keepExcluded']:
try:
del self.Attributes[j]
except KeyError:
pass
def _readTargetLynxDataset(self, datapath, calibrationReportPath, **kwargs):
"""
Parse a TargetLynx output file (`xml`; sample metadata, feature metadata, intensity, peak area and peak response) and the matching calibration report (`csv`; limits of quantification, noise area, calibration equation parameters), then check their agreement before returning a sufficiently described dataset.
Sets :py:attr:`sampleMetadata`, :py:attr:`featureMetadata`, :py:attr:`intensityData`, :py:attr:`expectedConcentration`, :py:attr:`excludedImportSampleMetadata`, :py:attr:`excludedImportFeatureMetadata`, :py:attr:`excludedImportIntensityData` and :py:attr:`peakInfo`
:param datapath: Path to the TargetLynx export xml file
:type datapath: str
:param calibrationReportPath: Path to the calibration report csv file
:type calibrationReportPath: str
:return: None
"""
# Read XML (dumb, no checks, no metadata alteration)
sampleMetadata, featureMetadata, intensityData, expectedConcentration, peakResponse, peakArea, peakConcentrationDeviation, peakIntegrationFlag, peakRT = self.__getDatasetFromXML(datapath)
# Read calibration information from .csv (dumb, no metadata alteration, only checks for required columns)
calibReport = self.__getCalibrationFromReport(calibrationReportPath)
# Match XML, Calibration Report & SOP
sampleMetadata, featureMetadata, intensityData, expectedConcentration, excludedImportSampleMetadata, excludedImportFeatureMetadata, excludedImportIntensityData, excludedImportExpectedConcentration, excludedImportFlag, peakResponse, peakArea, peakConcentrationDeviation, peakIntegrationFlag, peakRT = self.__matchDatasetToCalibrationReport(sampleMetadata, featureMetadata, intensityData, expectedConcentration, peakResponse, peakArea, peakConcentrationDeviation, peakIntegrationFlag, peakRT, calibReport)
self.sampleMetadata = sampleMetadata
self.featureMetadata = featureMetadata
self._intensityData = intensityData
self.expectedConcentration = expectedConcentration
self.sampleMetadataExcluded = excludedImportSampleMetadata
self.featureMetadataExcluded = excludedImportFeatureMetadata
self.intensityDataExcluded = excludedImportIntensityData
self.expectedConcentrationExcluded = excludedImportExpectedConcentration
self.excludedFlag = excludedImportFlag
self.peakInfo = {'peakResponse': peakResponse, 'peakArea': peakArea, 'peakConcentrationDeviation': peakConcentrationDeviation, 'peakIntegrationFlag': peakIntegrationFlag, 'peakRT': peakRT}
# add Dataset mandatory columns
self.sampleMetadata['AssayRole'] = numpy.nan
self.sampleMetadata['SampleType'] = numpy.nan
self.sampleMetadata['Dilution'] = numpy.nan
self.sampleMetadata['Correction Batch'] = numpy.nan
self.sampleMetadata['Sample ID'] = numpy.nan
self.sampleMetadata['Exclusion Details'] = numpy.nan
#self.sampleMetadata['Batch'] = numpy.nan #already created
# clear SOP parameters not needed after __matchDatasetToCalibrationReport
AttributesToRemove = ['compoundID', 'compoundName', 'IS', 'unitFinal', 'unitCorrectionFactor', 'calibrationMethod', 'calibrationEquation', 'quantificationType']
AttributesToRemove.extend(self.Attributes['externalID'])
for k in AttributesToRemove:
del self.Attributes[k]
self.Attributes['Log'].append([datetime.now(), 'TargetLynx data file with %d samples, %d features, loaded from \'%s\', calibration report read from \'%s\'' % (self.noSamples, self.noFeatures, datapath, calibrationReportPath)])
def __getDatasetFromXML(self, path):
"""
Parse information for :py:attr:`sampleMetadata`, :py:attr:`featureMetadata`, :py:attr:`intensityData`, :py:attr:`expectedConcentration`, :py:attr:`peakResponse`, :py:attr:`peakArea`, :py:attr:`peakConcentrationDeviation`, :py:attr:`peakIntegrationFlag` and :py:attr:`peakRT` from an xml export file produced by TargetLynx (using the 'File -> Export -> XML' menu option)
:param path: Path to the TargetLynx export xml file
:type path: str
:return sampleMetadata: dataframe of sample identifiers and metadata.
:rtype: pandas.DataFrame, :math:`n` × :math:`p`
:return featureMetadata: pandas dataframe of feature identifiers and metadata.
:rtype: pandas.DataFrame, :math:`m` × :math:`q`
:return intensityData: numpy matrix of intensity measurements.
:rtype: numpy.ndarray, :math:`n` × :math:`m`
:return expectedConcentration: pandas dataframe of expected concentration for each sample/feature
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return peakResponse: pandas dataframe of analytical peak response.
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return peakArea: pandas dataframe of analytical peak area.
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return peakConcentrationDeviation: pandas dataframe of %deviation between expected and measured concentration for each sample/feature
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return peakIntegrationFlag: pandas dataframe of integration flag
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return peakRT: pandas dataframe of analytical peak Retention time.
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
"""
import xml.etree.ElementTree
inputData = xml.etree.ElementTree.ElementTree(file=path).getroot()[2][0]
nSamples = int(inputData[1].attrib['count'])
nFeatures = int(inputData[2].attrib['count'])
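# Layout of the export as used below (inferred from the indexing; element names are not assumed):
# - inputData[1] holds one entry per sample, whose attributes carry the sample metadata; each of its
#   children corresponds to a compound, and that child's first child carries the peak attributes
#   ('analconc', 'area', 'response', 'foundrt', 'pkflags', ...)
# - inputData[2] holds one entry per compound, with its name, TargetLynx ID and IS reference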
## Initialise
# sample metadata
sample_file_name = list()
sample_id = list()
sample_number = list()
sample_text = list()
sample_type = list()
sample_date = list()
sample_time = list()
sample_vial = list()
sample_instrument = list()
# feature metadata
compound_name = list()
compound_id = list()
compound_IS_id = list()
# intensity data
peak_conc = numpy.full([nSamples, nFeatures], numpy.nan)
# expected concentration
peak_expconc = numpy.full([nSamples, nFeatures], numpy.nan)
# Bonus peak info
peak_concdev = numpy.full([nSamples, nFeatures], numpy.nan)
peak_area = numpy.full([nSamples, nFeatures], numpy.nan)
peak_response = numpy.full([nSamples, nFeatures], numpy.nan)
peak_RT = numpy.full([nSamples, nFeatures], numpy.nan)
peak_integrationFlag = pandas.DataFrame(index=range(1, nSamples + 1), columns=range(1, nFeatures + 1), dtype='str')
## Read data
# sample metadata & intensity data
# iterate over samples
for i_spl in range(0, nSamples):
spl = inputData[1][i_spl]
# sample metadata
sample_file_name.append(spl.attrib['name'])
sample_id.append(int(spl.attrib['id']))
sample_number.append(int(spl.attrib['samplenumber']))
sample_text.append(spl.attrib['sampleid'])
sample_type.append(spl.attrib['type'])
sample_date.append(spl.attrib['createdate'])
sample_time.append(spl.attrib['createtime'])
sample_vial.append(spl.attrib['vial'])
sample_instrument.append(spl.attrib['instrument'])
# iterate over compounds
for i_cpd in range(0, nFeatures):
cpdData = spl[i_cpd][0]
# intensity data
# for whatever reason, TargetLynx sometimes reports no peak as '0.0000' and sometimes as ''
try:
peak_conc[i_spl, i_cpd] = float(cpdData.attrib['analconc'])
except ValueError:
peak_conc[i_spl, i_cpd] = 0.0
# more peak info
peak_area[i_spl, i_cpd] = float(cpdData.attrib['area'])
peak_expconc[i_spl, i_cpd] = float(spl[i_cpd].attrib['stdconc'])
peak_concdev[i_spl, i_cpd] = float(cpdData.attrib['conccalc'])
peak_response[i_spl, i_cpd] = float(cpdData.attrib['response'])
peak_RT[i_spl, i_cpd] = float(cpdData.attrib['foundrt'])
peak_integrationFlag.iloc[i_spl, i_cpd] = cpdData.attrib['pkflags']
# feature metadata
for j_cpd in range(0, nFeatures):
cpd_calib = inputData[2][j_cpd]
compound_name.append(cpd_calib.attrib['name'])
compound_id.append(int(cpd_calib.attrib['id']))
compound_IS_id.append(cpd_calib[0].attrib['ref']) # not int() as some IS have ref=''
## Output Dataframe
# sampleMetadata
sampleMetadata = dict()
sampleMetadata['Sample File Name'] = sample_file_name
sampleMetadata['Sample Base Name'] = sample_file_name
sampleMetadata['TargetLynx Sample ID'] = sample_id
sampleMetadata['MassLynx Row ID'] = sample_number
sampleMetadata['Sample Name'] = sample_text
sampleMetadata['Sample Type'] = sample_type
sampleMetadata['Acqu Date'] = sample_date
sampleMetadata['Acqu Time'] = sample_time
sampleMetadata['Vial'] = sample_vial
sampleMetadata['Instrument'] = sample_instrument
# featureMetadata
featureMetadata = dict()
featureMetadata['Feature Name'] = compound_name
featureMetadata['TargetLynx Feature ID'] = compound_id
featureMetadata['TargetLynx IS ID'] = compound_IS_id
# intensityData
intensityData = peak_conc
# expectedConcentration
peak_expconc[peak_expconc == 0] = numpy.nan # remove 0 and replace them by nan
expectedConcentration = pandas.DataFrame(peak_expconc)
# Other peak info
peakResponse = pandas.DataFrame(peak_response)
peakArea = pandas.DataFrame(peak_area)
peakConcentrationDeviation = pandas.DataFrame(peak_concdev)
peakIntegrationFlag = peak_integrationFlag # already dataframe
peakIntegrationFlag.reset_index(drop=True, inplace=True)
peakRT = pandas.DataFrame(peak_RT)
# Convert to DataFrames
featureMetadata = pandas.concat([pandas.DataFrame(featureMetadata[c], columns=[c]) for c in featureMetadata.keys()], axis=1, sort=False)
sampleMetadata = pandas.concat([pandas.DataFrame(sampleMetadata[c], columns=[c]) for c in sampleMetadata.keys()], axis=1, sort=False)
expectedConcentration.columns = featureMetadata['Feature Name'].values.tolist()
peakIntegrationFlag.columns = featureMetadata['Feature Name'].values.tolist()
peakResponse.columns = featureMetadata['Feature Name'].values.tolist()
peakArea.columns = featureMetadata['Feature Name'].values.tolist()
peakConcentrationDeviation.columns = featureMetadata['Feature Name'].values.tolist()
peakRT.columns = featureMetadata['Feature Name'].values.tolist()
sampleMetadata['Metadata Available'] = False
return sampleMetadata, featureMetadata, intensityData, expectedConcentration, peakResponse, peakArea, peakConcentrationDeviation, peakIntegrationFlag, peakRT
def __getCalibrationFromReport(self, path):
"""
Read the calibration information from a calibration report `csv` following the provided report template.
The following columns are required (leave an empty value to reject a compound):
* Compound
The compound name, identical to the one employed in the SOP `json` file.
* TargetLynx ID
The compound TargetLynx ID, identical to the one employed in the SOP `json` file.
* LLOQ
Lowest limit of quantification concentration, in the same unit as indicated in TargetLynx.
* ULOQ
Upper limit of quantification concentration, in the same unit as indicated in TargetLynx.
The following columns are expected by :py:meth:`~TargetedDataset._targetLynxApplyLimitsOfQuantificationNoiseFilled`:
* Noise (area)
Area integrated in a blank sample at the same retention time as the compound of interest (if left empty noise concentration calculation cannot take place).
* a
:math:`a` coefficient in the calibration equation (if left empty noise concentration calculation cannot take place).
* b
:math:`b` coefficient in the calibration equation (if left empty noise concentration calculation cannot take place).
The following columns are recommended:
* Cpd Info
Additional information relating to the compound (can be left empty).
* r
:math:`r` goodness of fit measure for the calibration equation (can be left empty).
* r2
:math:`r^2` goodness of fit measure for the calibration equation (can be left empty).
:param path: Path to the calibration report csv file.
:type path: str
:return calibReport: pandas dataframe of feature identifiers and calibration information.
:rtype: pandas.DataFrame, :math:`m` × :math:`r`
:raises LookupError: if the expected columns are absent from the csv file.
"""
calibReport = pandas.read_csv(path)
# check minimum number of columns
expectedCol = ['Compound', 'TargetLynx ID', 'LLOQ', 'ULOQ']
foundCol = calibReport.columns.values.tolist()
# if the set is not empty, some columns are missing from the csv
if set(expectedCol) - set(foundCol) != set():
raise LookupError('Calibration report (' + os.path.split(path)[1] + ') does not contain the following expected column(s): ' + str(list(set(expectedCol) - set(foundCol))))
return calibReport
def __matchDatasetToCalibrationReport(self, sampleMetadata, featureMetadata, intensityData, expectedConcentration, peakResponse, peakArea, peakConcentrationDeviation, peakIntegrationFlag, peakRT, calibReport):
"""
Check the agreement of Feature IDs and Feature Names across all inputs (TargetLynx export `xml`, calibration report `csv` and SOP `json`).
First map the calibration report and SOP information, which raise errors in case of disagreement.
This block is then mapped to the TargetLynx `featureMetadata` (on compound ID) and overrides the TargetLynx information (raise warnings).
Features not matched are appended to an `excludedSampleMetadata`, `excludedFeatureMetadata` and `excludedIntensityData` (excluded `peakResponse`, `peakArea`, `peakConcentrationDeviation`, `peakIntegrationFlag` and `peakRT` are discarded).
Additional information is added to the `sampleMetadata` (chromatography, ionisation, acquired time, run order).
Apply the unitCorrectionFactor to the `intensityData`, `LLOQ` and `ULOQ` concentrations and `expectedConcentration`.
:param sampleMetadata: dataframe of sample identifiers and metadata.
:type sampleMetadata: pandas.DataFrame, :math:`n` × :math:`p`
:param featureMetadata: pandas dataframe of feature identifiers and metadata.
:type featureMetadata: pandas.DataFrame, :math:`m` × :math:`q`
:param intensityData: numpy matrix of intensity measurements.
:type intensityData: numpy.ndarray, :math:`n` × :math:`m`
:param expectedConcentration: pandas dataframe of analytical peak expected concentrations.
:type expectedConcentration: pandas.DataFrame, :math:`n` × :math:`m`
:param peakResponse: pandas dataframe of analytical peak response.
:type peakResponse: pandas.DataFrame, :math:`n` × :math:`m`
:param peakArea: pandas dataframe of analytical peak area.
:type peakArea: pandas.DataFrame, :math:`n` × :math:`m`
:param peakConcentrationDeviation: pandas dataframe of analytical peak concentration deviation.
:type peakConcentrationDeviation: pandas.DataFrame, :math:`n` × :math:`m`
:param peakIntegrationFlag: pandas dataFrame of analytical peak integration flags.
:type peakIntegrationFlag: pandas.DataFrame, :math:`n` × :math:`m`
:param peakRT: pandas dataframe of analytical Retention time.
:type peakRT: pandas.DataFrame, :math:`n` × :math:`m`
:param calibReport: pandas dataframe of feature identifiers and calibration information.
:type calibReport: pandas.DataFrame, :math:`m` × :math:`r`
:return sampleMetadata: dataframe of sample identifiers and metadata.
:rtype: pandas.DataFrame, :math:`n` × :math:`p`
:return finalFeatureMetadata: pandas dataframe of feature identifiers and metadata.
:rtype: pandas.DataFrame, :math:`m` × :math:`q`
:return finalIntensityData: numpy matrix of intensity measurements.
:rtype: numpy.ndarray, :math:`n` × :math:`m`
:return finalExpectedConcentration: pandas dataframe of expected concentration for each sample/feature
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return excludedSampleMetadata: list of pandas dataframe of excluded sample measurements for excluded features.
:rtype: list
:return excludedFeatureMetadata: list of pandas dataframe of excluded feature identifiers and metadata.
:rtype: list
:return excludedIntensityData: list of matrix of intensity measurements for excluded features.
:rtype: list
:return excludedExpectedConcentration: list of pandas dataframe of excluded expected concentration.
:rtype: list
:return excludedFlag: list of str of exclusion type ('Samples' or 'Features').
:rtype: list
:return finalPeakResponse: pandas dataframe of analytical peak response.
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return finalPeakArea: pandas dataframe of analytical peak area.
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return finalPeakConcentrationDeviation: pandas dataframe of %deviation between expected and measured concentration for each sample/feature
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return finalPeakIntegrationFlag: pandas dataframe of integration flag
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:return finalPeakRT: pandas dataframe of analytical peak Retention time
:rtype: pandas.DataFrame, :math:`n` × :math:`m`
:raises ValueError: if the shape of sampleMetadata, featureMetadata or intensityData shape do not match.
:raises ValueError: if features in the calibration report and in the SOP differ (number of compounds, compound ID or compound names).
:raises ValueError: if in the SOP 'quantificationType', 'calibrationMethod' or 'IS' are mismatched.
"""
import warnings
from datetime import datetime
## sampleMetadata, featureMetadata & intensityData should by construction have the same size
if sampleMetadata.shape[0] != intensityData.shape[0]:
raise ValueError('sampleMetadata and intensityData number of samples differ')
if featureMetadata.shape[0] != intensityData.shape[1]:
raise ValueError('featureMetadata and intensityData number of compounds differ')
if intensityData.shape != peakResponse.shape:
raise ValueError('intensityData and peakResponse number of compounds/samples differ')
if intensityData.shape != peakArea.shape:
raise ValueError('intensityData and peakArea number of compounds/samples differ')
if intensityData.shape != expectedConcentration.shape:
raise ValueError('intensityData and expectedConcentration number of compounds/samples differ')
if intensityData.shape != peakConcentrationDeviation.shape:
raise ValueError('intensityData and peakConcentrationDeviation number of compounds/samples differ')
if intensityData.shape != peakIntegrationFlag.shape:
raise ValueError('intensityData and peakIntegrationFlag number of compounds/samples differ')
if intensityData.shape != peakRT.shape:
raise ValueError('intensityData and peakRT number of compounds/samples differ')
# initialise excluded import data
excludedSampleMetadata = []
excludedFeatureMetadata = []
excludedIntensityData = []
excludedExpectedConcentration = []
excludedFlag = []
## SOP is used as 'Truth', if calibReport does not match, it's a problem (Error)
## Then if featureMetadata does not match SOP/calibReport, use SOP as reference (message conflict)
## Match SOP & calibReport
# Load SOP
# calibrationMethod is 'backcalculatedIS' (use response), 'noIS' (use area), or 'noCalibration' (no corrections at all)
# quantificationType is:
# 'IS' (expects calibrationMethod=noIS)
# 'QuantOwnLabeledAnalogue' (would expect 'backcalculatedIS' but could use 'noIS')
# 'QuantAltLabeledAnalogue' (would expect 'backcalculatedIS' but could use 'noIS')
# 'Monitored' (which expects 'noCalibration')
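# An illustrative SOP entry for one feature could provide (values hypothetical):
#   compoundID='1', compoundName='Compound X', IS='False', unitFinal='µM', unitCorrectionFactor='1',
#   calibrationMethod='backcalculatedIS', quantificationType='QuantOwnLabeledAnalogue'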
SOPColumnsToLoad = ['compoundID', 'compoundName', 'IS', 'unitFinal', 'unitCorrectionFactor', 'calibrationMethod', 'calibrationEquation', 'quantificationType']
SOPColumnsToLoad.extend(self.Attributes['externalID'])
SOPFeatureMetadata = pandas.DataFrame.from_dict(dict((k, self.Attributes[k]) for k in SOPColumnsToLoad), orient='columns')
SOPFeatureMetadata['compoundID'] = pandas.to_numeric(SOPFeatureMetadata['compoundID'])
SOPFeatureMetadata['unitCorrectionFactor'] = pandas.to_numeric(SOPFeatureMetadata['unitCorrectionFactor'])
SOPFeatureMetadata['IS'] = SOPFeatureMetadata['IS'].map({'True': True, 'False': False})
SOPFeatureMetadata['Unit'] = SOPFeatureMetadata['unitFinal']
SOPFeatureMetadata.drop('unitFinal', inplace=True, axis=1)
# convert quantificationType from str to enum
if 'quantificationType' in SOPFeatureMetadata.columns:
for qType in QuantificationType:
SOPFeatureMetadata.loc[SOPFeatureMetadata['quantificationType'].values == qType.name, 'quantificationType'] = qType
# convert calibrationMethod from str to enum
if 'calibrationMethod' in SOPFeatureMetadata.columns:
for cMethod in CalibrationMethod:
SOPFeatureMetadata.loc[SOPFeatureMetadata['calibrationMethod'].values == cMethod.name, 'calibrationMethod'] = cMethod
# check that all quantificationType='IS' are also flagged as IS
# (both have same number of feature + intersection has same number of feature as one of them)
if (sum((SOPFeatureMetadata['quantificationType'] == QuantificationType.IS)) != sum(SOPFeatureMetadata['IS'])) | (sum((SOPFeatureMetadata['quantificationType'] == QuantificationType.IS) & SOPFeatureMetadata['IS']) != sum(SOPFeatureMetadata['IS'])):
raise ValueError('Check SOP file, features with quantificationType=\'IS\' must have been flagged as IS=\'True\'')
# check that all quantificationType='Monitored' have a calibrationMethod='noCalibration'
# (both have same number of feature + intersection has same number of feature as one of them)
if (sum((SOPFeatureMetadata['quantificationType'] == QuantificationType.Monitored)) != (sum(SOPFeatureMetadata['calibrationMethod'] == CalibrationMethod.noCalibration))) | (sum((SOPFeatureMetadata['quantificationType'] == QuantificationType.Monitored) & (SOPFeatureMetadata['calibrationMethod'] == CalibrationMethod.noCalibration)) != sum(SOPFeatureMetadata['quantificationType'] == QuantificationType.Monitored)):
raise ValueError('Check SOP file, features with quantificationType=\'Monitored\' must have a calibrationMethod=\'noCalibration\'\n quantificationType are:\n\'IS\' (expects calibrationMethod=noIS)\n\'QuantOwnLabeledAnalogue\' (would expect \'backcalculatedIS\' but could use \'noIS\' or \'otherCalibration\')\n\'QuantAltLabeledAnalogue\' (would expect \'backcalculatedIS\' but could use \'noIS\' or \'otherCalibration\')\n\'QuantOther\' (can take any CalibrationMethod)\n\'Monitored\' (which expects \'noCalibration\')')
# check number of compounds in SOP & calibReport
if SOPFeatureMetadata.shape[0] != calibReport.shape[0]:
raise ValueError('SOP and Calibration Report number of compounds differ')
featureCalibSOP = pandas.merge(left=SOPFeatureMetadata, right=calibReport, how='inner', left_on='compoundName', right_on='Compound', sort=False)
featureCalibSOP.drop('TargetLynx ID', inplace=True, axis=1)
# check we still have the same number of features (inner join)
if featureCalibSOP.shape[0] != SOPFeatureMetadata.shape[0]:
raise ValueError('SOP and Calibration Report compounds differ')
# check compound names match in SOP and calibReport after join
if sum(featureCalibSOP['compoundName'] != featureCalibSOP['Compound']) != 0:
raise ValueError('SOP and Calibration Report compounds names differ: ' + str(featureCalibSOP.loc[(featureCalibSOP['compoundName'] != featureCalibSOP['Compound']), ['compoundName', 'Compound']].values.tolist()))
featureCalibSOP.drop('Compound', inplace=True, axis=1)
## Match calibSOP & featureMetadata
# left join to keep feature order and limit to features in XML
finalFeatureMetadata = pandas.merge(left=featureMetadata, right=featureCalibSOP, how='left', left_on='TargetLynx Feature ID', right_on='compoundID', sort=False)
# limit to compounds present in the SOP (no report of SOP compounds not in XML)
if finalFeatureMetadata['compoundID'].isnull().sum() != 0:
warnings.warn("Warning: Only " + str(finalFeatureMetadata['compoundID'].notnull().sum()) + " features shared across the SOP/Calibration report (" + str(featureCalibSOP.shape[0]) + " total) and the TargetLynx output file (" + str(featureMetadata.shape[0]) + " total). " + str(finalFeatureMetadata['compoundID'].isnull().sum()) + " features discarded from the TargetLynx output file.")
# filter out unavailable features
unavailableFeatVect = finalFeatureMetadata['compoundID'].isnull().values
excludedSampleMetadata.append(sampleMetadata)
excludedFeatureMetadata.append(finalFeatureMetadata.iloc[unavailableFeatVect, :])
excludedIntensityData.append(intensityData[:, unavailableFeatVect])
excludedExpectedConcentration.append(expectedConcentration.iloc[:, unavailableFeatVect])
excludedFlag.append('Features')
finalFeatureMetadata = finalFeatureMetadata.iloc[~unavailableFeatVect, :]
finalIntensityData = intensityData[:, ~unavailableFeatVect]
finalExpectedConcentration = expectedConcentration.iloc[:, ~unavailableFeatVect]
finalPeakResponse = peakResponse.iloc[:, ~unavailableFeatVect]
finalPeakArea = peakArea.iloc[:, ~unavailableFeatVect]
finalPeakConcentrationDeviation = peakConcentrationDeviation.iloc[:, ~unavailableFeatVect]
finalPeakIntegrationFlag = peakIntegrationFlag.iloc[:, ~unavailableFeatVect]
finalPeakRT = peakRT.iloc[:, ~unavailableFeatVect]
# remove duplicate col
finalFeatureMetadata.drop('compoundID', inplace=True, axis=1)
else:
finalIntensityData = intensityData
finalExpectedConcentration = expectedConcentration
finalPeakResponse = peakResponse
finalPeakArea = peakArea
finalPeakConcentrationDeviation = peakConcentrationDeviation
finalPeakIntegrationFlag = peakIntegrationFlag
finalPeakRT = peakRT
# remove duplicate col
finalFeatureMetadata.drop('compoundID', inplace=True, axis=1)
# check names, keep SOP value, report differences
if sum(finalFeatureMetadata['Feature Name'] != finalFeatureMetadata['compoundName']) != 0:
warnings.warn('TargetLynx feature names & SOP/Calibration Report compounds names differ; SOP names will be used: ' + str(finalFeatureMetadata.loc[(finalFeatureMetadata['Feature Name'] != finalFeatureMetadata['compoundName']), ['Feature Name','compoundName']].values.tolist()))
finalFeatureMetadata['Feature Name'] = finalFeatureMetadata['compoundName']
finalExpectedConcentration.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalPeakResponse.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalPeakArea.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalPeakConcentrationDeviation.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalPeakIntegrationFlag.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalPeakRT.columns = finalFeatureMetadata['Feature Name'].values.tolist()
finalFeatureMetadata.drop('compoundName', inplace=True, axis=1)
## Add information to the sampleMetada
finalSampleMetadata = copy.deepcopy(sampleMetadata)
# Add chromatography
finalSampleMetadata = finalSampleMetadata.join(pandas.DataFrame([self.Attributes['chromatography']] * finalSampleMetadata.shape[0], columns=['Chromatography']))
# Add ionisation
finalSampleMetadata = finalSampleMetadata.join(pandas.DataFrame([self.Attributes['ionisation']] * finalSampleMetadata.shape[0], columns=['Ionisation']))
# Add batch, default is 1
finalSampleMetadata = finalSampleMetadata.join(pandas.DataFrame([1] * finalSampleMetadata.shape[0], columns=['Batch']))
# Process Sample Type
finalSampleMetadata['Calibrant'] = finalSampleMetadata['Sample Type'] == 'Standard'
finalSampleMetadata['Study Sample'] = finalSampleMetadata['Sample Type'] == 'Analyte'
finalSampleMetadata['Blank'] = finalSampleMetadata['Sample Type'] == 'Blank'
finalSampleMetadata['QC'] = finalSampleMetadata['Sample Type'] == 'QC'
# unused Sample Types
# sampleMetadata['Solvent'] = sampleMetadata['Sample Type'] == 'Solvent'
# sampleMetadata['Recovery'] = sampleMetadata['Sample Type'] == 'Recovery'
# sampleMetadata['Donor'] = sampleMetadata['Sample Type'] == 'Donor'
# sampleMetadata['Receptor'] = sampleMetadata['Sample Type'] == 'Receptor'
finalSampleMetadata['Other'] = (~finalSampleMetadata['Calibrant'] & ~finalSampleMetadata['Study Sample'] & ~finalSampleMetadata['Blank'] & ~finalSampleMetadata['QC']) # & ~sampleMetadata['Solvent'] & ~sampleMetadata['Recovery'] & ~sampleMetadata['Donor'] & ~sampleMetadata['Receptor']
# Add Acquired Time
finalSampleMetadata['Acquired Time'] = numpy.nan
for i in range(finalSampleMetadata.shape[0]):
try:
finalSampleMetadata.loc[i, 'Acquired Time'] = datetime.strptime(str(finalSampleMetadata.loc[i, 'Acqu Date']) + " " + str(finalSampleMetadata.loc[i, 'Acqu Time']),'%d-%b-%y %H:%M:%S')
except ValueError:
pass
finalSampleMetadata['Acquired Time'] = pandas.to_datetime(finalSampleMetadata['Acquired Time'])
# Add Run Order
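# (double argsort: the first sort ranks samples by acquisition time, the second inverts that permutation,
#  so 'Run Order' holds each sample's position in acquisition order, 0 being the first acquired)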
finalSampleMetadata['Order'] = finalSampleMetadata.sort_values(by='Acquired Time').index
finalSampleMetadata['Run Order'] = finalSampleMetadata.sort_values(by='Order').index
finalSampleMetadata.drop('Order', axis=1, inplace=True)
# Initialise the Batch to 1
finalSampleMetadata['Batch'] = [1]*finalSampleMetadata.shape[0]
## Apply unitCorrectionFactor
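# e.g. a hypothetical unitCorrectionFactor of 1000 would rescale concentrations, LLOQ and ULOQ from
# mmol/L (TargetLynx) to µmol/L (final unit); the actual factor and units are taken from the SOP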
finalFeatureMetadata['LLOQ'] = finalFeatureMetadata['LLOQ'] * finalFeatureMetadata['unitCorrectionFactor'] # NaN will be kept
finalFeatureMetadata['ULOQ'] = finalFeatureMetadata['ULOQ'] * finalFeatureMetadata['unitCorrectionFactor']
finalIntensityData = finalIntensityData * finalFeatureMetadata['unitCorrectionFactor'].values
finalExpectedConcentration = finalExpectedConcentration * finalFeatureMetadata['unitCorrectionFactor'].values
## Summary
print('TargetLynx output, Calibration report and SOP information matched:')
print('Targeted Method: ' + self.Attributes['methodName'])
print(str(finalSampleMetadata.shape[0]) + ' samples (' + str(sum(finalSampleMetadata['Calibrant'])) + ' calibration points, ' + str(sum(finalSampleMetadata['Study Sample'])) + ' study samples)')
print(str(finalFeatureMetadata.shape[0]) + ' features (' + str(sum(finalFeatureMetadata['IS'])) + ' IS, ' + str(sum(finalFeatureMetadata['quantificationType'] == QuantificationType.QuantOwnLabeledAnalogue)) + ' quantified and validated with own labeled analogue, ' + str(sum(finalFeatureMetadata['quantificationType'] == QuantificationType.QuantAltLabeledAnalogue)) + ' quantified and validated with alternative labeled analogue, ' + str(sum(finalFeatureMetadata['quantificationType'] == QuantificationType.QuantOther)) + ' other quantification, ' + str(sum(finalFeatureMetadata['quantificationType'] == QuantificationType.Monitored)) + ' monitored for relative information)')
if len(excludedFeatureMetadata) != 0:
print(str(excludedFeatureMetadata[0].shape[0]) + ' features excluded as missing from the SOP')
print('All concentrations converted to final units')
print('-----')
return finalSampleMetadata, finalFeatureMetadata, finalIntensityData, finalExpectedConcentration, excludedSampleMetadata, excludedFeatureMetadata, excludedIntensityData, excludedExpectedConcentration, excludedFlag, finalPeakResponse, finalPeakArea, finalPeakConcentrationDeviation, finalPeakIntegrationFlag, finalPeakRT
def _filterTargetLynxSamples(self, sampleTypeToProcess=['Study Sample', 'QC'], **kwargs):
"""
Isolate 'Calibrant' samples ('Sample Type' == 'Standard' in MassLynx) and create the :py:attr:`calibration` dictionary, following :py:meth:`~TargetedDataset._readTargetLynxDataset`.
Exclude samples based on their MassLynx 'Sample Type'. Only the types passed in `sampleTypeToProcess` are kept. Values are 'Study Sample' ('Analyte' in MassLynx), 'Blank', 'QC' or 'Other' (for all other MassLynx entries).
:param sampleTypeToProcess: list of ['Study Sample','Blank','QC','Other'] for the sample types to keep.
:type sampleTypeToProcess: list of str
:return: None
:raises ValueError: if 'sampleTypeToProcess' is not recognised.
:raises AttributeError: if the excludedImport lists do not exist.
"""
# check inputs
if set(sampleTypeToProcess) - set(['Study Sample', 'Blank', 'QC', 'Other']) != set():
raise ValueError('sampleTypeToProcess ' + str(
set(sampleTypeToProcess) - set(['Study Sample', 'Blank', 'QC', 'Other'])) + ' is not recognised')
# check excluded exist
if((not hasattr(self,'sampleMetadataExcluded'))|(not hasattr(self,'featureMetadataExcluded'))|(not hasattr(self,'intensityDataExcluded'))|(not hasattr(self,'expectedConcentrationExcluded'))|(not hasattr(self,'excludedFlag'))):
raise AttributeError('sampleMetadataExcluded, featureMetadataExcluded, intensityDataExcluded, expectedConcentrationExcluded or excludedFlag have not been previously initialised')
sampleMetadata = copy.deepcopy(self.sampleMetadata)
featureMetadata = copy.deepcopy(self.featureMetadata)
intensityData = copy.deepcopy(self._intensityData)
expectedConcentration = copy.deepcopy(self.expectedConcentration)
excludedImportSampleMetadata = copy.deepcopy(self.sampleMetadataExcluded)
excludedImportFeatureMetadata = copy.deepcopy(self.featureMetadataExcluded)
excludedImportIntensityData = copy.deepcopy(self.intensityDataExcluded)
excludedImportExpectedConcentration = copy.deepcopy(self.expectedConcentrationExcluded)
excludedImportFlag = copy.deepcopy(self.excludedFlag)
peakInfo = copy.deepcopy(self.peakInfo)
# Calibration information
calibFeatureMetadata = featureMetadata
calibSampleMetadata = sampleMetadata.loc[sampleMetadata['Calibrant'].values, :]
calibIntensityData = intensityData[sampleMetadata['Calibrant'].values, :]
calibExpectedConcentration = expectedConcentration.loc[sampleMetadata['Calibrant'].values, :]
calibPeakResponse = peakInfo['peakResponse'].loc[sampleMetadata['Calibrant'].values, :]
calibPeakArea = peakInfo['peakArea'].loc[sampleMetadata['Calibrant'].values, :]
calibPeakConcentrationDeviation = peakInfo['peakConcentrationDeviation'].loc[sampleMetadata['Calibrant'].values, :]
calibPeakIntegrationFlag = peakInfo['peakIntegrationFlag'].loc[sampleMetadata['Calibrant'].values, :]
calibPeakRT = peakInfo['peakRT'].loc[sampleMetadata['Calibrant'].values, :]
calibPeakInfo = {'peakResponse': calibPeakResponse, 'peakArea': calibPeakArea, 'peakConcentrationDeviation': calibPeakConcentrationDeviation, 'peakIntegrationFlag': calibPeakIntegrationFlag, 'peakRT': calibPeakRT}
calibration = {'calibSampleMetadata': calibSampleMetadata, 'calibFeatureMetadata': calibFeatureMetadata, 'calibIntensityData': calibIntensityData, 'calibExpectedConcentration': calibExpectedConcentration, 'calibPeakInfo': calibPeakInfo}
# Samples to keep
samplesToProcess = [False] * sampleMetadata.shape[0]
for i in sampleTypeToProcess:
samplesToProcess = (samplesToProcess | sampleMetadata[i]).values
# Filter
tmpSampleMetadata = sampleMetadata.loc[samplesToProcess, :]
tmpIntensityData = intensityData[samplesToProcess, :]
tmpExpectedConcentration = expectedConcentration.loc[samplesToProcess, :]
tmpPeakResponse = peakInfo['peakResponse'].loc[samplesToProcess, :]
tmpPeakArea = peakInfo['peakArea'].loc[samplesToProcess, :]
tmpPeakConcentrationDeviation = peakInfo['peakConcentrationDeviation'].loc[samplesToProcess, :]
tmpPeakIntegrationFlag = peakInfo['peakIntegrationFlag'].loc[samplesToProcess, :]
tmpPeakRT = peakInfo['peakRT'].loc[samplesToProcess, :]
tmpPeakInfo = {'peakResponse': tmpPeakResponse, 'peakArea': tmpPeakArea, 'peakConcentrationDeviation': tmpPeakConcentrationDeviation, 'peakIntegrationFlag': tmpPeakIntegrationFlag, 'peakRT': tmpPeakRT}
# Samples to exclude
samplesToExclude = ~samplesToProcess & ~sampleMetadata['Calibrant'].values # no need to exclude calibrant
if sum(samplesToExclude) != 0:
excludedImportSampleMetadata.append(sampleMetadata.loc[samplesToExclude, :])
excludedImportFeatureMetadata.append(featureMetadata)
excludedImportIntensityData.append(intensityData[samplesToExclude, :])
excludedImportExpectedConcentration.append(expectedConcentration.loc[samplesToExclude, :])
excludedImportFlag.append('Samples')
# Clean columns
tmpSampleMetadata.reset_index(drop=True, inplace=True)
tmpSampleMetadata = tmpSampleMetadata.drop(['Calibrant', 'Study Sample', 'Blank', 'QC', 'Other'], axis=1)
tmpExpectedConcentration.reset_index(drop=True, inplace=True)
# Output
self.sampleMetadata = tmpSampleMetadata
self.featureMetadata = featureMetadata
self._intensityData = tmpIntensityData
self.expectedConcentration = tmpExpectedConcentration
self.sampleMetadataExcluded = excludedImportSampleMetadata
self.featureMetadataExcluded = excludedImportFeatureMetadata
self.intensityDataExcluded = excludedImportIntensityData
self.expectedConcentrationExcluded = excludedImportExpectedConcentration
self.excludedFlag = excludedImportFlag
self.peakInfo = tmpPeakInfo
self.calibration = calibration
# log the modifications
print(sampleTypeToProcess, 'samples are kept for processing')
print('-----')
self.Attributes['Log'].append([datetime.now(), '%s samples kept for processing (%d samples, %d features). %d calibration samples filtered. %d samples excluded.' % (str(sampleTypeToProcess), self.noSamples, self.noFeatures, self.calibration['calibSampleMetadata'].shape[0], sum(samplesToExclude))])
def _filterTargetLynxIS(self, **kwargs):
"""
Filter out Internal Standard (IS) features and add them to excludedImportSampleMetadata, excludedImportFeatureMetadata, excludedImportIntensityData and excludedImportExpectedConcentration.
IS filtered from self.calibration are not saved.
:return: None
:raises AttributeError: if the excludedImport lists do not exist.
:raises AttributeError: if the calibration dictionary does not exist.
"""
# check excludedImport exist (ensures functions are run in the right order)
if ((not hasattr(self, 'sampleMetadataExcluded')) | (not hasattr(self, 'featureMetadataExcluded')) | (not hasattr(self, 'intensityDataExcluded')) | (not hasattr(self, 'expectedConcentrationExcluded')) | (not hasattr(self, 'excludedFlag'))):
raise AttributeError('sampleMetadataExcluded, featureMetadataExcluded, intensityDataExcluded, expectedConcentrationExcluded or excludedFlag have not been previously initialised')
# check calibration dictionary exist (ensures functions are run in the right order)
if not hasattr(self, 'calibration'):
raise AttributeError('calibration dictionary has not been previously initialised')
sampleMetadata = copy.deepcopy(self.sampleMetadata)
featureMetadata = copy.deepcopy(self.featureMetadata)
intensityData = copy.deepcopy(self._intensityData)
expectedConcentration = copy.deepcopy(self.expectedConcentration)
excludedImportSampleMetadata = copy.deepcopy(self.sampleMetadataExcluded)
excludedImportFeatureMetadata = copy.deepcopy(self.featureMetadataExcluded)
excludedImportIntensityData = copy.deepcopy(self.intensityDataExcluded)
excludedImportExpectedConcentration = copy.deepcopy(self.expectedConcentrationExcluded)
excludedImportFlag = copy.deepcopy(self.excludedFlag)
calibration = copy.deepcopy(self.calibration)
peakInfo = copy.deepcopy(self.peakInfo)
# Feature to keep
keptFeat = ~featureMetadata['IS'].values.astype(bool)
# Filter
tmpFeatureMetadata = featureMetadata.loc[keptFeat, :]
tmpIntensityData = intensityData[:, keptFeat]
tmpExpectedConcentration = expectedConcentration.loc[:, keptFeat]
tmpCalibFeatureMetadata = calibration['calibFeatureMetadata'].loc[keptFeat, :]
tmpCalibIntensityData = calibration['calibIntensityData'][:, keptFeat]
tmpCalibExpectedConcentration = calibration['calibExpectedConcentration'].loc[:, keptFeat]
tmpCalibPeakResponse = calibration['calibPeakInfo']['peakResponse'].loc[:, keptFeat]
tmpCalibPeakArea = calibration['calibPeakInfo']['peakArea'].loc[:, keptFeat]
tmpCalibPeakConcentrationDeviation = calibration['calibPeakInfo']['peakConcentrationDeviation'].loc[:, keptFeat]
tmpCalibPeakIntegrationFlag = calibration['calibPeakInfo']['peakIntegrationFlag'].loc[:, keptFeat]
tmpCalibPeakRT = calibration['calibPeakInfo']['peakRT'].loc[:, keptFeat]
tmpCalibPeakInfo = {'peakResponse': tmpCalibPeakResponse, 'peakArea': tmpCalibPeakArea, 'peakConcentrationDeviation': tmpCalibPeakConcentrationDeviation, 'peakIntegrationFlag': tmpCalibPeakIntegrationFlag, 'peakRT': tmpCalibPeakRT}
tmpCalibration = {'calibSampleMetadata': calibration['calibSampleMetadata'], 'calibFeatureMetadata': tmpCalibFeatureMetadata, 'calibIntensityData': tmpCalibIntensityData, 'calibExpectedConcentration': tmpCalibExpectedConcentration, 'calibPeakInfo': tmpCalibPeakInfo}
tmpPeakResponse = peakInfo['peakResponse'].loc[:, keptFeat]
tmpPeakArea = peakInfo['peakArea'].loc[:, keptFeat]
tmpPeakConcentrationDeviation = peakInfo['peakConcentrationDeviation'].loc[:, keptFeat]
tmpPeakIntegrationFlag = peakInfo['peakIntegrationFlag'].loc[:, keptFeat]
tmpPeakRT = peakInfo['peakRT'].loc[:, keptFeat]
tmpPeakInfo = {'peakResponse': tmpPeakResponse, 'peakArea': tmpPeakArea, 'peakConcentrationDeviation': tmpPeakConcentrationDeviation, 'peakIntegrationFlag': tmpPeakIntegrationFlag, 'peakRT': tmpPeakRT}
# Features to exclude
ISFeat = ~keptFeat
if sum(ISFeat) != 0:
excludedImportSampleMetadata.append(sampleMetadata)
excludedImportFeatureMetadata.append(featureMetadata.loc[ISFeat, :])
excludedImportIntensityData.append(intensityData[:, ISFeat])
excludedImportExpectedConcentration.append(expectedConcentration.loc[:, ISFeat])
excludedImportFlag.append('Features')
# Clean columns
tmpFeatureMetadata.reset_index(drop=True, inplace=True)
tmpCalibration['calibFeatureMetadata'].reset_index(drop=True, inplace=True)
tmpFeatureMetadata = tmpFeatureMetadata.drop(['IS', 'TargetLynx IS ID'], axis=1)
# Output
self.featureMetadata = tmpFeatureMetadata
self._intensityData = tmpIntensityData
self.expectedConcentration = tmpExpectedConcentration
self.sampleMetadataExcluded = excludedImportSampleMetadata
self.featureMetadataExcluded = excludedImportFeatureMetadata
self.intensityDataExcluded = excludedImportIntensityData
self.expectedConcentrationExcluded = excludedImportExpectedConcentration
self.excludedFlag = excludedImportFlag
self.calibration = tmpCalibration
self.peakInfo = tmpPeakInfo
# log the modifications
print(sum(keptFeat), 'features are kept for processing,', sum(ISFeat), 'IS features removed')
print('-----')
self.Attributes['Log'].append([datetime.now(), '%d features kept for processing (%d samples). %d IS features filtered.' % (sum(keptFeat), self.noSamples, sum(ISFeat))])
def _loadBrukerXMLDataset(self, datapath, fileNamePattern=None, pdata=1, unit=None, **kwargs):
"""
Initialise object from Bruker XML files. Read files and prepare a valid TargetedDataset.
Targeted data measurements are read and mapped to pre-defined SOPs. Once the import is finished, only properly read samples are returned and only features mapped onto the pre-defined SOP and sufficiently described. Only the first instance of a duplicated feature is kept.
:param str datapath: Path to the folder containing all `xml` files, all directories below :file:`datapath` will be scanned for valid `xml` files.
:param str fileNamePattern: Regex pattern to identify the `xml` files in `datapath` folder
:param int pdata: pdata files to parse (default 1)
:param unit: if features are present more than once, only keep the features with the unit passed as input.
:type unit: None or str
:raises TypeError: if `fileNamePattern` is not a string
:raises TypeError: if `pdata` is not an integer
:raises TypeError: if `unit` is not 'None' or a string
:raises ValueError: if `unit` is not one of the unit in the input data
:return: None
"""
from ..utilities._readBrukerXML import importBrukerXML
from ..utilities.extractParams import buildFileList
if fileNamePattern is None:
fileNamePattern = self.Attributes['fileNamePattern']
# Check inputs
if not isinstance(fileNamePattern, str):
raise TypeError('\'fileNamePattern\' must be a string')
if not isinstance(pdata, int):
raise TypeError('\'pdata\' must be an integer')
if unit is not None:
if not isinstance(unit, str):
raise TypeError('\'unit\' must be a string')
## Build a list of xml files matching the pdata in the right folder
pattern = re.compile(fileNamePattern)
filelist = buildFileList(datapath, pattern)
pdataPattern = re.compile('.*?pdata.*?%i' % (pdata))
filelist = [x for x in filelist if pdataPattern.match(x)]
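# e.g. with pdata=1 this keeps result files under a '.../pdata/1/...' folder and drops other
# processed-data folders (illustrative path; the exact layout depends on the Bruker export)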
## Load intensity, sampleMetadata and featureMetadata. Files that cannot be opened raise warnings, and are filtered from the returned matrices.
(self.intensityData, self.sampleMetadata, self.featureMetadata) = importBrukerXML(filelist)
## Filter unit if required
avUnit = self.featureMetadata['Unit'].unique().tolist()
if unit is not None:
if unit not in self.featureMetadata['Unit'].unique().tolist():
raise ValueError('The unit \'' + str(unit) + '\' is not present in the input data, available units: ' + str(avUnit))
keepMask = (self.featureMetadata['Unit'] == unit).values
self.featureMetadata = self.featureMetadata.loc[keepMask, :]
self.featureMetadata.reset_index(drop=True, inplace=True)
self.intensityData = self.intensityData[:, keepMask]
## Check all features are unique; if not, only keep the first occurrence
u_ids, u_counts = numpy.unique(self.featureMetadata['Feature Name'], return_counts=True)
if not all(u_counts == 1):
dupFeat = u_ids[u_counts != 1].tolist()
warnings.warn('The following features are present more than once, only the first occurrence will be kept: ' + str(dupFeat) + '. For further filtering, available units are: ' + str(avUnit))
# only keep the first of duplicated features
keepMask = ~self.featureMetadata['Feature Name'].isin(dupFeat).values
keepFirstVal = [(self.featureMetadata['Feature Name'] == Feat).idxmax() for Feat in dupFeat]
keepMask[keepFirstVal] = True
self.featureMetadata = self.featureMetadata.loc[keepMask, :]
self.featureMetadata.reset_index(drop=True, inplace=True)
self.intensityData = self.intensityData[:, keepMask]
## Reformat featureMetadata
# quantificationType
self.featureMetadata['quantificationType'] = numpy.nan
self.featureMetadata.loc[self.featureMetadata['type'] == 'quantification', 'quantificationType'] = QuantificationType.QuantOther
self.featureMetadata.loc[self.featureMetadata['type'] != 'quantification', 'quantificationType'] = QuantificationType.Monitored
self.featureMetadata.drop('type', inplace=True, axis=1)
# calibrationMethod
self.featureMetadata['calibrationMethod'] = numpy.nan
self.featureMetadata.loc[self.featureMetadata['quantificationType'] == QuantificationType.QuantOther, 'calibrationMethod'] = CalibrationMethod.otherCalibration
self.featureMetadata.loc[self.featureMetadata['quantificationType'] == QuantificationType.Monitored, 'calibrationMethod'] = CalibrationMethod.noCalibration
# rename columns
self.featureMetadata.rename(columns={'loq': 'LLOQ', 'lod': 'LOD', 'Lower Reference Bound': 'Lower Reference Percentile', 'Upper Reference Bound': 'Upper Reference Percentile'}, inplace=True)
# replace '-' with nan
self.featureMetadata['LLOQ'].replace('-', numpy.nan, inplace=True)
self.featureMetadata['LLOQ'] = [float(x) for x in self.featureMetadata['LLOQ'].tolist()]
self.featureMetadata['LOD'].replace('-', numpy.nan, inplace=True)
self.featureMetadata['LOD'] = [float(x) for x in self.featureMetadata['LOD'].tolist()]
# ULOQ
self.featureMetadata['ULOQ'] = numpy.nan
## Initialise sampleMetadata
self.sampleMetadata['AssayRole'] = numpy.nan
self.sampleMetadata['SampleType'] = numpy.nan
self.sampleMetadata['Dilution'] = 100
self.sampleMetadata['Correction Batch'] = numpy.nan
self.sampleMetadata['Sample ID'] = numpy.nan
self.sampleMetadata['Exclusion Details'] = None
# add Run Order
self.sampleMetadata['Order'] = self.sampleMetadata.sort_values(by='Acquired Time').index
self.sampleMetadata['Run Order'] = self.sampleMetadata.sort_values(by='Order').index
self.sampleMetadata.drop('Order', axis=1, inplace=True)
# initialise the Batch to 1
self.sampleMetadata['Batch'] = [1] * self.sampleMetadata.shape[0]
self.sampleMetadata['Metadata Available'] = False
## Initialise expectedConcentration
self.expectedConcentration = pandas.DataFrame(None, index=list(self.sampleMetadata.index), columns=self.featureMetadata['Feature Name'].tolist())
## Initialise empty Calibration info
self.calibration = dict()
self.calibration['calibIntensityData'] = numpy.ndarray((0, self.featureMetadata.shape[0]))
self.calibration['calibSampleMetadata'] = pandas.DataFrame(None, columns=self.sampleMetadata.columns)
self.calibration['calibSampleMetadata']['Metadata Available'] = False
self.calibration['calibFeatureMetadata'] = pandas.DataFrame({'Feature Name': self.featureMetadata['Feature Name'].tolist()})
self.calibration['calibExpectedConcentration'] = pandas.DataFrame(None, columns=self.featureMetadata['Feature Name'].tolist())
## Summary
print('Targeted Method: ' + self.Attributes['methodName'])
print(str(self.sampleMetadata.shape[0]) + ' study samples')
print(str(self.featureMetadata.shape[0]) + ' features (' + str(sum(self.featureMetadata['quantificationType'] == QuantificationType.IS)) + ' IS, ' + str(sum(self.featureMetadata['quantificationType'] == QuantificationType.QuantOwnLabeledAnalogue)) + ' quantified and validated with own labeled analogue, ' + str(sum(self.featureMetadata['quantificationType'] == QuantificationType.QuantAltLabeledAnalogue)) + ' quantified and validated with alternative labeled analogue, ' + str(sum(self.featureMetadata['quantificationType'] == QuantificationType.QuantOther)) + ' other quantification, ' + str(sum(self.featureMetadata['quantificationType'] == QuantificationType.Monitored)) + ' monitored for relative information)')
print('-----')
## Apply limit of quantification?
self._applyLimitsOfQuantification(**kwargs)
## clear **kwargs that have been copied to Attributes
for i in list(kwargs.keys()):
try:
del self.Attributes[i]
except KeyError:
pass
for j in ['fileNamePattern', 'pdata', 'unit']:
try:
del self.Attributes[j]
except KeyError:
pass
def _applyLimitsOfQuantification(self, onlyLLOQ=False, **kwargs):
"""
For each feature, replace intensity values inferior to the lowest limit of quantification or superior to the upper limit of quantification, by a fixed value.
Features missing the minimal required information are excluded from :py:attr:`featureMetadata`, :py:attr:`intensityData`, :py:attr:`expectedConcentration` and :py:attr:`calibration`. Features `'Monitored for relative information'` (and `'noCalibration'`) are not processed and returned without alterations. Features with `'Other quantification'` are allowed `NaN` in the LLOQ or ULOQ (no replacement takes place).
Calibration data should not be processed and therefore returned without modification.
Units in :py:attr:`_intensityData`, :py:attr:`featureMetadata['LLOQ']` and :py:attr:`featureMetadata['ULOQ']` are expected to be identical for a given feature.
Note: In merged datasets, calibration is a list of dict, with features in each calibration dict potentially different from features in featureMetadata and _intensityData.
Therefore in merged dataset, features are not filtered in each individual calibration.
If features are excluded due to the lack of required featureMetadata info, the masks will be reinitialised
:param onlyLLOQ: if True only correct <LLOQ, if False correct <LLOQ and >ULOQ
:type onlyLLOQ: bool
:return: None
:raises AttributeError: if :py:attr:`featureMetadata['LLOQ']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['ULOQ']` is missing and onlyLLOQ==False
"""
sampleMetadata = copy.deepcopy(self.sampleMetadata)
featureMetadata = copy.deepcopy(self.featureMetadata)
intensityData = copy.deepcopy(self._intensityData)
expectedConcentration = copy.deepcopy(self.expectedConcentration)
calibration = copy.deepcopy(self.calibration)
if ((not hasattr(self, 'sampleMetadataExcluded')) | (not hasattr(self, 'featureMetadataExcluded')) | (not hasattr(self, 'intensityDataExcluded')) | (not hasattr(self, 'expectedConcentrationExcluded')) | (not hasattr(self, 'excludedFlag'))):
sampleMetadataExcluded = []
featureMetadataExcluded = []
intensityDataExcluded = []
expectedConcentrationExcluded = []
excludedFlag = []
else:
sampleMetadataExcluded = copy.deepcopy(self.sampleMetadataExcluded)
featureMetadataExcluded = copy.deepcopy(self.featureMetadataExcluded)
intensityDataExcluded = copy.deepcopy(self.intensityDataExcluded)
expectedConcentrationExcluded = copy.deepcopy(self.expectedConcentrationExcluded)
excludedFlag = copy.deepcopy(self.excludedFlag)
## Check input columns
if 'LLOQ' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'LLOQ\'] column is absent')
if not onlyLLOQ:
if 'ULOQ' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'ULOQ\'] column is absent')
## Features only Monitored are not processed and passed untouched (concatenated back at the end)
untouched = (featureMetadata['quantificationType'] == QuantificationType.Monitored).values
if sum(untouched) != 0:
print('The following features are only monitored and therefore not processed for LOQs: ' + str(featureMetadata.loc[untouched, 'Feature Name'].values.tolist()))
untouchedFeatureMetadata = featureMetadata.loc[untouched, :]
featureMetadata = featureMetadata.loc[~untouched, :]
untouchedIntensityData = intensityData[:, untouched]
intensityData = intensityData[:, ~untouched]
untouchedExpectedConcentration = expectedConcentration.loc[:, untouched]
expectedConcentration = expectedConcentration.loc[:, ~untouched]
# same reordering of the calibration
if isinstance(calibration, dict):
untouchedCalibFeatureMetadata = calibration['calibFeatureMetadata'].loc[untouched, :]
calibration['calibFeatureMetadata'] = calibration['calibFeatureMetadata'].loc[~untouched, :]
untouchedCalibIntensityData = calibration['calibIntensityData'][:, untouched]
calibration['calibIntensityData'] = calibration['calibIntensityData'][:, ~untouched]
untouchedCalibExpectedConcentration = calibration['calibExpectedConcentration'].loc[:, untouched]
calibration['calibExpectedConcentration'] = calibration['calibExpectedConcentration'].loc[:, ~untouched]
## Exclude features without required information
unusableFeat = featureMetadata['LLOQ'].isnull().values & (featureMetadata['quantificationType'] != QuantificationType.QuantOther).values
if not onlyLLOQ:
unusableFeat = unusableFeat | (featureMetadata['ULOQ'].isnull().values & (featureMetadata['quantificationType'] != QuantificationType.QuantOther).values)
if sum(unusableFeat) != 0:
print(str(sum(unusableFeat)) + ' features cannot be pre-processed:')
print('\t' + str(sum(unusableFeat)) + ' features lack the required information to apply limits of quantification')
# store
sampleMetadataExcluded.append(sampleMetadata)
featureMetadataExcluded.append(featureMetadata.loc[unusableFeat, :])
intensityDataExcluded.append(intensityData[:, unusableFeat])
expectedConcentrationExcluded.append(expectedConcentration.loc[:, unusableFeat])
excludedFlag.append('Features')
#remove
featureMetadata = featureMetadata.loc[~unusableFeat, :]
intensityData = intensityData[:, ~unusableFeat]
expectedConcentration = expectedConcentration.loc[:, ~unusableFeat]
if isinstance(calibration, dict):
calibration['calibFeatureMetadata'] = calibration['calibFeatureMetadata'].loc[~unusableFeat, :]
calibration['calibIntensityData'] = calibration['calibIntensityData'][:, ~unusableFeat]
calibration['calibExpectedConcentration'] = calibration['calibExpectedConcentration'].loc[:, ~unusableFeat]
## Values replacement (-inf / +inf)
# iterate over the features
for i in range(0, featureMetadata.shape[0]):
# LLOQ
if not numpy.isnan(featureMetadata['LLOQ'].values[i]):
toReplaceLLOQ = intensityData[:, i] < featureMetadata['LLOQ'].values[i]
intensityData[toReplaceLLOQ, i] = -numpy.inf
# ULOQ
if not onlyLLOQ:
if not numpy.isnan(featureMetadata['ULOQ'].values[i]):
toReplaceULOQ = intensityData[:, i] > featureMetadata['ULOQ'].values[i]
intensityData[toReplaceULOQ, i] = numpy.inf
## Add back the untouched monitored features
if sum(untouched) != 0:
featureMetadata = pandas.concat([featureMetadata, untouchedFeatureMetadata], axis=0, sort=False)
intensityData = numpy.concatenate((intensityData, untouchedIntensityData), axis=1)
expectedConcentration = pandas.concat([expectedConcentration, untouchedExpectedConcentration], axis=1, sort=False)
# reorder the calib
if isinstance(calibration, dict):
calibration['calibFeatureMetadata'] = pandas.concat([calibration['calibFeatureMetadata'], untouchedCalibFeatureMetadata], axis=0, sort=False)
calibration['calibIntensityData'] = numpy.concatenate((calibration['calibIntensityData'], untouchedCalibIntensityData), axis=1)
calibration['calibExpectedConcentration'] = pandas.concat([calibration['calibExpectedConcentration'], untouchedCalibExpectedConcentration], axis=1, sort=False)
# Remove excess info
featureMetadata.reset_index(drop=True, inplace=True)
expectedConcentration.reset_index(drop=True, inplace=True)
if isinstance(calibration, dict):
calibration['calibFeatureMetadata'].reset_index(drop=True, inplace=True)
calibration['calibExpectedConcentration'].reset_index(drop=True, inplace=True)
## return dataset with limits of quantification applied
self.featureMetadata = featureMetadata
self._intensityData = intensityData
self.expectedConcentration = expectedConcentration
self.calibration = calibration
self.sampleMetadataExcluded = sampleMetadataExcluded
self.featureMetadataExcluded = featureMetadataExcluded
self.intensityDataExcluded = intensityDataExcluded
self.expectedConcentrationExcluded = expectedConcentrationExcluded
self.excludedFlag = excludedFlag
if sum(unusableFeat) != 0:
# featureMask size will be wrong, requires a reinitialisation
self.initialiseMasks()
## Output and Log
print('Values <LLOQ replaced by -inf')
if not onlyLLOQ:
print('Values >ULOQ replaced by +inf')
if isinstance(calibration, dict):
print('\n')
# log the modifications
if onlyLLOQ:
logLimits = 'Limits of quantification applied to LLOQ'
else:
logLimits = 'Limits of quantification applied to LLOQ and ULOQ'
if sum(untouched) != 0:
logUntouchedFeatures = ' ' + str(sum(untouched)) + ' features only monitored and not processed: ' + str(untouchedFeatureMetadata.loc[:, 'Feature Name'].values.tolist()) + '.'
else:
logUntouchedFeatures = ''
self.Attributes['Log'].append([datetime.now(), '%s (%i samples, %i features). LLOQ are replaced by -inf.%s' % (logLimits, self.noSamples, self.noFeatures, logUntouchedFeatures)])
def _targetLynxApplyLimitsOfQuantificationNoiseFilled(self, onlyLLOQ=False, responseReference=None, **kwargs):
"""
For each feature, replace intensity values inferior to the lowest limit of quantification or superior to the upper limit of quantification. Values inferior to the lowest limit of quantification are replaced by the feature noise concentration.
Features missing the minimal required information are excluded from :py:attr:`featureMetadata`, :py:attr:`intensityData`, :py:attr:`expectedConcentration` and :py:attr:`calibration`. Features `'Monitored for relative information'` (and `'noCalibration'`) are not processed and returned without alterations.
Calibration data should not be processed and therefore returned without modification.
Units in :py:attr:`_intensityData`, :py:attr:`featureMetadata['LLOQ']` and :py:attr:`featureMetadata['ULOQ']` are expected to be identical for a given feature.
.. Note:: To replace <LLOQ by the concentration equivalent to the noise level, the noise area, as well as the :math:`a` and :math:`b` parameters of the calibration equation must be known. For each feature, the ratio `(IS conc / IS Area)` defined as the responseFactor, is determined in a representative calibration sample. Then the concentration equivalent to the noise area is calculated, before being used to replace values <LLOQ.
:param onlyLLOQ: if True only correct <LLOQ, if False correct <LLOQ and >ULOQ
:type onlyLLOQ: bool
:param responseReference: 'Sample File Name' of reference sample to use in order to establish the response to use, or list of samples to use (one per feature). If None, the middle of the calibration will be employed.
:type responseReference: None or str or list
:return: None
:raises AttributeError: if :py:attr:`featureMetadata['LLOQ']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['ULOQ']` is missing and onlyLLOQ==False
:raises AttributeError: if :py:attr:`featureMetadata['calibrationEquation']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['unitCorrectionFactor']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['Noise (area)']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['a']` is missing
:raises AttributeError: if :py:attr:`featureMetadata['b']` is missing
:raises AttributeError: if :py:attr:`calibration['calibPeakInfo']` is missing
:raises ValueError: if :py:attr:`calibration['calibPeakInfo']['peakArea']` number of features or samples do not match the rest of :py:attr:`calibration`
:raises ValueError: if :py:attr:`calibration['calibPeakInfo']['peakResponse']` number of features or samples do not match the rest of :py:attr:`calibration`
:raises ValueError: if the 'responseReference' sample name is not recognised or the list is of erroneous length.
:raises ValueError: if calculation using the calibrationEquation fails.
"""
sampleMetadata = copy.deepcopy(self.sampleMetadata)
featureMetadata = copy.deepcopy(self.featureMetadata)
intensityData = copy.deepcopy(self._intensityData)
expectedConcentration = copy.deepcopy(self.expectedConcentration)
calibration = copy.deepcopy(self.calibration)
if ((not hasattr(self, 'sampleMetadataExcluded')) | (not hasattr(self, 'featureMetadataExcluded')) | (not hasattr(self, 'intensityDataExcluded')) | (not hasattr(self, 'expectedConcentrationExcluded')) | (not hasattr(self, 'excludedFlag'))):
sampleMetadataExcluded = []
featureMetadataExcluded = []
intensityDataExcluded = []
expectedConcentrationExcluded = []
excludedFlag = []
else:
sampleMetadataExcluded = copy.deepcopy(self.sampleMetadataExcluded)
featureMetadataExcluded = copy.deepcopy(self.featureMetadataExcluded)
intensityDataExcluded = copy.deepcopy(self.intensityDataExcluded)
expectedConcentrationExcluded = copy.deepcopy(self.expectedConcentrationExcluded)
excludedFlag = copy.deepcopy(self.excludedFlag)
## Check input columns
if 'LLOQ' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'LLOQ\'] column is absent')
if not onlyLLOQ:
if 'ULOQ' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'ULOQ\'] column is absent')
if 'calibrationEquation' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'calibrationEquation\'] column is absent')
if 'unitCorrectionFactor' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'unitCorrectionFactor\'] column is absent')
if 'Noise (area)' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'Noise (area)\'] column is absent')
if 'a' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'a\'] column is absent')
if 'b' not in featureMetadata.columns:
raise AttributeError('the featureMetadata[\'b\'] column is absent')
if 'calibPeakInfo' not in calibration.keys():
raise AttributeError('the calibPeakInfo dict is absent from the calibration dict')
if (not numpy.array_equal(calibration['calibPeakInfo']['peakArea'].index.values, calibration['calibSampleMetadata'].index.values)) | (not numpy.array_equal(calibration['calibPeakInfo']['peakArea'].columns.values, calibration['calibFeatureMetadata']['Feature Name'].values)):
raise ValueError('calibration[\'calibPeakInfo\'][\'peakArea\'] number of features or samples do not match the rest of \'calibration\'')
if (not numpy.array_equal(calibration['calibPeakInfo']['peakResponse'].index.values, calibration['calibSampleMetadata'].index.values)) | (not numpy.array_equal(calibration['calibPeakInfo']['peakResponse'].columns.values, calibration['calibFeatureMetadata']['Feature Name'].values)):
raise ValueError('calibration[\'calibPeakInfo\'][\'peakResponse\'] number of features or samples do not match the rest of \'calibration\'')
## Features only Monitored are not processed and passed untouched (concatenated back at the end)
untouched = (featureMetadata['quantificationType'] == QuantificationType.Monitored).values
if sum(untouched) != 0:
print('The following features are only monitored and therefore not processed: ' + str(featureMetadata.loc[untouched, 'Feature Name'].values.tolist()))
untouchedFeatureMetadata = featureMetadata.loc[untouched, :]
featureMetadata = featureMetadata.loc[~untouched, :]
untouchedIntensityData = intensityData[:, untouched]
intensityData = intensityData[:, ~untouched]
untouchedExpectedConcentration = expectedConcentration.loc[:, untouched]
expectedConcentration = expectedConcentration.loc[:, ~untouched]
# same reordering of the calibration
untouchedCalibFeatureMetadata = calibration['calibFeatureMetadata'].loc[untouched, :]
calibration['calibFeatureMetadata'] = calibration['calibFeatureMetadata'].loc[~untouched, :]
untouchedCalibIntensityData = calibration['calibIntensityData'][:, untouched]
calibration['calibIntensityData'] = calibration['calibIntensityData'][:, ~untouched]
untouchedCalibExpectedConcentration = calibration['calibExpectedConcentration'].loc[:, untouched]
calibration['calibExpectedConcentration'] = calibration['calibExpectedConcentration'].loc[:, ~untouched]
untouchedCalibPeakArea = calibration['calibPeakInfo']['peakArea'].loc[:, untouched]
calibration['calibPeakInfo']['peakArea'] = calibration['calibPeakInfo']['peakArea'].loc[:, ~untouched]
untouchedCalibPeakResponse = calibration['calibPeakInfo']['peakResponse'].loc[:, untouched]
calibration['calibPeakInfo']['peakResponse'] = calibration['calibPeakInfo']['peakResponse'].loc[:, ~untouched]
untouchedCalibPeakConcentrationDeviation = calibration['calibPeakInfo']['peakConcentrationDeviation'].loc[:, untouched]
calibration['calibPeakInfo']['peakConcentrationDeviation'] = calibration['calibPeakInfo']['peakConcentrationDeviation'].loc[:, ~untouched]
untouchedCalibPeakIntegrationFlag = calibration['calibPeakInfo']['peakIntegrationFlag'].loc[:, untouched]
calibration['calibPeakInfo']['peakIntegrationFlag'] = calibration['calibPeakInfo']['peakIntegrationFlag'].loc[:, ~untouched]
untouchedCalibPeakRT = calibration['calibPeakInfo']['peakRT'].loc[:, untouched]
calibration['calibPeakInfo']['peakRT'] = calibration['calibPeakInfo']['peakRT'].loc[:, ~untouched]
## Exclude features without required information
unusableFeat = featureMetadata['LLOQ'].isnull().values | featureMetadata['Noise (area)'].isnull() | featureMetadata['a'].isnull() | featureMetadata['b'].isnull()
if not onlyLLOQ:
unusableFeat = unusableFeat | featureMetadata['ULOQ'].isnull().values
unusableFeat = unusableFeat.values
if sum(unusableFeat) != 0:
print(str(sum(unusableFeat)) + ' features cannot be pre-processed:')
print('\t' + str(sum(unusableFeat)) + ' features lack the required information to replace limits of quantification by noise level')
# store
sampleMetadataExcluded.append(sampleMetadata)
featureMetadataExcluded.append(featureMetadata.loc[unusableFeat, :])
intensityDataExcluded.append(intensityData[:, unusableFeat])
expectedConcentrationExcluded.append(expectedConcentration.loc[:, unusableFeat])
excludedFlag.append('Features')
#remove
featureMetadata = featureMetadata.loc[~unusableFeat, :]
intensityData = intensityData[:, ~unusableFeat]
expectedConcentration = expectedConcentration.loc[:, ~unusableFeat]
calibration['calibFeatureMetadata'] = calibration['calibFeatureMetadata'].loc[~unusableFeat, :]
calibration['calibIntensityData'] = calibration['calibIntensityData'][:, ~unusableFeat]
calibration['calibExpectedConcentration'] = calibration['calibExpectedConcentration'].loc[:, ~unusableFeat]
calibration['calibPeakInfo']['peakResponse'] = calibration['calibPeakInfo']['peakResponse'].loc[:, ~unusableFeat]
calibration['calibPeakInfo']['peakArea'] = calibration['calibPeakInfo']['peakArea'].loc[:, ~unusableFeat]
calibration['calibPeakInfo']['peakConcentrationDeviation'] = calibration['calibPeakInfo']['peakConcentrationDeviation'].loc[:, ~unusableFeat]
calibration['calibPeakInfo']['peakIntegrationFlag'] = calibration['calibPeakInfo']['peakIntegrationFlag'].loc[:, ~unusableFeat]
calibration['calibPeakInfo']['peakRT'] = calibration['calibPeakInfo']['peakRT'].loc[:, ~unusableFeat]
## Calculate each feature's replacement noise concentration
##
## Approximate the response reference
## Needed for calibrationMethod='backcalculatedIS', for 'noIS' responseFactor=1
# responseReference: None (guessed middle of the curve), 'Sample File Name' to use, or list of 'Sample File Name' (one per feature)
#
# ! The calibration curve is plotted in TargetLynx as x-axis concentration, y-axis response
# The calibration equation obtained is written as: response = a * concentration + b (eq. 1)
# The response uses the area measured and IS: response = Area * (IS conc / IS Area) (eq. 2) [for 'noIS' response = Area]
# We can define the responseFactor = (IS conc/IS Area), the ratio of IS Conc/IS Area that can changes from sample to sample.
# For noise concentration calculation, using eq. 2 and a reference sample, we can approximate responseFactor = response/area [works for both calibrationMethod]
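# Illustrative sketch (hypothetical values): if, in the chosen reference calibration sample, a feature
# has peakArea=2000 and peakResponse=4, then responseFactor = 4 / 2000 = 0.002, approximating
# (IS conc / IS Area) for that sample (for 'noIS' methods the ratio is simply 1).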
# make a list of responseReference (one per feature)
if isinstance(responseReference, str):
# Check existence of this sample
if sum(calibration['calibSampleMetadata']['Sample File Name'] == responseReference) == 0:
raise ValueError('responseReference \'Sample File Name\' unknown: ' + str(responseReference))
responseReference = [responseReference] * featureMetadata.shape[0]
elif isinstance(responseReference, list):
# Check length to match the number of features
if len(responseReference) != featureMetadata.shape[0]:
raise ValueError('The number of responseReference \'Sample File Name\' provided does not match the number of features to process:\n' + str(featureMetadata['Feature Name'].values))
for i in responseReference:
if sum(calibration['calibSampleMetadata']['Sample File Name'] == i) == 0:
raise ValueError('ResponseReference \'Sample File Name\' unknown: ' + str(i))
elif responseReference is None:
# Get a sample in the middle of the calibration run, use at your own risk
responseReference = calibration['calibSampleMetadata'].sort_values(by='Run Order').iloc[int(numpy.ceil(calibration['calibSampleMetadata'].shape[0] / 2)) - 1]['Sample File Name'] # round to the highest value
warnings.warn('No responseReference provided, sample in the middle of the calibration run employed: ' + str(responseReference))
responseReference = [responseReference] * featureMetadata.shape[0]
else:
raise ValueError('The responseReference provided is not recognised. A \'Sample File Name\', a list of \'Sample File Name\' or None are expected')
# Get the right Area and Response for each feature
tmpArea = list()
tmpResponse = list()
# iterate over features, get value in responseReference spectra
for i in range(0, featureMetadata.shape[0]):
tmpArea.append(calibration['calibPeakInfo']['peakArea'][(calibration['calibSampleMetadata']['Sample File Name'] == responseReference[i]).values].values.flatten()[i])
tmpResponse.append(calibration['calibPeakInfo']['peakResponse'][(calibration['calibSampleMetadata']['Sample File Name'] == responseReference[i]).values].values.flatten()[i])
# responseFactor = response/Area
# Note: responseFactor will be ~equal for all compounds sharing the same IS (as ISconc/ISArea will be identical)
resFact = [resp / area for resp, area in zip(tmpResponse, tmpArea)]
featureMetadata = featureMetadata.assign(responseFactor=resFact)
## Calculate noise concentration equivalent for each feature
## Note for equation in .json:
# calibration curve in TargetLynx is defined/established as: response = a * concentration + b (eq. 1)
# response is defined as: response = Area * (IS conc / IS Area) (eq. 2) [for 'noIS' response = Area]
# using eq. 2, we can approximate the ratio IS Conc/IS Area in a representative sample as: responseFactor = response / area (eq. 3)
# Therefore: concentration = ((area*responseFactor) - b) / a (eq. 4)
#
# If in TargetLynx 'axis transformation' is set to log (but still using 'Polynomial Type'=linear and 'Fit Weighting'=None)
# eq. 1 is changed to: log(response) = a * log(concentration) + b (eq. 5)
# and eq. 4 is changed to: concentration = 10^( (log(area*responseFactor) - b) / a ) (eq. 6)
# The calibrationEquation provided is expected to use the following variables:
# area
# responseFactor | responseFactor=(IS conc/IS Area)=response/Area, for noIS, responseFactor will be 1.
# a
# b
#
# Examples:
# '((area * responseFactor)-b)/a'
# '10**((numpy.log10(area * responseFactor)-b)/a)'
# 'area/a' | if b not needed, set to 0 in csv [use for linear noIS, area=response, responseFactor=1, and response = a * concentration ]
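# Worked example (hypothetical values): with calibrationEquation='((area * responseFactor)-b)/a',
# Noise (area)=50, responseFactor=0.002, a=0.01, b=0 and unitCorrectionFactor=1:
#   noiseConcentration = ((50 * 0.002) - 0) / 0.01 * 1 = 10
# so, for this feature, values <LLOQ would be replaced by 10 (in the feature's reporting unit).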
tmpNoiseConc = []
for i in range(0, featureMetadata.shape[0]):
# set the right values before applying the equation
calibrationEquation = featureMetadata['calibrationEquation'].values[i]
area = featureMetadata['Noise (area)'].values[i]
responseFactor = featureMetadata['responseFactor'].values[i]
a = featureMetadata['a'].values[i]
b = featureMetadata['b'].values[i]
# apply the calibration equation, and the unitCorrectionFactor, as the equations were established with the original area/response/concentrations
try:
tmpNoiseConc.append(eval(calibrationEquation) * featureMetadata['unitCorrectionFactor'].values[i])
except Exception:
raise ValueError('Verify calibrationEquation: \"' + calibrationEquation + '\", only variables expected are \"area\", \"responseFactor\", \"a\" or \"b\"')
featureMetadata = featureMetadata.assign(noiseConcentration=tmpNoiseConc)
## Values replacement by noise concentration (<LOQ) and +inf for (>ULOQ)
# iterate over the features
for i in range(0, featureMetadata.shape[0]):
# LLOQ
toReplaceLLOQ = intensityData[:, i] < featureMetadata['LLOQ'].values[i]
intensityData[toReplaceLLOQ, i] = featureMetadata['noiseConcentration'].values[i]
# ULOQ
if not onlyLLOQ:
toReplaceULOQ = intensityData[:, i] > featureMetadata['ULOQ'].values[i]
intensityData[toReplaceULOQ, i] = numpy.inf
## Add back the untouched monitored features
if sum(untouched) != 0:
featureMetadata = pandas.concat([featureMetadata, untouchedFeatureMetadata], axis=0, sort=False)
intensityData = numpy.concatenate((intensityData, untouchedIntensityData), axis=1)
expectedConcentration = pandas.concat([expectedConcentration, untouchedExpectedConcentration], axis=1, sort=False)
# reorder the calib
calibration['calibFeatureMetadata'] = pandas.concat([calibration['calibFeatureMetadata'], untouchedCalibFeatureMetadata], axis=0, sort=False)
calibration['calibIntensityData'] = numpy.concatenate((calibration['calibIntensityData'], untouchedCalibIntensityData), axis=1)
calibration['calibExpectedConcentration'] = pandas.concat([calibration['calibExpectedConcentration'], untouchedCalibExpectedConcentration], axis=1, sort=False)
calibration['calibPeakInfo']['peakArea'] = pandas.concat([calibration['calibPeakInfo']['peakArea'], untouchedCalibPeakArea], axis=1, sort=False)
calibration['calibPeakInfo']['peakResponse'] = pandas.concat([calibration['calibPeakInfo']['peakResponse'], untouchedCalibPeakResponse], axis=1, sort=False)
calibration['calibPeakInfo']['peakConcentrationDeviation'] = pandas.concat([calibration['calibPeakInfo']['peakConcentrationDeviation'], untouchedCalibPeakConcentrationDeviation], axis=1, sort=False)
calibration['calibPeakInfo']['peakIntegrationFlag'] = pandas.concat([calibration['calibPeakInfo']['peakIntegrationFlag'], untouchedCalibPeakIntegrationFlag], axis=1, sort=False)
calibration['calibPeakInfo']['peakRT'] = pandas.concat([calibration['calibPeakInfo']['peakRT'], untouchedCalibPeakRT], axis=1, sort=False)
# Remove excess info
featureMetadata.reset_index(drop=True, inplace=True)
calibration['calibFeatureMetadata'].reset_index(drop=True, inplace=True)
expectedConcentration.reset_index(drop=True, inplace=True)
calibration['calibExpectedConcentration'].reset_index(drop=True, inplace=True)
calibration['calibPeakInfo']['peakArea'].reset_index(drop=True, inplace=True)
calibration['calibPeakInfo']['peakResponse'].reset_index(drop=True, inplace=True)
calibration['calibPeakInfo']['peakConcentrationDeviation'].reset_index(drop=True, inplace=True)
calibration['calibPeakInfo']['peakIntegrationFlag'].reset_index(drop=True, inplace=True)
calibration['calibPeakInfo']['peakRT'].reset_index(drop=True, inplace=True)
## return dataset with limits of quantification applied
self.featureMetadata = featureMetadata
self._intensityData = intensityData
self.expectedConcentration = expectedConcentration
self.calibration = calibration
self.sampleMetadataExcluded = sampleMetadataExcluded
self.featureMetadataExcluded = featureMetadataExcluded
self.intensityDataExcluded = intensityDataExcluded
self.expectedConcentrationExcluded = expectedConcentrationExcluded
self.excludedFlag = excludedFlag
## Output and Log
print('Values <LLOQ replaced by the noise concentration')
if not onlyLLOQ:
print('Values >ULOQ replaced by +inf')
print('\n')
# log the modifications
if onlyLLOQ:
logLimits = 'Limits of quantification applied to LLOQ'
else:
logLimits = 'Limits of quantification applied to LLOQ and ULOQ'
if sum(untouched) != 0:
logUntouchedFeatures = ' ' + str(sum(untouched)) + ' features only monitored and not processed: ' + str(untouchedFeatureMetadata.loc[:, 'Feature Name'].values.tolist()) + '.'
else:
logUntouchedFeatures = ''
self.Attributes['Log'].append([datetime.now(), '%s (%i samples, %i features). LLOQ are replaced by the noise concentration.%s' % (logLimits, self.noSamples, self.noFeatures, logUntouchedFeatures)])
def __add__(self,other):
"""
Implements the concatenation of 2 :py:class:`TargetedDataset`
`targetedDataset = targetedDatasetBatch1 + targetedDatasetBatch2`
`targetedDataset = sum([targetedDatasetBatch1, targetedDatasetBatch2, targetedDatasetBatch3])`
:py:attr:`sampleMetadata` are concatenated, :py:attr:`featureMetadata` are merged and :py:attr:`intensityData` match it.
In :py:attr:`featureMetadata`, non pre-defined column names get the suffix '_batchX' appended.
Excluded features and samples are listed in the same order as the 'Batch'.
Calibrations are listed in the same order as the batches. Features are not modified inside the calibration (calibFeatureMetadata can contain more features than self.featureMetadata)
FeatureMetadata columns listed in Attributes['additionalQuantParamColumns'] are expected to be identical across all batches (if present), and are added to the merge columns.
:raises ValueError: if the targeted methods employed differ
:raises ValueError: if an object doesn't pass validation before merge
:raises ValueError: if the merge object doesn't pass validation
:raises Warning: to update LOQ using :py:meth:`~TargetedDataset.mergeLimitsOfQuantification`
"""
import collections.abc
import warnings
def flatten(x):
""" Always provide a single level list, from a list of lists and or str """
result = []
for el in x:
if isinstance(el, collections.abc.Iterable) and not (isinstance(el, str) | isinstance(el, dict)):
result.extend(flatten(el))
else:
result.append(el)
return result
def reNumber(oldSeries, startNb):
""" reindex a series of int between the startNB and startNb + number of unique values """
oldNb = oldSeries.unique().tolist()
newNb = numpy.arange(startNb, startNb + len(oldNb)).tolist()
newSeries = pandas.Series(numpy.repeat(numpy.nan, oldSeries.shape[0]))
for i in range(0, len(oldNb)):
newSeries[oldSeries == oldNb[i]] = newNb[i]
changes = dict(zip(oldNb, newNb))
return newSeries.astype('int64'), changes
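# Illustrative sketch (hypothetical values):
#   reNumber(pandas.Series([3, 3, 7, 7]), 1) -> (pandas.Series([1, 1, 2, 2]), {3: 1, 7: 2})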
def batchListReNumber(oldList, numberChange, untouchedValues):
"""
Rename list values; if no '_batchX' suffix present in list value, append one (the lowest key in numberChange).
Then scan all '_batchX' suffix and update X to all members following numberChange.
:param list oldList: values to which append and update _batchX
:param dict numberChange: dict with old batch number as keys and new batch number as values
:param list untouchedValues: list of values to leave untouched
:return: list with appended/updated batch values
"""
import re
newList = copy.deepcopy(oldList)
## Append '_batchX' with X the smallest original 'Batch' if none already present
for i in range(len(newList)):
if (newList[i] not in untouchedValues) & (newList[i].find('_batch') == -1):
newList[i] = newList[i] + '_batch' + str(min(numberChange.keys()))
## Update X in all '_batchX' column names (look for previous batch numbers and replace)
for batchNum in numberChange.keys():
# exact match with end of string ($)
query = '.+?(?=_batch' + str(batchNum) + '$)'
for j in range(len(newList)):
if newList[j] not in untouchedValues:
# string without _batchX
searchRes = re.search(query, newList[j]) # if no match returns None
if searchRes:
newList[j] = searchRes.group() + '_batch' + str(numberChange[batchNum])
return newList
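# Illustrative sketch (hypothetical values), with numberChange={1: 3} and untouchedValues=['Feature Name']:
#   batchListReNumber(['Feature Name', 'LLOQ', 'ULOQ_batch1'], {1: 3}, ['Feature Name'])
#   -> ['Feature Name', 'LLOQ_batch3', 'ULOQ_batch3']
# ('LLOQ' first receives the smallest original batch suffix, then all suffixes are renumbered)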
def concatenateList(list1, list2):
"""
Concatenate two lists, always return a list of lists
"""
outputList = []
## list1
# if it's an empty list
if len(list1) == 0:
outputList.append(list1)
# if it's already a list of list (from previous __add__)
elif isinstance(list1[0], list):
for i in range(len(list1)):
outputList.append(list1[i])
# first use of __add__, not a list of list
else:
outputList.append(list1)
## list2
# if it's an empty list
if len(list2) == 0:
outputList.append(list2)
# if it's already a list of list (from previous __add__)
elif isinstance(list2[0], list):
for i in range(len(list2)):
outputList.append(list2[i])
# first use of __add__, not a list of list
else:
outputList.append(list2)
return outputList
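# Illustrative sketch (hypothetical values):
#   concatenateList(['a', 'b'], [])         -> [['a', 'b'], []]
#   concatenateList([['a'], ['b']], ['c'])  -> [['a'], ['b'], ['c']]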
def updatecalibBatch(calib, batchChange):
"""
change batch number inside each calibration['calibSampleMetadata']
:param calib: calibration or list of calibration
:param batchChange: dict of batch number changes
:return: updated calibration
"""
if isinstance(calib, list):
updatedcalib = list()
for j in range(len(calib)):
# all the same
updatedcalib.append(calib[j])
# modify batch number
for batchNum in batchChange.keys():
updatedcalib[j]['calibSampleMetadata'].loc[calib[j]['calibSampleMetadata']['Batch'] == batchNum, 'Batch'] = batchChange[batchNum]
elif isinstance(calib, dict):
updatedcalib = copy.deepcopy(calib)
# modify batch number
for batchNum in batchChange.keys():
updatedcalib['calibSampleMetadata'].loc[calib['calibSampleMetadata']['Batch'] == batchNum, 'Batch'] = batchChange[batchNum]
return updatedcalib
## Input checks
# Run validator (checks for duplicates in featureMetadata['Feature Name']). No check for AssayRole and SampleType as sample info data might not have been imported yet
validSelfDataset = self.validateObject(verbose=False, raiseError=False, raiseWarning=False)
validOtherDataset = other.validateObject(verbose=False, raiseError=False, raiseWarning=False)
if not validSelfDataset['BasicTargetedDataset']:
raise ValueError('self does not satisfy the Basic TargetedDataset definition, check with self.validateObject(verbose=True, raiseError=False)')
if not validOtherDataset['BasicTargetedDataset']:
raise ValueError('other does not satisfy the Basic TargetedDataset definition, check with other.validateObject(verbose=True, raiseError=False)')
# Warning if duplicate 'Sample File Name' in sampleMetadata
u_ids, u_counts = numpy.unique(pandas.concat([self.sampleMetadata['Sample File Name'], other.sampleMetadata['Sample File Name']],ignore_index=True, sort=False), return_counts=True)
if any(u_counts > 1):
warnings.warn('Warning: The following \'Sample File Name\' are present more than once: ' + str(u_ids[u_counts>1].tolist()))
if self.AnalyticalPlatform != other.AnalyticalPlatform:
raise ValueError('Can only add Targeted datasets with the same AnalyticalPlatform Attribute')
## Initialise an empty TargetedDataset to overwrite
targetedData = TargetedDataset(datapath='', fileType='empty')
## Attributes
if self.Attributes['methodName'] != other.Attributes['methodName']:
raise ValueError('Cannot concatenate different targeted methods: \''+ self.Attributes['methodName'] +'\' and \''+ other.Attributes['methodName'] +'\'')
# copy from the first (mainly dataset parameters, methodName, chromatography and ionisation)
targetedData.Attributes = copy.deepcopy(self.Attributes)
# append both logs
targetedData.Attributes['Log'] = self.Attributes['Log'] + other.Attributes['Log']
## _Normalisation
targetedData._Normalisation = normalisation.NullNormaliser()
## VariableType
targetedData.VariableType = copy.deepcopy(self.VariableType)
targetedData.AnalyticalPlatform = copy.deepcopy(self.AnalyticalPlatform)
## _name
targetedData.name = self.name+'-'+other.name
## fileName
targetedData.fileName = flatten([self.fileName, other.fileName])
## filePath
targetedData.filePath = flatten([self.filePath, other.filePath])
## sampleMetadata
tmpSampleMetadata1 = copy.deepcopy(self.sampleMetadata)
tmpSampleMetadata2 = copy.deepcopy(other.sampleMetadata)
# reindex the 'Batch' value across both targetedDataset (self starts at 1, other at max(self)+1)
tmpSampleMetadata1['Batch'], batchChangeSelf = reNumber(tmpSampleMetadata1['Batch'], 1)
tmpSampleMetadata2['Batch'], batchChangeOther = reNumber(tmpSampleMetadata2['Batch'], tmpSampleMetadata1['Batch'].values.max()+1)
# Concatenate samples and reinitialise index
sampleMetadata = pandas.concat([tmpSampleMetadata1, tmpSampleMetadata2], ignore_index=True, sort=False)
# Update Run Order
sampleMetadata['Order'] = sampleMetadata.sort_values(by='Acquired Time').index
sampleMetadata['Run Order'] = sampleMetadata.sort_values(by='Order').index
sampleMetadata.drop('Order', axis=1, inplace=True)
# new sampleMetadata
targetedData.sampleMetadata = copy.deepcopy(sampleMetadata)
## featureMetadata
## Merge feature list on the common columns imposed by the targeted SOP employed.
# All other columns have a '_batchX' suffix appended for traceability. (use the min original 'Batch' for that targetedDataset)
# From that point onward no variable should exist without a '_batchX'
# Apply to '_batchX' the batchChangeSelf and batchChangeOther to align it with the 'Batch'
mergeCol = ['Feature Name', 'calibrationMethod', 'quantificationType', 'Unit']
mergeCol.extend(self.Attributes['externalID'])
# additionalQuantParamColumns, if present, are expected to be identical across batches
if 'additionalQuantParamColumns' in targetedData.Attributes.keys():
for col in targetedData.Attributes['additionalQuantParamColumns']:
if (col in self.featureMetadata.columns) and (col in other.featureMetadata.columns) and (col not in mergeCol):
mergeCol.append(col)
# take each dataset featureMetadata column names, modify them and rename columns
tmpFeatureMetadata1 = copy.deepcopy(self.featureMetadata)
updatedCol1 = batchListReNumber(tmpFeatureMetadata1.columns.tolist(), batchChangeSelf, mergeCol)
tmpFeatureMetadata1.columns = updatedCol1
tmpFeatureMetadata2 = copy.deepcopy(other.featureMetadata)
updatedCol2 = batchListReNumber(tmpFeatureMetadata2.columns.tolist(), batchChangeOther, mergeCol)
tmpFeatureMetadata2.columns = updatedCol2
# Merge featureMetadata on the mergeCol, no columns with identical name exist
tmpFeatureMetadata = tmpFeatureMetadata1.merge(tmpFeatureMetadata2, how='outer', on=mergeCol, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, copy=True, indicator=False)
targetedData.featureMetadata = copy.deepcopy(tmpFeatureMetadata)
## featureMetadataNotExported
# add _batchX to the column names to exclude. The expected columns are 'mergeCol' from featureMetadata. No modification for sampleMetadataNotExported which has been copied with the other Attributes (and is an SOP parameter)
notExportedSelf = batchListReNumber(self.Attributes['featureMetadataNotExported'], batchChangeSelf, mergeCol)
notExportedOther = batchListReNumber(other.Attributes['featureMetadataNotExported'], batchChangeOther, mergeCol)
targetedData.Attributes['featureMetadataNotExported'] = list(set().union(notExportedSelf, notExportedOther))
## _intensityData
# samples are simply concatenated, but features are merged. Reproject each dataset on the merged feature list before concatenation.
# init with nan
intensityData1 = numpy.full([self._intensityData.shape[0], targetedData.featureMetadata.shape[0]], numpy.nan)
intensityData2 = numpy.full([other._intensityData.shape[0], targetedData.featureMetadata.shape[0]], numpy.nan)
# iterate over the merged features
for i in range(targetedData.featureMetadata.shape[0]):
featureName = targetedData.featureMetadata.loc[i,'Feature Name']
featurePosition1 = self.featureMetadata['Feature Name'] == featureName
featurePosition2 = other.featureMetadata['Feature Name'] == featureName
if sum(featurePosition1)==1:
intensityData1[:,i] = self._intensityData[:,featurePosition1].ravel()
elif sum(featurePosition1)>1:
raise ValueError('Duplicate feature name in first input: ' + featureName)
if sum(featurePosition2)==1:
intensityData2[:, i] = other._intensityData[:, featurePosition2].ravel()
elif sum(featurePosition2) > 1:
raise ValueError('Duplicate feature name in second input: ' + featureName)
intensityData = numpy.concatenate([intensityData1,intensityData2], axis=0)
targetedData._intensityData = copy.deepcopy(intensityData)
## expectedConcentration
# same approach as _intensityData, samples are concatenated but features are merged. validateObject() on input ensures expectedConcentration.columns match featureMetadata['Feature Name']
expectedConc1 = pandas.DataFrame(numpy.full([self.expectedConcentration.shape[0], targetedData.featureMetadata.shape[0]], numpy.nan), columns=targetedData.featureMetadata['Feature Name'].tolist())
expectedConc2 = pandas.DataFrame(numpy.full([other.expectedConcentration.shape[0], targetedData.featureMetadata.shape[0]], numpy.nan), columns=targetedData.featureMetadata['Feature Name'].tolist())
# iterate over the merged features
for colname in targetedData.featureMetadata['Feature Name'].tolist():
if colname in self.expectedConcentration.columns:
expectedConc1.loc[:,colname] = self.expectedConcentration[colname].ravel()
if colname in other.expectedConcentration.columns:
expectedConc2.loc[:,colname] = other.expectedConcentration[colname].ravel()
expectedConcentration = pandas.concat([expectedConc1, expectedConc2], axis=0, ignore_index=True, sort=False)
expectedConcentration.reset_index(drop=True, inplace=True)
targetedData.expectedConcentration = copy.deepcopy(expectedConcentration)
## Masks
targetedData.initialiseMasks()
# sampleMask
targetedData.sampleMask = numpy.concatenate([self.sampleMask, other.sampleMask], axis=0)
# featureMask
# if the featureMask values agree in both datasets, keep that value; otherwise keep the default True. If a feature exists only in one dataset, use that dataset's value.
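# Illustrative truth table for a feature present in both datasets (merged mask = self | other):
#   True/True -> True, True/False -> True, False/True -> True, False/False -> False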
if (sum(~self.featureMask)!=0) | (sum(~other.featureMask)!=0):
warnings.warn("Warning: featureMask are not empty, they will be merged. If both featureMasks do not agree, the default \'True\' value will be set. If the feature is only present in one dataset, the corresponding featureMask value will be kept.")
for i in range(targetedData.featureMetadata.shape[0]):
featureName = targetedData.featureMetadata.loc[i, 'Feature Name']
featurePosition1 = self.featureMetadata['Feature Name'] == featureName
featurePosition2 = other.featureMetadata['Feature Name'] == featureName
# if both exist
if (sum(featurePosition1)==1) & (sum(featurePosition2)==1):
# only False if both are False (otherwise True, same as default)
targetedData.featureMask[i] = self.featureMask[featurePosition1] | other.featureMask[featurePosition2]
# if feature only exist in first input
elif sum(featurePosition1) == 1:
targetedData.featureMask[i] = self.featureMask[featurePosition1]
# if feature only exist in second input
elif sum(featurePosition2) == 1:
targetedData.featureMask[i] = other.featureMask[featurePosition2]
## Excluded data with applyMask()
# attribute doesn't exist the first time. From one round of __add__ onward the attribute is created and the length matches the number and order of 'Batch'
if hasattr(self, 'sampleMetadataExcluded') & hasattr(other, 'sampleMetadataExcluded'):
targetedData.sampleMetadataExcluded = concatenateList(self.sampleMetadataExcluded, other.sampleMetadataExcluded)
targetedData.featureMetadataExcluded = concatenateList(self.featureMetadataExcluded, other.featureMetadataExcluded)
targetedData.intensityDataExcluded = concatenateList(self.intensityDataExcluded, other.intensityDataExcluded)
targetedData.expectedConcentrationExcluded = concatenateList(self.expectedConcentrationExcluded, other.expectedConcentrationExcluded)
targetedData.excludedFlag = concatenateList(self.excludedFlag, other.excludedFlag)
elif hasattr(self, 'sampleMetadataExcluded'):
targetedData.sampleMetadataExcluded = concatenateList(self.sampleMetadataExcluded, [])
targetedData.featureMetadataExcluded = concatenateList(self.featureMetadataExcluded, [])
targetedData.intensityDataExcluded = concatenateList(self.intensityDataExcluded, [])
targetedData.expectedConcentrationExcluded = concatenateList(self.expectedConcentrationExcluded, [])
targetedData.excludedFlag = concatenateList(self.excludedFlag, [])
elif hasattr(other, 'sampleMetadataExcluded'):
targetedData.sampleMetadataExcluded = concatenateList([], other.sampleMetadataExcluded)
targetedData.featureMetadataExcluded = concatenateList([], other.featureMetadataExcluded)
targetedData.intensityDataExcluded = concatenateList([], other.intensityDataExcluded)
targetedData.expectedConcentrationExcluded = concatenateList([], other.expectedConcentrationExcluded)
targetedData.excludedFlag = concatenateList([], other.excludedFlag)
else:
targetedData.sampleMetadataExcluded = concatenateList([], [])
targetedData.featureMetadataExcluded = concatenateList([], [])
targetedData.intensityDataExcluded = concatenateList([], [])
targetedData.expectedConcentrationExcluded = concatenateList([], [])
targetedData.excludedFlag = concatenateList([], [])
## calibration
# change batch number inside each calibration['calibSampleMetadata']
tmpCalibSelf = copy.deepcopy(self.calibration)
tmpCalibSelf = updatecalibBatch(tmpCalibSelf, batchChangeSelf)
tmpCalibOther = copy.deepcopy(other.calibration)
tmpCalibOther = updatecalibBatch(tmpCalibOther, batchChangeOther)
targetedData.calibration = flatten([tmpCalibSelf, tmpCalibOther])
## unexpected attributes
expectedAttr = {'Attributes', 'VariableType', 'AnalyticalPlatform', '_Normalisation', '_name', 'fileName', 'filePath',
'_intensityData', 'sampleMetadata', 'featureMetadata', 'expectedConcentration','sampleMask',
'featureMask', 'calibration', 'sampleMetadataExcluded', 'intensityDataExcluded',
'featureMetadataExcluded', 'expectedConcentrationExcluded', 'excludedFlag'}
selfAttr = set(self.__dict__.keys())
selfAdditional = selfAttr - expectedAttr
otherAttr = set(other.__dict__.keys())
otherAdditional = otherAttr - expectedAttr
# identify common and unique
commonAttr = selfAdditional.intersection(otherAdditional)
onlySelfAttr = selfAdditional - commonAttr
onlyOtherAttr = otherAdditional - commonAttr
# save a list [self, other] for each attribute
if bool(commonAttr):
print('The following additional attributes are present in both datasets and stored as lists:')
print('\t' + str(commonAttr))
for k in commonAttr:
setattr(targetedData, k, [getattr(self,k), getattr(other,k)])
if bool(onlySelfAttr):
print('The following additional attributes are only present in the first dataset and stored as lists:')
print('\t' + str(onlySelfAttr))
for l in onlySelfAttr:
setattr(targetedData, l, [getattr(self, l), None])
if bool(onlyOtherAttr):
print('The following additional attributes are only present in the second dataset and stored as lists:')
print('\t' + str(onlyOtherAttr))
for m in onlyOtherAttr:
setattr(targetedData, m, [None, getattr(other, m)])
## run validation on the merged dataset
validMergedDataset = targetedData.validateObject(verbose=False, raiseError=False, raiseWarning=False)
if not validMergedDataset['BasicTargetedDataset']:
raise ValueError('The merged dataset does not satisfy the Basic TargetedDataset definition')
## Log
targetedData.Attributes['Log'].append([datetime.now(), 'Concatenated datasets %s (%i samples and %i features) and %s (%i samples and %i features), to a dataset of %i samples and %i features.' % (self.name, self.noSamples, self.noFeatures, other.name, other.noSamples, other.noFeatures, targetedData.noSamples, targetedData.noFeatures)])
print('Concatenated datasets %s (%i samples and %i features) and %s (%i samples and %i features), to a dataset of %i samples and %i features.' % (self.name, self.noSamples, self.noFeatures, other.name, other.noSamples, other.noFeatures, targetedData.noSamples, targetedData.noFeatures))
## Remind to mergeLimitsOfQuantification
warnings.warn('Update the limits of quantification using `mergedDataset.mergeLimitsOfQuantification()` (keeps the lowest common denominator across all batches: highest LLOQ, lowest ULOQ)')
return targetedData
def __radd__(self, other):
"""
Implements the summation of multiple :py:class:`TargetedDataset`
`targetedDataset = sum([ targetedDatasetBatch1, targetedDatasetBatch2, targetedDatasetBatch3 ])`
.. note:: sum() always starts with the integer 0 and calls `0.__add__(targetedDatasetBatch1)`, which fails, before falling back to the reverse add method `targetedDatasetBatch1.__radd__(0)`
"""
if other == 0:
return self
else:
return self.__add__(other)
def mergeLimitsOfQuantification(self, keepBatchLOQ=False, onlyLLOQ=False):
"""
Update limits of quantification and apply LLOQ/ULOQ using the lowest common denominator across all batches (after a :py:meth:`~TargetedDataset.__add__`): the highest LLOQ and the lowest ULOQ are kept.
:param bool keepBatchLOQ: If ``True`` do not remove each batch LOQ (:py:attr:`featureMetadata['LLOQ_batchX']`, :py:attr:`featureMetadata['ULOQ_batchX']`)
:param bool onlyLLOQ: if True only correct <LLOQ, if False correct <LLOQ and >ULOQ
:raises ValueError: if targetedData does not satisfy the BasicTargetedDataset definition on input
:raises ValueError: if the number of batches, LLOQ_batchX and ULOQ_batchX columns do not match
:raises ValueError: if targetedData does not satisfy the BasicTargetedDataset definition after LOQ merging
:raises Warning: if :py:attr:`featureMetadata['LLOQ']` or :py:attr:`featureMetadata['ULOQ']` already exist and will be overwritten.
"""
# Check dataset is fit for merging LOQ
validateDataset = copy.deepcopy(self)
validDataset = validateDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False)
if not validDataset['BasicTargetedDataset']:
raise ValueError('Import Error: targetedData does not satisfy the BasicTargetedDataset definition')
# find XLOQ_batchX, get batch ID, check agreement
col_LLOQ = self.featureMetadata.columns[self.featureMetadata.columns.to_series().str.contains('LLOQ_batch')].tolist()
col_LLOQ_batch = sorted([int(i.replace('LLOQ_batch', '')) for i in col_LLOQ])
col_ULOQ = self.featureMetadata.columns[self.featureMetadata.columns.to_series().str.contains('ULOQ_batch')].tolist()
col_ULOQ_batch = sorted([int(i.replace('ULOQ_batch', '')) for i in col_ULOQ])
batches = sorted((numpy.unique(self.sampleMetadata.loc[:, 'Batch'].values[~numpy.isnan(self.sampleMetadata.loc[:, 'Batch'].values)])).astype(int))
if (col_LLOQ_batch != batches) | (col_ULOQ_batch != batches):
raise ValueError('Import Error: targetedData does not have matching batches, LLOQ_batchX and ULOQ_batchX columns: ' + str(batches) + ', ' + str(col_LLOQ) + ', ' + str(col_ULOQ) + '. LOQs may have already been merged.')
# New LOQ
common_LLOQ = self.featureMetadata[col_LLOQ].max(axis=1, skipna=False)
common_ULOQ = self.featureMetadata[col_ULOQ].min(axis=1, skipna=False)
if ('LLOQ' in self.featureMetadata.columns) | ('ULOQ' in self.featureMetadata.columns):
warnings.warn('Previous featureMetadata[\'LLOQ\'] and [\'ULOQ\'] values will be overwritten.')
self.featureMetadata['LLOQ'] = common_LLOQ
self.featureMetadata['ULOQ'] = common_ULOQ
# Remove old batch LOQ columns
if not keepBatchLOQ:
self.featureMetadata.drop(col_LLOQ, inplace=True, axis=1)
self.featureMetadata.drop(col_ULOQ, inplace=True, axis=1)
# _applyLimitsOfQuantification
self._applyLimitsOfQuantification(onlyLLOQ=onlyLLOQ)
# run validation on the merged LOQ
validateMergeDataset = copy.deepcopy(self)
validMergedDataset = validateMergeDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False)
if not validMergedDataset['BasicTargetedDataset']:
raise ValueError('The merged LOQ dataset does not satisfy the Basic TargetedDataset definition')
# Log
self.Attributes['Log'].append([datetime.now(), 'LOQ merged (keepBatchLOQ = %s, onlyLLOQ = %s).' % (keepBatchLOQ, onlyLLOQ)])
if onlyLLOQ:
print('Limits of quantification merged to the highest LLOQ across batches')
else:
print('Limits of quantification merged to the highest LLOQ and lowest ULOQ across batches')
def exportDataset(self, destinationPath='.', saveFormat='CSV', withExclusions=True, escapeDelimiters=False, filterMetadata=True):
"""
Calls :py:meth:`~Dataset.exportDataset` and raises a warning if normalisation is employed as :py:class:`TargetedDataset` :py:attr:`intensityData` can be left-censored. Intensity values are corrected for each sample's dilution factor before export.
"""
# Correct the intensityData for the sample dilution prescribed by the method before export;
# this may eventually be handled more elegantly through the intensityData getter.
# Export dataset...
tmpData = copy.deepcopy(self)
tmpData._intensityData = tmpData._intensityData * (100/tmpData.sampleMetadata['Dilution']).values[:, numpy.newaxis]
super(TargetedDataset, tmpData).exportDataset(destinationPath=destinationPath, saveFormat=saveFormat, withExclusions=withExclusions, escapeDelimiters=escapeDelimiters, filterMetadata=filterMetadata)
def _exportCSV(self, destinationPath, escapeDelimiters=False):
"""
Replace `-numpy.inf` by `<LLOQ` and `numpy.inf` by `>ULOQ`
Export the dataset to the directory *destinationPath* as a set of three CSV files:
*destinationPath*_intensityData.csv
*destinationPath*_sampleMetadata.csv
*destinationPath*_featureMetadata.csv
:param str destinationPath: Path to a directory in which the output will be saved
:param bool escapeDelimiters: Remove characters commonly used as delimiters in csv files from metadata
:raises IOError: If writing one of the files fails
"""
sampleMetadata = self.sampleMetadata.copy(deep=True)
featureMetadata = self.featureMetadata.copy(deep=True)
intensityData = copy.deepcopy(self._intensityData)
intensityData = pandas.DataFrame(intensityData)
intensityData.replace(to_replace=-numpy.inf, value='<LLOQ', inplace=True)
intensityData.replace(to_replace=numpy.inf, value='>ULOQ', inplace=True)
if escapeDelimiters:
# Remove any commas from metadata/feature tables - for subsequent import of resulting csv files to other software packages
for column in sampleMetadata.columns:
try:
if type(sampleMetadata[column][0]) is not datetime:
sampleMetadata[column] = sampleMetadata[column].str.replace(',', ';')
except:
pass
for column in featureMetadata.columns:
try:
if type(featureMetadata[column][0]) is not datetime:
featureMetadata[column] = featureMetadata[column].str.replace(',', ';')
except:
pass
# Export sample metadata
sampleMetadata.to_csv(destinationPath + '_sampleMetadata.csv', encoding='utf-8', date_format=self._timestampFormat)
# Export feature metadata
featureMetadata.to_csv(destinationPath + '_featureMetadata.csv', encoding='utf-8')
# Export intensity data
intensityData.to_csv(os.path.join(destinationPath + '_intensityData.csv'), encoding='utf-8', date_format=self._timestampFormat, header=False, index=False)
def _exportUnifiedCSV(self, destinationPath, escapeDelimiters=False):
"""
Replace `-numpy.inf` by `<LLOQ` and `numpy.inf` by `>ULOQ`
Export the dataset to the directory *destinationPath* as a combined CSV file containing intensity data, and feature and sample metadata
*destinationPath*_combinedData.csv
:param str destinationPath: Path to a directory in which the output will be saved
:param bool escapeDelimiters: Remove characters commonly used as delimiters in csv files from metadata
:raises IOError: If writing one of the files fails
"""
sampleMetadata = self.sampleMetadata.copy(deep=True)
featureMetadata = self.featureMetadata.copy(deep=True)
intensityData = copy.deepcopy(self._intensityData)
intensityData = pandas.DataFrame(intensityData)
intensityData.replace(to_replace=-numpy.inf, value='<LLOQ', inplace=True)
intensityData.replace(to_replace=numpy.inf, value='>ULOQ', inplace=True)
if escapeDelimiters:
# Remove any commas from metadata/feature tables - for subsequent import of resulting csv files to other software packages
for column in sampleMetadata.columns:
try:
if type(sampleMetadata[column][0]) is not datetime:
sampleMetadata[column] = sampleMetadata[column].str.replace(',', ';')
except:
pass
for column in featureMetadata.columns:
try:
if type(featureMetadata[column][0]) is not datetime:
featureMetadata[column] = featureMetadata[column].str.replace(',', ';')
except:
pass
# Export combined data in single file
tmpXCombined = pandas.concat([featureMetadata.transpose(), intensityData], axis=0, sort=False)
with warnings.catch_warnings():
# Seems no way to avoid pandas complaining here (v0.18.1)
warnings.simplefilter("ignore")
tmpCombined = pandas.concat([sampleMetadata, tmpXCombined], axis=1, sort=False)
# reorder rows to put metadata first
tmpCombined = tmpCombined.reindex(tmpXCombined.index, axis=0)
# Save
tmpCombined.to_csv(destinationPath + '_combinedData.csv', encoding='utf-8', date_format=self._timestampFormat)
def validateObject(self, verbose=True, raiseError=False, raiseWarning=True):
"""
Checks that all the attributes specified in the class definition are present and of the required class and/or values.
Returns four booleans of increasing strictness: is the object a *Dataset*, a *basic TargetedDataset*, does it *have the parameters for QC*, and does it *have the sample metadata*.
To employ all class methods, the most inclusive check (*has the sample metadata*) must pass:
* *'Basic TargetedDataset'* checks :py:class:`~TargetedDataset` types and uniqueness as well as additional attributes.
* *'has parameters for QC'* is *'Basic TargetedDataset'* + sampleMetadata[['SampleType', 'AssayRole', 'Dilution', 'Run Order', 'Batch', 'Correction Batch', 'Sample Base Name']]
* *'has sample metadata'* is *'has parameters for QC'* + sampleMetadata[['Sample ID', 'Subject ID', 'Matrix']]
:py:attr:`~calibration['calibIntensityData']` must be initialised even if no samples are present
:py:attr:`~calibration['calibSampleMetadata']` must be initialised even if no samples are present, use: ``pandas.DataFrame(None, columns=self.sampleMetadata.columns.values.tolist())``
:py:attr:`~calibration['calibFeatureMetadata']` must be initialised even if no samples are present, use a copy of ``self.featureMetadata``
:py:attr:`~calibration['calibExpectedConcentration']` must be initialised even if no samples are present, use: ``pandas.DataFrame(None, columns=self.expectedConcentration.columns.values.tolist())``
Calibration features must be identical to the usual features. Number of calibration samples and features must match across the 4 calibration tables
If *'sampleMetadataExcluded'*, *'intensityDataExcluded'*, *'featureMetadataExcluded'*, *'expectedConcentrationExcluded'* or *'excludedFlag'* exist, the existence and number of exclusions (based on *'sampleMetadataExcluded'*) is checked
Column types in a pandas.DataFrame are established from the first sample (for columns that are not int/float)
featureMetadata is searched for column names containing *'LLOQ'* & *'ULOQ'* to allow for *'LLOQ_batch...'* after :py:meth:`~TargetedDataset.__add__`; the first matching column is then checked for dtype
If datasets are merged, calibration is a list of dict, and number of features is only kept constant inside each dict
Does not check for uniqueness in :py:attr:`~sampleMetadata['Sample File Name']`
Does not check columns inside :py:attr:`~calibration['calibSampleMetadata']`
Does not check columns inside :py:attr:`~calibration['calibFeatureMetadata']`
Does not currently check for :py:attr:`~Attributes['Feature Name']`
:param verbose: if True the result of each check is printed (default True)
:type verbose: bool
:param raiseError: if True an error is raised when a check fails and the validation is interrupted (default False)
:type raiseError: bool
:param raiseWarning: if True a warning is raised when a check fails
:type raiseWarning: bool
:return: A dictionary of four booleans, each True if the object passes the corresponding test: 'Dataset' conforms to :py:class:`Dataset`, 'BasicTargetedDataset' conforms to :py:class:`Dataset` + basic :py:class:`TargetedDataset`, 'QC' is BasicTargetedDataset + the object has QC parameters, 'sampleMetadata' is QC + the object has sample metadata information
:rtype: dict
:raises TypeError: if the Object class is wrong
:raises AttributeError: if self.Attributes['methodName'] does not exist
:raises TypeError: if self.Attributes['methodName'] is not a str
:raises AttributeError: if self.Attributes['externalID'] does not exist
:raises TypeError: if self.Attributes['externalID'] is not a list
:raises TypeError: if self.VariableType is not an enum 'VariableType'
:raises AttributeError: if self.fileName does not exist
:raises TypeError: if self.fileName is not a str or list
:raises AttributeError: if self.filePath does not exist
:raises TypeError: if self.filePath is not a str or list
:raises ValueError: if self.sampleMetadata does not have the same number of samples as self._intensityData
:raises TypeError: if self.sampleMetadata['Sample File Name'] is not str
:raises TypeError: if self.sampleMetadata['AssayRole'] is not an enum 'AssayRole'
:raises TypeError: if self.sampleMetadata['SampleType'] is not an enum 'SampleType'
:raises TypeError: if self.sampleMetadata['Dilution'] is not an int or float
:raises TypeError: if self.sampleMetadata['Batch'] is not an int or float
:raises TypeError: if self.sampleMetadata['Correction Batch'] is not an int or float
:raises TypeError: if self.sampleMetadata['Run Order'] is not an int
:raises TypeError: if self.sampleMetadata['Acquired Time'] is not a datetime
:raises TypeError: if self.sampleMetadata['Sample Base Name'] is not str
:raises LookupError: if self.sampleMetadata does not have a Subject ID column
:raises TypeError: if self.sampleMetadata['Subject ID'] is not a str
:raises TypeError: if self.sampleMetadata['Sample ID'] is not a str
:raises ValueError: if self.featureMetadata does not have the same number of features as self._intensityData
:raises TypeError: if self.featureMetadata['Feature Name'] is not a str
:raises ValueError: if self.featureMetadata['Feature Name'] is not unique
:raises LookupError: if self.featureMetadata does not have a calibrationMethod column
:raises TypeError: if self.featureMetadata['calibrationMethod'] is not an enum 'CalibrationMethod'
:raises LookupError: if self.featureMetadata does not have a quantificationType column
:raises TypeError: if self.featureMetadata['quantificationType'] is not an enum 'QuantificationType'
:raises LookupError: if self.featureMetadata does not have a Unit column
:raises TypeError: if self.featureMetadata['Unit'] is not a str
:raises LookupError: if self.featureMetadata does not have a LLOQ or similar column
:raises TypeError: if self.featureMetadata['LLOQ'] or similar is not an int or float
:raises LookupError: if self.featureMetadata does not have a ULOQ or similar column
:raises TypeError: if self.featureMetadata['ULOQ'] or similar is not an int or float
:raises LookupError: if self.featureMetadata does not have the 'externalID' as columns
:raises AttributeError: if self.expectedConcentration does not exist
:raises TypeError: if self.expectedConcentration is not a pandas.DataFrame
:raises ValueError: if self.expectedConcentration does not have the same number of samples as self._intensityData
:raises ValueError: if self.expectedConcentration does not have the same number of features as self._intensityData
:raises ValueError: if self.expectedConcentration column names do not match self.featureMetadata['Feature Name']
:raises ValueError: if self.sampleMask is not initialised
:raises ValueError: if self.sampleMask does not have the same number of samples as self._intensityData
:raises ValueError: if self.featureMask has not been initialised
:raises ValueError: if self.featureMask does not have the same number of features as self._intensityData
:raises AttributeError: if self.calibration does not exist
:raises TypeError: if self.calibration is not a dict
:raises AttributeError: if self.calibration['calibIntensityData'] does not exist
:raises TypeError: if self.calibration['calibIntensityData'] is not a numpy.ndarray
:raises ValueError: if self.calibration['calibIntensityData'] does not have the same number of features as self._intensityData
:raises AttributeError: if self.calibration['calibSampleMetadata'] does not exist
:raises TypeError: if self.calibration['calibSampleMetadata'] is not a pandas.DataFrame
:raises ValueError: if self.calibration['calibSampleMetadata'] does not have the same number of samples as self.calibration['calibIntensityData']
:raises AttributeError: if self.calibration['calibFeatureMetadata'] does not exist
:raises TypeError: if self.calibration['calibFeatureMetadata'] is not a pandas.DataFrame
:raises LookupError: if self.calibration['calibFeatureMetadata'] does not have a ['Feature Name'] column
:raises ValueError: if self.calibration['calibFeatureMetadata'] does not have the same number of features as self._intensityData
:raises AttributeError: if self.calibration['calibExpectedConcentration'] does not exist
:raises TypeError: if self.calibration['calibExpectedConcentration'] is not a pandas.DataFrame
:raises ValueError: if self.calibration['calibExpectedConcentration'] does not have the same number of samples as self.calibration['calibIntensityData']
:raises ValueError: if self.calibration['calibExpectedConcentration'] does not have the same number of features as self.calibration['calibIntensityData']
:raises ValueError: if self.calibration['calibExpectedConcentration'] column names do not match self.featureMetadata['Feature Name']
"""
def conditionTest(successCond, successMsg, failureMsg, allFailures, verb, raiseErr, raiseWarn, exception):
if not successCond:
allFailures.append(failureMsg)
msg = failureMsg
if raiseWarn:
warnings.warn(msg)
if raiseErr:
raise exception
else:
msg = successMsg
if verb:
print(msg)
return allFailures
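# conditionTest is used as an accumulator: every check below passes the current failure list in
# and assigns the returned list back, extended with the failure message when the condition fails.
# A sketch with hypothetical values:
#   failureListBasic = conditionTest(x > 0, 'Check x positive:\tOK', 'Check x positive:\tFailure',
#                                    failureListBasic, verbose, raiseError, raiseWarning,
#                                    exception=ValueError('Check x positive:\tFailure'))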
## init
failureListBasic = []
failureListQC = []
failureListMeta = []
# reference number of samples / features, from _intensityData
refNumSamples = None
refNumFeatures = None
# reference ['Feature Name'], from featureMetadata
refFeatureName = None
# reference number of calibration samples, from calibration['calibIntensityData']
refNumCalibSamples = None
# reference number of exclusions in list, from sampleMetadataExcluded
refNumExcluded = None
# First check it conforms to Dataset
if super().validateObject(verbose=verbose, raiseError=raiseError, raiseWarning=raiseWarning):
## Check object class
condition = isinstance(self, TargetedDataset)
success = 'Check Object class:\tOK'
failure = 'Check Object class:\tFailure, not TargetedDataset, but ' + str(type(self))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
## Attributes
## methodName
# exist
condition = 'methodName' in self.Attributes
success = 'Check self.Attributes[\'methodName\'] exists:\tOK'
failure = 'Check self.Attributes[\'methodName\'] exists:\tFailure, no attribute \'self.Attributes[\'methodName\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a str
condition = isinstance(self.Attributes['methodName'], str)
success = 'Check self.Attributes[\'methodName\'] is a str:\tOK'
failure = 'Check self.Attributes[\'methodName\'] is a str:\tFailure, \'self.Attributes[\'methodName\']\' is ' + str(type(self.Attributes['methodName']))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.Attributes['methodName']
## externalID
# exist
condition = 'externalID' in self.Attributes
success = 'Check self.Attributes[\'externalID\'] exists:\tOK'
failure = 'Check self.Attributes[\'externalID\'] exists:\tFailure, no attribute \'self.Attributes[\'externalID\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.Attributes['externalID'], list)
success = 'Check self.Attributes[\'externalID\'] is a list:\tOK'
failure = 'Check self.Attributes[\'externalID\'] is a list:\tFailure, \'self.Attributes[\'externalID\']\' is ' + str(type(self.Attributes['externalID']))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.Attributes['externalID']
## self.VariableType
# is a enum VariableType
condition = isinstance(self.VariableType, VariableType)
success = 'Check self.VariableType is an enum \'VariableType\':\tOK'
failure = 'Check self.VariableType is an enum \'VariableType\':\tFailure, \'self.VariableType\' is ' + str(type(self.VariableType))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end Variabletype
## self.fileName
# exist
condition = hasattr(self, 'fileName')
success = 'Check self.fileName exists:\tOK'
failure = 'Check self.fileName exists:\tFailure, no attribute \'self.fileName\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a str
condition = isinstance(self.fileName, (str, list))
success = 'Check self.fileName is a str or list:\tOK'
failure = 'Check self.fileName is a str or list:\tFailure, \'self.fileName\' is ' + str(type(self.fileName))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if isinstance(self.fileName, list):
for i in range(len(self.fileName)):
condition = isinstance(self.fileName[i], (str))
success = 'Check self.fileName[' + str(i) + '] is str:\tOK'
failure = 'Check self.fileName[' + str(i) + '] is str:\tFailure, \'self.fileName[' + str(i) + ']\' is ' + str(type(self.fileName[i]))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.fileName list
# end self.fileName
## self.filePath
# exist
condition = hasattr(self, 'filePath')
success = 'Check self.filePath exists:\tOK'
failure = 'Check self.filePath exists:\tFailure, no attribute \'self.filePath\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a str
condition = isinstance(self.filePath, (str, list))
success = 'Check self.filePath is a str or list:\tOK'
failure = 'Check self.filePath is a str or list:\tFailure, \'self.filePath\' is ' + str(type(self.filePath))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if isinstance(self.filePath, list):
for i in range(len(self.filePath)):
condition = isinstance(self.filePath[i], (str))
success = 'Check self.filePath[' + str(i) + '] is str:\tOK'
failure = 'Check self.filePath[' + str(i) + '] is str:\tFailure, \'self.filePath[' + str(i) + ']\' is ' + str(type(self.filePath[i]))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.filePath list
# end self.filePath
## self._intensityData
# Use _intensityData as size reference for all future tables
if (self._intensityData.all() != numpy.array(None).all()):
refNumSamples = self._intensityData.shape[0]
refNumFeatures = self._intensityData.shape[1]
if verbose:
print('---- self._intensityData used as size reference ----')
print('\t' + str(refNumSamples) + ' samples, ' + str(refNumFeatures) + ' features')
# end self._intensityData
## self.sampleMetadata
# number of samples
condition = (self.sampleMetadata.shape[0] == refNumSamples)
success = 'Check self.sampleMetadata number of samples (rows):\tOK'
failure = 'Check self.sampleMetadata number of samples (rows):\tFailure, \'self.sampleMetadata\' has ' + str(self.sampleMetadata.shape[0]) + ' samples, ' + str(refNumSamples) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
if condition:
# sampleMetadata['Sample File Name'] is str
condition = isinstance(self.sampleMetadata['Sample File Name'][0], str)
success = 'Check self.sampleMetadata[\'Sample File Name\'] is str:\tOK'
failure = 'Check self.sampleMetadata[\'Sample File Name\'] is str:\tFailure, \'self.sampleMetadata[\'Sample File Name\']\' is ' + str(type(self.sampleMetadata['Sample File Name'][0]))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
## Fields required for QC
# sampleMetadata['AssayRole'] is enum AssayRole
condition = isinstance(self.sampleMetadata['AssayRole'][0], AssayRole)
success = 'Check self.sampleMetadata[\'AssayRole\'] is an enum \'AssayRole\':\tOK'
failure = 'Check self.sampleMetadata[\'AssayRole\'] is an enum \'AssayRole\':\tFailure, \'self.sampleMetadata[\'AssayRole\']\' is ' + str(type(self.sampleMetadata['AssayRole'][0]))
failureListQC = conditionTest(condition, success, failure, failureListQC, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# sampleMetadata['SampleType'] is enum SampleType
condition = isinstance(self.sampleMetadata['SampleType'][0], SampleType)
success = 'Check self.sampleMetadata[\'SampleType\'] is an enum \'SampleType\':\tOK'
failure = 'Check self.sampleMetadata[\'SampleType\'] is an enum \'SampleType\':\tFailure, \'self.sampleMetadata[\'SampleType\']\' is ' + str(type(self.sampleMetadata['SampleType'][0]))
failureListQC = conditionTest(condition, success, failure, failureListQC, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# sampleMetadata['Dilution'] is an int or float
condition = isinstance(self.sampleMetadata['Dilution'][0], (int, float, numpy.integer, numpy.floating))
success = 'Check self.sampleMetadata[\'Dilution\'] is int or float:\tOK'
failure = 'Check self.sampleMetadata[\'Dilution\'] is int or float:\tFailure, \'self.sampleMetadata[\'Dilution\']\' is ' + str(type(self.sampleMetadata['Dilution'][0]))
failureListQC = conditionTest(condition, success, failure, failureListQC, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# sampleMetadata['Batch'] is an int or float
condition = isinstance(self.sampleMetadata['Batch'][0], (int, float, numpy.integer, numpy.floating))
success = 'Check self.sampleMetadata[\'Batch\'] is int or float:\tOK'
failure = 'Check self.sampleMetadata[\'Batch\'] is int or float:\tFailure, \'self.sampleMetadata[\'Batch\']\' is ' + str(type(self.sampleMetadata['Batch'][0]))
failureListQC = conditionTest(condition, success, failure, failureListQC, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# sampleMetadata['Correction Batch'] is an int or float
condition = isinstance(self.sampleMetadata['Correction Batch'][0], (int, float, numpy.integer, numpy.floating))
success = 'Check self.sampleMetadata[\'Correction Batch\'] is int or float:\tOK'
failure = 'Check self.sampleMetadata[\'Correction Batch\'] is int or float:\tFailure, \'self.sampleMetadata[\'Correction Batch\']\' is ' + str(type(self.sampleMetadata['Correction Batch'][0]))
failureListQC = conditionTest(condition, success, failure, failureListQC, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# sampleMetadata['Run Order'] is an int
condition = isinstance(self.sampleMetadata['Run Order'][0], (int, numpy.integer))
success = 'Check self.sampleMetadata[\'Run Order\'] is int:\tOK'
failure = 'Check self.sampleMetadata[\'Run Order\'] is int:\tFailure, \'self.sampleMetadata[\'Run Order\']\' is ' + str(type(self.sampleMetadata['Run Order'][0]))
failureListQC = conditionTest(condition, success, failure, failureListQC, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# sampleMetadata['Acquired Time'] is datetime.datetime
condition = isinstance(self.sampleMetadata['Acquired Time'][0], datetime)
success = 'Check self.sampleMetadata[\'Acquired Time\'] is datetime:\tOK'
failure = 'Check self.sampleMetadata[\'Acquired Time\'] is datetime:\tFailure, \'self.sampleMetadata[\'Acquired Time\']\' is ' + str(type(self.sampleMetadata['Acquired Time'][0]))
failureListQC = conditionTest(condition, success, failure, failureListQC, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# sampleMetadata['Sample Base Name'] is str
condition = isinstance(self.sampleMetadata['Sample Base Name'][0], str)
success = 'Check self.sampleMetadata[\'Sample Base Name\'] is str:\tOK'
failure = 'Check self.sampleMetadata[\'Sample Base Name\'] is str:\tFailure, \'self.sampleMetadata[\'Sample Base Name\']\' is ' + str(type(self.sampleMetadata['Sample Base Name'][0]))
failureListQC = conditionTest(condition, success, failure, failureListQC, verbose, raiseError, raiseWarning, exception=TypeError(failure))
## Sample metadata fields
# ['Subject ID']
condition = ('Subject ID' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'Subject ID\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'Subject ID\'] exists:\tFailure, \'self.sampleMetadata\' lacks a \'Subject ID\' column'
failureListMeta = conditionTest(condition, success, failure, failureListMeta, verbose, raiseError, raiseWarning, exception=LookupError(failure))
if condition:
# sampleMetadata['Subject ID'] is str
condition = (self.sampleMetadata['Subject ID'].dtype == numpy.dtype('O'))
success = 'Check self.sampleMetadata[\'Subject ID\'] is str:\tOK'
failure = 'Check self.sampleMetadata[\'Subject ID\'] is str:\tFailure, \'self.sampleMetadata[\'Subject ID\']\' is ' + str(type(self.sampleMetadata['Subject ID'][0]))
failureListMeta = conditionTest(condition, success, failure, failureListMeta, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.sampleMetadata['Subject ID']
# sampleMetadata['Sample ID'] is str
condition = (self.sampleMetadata['Sample ID'].dtype == numpy.dtype('O'))
success = 'Check self.sampleMetadata[\'Sample ID\'] is str:\tOK'
failure = 'Check self.sampleMetadata[\'Sample ID\'] is str:\tFailure, \'self.sampleMetadata[\'Sample ID\']\' is ' + str(type(self.sampleMetadata['Sample ID'][0]))
failureListMeta = conditionTest(condition, success, failure, failureListMeta, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.sampleMetadata number of samples
# end self.sampleMetadata
## self.featureMetadata
# exist
# number of features
condition = (self.featureMetadata.shape[0] == refNumFeatures)
success = 'Check self.featureMetadata number of features (rows):\tOK'
failure = 'Check self.featureMetadata number of features (rows):\tFailure, \'self.featureMetadata\' has ' + str(self.featureMetadata.shape[0]) + ' features, ' + str(refNumFeatures) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
if condition & (self.featureMetadata.shape[0] != 0):
# No point checking columns if the number of columns is wrong or no features
# featureMetadata['Feature Name'] is str
condition = isinstance(self.featureMetadata['Feature Name'][0], str)
success = 'Check self.featureMetadata[\'Feature Name\'] is str:\tOK'
failure = 'Check self.featureMetadata[\'Feature Name\'] is str:\tFailure, \'self.featureMetadata[\'Feature Name\']\' is ' + str(type(self.featureMetadata['Feature Name'][0]))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
# featureMetadata['Feature Name'] are unique
u_ids, u_counts = numpy.unique(self.featureMetadata['Feature Name'], return_counts=True)
condition = all(u_counts == 1)
success = 'Check self.featureMetadata[\'Feature Name\'] are unique:\tOK'
failure = 'Check self.featureMetadata[\'Feature Name\'] are unique:\tFailure, the following \'self.featureMetadata[\'Feature Name\']\' are present more than once ' + str(u_ids[u_counts > 1].tolist())
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
# Use featureMetadata['Feature Name'] as reference for future tables
refFeatureName = self.featureMetadata['Feature Name'].values.tolist()
if verbose:
print('---- self.featureMetadata[\'Feature Name\'] used as Feature Name reference ----')
# end self.featureMetadata['Feature Name']
# ['calibrationMethod']
condition = ('calibrationMethod' in self.featureMetadata.columns)
success = 'Check self.featureMetadata[\'calibrationMethod\'] exists:\tOK'
failure = 'Check self.featureMetadata[\'calibrationMethod\'] exists:\tFailure, \'self.featureMetadata\' lacks a \'calibrationMethod\' column'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=LookupError(failure))
if condition:
# featureMetadata['calibrationMethod'] is an enum 'CalibrationMethod'
condition = isinstance(self.featureMetadata['calibrationMethod'][0], CalibrationMethod)
success = 'Check self.featureMetadata[\'calibrationMethod\'] is an enum \'CalibrationMethod\':\tOK'
failure = 'Check self.featureMetadata[\'calibrationMethod\'] is an enum \'CalibrationMethod\':\tFailure, \'self.featureMetadata[\'calibrationMethod\']\' is ' + str(type(self.featureMetadata['calibrationMethod'][0]))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.featureMetadata['calibrationMethod']
# ['quantificationType']
condition = ('quantificationType' in self.featureMetadata.columns)
success = 'Check self.featureMetadata[\'quantificationType\'] exists:\tOK'
failure = 'Check self.featureMetadata[\'quantificationType\'] exists:\tFailure, \'self.featureMetadata\' lacks a \'quantificationType\' column'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=LookupError(failure))
if condition:
# featureMetadata['quantificationType'] is an enum 'QuantificationType'
condition = isinstance(self.featureMetadata['quantificationType'][0], QuantificationType)
success = 'Check self.featureMetadata[\'quantificationType\'] is an enum \'QuantificationType\':\tOK'
failure = 'Check self.featureMetadata[\'quantificationType\'] is an enum \'QuantificationType\':\tFailure, \'self.featureMetadata[\'quantificationType\']\' is ' + str(type(self.featureMetadata['quantificationType'][0]))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.featureMetadata['quantificationType']
# ['Unit']
condition = ('Unit' in self.featureMetadata.columns)
success = 'Check self.featureMetadata[\'Unit\'] exists:\tOK'
failure = 'Check self.featureMetadata[\'Unit\'] exists:\tFailure, \'self.featureMetadata\' lacks a \'Unit\' column'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=LookupError(failure))
if condition:
# featureMetadata['Unit'] is a str
condition = isinstance(self.featureMetadata['Unit'][0], str)
success = 'Check self.featureMetadata[\'Unit\'] is a str:\tOK'
failure = 'Check self.featureMetadata[\'Unit\'] is a str:\tFailure, \'self.featureMetadata[\'Unit\']\' is ' + str(type(self.featureMetadata['Unit'][0]))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.featureMetadata['Unit']
# ['LLOQ']
tmpLLOQMatch = self.featureMetadata.columns.to_series().str.contains('LLOQ')
condition = (sum(tmpLLOQMatch) > 0)
success = 'Check self.featureMetadata[\'LLOQ\'] or similar exists:\tOK'
failure = 'Check self.featureMetadata[\'LLOQ\'] or similar exists:\tFailure, \'self.featureMetadata\' lacks a \'LLOQ\' or \'LLOQ_batch\' column'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=LookupError(failure))
if condition:
# featureMetadata['LLOQ'] is a float, try on first found
condition = ((self.featureMetadata.loc[:, tmpLLOQMatch].iloc[:, 0].dtype == numpy.dtype(float)) | (self.featureMetadata.loc[:, tmpLLOQMatch].iloc[:, 0].dtype == numpy.dtype(numpy.int32)) | (self.featureMetadata.loc[:, tmpLLOQMatch].iloc[:, 0].dtype == numpy.dtype(numpy.int64)))
success = 'Check self.featureMetadata[\'' + str(self.featureMetadata.columns[tmpLLOQMatch][0]) + '\'] is int or float:\tOK'
failure = 'Check self.featureMetadata[\'' + str(self.featureMetadata.columns[tmpLLOQMatch][0]) + '\'] is int or float:\tFailure, \'self.featureMetadata[\'' + str(self.featureMetadata.columns[tmpLLOQMatch][0]) + '\']\' is ' + str(self.featureMetadata.loc[:, tmpLLOQMatch].iloc[:, 0].dtype)
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.featureMetadata['LLOQ']
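# Note (illustrative): after datasets are merged with __add__, batch-specific columns such as
# 'LLOQ_batch1', 'LLOQ_batch2' replace the single 'LLOQ' column; the substring match above accepts
# them, and the dtype check is only applied to the first matching column.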
# ['ULOQ']
tmpULOQMatch = self.featureMetadata.columns.to_series().str.contains('ULOQ')
condition = (sum(tmpULOQMatch) > 0)
success = 'Check self.featureMetadata[\'ULOQ\'] or similar exists:\tOK'
failure = 'Check self.featureMetadata[\'ULOQ\'] or similar exists:\tFailure, \'self.featureMetadata\' lacks a \'ULOQ\' or \'ULOQ_batch\' column'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=LookupError(failure))
if condition:
# featureMetadata['ULOQ'] is a float, try on first found
condition = ((self.featureMetadata.loc[:, tmpULOQMatch].iloc[:, 0].dtype == numpy.dtype(float)) | (self.featureMetadata.loc[:, tmpULOQMatch].iloc[:, 0].dtype == numpy.dtype(numpy.int32)) | (self.featureMetadata.loc[:, tmpULOQMatch].iloc[:, 0].dtype == numpy.dtype(numpy.int64)))
success = 'Check self.featureMetadata[\'' + str(self.featureMetadata.columns[tmpULOQMatch][0]) + '\'] is int or float:\tOK'
failure = 'Check self.featureMetadata[\'' + str(self.featureMetadata.columns[tmpULOQMatch][0]) + '\'] is int or float:\tFailure, \'self.featureMetadata[\'' + str(self.featureMetadata.columns[tmpULOQMatch][0]) + '\']\' is ' + str(self.featureMetadata.loc[:, tmpULOQMatch].iloc[:, 0].dtype)
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
# end self.featureMetadata['ULOQ']
# 'externalID' in featureMetadata columns (need externalID to exist)
if 'externalID' in self.Attributes:
if isinstance(self.Attributes['externalID'], list):
condition = set(self.Attributes['externalID']).issubset(self.featureMetadata.columns)
success = 'Check self.featureMetadata does have the \'externalID\' as columns:\tOK'
failure = 'Check self.featureMetadata does have the \'externalID\' as columns:\tFailure, \'self.featureMetadata\' lacks the \'externalID\' columns'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=LookupError(failure))
# end 'externalID' columns
# end self.featureMetadata number of features
# end self.featureMetadata
## self.expectedConcentration
# exist
condition = hasattr(self, 'expectedConcentration')
success = 'Check self.expectedConcentration exists:\tOK'
failure = 'Check self.expectedConcentration exists:\tFailure, no attribute \'self.expectedConcentration\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a pandas.DataFrame
condition = isinstance(self.expectedConcentration, pandas.DataFrame)
success = 'Check self.expectedConcentration is a pandas.DataFrame:\tOK'
failure = 'Check self.expectedConcentration is a pandas.DataFrame:\tFailure, \'self.expectedConcentration\' is ' + str(type(self.expectedConcentration))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
# number of samples
condition = (self.expectedConcentration.shape[0] == refNumSamples)
success = 'Check self.expectedConcentration number of samples (rows):\tOK'
failure = 'Check self.expectedConcentration number of samples (rows):\tFailure, \'self.expectedConcentration\' has ' + str(self.expectedConcentration.shape[0]) + ' samples, ' + str(refNumSamples) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
# number of features
condition = (self.expectedConcentration.shape[1] == refNumFeatures)
success = 'Check self.expectedConcentration number of features (columns):\tOK'
failure = 'Check self.expectedConcentration number of features (columns):\tFailure, \'self.expectedConcentration\' has ' + str(self.expectedConcentration.shape[1]) + ' features, ' + str(refNumFeatures) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
if condition & (refNumFeatures != 0):
# expectedConcentration column names match ['Feature Name']
tmpDiff = pandas.DataFrame({'FeatName': refFeatureName, 'ColName': self.expectedConcentration.columns.values.tolist()})
condition = (self.expectedConcentration.columns.values.tolist() == refFeatureName)
success = 'Check self.expectedConcentration column name match self.featureMetadata[\'Feature Name\']:\tOK'
failure = 'Check self.expectedConcentration column name match self.featureMetadata[\'Feature Name\']:\tFailure, the following \'self.featureMetadata[\'Feature Name\']\' and \'self.expectedConcentration.columns\' differ ' + str(tmpDiff.loc[(tmpDiff['FeatName'] != tmpDiff['ColName']), ['FeatName', 'ColName']].values.tolist())
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
# end self.expectedConcentration number of features
# end self.expectedConcentration is a pandas.DataFrame
# end self.expectedConcentration
## self.sampleMask
# is initialised
condition = (self.sampleMask.shape != ())
success = 'Check self.sampleMask is initialised:\tOK'
failure = 'Check self.sampleMask is initialised:\tFailure, \'self.sampleMask\' is not initialised'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
if condition:
# number of samples
condition = (self.sampleMask.shape == (refNumSamples,))
success = 'Check self.sampleMask number of samples:\tOK'
failure = 'Check self.sampleMask number of samples:\tFailure, \'self.sampleMask\' has ' + str(self.sampleMask.shape[0]) + ' samples, ' + str(refNumSamples) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
## end self.sampleMask
## self.featureMask
# is initialised
condition = (self.featureMask.shape != ())
success = 'Check self.featureMask is initialised:\tOK'
failure = 'Check self.featureMask is initialised:\tFailure, \'self.featureMask\' is not initialised'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
if condition:
# number of features
condition = (self.featureMask.shape == (refNumFeatures,))
success = 'Check self.featureMask number of features:\tOK'
failure = 'Check self.featureMask number of features:\tFailure, \'self.featureMask\' has ' + str(self.featureMask.shape[0]) + ' features, ' + str(refNumFeatures) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
## end self.featureMask
## self.calibration
# exist
condition = hasattr(self, 'calibration')
success = 'Check self.calibration exists:\tOK'
failure = 'Check self.calibration exists:\tFailure, no attribute \'self.calibration\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a dict or a list
condition = isinstance(self.calibration, (dict, list))
success = 'Check self.calibration is a dict or list:\tOK'
failure = 'Check self.calibration is a dict or list:\tFailure, \'self.calibration\' is ' + str(type(self.calibration))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
# self.calibration is a list of dict
if isinstance(self.calibration, list):
# use reference inside each calibration
refCalibNumSamples = len(self.calibration) * [None]
refCalibNumFeatures = len(self.calibration) * [None]
refCalibFeatureName = len(self.calibration) * [None]
for i in range(len(self.calibration)):
# self.calibration[i] is a dict
condition = isinstance(self.calibration[i], dict)
success = 'Check self.calibration[' + str(i) + '] is a dict:\tOK'
failure = 'Check self.calibration[' + str(i) + '] is a dict:\tFailure, \'self.calibration[' + str(i) + ']\' is ' + str(type(self.calibration[i]))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
## calibIntensityData
# exist
condition = 'calibIntensityData' in self.calibration[i]
success = 'Check self.calibration[' + str(i) + '][\'calibIntensityData\'] exists:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibIntensityData\'] exists:\tFailure, no attribute \'self.calibration[' + str(i) + '][\'calibIntensityData\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a numpy.ndarray
condition = isinstance(self.calibration[i]['calibIntensityData'], numpy.ndarray)
success = 'Check self.calibration[' + str(i) + '][\'calibIntensityData\'] is a numpy.ndarray:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibIntensityData\'] is a numpy.ndarray:\tFailure, \'self.calibration[' + str(i) + '][\'calibIntensityData\']\' is ' + str(type(self.calibration[i]['calibIntensityData']))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
if (self.calibration[i]['calibIntensityData'].all() != numpy.array(None).all()):
# Use calibIntensityData as number of calib sample/feature reference
refCalibNumSamples[i] = self.calibration[i]['calibIntensityData'].shape[0]
refCalibNumFeatures[i] = self.calibration[i]['calibIntensityData'].shape[1]
if verbose:
print('---- self.calibration[' + str(i) + '][\'calibIntensityData\'] used as number of calibration samples/features reference ----')
print('\t' + str(refCalibNumSamples[i]) + ' samples, ' + str(refCalibNumFeatures[i]) + ' features')
# end calibIntensityData is a numpy.ndarray
# end calibIntensityData
## calibSampleMetadata
# exist
condition = 'calibSampleMetadata' in self.calibration[i]
success = 'Check self.calibration[' + str(i) + '][\'calibSampleMetadata\'] exists:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibSampleMetadata\'] exists:\tFailure, no attribute \'self.calibration[' + str(i) + '][\'calibSampleMetadata\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a pandas.DataFrame
condition = isinstance(self.calibration[i]['calibSampleMetadata'], pandas.DataFrame)
success = 'Check self.calibration[' + str(i) + '][\'calibSampleMetadata\'] is a pandas.DataFrame:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibSampleMetadata\'] is a pandas.DataFrame:\tFailure, \'self.calibration[' + str(i) + '][\'calibSampleMetadata\']\' is ' + str(type(self.calibration[i]['calibSampleMetadata']))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
# number of samples
condition = (self.calibration[i]['calibSampleMetadata'].shape[0] == refCalibNumSamples[i])
success = 'Check self.calibration[' + str(i) + '][\'calibSampleMetadata\'] number of samples:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibSampleMetadata\'] number of samples:\tFailure, \'self.calibration[' + str(i) + '][\'calibSampleMetadata\']\' has ' + str(self.calibration[i]['calibSampleMetadata'].shape[0]) + ' samples, ' + str(refCalibNumSamples[i]) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
# end calibSampleMetadata is a pandas.DataFrame
# end calibSampleMetadata
## calibFeatureMetadata
# exist
condition = 'calibFeatureMetadata' in self.calibration[i]
success = 'Check self.calibration[' + str(i) + '][\'calibFeatureMetadata\'] exists:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibFeatureMetadata\'] exists:\tFailure, no attribute \'self.calibration[' + str(i) + '][\'calibFeatureMetadata\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a pandas.DataFrame
condition = isinstance(self.calibration[i]['calibFeatureMetadata'], pandas.DataFrame)
success = 'Check self.calibration[' + str(i) + '][\'calibFeatureMetadata\'] is a pandas.DataFrame:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibFeatureMetadata\'] is a pandas.DataFrame:\tFailure, \'self.calibration[' + str(i) + '][\'calibFeatureMetadata\']\' is ' + str(type(self.calibration[i]['calibFeatureMetadata']))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
# number of features
condition = (self.calibration[i]['calibFeatureMetadata'].shape[0] == refCalibNumFeatures[i])
success = 'Check self.calibration[' + str(i) + '][\'calibFeatureMetadata\'] number of features:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibFeatureMetadata\'] number of features:\tFailure, \'self.calibration[' + str(i) + '][\'calibFeatureMetadata\']\' has ' + str(self.calibration[i]['calibFeatureMetadata'].shape[0]) + ' features, ' + str(refCalibNumFeatures[i]) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
if condition & (refCalibNumFeatures[i] != 0):
# Feature Name exist
condition = ('Feature Name' in self.calibration[i]['calibFeatureMetadata'].columns.tolist())
success = 'Check self.calibration[' + str(i) + '][\'calibFeatureMetadata\'][\'Feature Name\'] exist:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibFeatureMetadata\'][\'Feature Name\'] exist:\tFailure, no column \'self.calibration[' + str(i) + '][\'calibFeatureMetadata\'][\'Feature Name\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=LookupError(failure))
if condition:
# store the featureMetadata columns as reference
refCalibFeatureName[i] = self.calibration[i]['calibFeatureMetadata']['Feature Name'].values.tolist()
# end calibFeatureMetadata is a pandas.DataFrame
# end calibFeatureMetadata
## calibExpectedConcentration
# exist
condition = 'calibExpectedConcentration' in self.calibration[i]
success = 'Check self.calibration[' + str(i) + '][\'calibExpectedConcentration\'] exists:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibExpectedConcentration\'] exists:\tFailure, no attribute \'self.calibration[' + str(i) + '][\'calibExpectedConcentration\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a pandas.DataFrame
condition = isinstance(self.calibration[i]['calibExpectedConcentration'], pandas.DataFrame)
success = 'Check self.calibration[' + str(i) + '][\'calibExpectedConcentration\'] is a pandas.DataFrame:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibExpectedConcentration\'] is a pandas.DataFrame:\tFailure, \'self.calibration[' + str(i) + '][\'calibExpectedConcentration\']\' is ' + str(type(self.calibration[i]['calibExpectedConcentration']))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
# number of samples
condition = (self.calibration[i]['calibExpectedConcentration'].shape[0] == refCalibNumSamples[i])
success = 'Check self.calibration[' + str(i) + '][\'calibExpectedConcentration\'] number of samples:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibExpectedConcentration\'] number of samples:\tFailure, \'self.calibration[' + str(i) + '][\'calibExpectedConcentration\']\' has ' + str(self.calibration[i]['calibExpectedConcentration'].shape[0]) + ' samples, ' + str(refCalibNumSamples[i]) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
# number of features
condition = (self.calibration[i]['calibExpectedConcentration'].shape[1] == refCalibNumFeatures[i])
success = 'Check self.calibration[' + str(i) + '][\'calibExpectedConcentration\'] number of features:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibExpectedConcentration\'] number of features:\tFailure, \'self.calibration[' + str(i) + '][\'calibExpectedConcentration\']\' has ' + str(self.calibration[i]['calibExpectedConcentration'].shape[1]) + ' features, ' + str(refCalibNumFeatures[i]) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
if condition & (refCalibNumFeatures[i] != 0):
# calibExpectedConcentration column names match ['Feature Name']
tmpDiff = pandas.DataFrame({'FeatName': refCalibFeatureName[i], 'ColName': self.calibration[i]['calibExpectedConcentration'].columns.values.tolist()})
condition = (self.calibration[i]['calibExpectedConcentration'].columns.values.tolist() == refCalibFeatureName[i])
success = 'Check self.calibration[' + str(i) + '][\'calibExpectedConcentration\'] column name match self.calibration[' + str(i) + '][\'calibFeatureMetadata\'][\'Feature Name\']:\tOK'
failure = 'Check self.calibration[' + str(i) + '][\'calibExpectedConcentration\'] column name match self.calibration[' + str(i) + '][\'calibFeatureMetadata\'][\'Feature Name\']:\tFailure, the following \'self.calibration[' + str(i) + '][\'calibFeatureMetadata\'][\'Feature Name\']\' and \'self.calibration[' + str(i) + '][\'calibExpectedConcentration\'].columns\' differ ' + str(tmpDiff.loc[(tmpDiff['FeatName'] != tmpDiff['ColName']), ['FeatName','ColName']].values.tolist())
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
# end calibExpectedConcentration number of features
# end calibExpectedConcentration is a pandas.DataFrame
# end calibExpectedConcentration
# end self.calibration[i] is a dict
# end self.calibration list
else:
## self.calibration is a dict
## calibIntensityData
# exist
condition = 'calibIntensityData' in self.calibration
success = 'Check self.calibration[\'calibIntensityData\'] exists:\tOK'
failure = 'Check self.calibration[\'calibIntensityData\'] exists:\tFailure, no attribute \'self.calibration[\'calibIntensityData\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a numpy.ndarray
condition = isinstance(self.calibration['calibIntensityData'], numpy.ndarray)
success = 'Check self.calibration[\'calibIntensityData\'] is a numpy.ndarray:\tOK'
failure = 'Check self.calibration[\'calibIntensityData\'] is a numpy.ndarray:\tFailure, \'self.calibration[\'calibIntensityData\']\' is ' + str(type(self.calibration['calibIntensityData']))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
if (self.calibration['calibIntensityData'].all() != numpy.array(None).all()):
# number of features
condition = (self.calibration['calibIntensityData'].shape[1] == refNumFeatures)
success = 'Check self.calibration[\'calibIntensityData\'] number of features:\tOK'
failure = 'Check self.calibration[\'calibIntensityData\'] number of features:\tFailure, \'self.calibration[\'calibIntensityData\']\' has ' + str(self.calibration['calibIntensityData'].shape[1]) + ' features, ' + str(refNumFeatures) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
# Use calibIntensityData as number of calib sample reference
refNumCalibSamples = self.calibration['calibIntensityData'].shape[0]
if verbose:
print('---- self.calibration[\'calibIntensityData\'] used as number of calibration samples reference ----')
print('\t' + str(refNumCalibSamples) + ' samples')
# end calibIntensityData is a numpy.ndarray
# end calibIntensityData
## calibSampleMetadata
# exist
condition = 'calibSampleMetadata' in self.calibration
success = 'Check self.calibration[\'calibSampleMetadata\'] exists:\tOK'
failure = 'Check self.calibration[\'calibSampleMetadata\'] exists:\tFailure, no attribute \'self.calibration[\'calibSampleMetadata\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a pandas.DataFrame
condition = isinstance(self.calibration['calibSampleMetadata'], pandas.DataFrame)
success = 'Check self.calibration[\'calibSampleMetadata\'] is a pandas.DataFrame:\tOK'
failure = 'Check self.calibration[\'calibSampleMetadata\'] is a pandas.DataFrame:\tFailure, \'self.calibration[\'calibSampleMetadata\']\' is ' + str(type(self.calibration['calibSampleMetadata']))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
# number of samples
condition = (self.calibration['calibSampleMetadata'].shape[0] == refNumCalibSamples)
success = 'Check self.calibration[\'calibSampleMetadata\'] number of samples:\tOK'
failure = 'Check self.calibration[\'calibSampleMetadata\'] number of samples:\tFailure, \'self.calibration[\'calibSampleMetadata\']\' has ' + str(self.calibration['calibSampleMetadata'].shape[0]) + ' samples, ' + str(refNumCalibSamples) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
# end calibSampleMetadata is a pandas.DataFrame
# end calibSampleMetadata
## calibFeatureMetadata
# exist
condition = 'calibFeatureMetadata' in self.calibration
success = 'Check self.calibration[\'calibFeatureMetadata\'] exists:\tOK'
failure = 'Check self.calibration[\'calibFeatureMetadata\'] exists:\tFailure, no attribute \'self.calibration[\'calibFeatureMetadata\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a pandas.DataFrame
condition = isinstance(self.calibration['calibFeatureMetadata'], pandas.DataFrame)
success = 'Check self.calibration[\'calibFeatureMetadata\'] is a pandas.DataFrame:\tOK'
failure = 'Check self.calibration[\'calibFeatureMetadata\'] is a pandas.DataFrame:\tFailure, \'self.calibration[\'calibFeatureMetadata\']\' is ' + str(type(self.calibration['calibFeatureMetadata']))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
# number of features
condition = (self.calibration['calibFeatureMetadata'].shape[0] == refNumFeatures)
success = 'Check self.calibration[\'calibFeatureMetadata\'] number of features:\tOK'
failure = 'Check self.calibration[\'calibFeatureMetadata\'] number of features:\tFailure, \'self.calibration[\'calibFeatureMetadata\']\' has ' + str(self.calibration['calibFeatureMetadata'].shape[0]) + ' features, ' + str(refNumFeatures) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
if condition & (refNumFeatures != 0):
# Feature Name exist
condition = ('Feature Name' in self.calibration['calibFeatureMetadata'].columns.tolist())
success = 'Check self.calibration[\'calibFeatureMetadata\'][\'Feature Name\'] exist:\tOK'
failure = 'Check self.calibration[\'calibFeatureMetadata\'][\'Feature Name\'] exist:\tFailure, no column \'self.calibration[\'calibFeatureMetadata\'][\'Feature Name\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=LookupError(failure))
# end calibFeatureMetadata is a pandas.DataFrame
# end calibFeatureMetadata
## calibExpectedConcentration
# exist
condition = 'calibExpectedConcentration' in self.calibration
success = 'Check self.calibration[\'calibExpectedConcentration\'] exists:\tOK'
failure = 'Check self.calibration[\'calibExpectedConcentration\'] exists:\tFailure, no attribute \'self.calibration[\'calibExpectedConcentration\']\''
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=AttributeError(failure))
if condition:
# is a pandas.DataFrame
condition = isinstance(self.calibration['calibExpectedConcentration'], pandas.DataFrame)
success = 'Check self.calibration[\'calibExpectedConcentration\'] is a pandas.DataFrame:\tOK'
failure = 'Check self.calibration[\'calibExpectedConcentration\'] is a pandas.DataFrame:\tFailure, \'self.calibration[\'calibExpectedConcentration\']\' is ' + str(type(self.calibration['calibExpectedConcentration']))
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=TypeError(failure))
if condition:
# number of samples
condition = (self.calibration['calibExpectedConcentration'].shape[0] == refNumCalibSamples)
success = 'Check self.calibration[\'calibExpectedConcentration\'] number of samples:\tOK'
failure = 'Check self.calibration[\'calibExpectedConcentration\'] number of samples:\tFailure, \'self.calibration[\'calibExpectedConcentration\']\' has ' + str(self.calibration['calibExpectedConcentration'].shape[0]) + ' samples, ' + str(refNumCalibSamples) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
# number of features
condition = (self.calibration['calibExpectedConcentration'].shape[1] == refNumFeatures)
success = 'Check self.calibration[\'calibExpectedConcentration\'] number of features:\tOK'
failure = 'Check self.calibration[\'calibExpectedConcentration\'] number of features:\tFailure, \'self.calibration[\'calibExpectedConcentration\']\' has ' + str(self.calibration['calibExpectedConcentration'].shape[1]) + ' features, ' + str(refNumFeatures) + ' expected'
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
if condition & (refNumFeatures != 0):
# calibExpectedConcentration column names match ['Feature Name']
tmpDiff = pandas.DataFrame({'FeatName': refFeatureName, 'ColName': self.calibration['calibExpectedConcentration'].columns.values.tolist()})
condition = (self.calibration['calibExpectedConcentration'].columns.values.tolist() == refFeatureName)
success = 'Check self.calibration[\'calibExpectedConcentration\'] column name match self.featureMetadata[\'Feature Name\']:\tOK'
failure = 'Check self.calibration[\'calibExpectedConcentration\'] column name match self.featureMetadata[\'Feature Name\']:\tFailure, the following \'self.featureMetadata[\'Feature Name\']\' and \'self.calibration[\'calibExpectedConcentration\'].columns\' differ ' + str(tmpDiff.loc[(tmpDiff['FeatName'] != tmpDiff['ColName']), ['FeatName','ColName']].values.tolist())
failureListBasic = conditionTest(condition, success, failure, failureListBasic, verbose, raiseError, raiseWarning, exception=ValueError(failure))
# end calibExpectedConcentration number of features
# end calibExpectedConcentration is a pandas.DataFrame
# end calibExpectedConcentration
# end self.calib is a dict
# self.calibration is a dict or a list
# end self.calibration
## List additional attributes (print + log)
expectedSet = set({'Attributes', 'VariableType', '_Normalisation', '_name', 'fileName', 'filePath',
'_intensityData', 'sampleMetadata', 'featureMetadata', 'expectedConcentration', 'sampleMask',
'featureMask', 'calibration', 'sampleMetadataExcluded', 'intensityDataExcluded',
'featureMetadataExcluded', 'expectedConcentrationExcluded', 'excludedFlag'})
objectSet = set(self.__dict__.keys())
additionalAttributes = objectSet - expectedSet
if len(additionalAttributes) > 0:
if verbose:
print('--------')
print(str(len(additionalAttributes)) + ' additional attributes in the object:')
print('\t' + str(list(additionalAttributes)))
else:
if verbose:
print('--------')
print('No additional attributes in the object')
## Log and final Output
# Basic failure might compromise logging, failure of QC compromises sample meta
if len(failureListBasic) == 0:
# Prepare log text and bool
if len(failureListQC) != 0:
QCText = 'lacks parameters for QC'
QCBool = False
MetaText = 'lacks sample metadata'
MetaBool = False
else:
QCText = 'has parameters for QC'
QCBool = True
if len(failureListMeta) != 0:
MetaText = 'lacks sample metadata'
MetaBool = False
else:
MetaText = 'has sample metadata'
MetaBool = True
# Log
self.Attributes['Log'].append([datetime.now(), 'Dataset conforms to basic TargetedDataset (0 errors), %s (%d errors), %s (%d errors), (%i samples and %i features), with %d additional attributes in the object: %s. QC errors: %s, Meta errors: %s' % (QCText, len(failureListQC), MetaText, len(failureListMeta), self.noSamples, self.noFeatures, len(additionalAttributes), list(additionalAttributes), list(failureListQC), list(failureListMeta))])
# print results
if verbose:
print('--------')
print('Conforms to Dataset:\t 0 errors found')
print('Conforms to basic TargetedDataset:\t 0 errors found')
if QCBool:
print('Has required parameters for QC:\t %d errors found' % ((len(failureListQC))))
else:
print('Does not have QC parameters:\t %d errors found' % ((len(failureListQC))))
if MetaBool:
print('Has sample metadata information:\t %d errors found' % ((len(failureListMeta))))
else:
print('Does not have sample metadata information:\t %d errors found' % ((len(failureListMeta))))
# output
if (not QCBool) & raiseWarning:
warnings.warn('Does not have QC parameters:\t %d errors found' % ((len(failureListQC))))
if (not MetaBool) & raiseWarning:
warnings.warn('Does not have sample metadata information:\t %d errors found' % ((len(failureListMeta))))
return ({'Dataset': True, 'BasicTargetedDataset': True, 'QC': QCBool, 'sampleMetadata': MetaBool})
# Try logging to something that might not have a log
else:
# try logging
try:
self.Attributes['Log'].append([datetime.now(), 'Failed basic TargetedDataset validation, with the following %d issues: %s' % (len(failureListBasic), failureListBasic)])
except (AttributeError, KeyError, TypeError):
if verbose:
print('--------')
print('Logging failed')
# print results
if verbose:
print('--------')
print('Conforms to Dataset:\t 0 errors found')
print('Does not conform to basic TargetedDataset:\t %i errors found' % (len(failureListBasic)))
print('Does not have QC parameters')
print('Does not have sample metadata information')
# output
if raiseWarning:
warnings.warn('Does not conform to basic TargetedDataset:\t %i errors found' % (len(failureListBasic)))
warnings.warn('Does not have QC parameters')
warnings.warn('Does not have sample metadata information')
return ({'Dataset': True, 'BasicTargetedDataset': False, 'QC': False, 'sampleMetadata': False})
# If it's not a Dataset, no point checking anything more
else:
# try logging
try:
self.Attributes['Log'].append([datetime.now(), 'Failed basic TargetedDataset validation, Failed Dataset validation'])
except (AttributeError, KeyError, TypeError):
if verbose:
print('--------')
print('Logging failed')
# print results
if verbose:
print('--------')
print('Does not conform to Dataset')
print('Does not conform to basic TargetedDataset')
print('Does not have QC parameters')
print('Does not have sample metadata information')
# output
if raiseWarning:
warnings.warn('Does not conform to basic TargetedDataset')
warnings.warn('Does not have QC parameters')
warnings.warn('Does not have sample metadata information')
return ({'Dataset': False, 'BasicTargetedDataset': False, 'QC': False, 'sampleMetadata': False})
def applyMasks(self):
"""
        Permanently delete elements masked (those set to ``False``) in :py:attr:`~Dataset.sampleMask` and :py:attr:`~Dataset.featureMask`, from :py:attr:`~Dataset.featureMetadata`, :py:attr:`~Dataset.sampleMetadata`, :py:attr:`~Dataset.intensityData` and :py:attr:`~TargetedDataset.expectedConcentration`.
Features are excluded in each :py:attr:`~TargetedDataset.calibration` based on the internal :py:attr:`~TargetedDataset.calibration['calibFeatureMetadata']` (iterate through the list of calibration if 2+ datasets have been joined with :py:meth:`~TargetedDataset.__add__`).
"""
def findAndRemoveFeatures(calibDict, featureNameList):
"""
Finds and remove all features with Feature Name in featureNameList, from the numpy.ndarray and pandas.Dataframe in calibDict.
            Features in the calibration data are not expected to be ordered the same way as in featureMetadata (although they should be), so matching is done on feature names.
:param calibDict: self.calibration dictionary
:param featureNameList: list of Feature Name to remove
:return: newCalibDict with feature removed
"""
# init new mask
toRemoveFeatMask = calibDict['calibFeatureMetadata']['Feature Name'].isin(featureNameList).values # True for feature to remove
newCalibDict = dict()
newCalibDict['calibSampleMetadata'] = calibDict['calibSampleMetadata']
# resize all frames
dictKeys = set(calibDict.keys()) - set(['calibFeatureMetadata', 'calibSampleMetadata'])
for i in dictKeys:
# numpy.ndarray
if isinstance(calibDict[i], numpy.ndarray):
newCalibDict[i] = calibDict[i][:, ~toRemoveFeatMask]
# pandas.DataFrame
elif isinstance(calibDict[i], pandas.DataFrame):
newCalibDict[i] = calibDict[i].loc[:, ~toRemoveFeatMask]
else:
newCalibDict[i] = calibDict[i]
# calibFeatureMetadata
newCalibDict['calibFeatureMetadata'] = calibDict['calibFeatureMetadata'].loc[~toRemoveFeatMask, :]
newCalibDict['calibFeatureMetadata'].reset_index(drop=True, inplace=True)
return newCalibDict
# Only filter TargetedDataset.expectedConcentration as it is not present in Dataset, others are done in Dataset.applyMasks
if (sum(self.sampleMask == False) > 0) | (sum(self.featureMask == False) > 0):
# Instantiate lists if first application
if not hasattr(self, 'sampleMetadataExcluded'):
self.expectedConcentrationExcluded = []
# Samples
if sum(self.sampleMask) != len(self.sampleMask):
# Account for if self.sampleMask is a pandas.series
try:
self.sampleMask = self.sampleMask.values
except:
pass
# Save excluded samples
self.expectedConcentrationExcluded.append(self.expectedConcentration.loc[~self.sampleMask, :])
# Delete excluded samples
self.expectedConcentration = self.expectedConcentration.loc[self.sampleMask]
self.expectedConcentration.reset_index(drop=True, inplace=True)
# Features
if sum(self.featureMask) != len(self.featureMask):
# Account for if self.featureMask is a pandas.series
try:
self.featureMask = self.featureMask.values
except:
pass
# Start by removing features from self.calibration
featureNameList = self.featureMetadata['Feature Name'].values[~self.featureMask].tolist()
# list of dict if 2+ joined targetedDatasets
if isinstance(self.calibration, list):
# remove in each calibration
for j in range(len(self.calibration)):
self.calibration[j] = findAndRemoveFeatures(self.calibration[j], featureNameList)
# dict 1 targetedDataset
elif isinstance(self.calibration, dict):
self.calibration = findAndRemoveFeatures(self.calibration, featureNameList)
# Save excluded features
self.expectedConcentrationExcluded.append(self.expectedConcentration.loc[:, ~self.featureMask])
# Delete excluded features
self.expectedConcentration = self.expectedConcentration.loc[:, self.featureMask]
self.expectedConcentration.reset_index(drop=True, inplace=True)
# applyMasks to the rest of TargetedDataset
super().applyMasks()
def updateMasks(self, filterSamples=True, filterFeatures=True, sampleTypes=[SampleType.StudySample, SampleType.StudyPool],
assayRoles=[AssayRole.Assay, AssayRole.PrecisionReference],
quantificationTypes=[QuantificationType.IS, QuantificationType.QuantOwnLabeledAnalogue, QuantificationType.QuantAltLabeledAnalogue, QuantificationType.QuantOther, QuantificationType.Monitored],
calibrationMethods=[CalibrationMethod.backcalculatedIS, CalibrationMethod.noIS, CalibrationMethod.noCalibration, CalibrationMethod.otherCalibration],
rsdThreshold=None, **kwargs):
"""
Update :py:attr:`~Dataset.sampleMask` and :py:attr:`~Dataset.featureMask` according to QC parameters.
:py:meth:`updateMasks` sets :py:attr:`~Dataset.sampleMask` or :py:attr:`~Dataset.featureMask` to ``False`` for those items failing analytical criteria.
Similar to :py:meth:`~MSDataset.updateMasks`, without `blankThreshold` or `artifactual` filtering
.. note:: To avoid reintroducing items manually excluded, this method only ever sets items to ``False``, therefore if you wish to move from more stringent criteria to a less stringent set, you will need to reset the mask to all ``True`` using :py:meth:`~Dataset.initialiseMasks`.
:param bool filterSamples: If ``False`` don't modify sampleMask
:param bool filterFeatures: If ``False`` don't modify featureMask
:param sampleTypes: List of types of samples to retain
:type sampleTypes: SampleType
:param assayRoles: List of assays roles to retain
:type assayRoles: AssayRole
:param quantificationTypes: List of quantification types to retain
:type quantificationTypes: QuantificationType
        :param calibrationMethods: List of calibration methods to retain
:type calibrationMethods: CalibrationMethod
:raise TypeError: if sampleTypes is not a list
:raise TypeError: if sampleTypes are not a SampleType enum
:raise TypeError: if assayRoles is not a list
:raise TypeError: if assayRoles are not an AssayRole enum
:raise TypeError: if quantificationTypes is not a list
:raise TypeError: if quantificationTypes are not a QuantificationType enum
:raise TypeError: if calibrationMethods is not a list
:raise TypeError: if calibrationMethods are not a CalibrationMethod enum
"""
# Check sampleTypes, assayRoles, quantificationTypes and calibrationMethods are lists
if not isinstance(sampleTypes, list):
raise TypeError('sampleTypes must be a list of SampleType enums')
if not isinstance(assayRoles, list):
raise TypeError('assayRoles must be a list of AssayRole enums')
if not isinstance(quantificationTypes, list):
raise TypeError('quantificationTypes must be a list of QuantificationType enums')
        if not isinstance(calibrationMethods, list):
raise TypeError('calibrationMethods must be a list of CalibrationMethod enums')
# Check sampleTypes, assayRoles, quantificationTypes and calibrationMethods are enums
if not all(isinstance(item, SampleType) for item in sampleTypes):
raise TypeError('sampleTypes must be SampleType enums.')
if not all(isinstance(item, AssayRole) for item in assayRoles):
raise TypeError('assayRoles must be AssayRole enums.')
if not all(isinstance(item, QuantificationType) for item in quantificationTypes):
raise TypeError('quantificationTypes must be QuantificationType enums.')
if not all(isinstance(item, CalibrationMethod) for item in calibrationMethods):
raise TypeError('calibrationMethods must be CalibrationMethod enums.')
if rsdThreshold is None:
if 'rsdThreshold' in self.Attributes:
rsdThreshold = self.Attributes['rsdThreshold']
else:
rsdThreshold = None
if rsdThreshold is not None and not isinstance(rsdThreshold, (float, int)):
raise TypeError('rsdThreshold should either be a float or None')
# Feature Exclusions
if filterFeatures:
quantTypeMask = self.featureMetadata['quantificationType'].isin(quantificationTypes)
calibMethodMask = self.featureMetadata['calibrationMethod'].isin(calibrationMethods)
featureMask = numpy.logical_and(quantTypeMask, calibMethodMask).values
self.featureMask = numpy.logical_and(featureMask, self.featureMask)
if rsdThreshold is not None:
self.featureMask &= self.rsdSP <= rsdThreshold
self.featureMetadata['Passing Selection'] = self.featureMask
# Sample Exclusions
if filterSamples:
sampleMask = self.sampleMetadata['SampleType'].isin(sampleTypes)
assayMask = self.sampleMetadata['AssayRole'].isin(assayRoles)
sampleMask = numpy.logical_and(sampleMask, assayMask).values
self.sampleMask = numpy.logical_and(sampleMask, self.sampleMask)
self.Attributes['Log'].append([datetime.now(), 'Dataset filtered with: filterSamples=%s, filterFeatures=%s, sampleTypes=%s, assayRoles=%s, quantificationTypes=%s, calibrationMethods=%s' % (filterSamples, filterFeatures, sampleTypes, assayRoles, quantificationTypes, calibrationMethods)])
def addSampleInfo(self, descriptionFormat=None, filePath=None, **kwargs):
"""
Load additional metadata and map it in to the :py:attr:`~Dataset.sampleMetadata` table.
Possible options:
* **'NPC Subject Info'** Map subject metadata from a NPC sample manifest file (format defined in 'PCSOP.082')
* **'Raw Data'** Extract analytical parameters from raw data files
* **'ISATAB'** ISATAB study designs
* **'Filenames'** Parses sample information out of the filenames, based on the named capture groups in the regex passed in *filenamespec*
* **'Basic CSV'** Joins the :py:attr:`sampleMetadata` table with the data in the ``csv`` file at *filePath=*, matching on the 'Sample File Name' column in both.
* **'Batches'** Interpolate batch numbers for samples between those with defined batch numbers based on sample acquisitions times
:param str descriptionFormat: Format of metadata to be added
:param str filePath: Path to the additional data to be added
        :param filenameSpec: Only used if *descriptionFormat* is 'Filenames'. A regular expression that extracts sample-type information into the following named capture groups: 'fileName', 'baseName', 'study', 'chromatography', 'ionisation', 'instrument', 'groupingKind', 'groupingNo', 'injectionKind', 'injectionNo', 'reference', 'exclusion', 'reruns', 'extraInjections', 'exclusion2'. If ``None`` is passed, use the *filenameSpec* key in *Attributes*, loaded from the SOP json
:type filenameSpec: None or str
:raises NotImplementedError: if the descriptionFormat is not understood
"""
if descriptionFormat == 'Filenames':
filenameSpec = kwargs.get('filenameSpec', None) # default to None if not provided
if filenameSpec is None:
raise AttributeError('A \'filenameSpec\' must be provided with \'descriptionFormat==\'Filenames\'\'')
self._getSampleMetadataFromFilename(filenameSpec)
elif descriptionFormat == 'Batches':
self._fillBatches()
else:
super().addSampleInfo(descriptionFormat=descriptionFormat, filePath=filePath, **kwargs)
def _matchDatasetToLIMS(self, pathToLIMSfile):
"""
Establish the `Sampling ID` by matching the `Sample Base Name` with the LIMS file information.
:param str pathToLIMSfile: Path to LIMS file for map Sampling ID
"""
# Detect if requires NMR specific alterations
if 'expno' in self.sampleMetadata.columns:
from . import NMRDataset
NMRDataset._matchDatasetToLIMS(self,pathToLIMSfile)
else:
super()._matchDatasetToLIMS(pathToLIMSfile)
def _getSampleMetadataFromFilename(self, filenameSpec):
"""
Infer sample acquisition metadata from standardised filename template.
Similar to :py:meth:`~MSDataset._getSampleMetadataFromFilename`
"""
# If the dilution series design is not defined in the SOP, load the default.
if not 'dilutionMap' in self.Attributes.keys():
dilutionMap = pandas.read_csv(os.path.join(toolboxPath(), 'StudyDesigns', 'DilutionSeries.csv'), index_col='Sample Name')
self.Attributes['dilutionMap'] = dilutionMap['Dilution Factor (%)'].to_dict()
# Strip any whitespace from 'Sample File Name'
self.sampleMetadata['Sample File Name'] = self.sampleMetadata['Sample File Name'].str.strip()
# Break filename down into constituent parts.
baseNameParser = re.compile(filenameSpec, re.VERBOSE)
fileNameParts = self.sampleMetadata['Sample File Name'].str.extract(baseNameParser, expand=False)
# Deal with badly ordered exclusions
        fileNameParts.loc[fileNameParts['exclusion2'].notnull(), 'exclusion'] = fileNameParts.loc[fileNameParts['exclusion2'].notnull(), 'exclusion2']
fileNameParts.drop('exclusion2', axis=1, inplace=True)
# Pass masks into enum fields
fileNameParts.loc[:, 'AssayRole'] = AssayRole.Assay
fileNameParts.loc[fileNameParts['reference'] == 'SR', 'AssayRole'] = AssayRole.PrecisionReference
fileNameParts.loc[fileNameParts['baseName'].str.match('.+[B]\d+?[SE]\d+?', na=False).astype(bool), 'AssayRole'] = AssayRole.PrecisionReference
fileNameParts.loc[fileNameParts['reference'] == 'LTR', 'AssayRole'] = AssayRole.PrecisionReference
fileNameParts.loc[fileNameParts['reference'] == 'MR', 'AssayRole'] = AssayRole.PrecisionReference
fileNameParts.loc[fileNameParts['injectionKind'] == 'SRD', 'AssayRole'] = AssayRole.LinearityReference
fileNameParts.loc[fileNameParts['groupingKind'].str.match('Blank', na=False).astype(bool), 'AssayRole'] = AssayRole.LinearityReference
fileNameParts.loc[fileNameParts['groupingKind'].str.match('E?IC', na=False).astype(bool), 'AssayRole'] = AssayRole.Assay
fileNameParts.loc[:, 'SampleType'] = SampleType.StudySample
fileNameParts.loc[fileNameParts['reference'] == 'SR', 'SampleType'] = SampleType.StudyPool
fileNameParts.loc[fileNameParts['baseName'].str.match('.+[B]\d+?[SE]\d+?', na=False).astype(bool), 'SampleType'] = SampleType.StudyPool
fileNameParts.loc[fileNameParts['reference'] == 'LTR', 'SampleType'] = SampleType.ExternalReference
fileNameParts.loc[fileNameParts['reference'] == 'MR', 'SampleType'] = SampleType.MethodReference
fileNameParts.loc[fileNameParts['injectionKind'] == 'SRD', 'SampleType'] = SampleType.StudyPool
fileNameParts.loc[fileNameParts['groupingKind'].str.match('Blank', na=False).astype(bool), 'SampleType'] = SampleType.ProceduralBlank
fileNameParts.loc[fileNameParts['groupingKind'].str.match('E?IC', na=False).astype(bool), 'SampleType'] = SampleType.StudyPool
# Skipped runs
fileNameParts['Skipped'] = fileNameParts['exclusion'].str.match('[Xx]', na=False)
# Get matrix
fileNameParts['Matrix'] = fileNameParts['groupingKind'].str.extract('^([AC-Z]{1,2})(?<!IC)$', expand=False)
fileNameParts['Matrix'].fillna('', inplace=True)
# Get well numbers
fileNameParts.loc[
fileNameParts['groupingKind'].str.match('Blank|E?IC', na=False).astype(bool), 'injectionNo'] = -1
fileNameParts['Well'] = pandas.to_numeric(fileNameParts['injectionNo'])
# Plate / grouping no
fileNameParts['Plate'] = pandas.to_numeric(fileNameParts['groupingNo'])
# Get batch where it is explicit in file name
fileNameParts['Batch'] = pandas.to_numeric(fileNameParts['baseName'].str.extract('B(\d+?)[SE]', expand=False))
fileNameParts['Correction Batch'] = numpy.nan
# Map dilution series names to dilution level
fileNameParts['Dilution'] = fileNameParts['baseName'].str.extract('(?:.+_?)(SRD\d\d)(?:_?.*)', expand=False).replace(self.Attributes['dilutionMap'])
fileNameParts['Dilution'] = fileNameParts['Dilution'].astype(float)
# Blank out NAs for neatness
fileNameParts['reruns'].fillna('', inplace=True)
fileNameParts['extraInjections'].fillna('', inplace=True)
# Drop unwanted columns
fileNameParts.drop(['exclusion', 'reference', 'groupingKind', 'injectionNo', 'injectionKind', 'groupingNo'], axis=1, inplace=True)
        # Swap in user friendly column names
fileNameParts.rename(columns={'chromatography': 'Chromatography'}, inplace=True)
fileNameParts.rename(columns={'instrument': 'Instrument'}, inplace=True)
fileNameParts.rename(columns={'study': 'Study'}, inplace=True)
fileNameParts.rename(columns={'baseName': 'Sample Base Name'}, inplace=True)
fileNameParts.rename(columns={'fileName': 'Sample File Name'}, inplace=True)
fileNameParts.rename(columns={'suplementalInfo': 'Suplemental Info'}, inplace=True)
fileNameParts.rename(columns={'ionisation': 'Ionisation'}, inplace=True)
fileNameParts.rename(columns={'extraInjections': 'Suplemental Injections'}, inplace=True)
fileNameParts.rename(columns={'reruns': 'Re-Run'}, inplace=True)
# Merge metadata back into the sampleInfo table.
# first remove duplicate columns (from _dataset _init_)
if 'AssayRole' in self.sampleMetadata.columns: self.sampleMetadata.drop(['AssayRole'], axis=1, inplace=True)
if 'SampleType' in self.sampleMetadata.columns: self.sampleMetadata.drop(['SampleType'], axis=1, inplace=True)
if 'Sample Base Name' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Sample Base Name'], axis=1, inplace=True)
if 'Dilution' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Dilution'], axis=1, inplace=True)
if 'Batch' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Batch'], axis=1, inplace=True)
if 'Correction Batch' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Correction Batch'], axis=1, inplace=True)
# merge
self.sampleMetadata = pandas.merge(self.sampleMetadata, fileNameParts, left_on='Sample File Name', right_on='Sample File Name', how='left', sort=False)
# Add 'Exclusion Details' column
self.sampleMetadata['Exclusion Details'] = ''
self.Attributes['Log'].append([datetime.now(), 'Sample metadata parsed from filenames.'])
def _fillBatches(self):
"""
Use sample names and acquisition times to infer batch info
Similar to :py:meth:`~MSDataset._fillBatches`
"""
batchRE = r"""
B
(?P<observebatch>\d+?)
(?P<startend>[SE])
(?P<sequence>\d+?)
_SR
(?:_(?P<extraInjections>\d+?|\w+?))?
$
"""
batchRE = re.compile(batchRE, re.VERBOSE)
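        # Illustrative match (hypothetical file name): 'study1_B2S1_SR' would yield
        # observebatch='2', startend='S' and sequence='1', i.e. the start of batch 2.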
        # We cannot infer batches unless we have run order
if 'Run Order' in self.sampleMetadata.keys():
currentBatch = 0
# Loop over samples in run order
for index, row in self.sampleMetadata.sort_values(by='Run Order').iterrows():
nameComponents = batchRE.search(row['Sample File Name'])
if nameComponents:
# Batch start
if nameComponents.group('startend') == 'S':
# New batch - increment batch no
if nameComponents.group('sequence') == '1':
currentBatch = currentBatch + 1
# Don't include the dilution series or blanks
if not ((row['AssayRole'] == AssayRole.LinearityReference) or (row['SampleType'] == SampleType.ProceduralBlank)):
self.sampleMetadata.loc[index, 'Batch'] = currentBatch
self.sampleMetadata.loc[index, 'Correction Batch'] = currentBatch
else:
warnings.warn('Unable to infer batches without run order, skipping.')
return
def accuracyPrecision(self, onlyPrecisionReferences=False):
"""
Return Precision (percent RSDs) and Accuracy for each SampleType and each unique concentration.
Statistic grouped by SampleType, Feature and unique concentration.
        :param bool onlyPrecisionReferences: If ``True`` only use samples with the `AssayRole` PrecisionReference.
        :returns: Dict of Accuracy and Precision dict for each group.
        :rtype: dict(str:dict(str:pandas.DataFrame))
"""
#from ..enumerations import AssayRole
#from ..objects import TargetedDataset
def calcAccuracy(measuredConc, expectedConc):
"""
Calculate the accuracy of measurement for a column of data.
accuracy = (mean(measuredConcentration)/expectedConcentration)*100
:param numpy.ndarray measuredConc: *n* by 1 numpy array of data, with a single feature in column, and samples in rows
:param float expectedConc: expected concentration
:return: accuracy value
:rtype: float
"""
accuracy = (numpy.mean(measuredConc) / expectedConc) * 100
return accuracy
def calcPrecision(measuredConc):
"""
Calculate the precision of measurement (percent RSD) for a column of data.
Allow for -inf, inf values in input.
:param numpy.ndarray measuredConc: *n* by 1 numpy array of data, with a single feature in column, and samples in rows
            :return: precision value
:rtype: float
"""
std = numpy.std(measuredConc)
rsd = (std / numpy.mean(measuredConc)) * 100
if numpy.isnan(rsd):
rsd = numpy.inf
return rsd
#if not isinstance(dataset, TargetedDataset):
# raise TypeError('dataset must be an instance of TargetedDataset.')
# Init
accuracy = dict()
precision = dict()
# Restrict to PrecisionReference if necessary
if onlyPrecisionReferences:
startMask = (self.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference)
else:
startMask = numpy.squeeze(numpy.ones([self.sampleMetadata.shape[0], 1], dtype=bool), axis=1)
# Unique concentrations
uniqueConc = pandas.unique(self.expectedConcentration.loc[startMask, :].values.ravel()).tolist()
uniqueConc = sorted([x for x in uniqueConc if str(x) != 'nan'])
# Each SampleType
sampleTypes = self.sampleMetadata['SampleType'].unique()
for sampleType in sampleTypes:
# init
acc = pandas.DataFrame(numpy.full([len(uniqueConc), self.featureMetadata.shape[0]], numpy.nan), index=uniqueConc, columns=self.featureMetadata['Feature Name'].values)
prec = pandas.DataFrame(numpy.full([len(uniqueConc), self.featureMetadata.shape[0]], numpy.nan), index=uniqueConc, columns=self.featureMetadata['Feature Name'].values)
# Restrict to sampleType
# Allow for the case where sampleType is not defined
if pandas.isnull(sampleType):
sampleTypeMask = numpy.logical_and(startMask, self.sampleMetadata['SampleType'].isnull())
else:
sampleTypeMask = numpy.logical_and(startMask, self.sampleMetadata['SampleType'].values == sampleType)
# Each feature
for feat in self.featureMetadata['Feature Name'].tolist():
# Each unique concentrations
for conc in uniqueConc:
# Restrict to concentration
mask = numpy.logical_and(sampleTypeMask, self.expectedConcentration[feat].values == conc)
# minimum of samples
if sum(mask) < 2:
continue
# fill accuracy/precision df
featID = (self.featureMetadata['Feature Name'] == feat).values
acc.loc[conc, feat] = calcAccuracy(self.intensityData[mask, featID], conc)
prec.loc[conc, feat] = calcPrecision(self.intensityData[mask, featID])
# Store accuracy/precision + clean empty rows
accuracy[sampleType] = acc.dropna(axis=0, how='all')
precision[sampleType] = prec.dropna(axis=0, how='all')
# All samples
acc = pandas.DataFrame(numpy.full([len(uniqueConc), self.featureMetadata.shape[0]], numpy.nan), index=uniqueConc, columns=self.featureMetadata['Feature Name'].values)
prec = pandas.DataFrame(numpy.full([len(uniqueConc), self.featureMetadata.shape[0]], numpy.nan), index=uniqueConc, columns=self.featureMetadata['Feature Name'].values)
# Each feature
for feat in self.featureMetadata['Feature Name'].tolist():
# Each unique concentrations
for conc in uniqueConc:
# Restrict to concentration
mask = numpy.logical_and(startMask, self.expectedConcentration[feat].values == conc)
# minimum of samples
if sum(mask) < 2:
continue
# fill accuracy/precision df
featID = (self.featureMetadata['Feature Name'] == feat).values
acc.loc[conc, feat] = calcAccuracy(self.intensityData[mask, featID], conc)
prec.loc[conc, feat] = calcPrecision(self.intensityData[mask, featID])
# Store accuracy/precision
accuracy['All Samples'] = acc.dropna(axis=0, how='all')
precision['All Samples'] = prec.dropna(axis=0, how='all')
# Output
return {'Accuracy': accuracy, 'Precision': precision}
def main():
pass
if __name__=='__main__':
main()
| 71.400289
| 725
| 0.659667
|
7333a49c13050f97fc79f359093c51b5e25a39ac
| 1,862
|
py
|
Python
|
pipeline/objects/function.py
|
neuro-ai-dev/pipeline
|
c7edcc83576158062fe48f266dfaea62d754e761
|
[
"Apache-2.0"
] | 2
|
2021-11-02T21:31:51.000Z
|
2021-11-18T13:27:42.000Z
|
pipeline/objects/function.py
|
neuro-ai-dev/pipeline
|
c7edcc83576158062fe48f266dfaea62d754e761
|
[
"Apache-2.0"
] | 5
|
2021-12-06T10:19:26.000Z
|
2022-01-06T10:02:53.000Z
|
pipeline/objects/function.py
|
neuro-ai-dev/pipeline
|
c7edcc83576158062fe48f266dfaea62d754e761
|
[
"Apache-2.0"
] | null | null | null |
import inspect
from hashlib import sha256
from typing import Any, Callable, Dict, Optional
from pipeline.schemas.function import FunctionGet
from pipeline.util import generate_id, hex_to_python_object
class Function:
local_id: str
remote_id: str
name: str
source: str
hash: str
typing_inputs: Dict[str, Any]
typing_outputs: Dict[str, Any]
function: Callable
class_instance: Optional[Any]
def __init__(
self, function: Callable, *, remote_id: str = None, class_instance: Any = None
):
self.name = function.__name__
self.remote_id = remote_id
self.class_instance = class_instance
self.function = function
self.source = inspect.getsource(function)
self.hash = sha256(self.source.encode()).hexdigest()
# TODO: Add verification that all inputs to function have a typing annotation,
# except for "self"
if "return" not in function.__annotations__:
raise Exception(
(
"You must define an output type for a piepline function. "
"e.g. def my_func(...) -> float:"
)
)
self.typing_outputs = {"return": function.__annotations__["return"]}
self.typing_inputs = {
function_i: function.__annotations__[function_i]
for function_i in function.__annotations__
if not function_i == "return"
}
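        # Illustrative example (hypothetical function): for
        #   def scale(value: float, factor: float) -> float: ...
        # typing_inputs == {'value': float, 'factor': float} and
        # typing_outputs == {'return': float}.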
self.local_id = generate_id(10)
@classmethod
def from_schema(cls, schema: FunctionGet):
unpickled_data = hex_to_python_object(schema.hex_file.data)
if isinstance(unpickled_data, Function):
unpickled_data.local_id = schema.id
return unpickled_data
return cls(
unpickled_data,
remote_id=schema.id,
)
| 28.646154
| 86
| 0.625134
|
0052eba33d5f67b5225b2ed1e9aebcbaf41932ba
| 3,118
|
py
|
Python
|
examples/rbfn/music_speech/train.py
|
FrostByte266/neupy
|
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
|
[
"MIT"
] | 801
|
2015-09-23T09:24:47.000Z
|
2022-03-29T19:19:03.000Z
|
examples/rbfn/music_speech/train.py
|
FrostByte266/neupy
|
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
|
[
"MIT"
] | 277
|
2015-09-22T19:48:50.000Z
|
2022-03-11T23:25:32.000Z
|
examples/rbfn/music_speech/train.py
|
FrostByte266/neupy
|
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
|
[
"MIT"
] | 194
|
2015-09-23T15:03:57.000Z
|
2022-03-31T13:54:46.000Z
|
"""
Music/Speech classification using PNN
-------------------------------------
A similar dataset which was collected for the purposes of
music/speech discrimination. The dataset consists of 120 tracks,
each 30 seconds long. Each class (music/speech) has 60 examples.
The tracks are all 22050Hz Mono 16-bit audio files in .wav format.
Dataset page: http://marsyasweb.appspot.com/download/data_sets/
Dataset file: http://opihi.cs.uvic.ca/sound/music_speech.tar.gz
"""
import numpy as np
from neupy import algorithms
from sklearn.utils import shuffle
from sklearn import preprocessing, model_selection, metrics, decomposition
import matplotlib.pyplot as plt
from librosa.feature import mfcc
from getdata import train_test_data, parser
plt.style.use('ggplot')
parser.add_argument('--pca', '-p', dest='apply_pca', default=False,
action='store_true',
help="Apply PCA for the train data set visualization")
x_train, x_test, y_train, y_test = train_test_data()
def extract_features(data, n_fft=2048):
data = data.astype(np.float32)
res = []
for row in data:
centroid = mfcc(row, n_fft=n_fft, sr=22050)
res.append([
np.min(centroid),
np.max(centroid),
np.median(centroid),
])
return np.array(res)
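# Illustrative shape (hypothetical input size): for 100 audio tracks, extract_features
# returns a (100, 3) array holding the min, max and median MFCC value of each track.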
print("> Data preprocessing procedure")
args = parser.parse_args()
if args.seed is not None:
np.random.seed(args.seed)
std = 0.2
n_fft = 128
print("STD = {}".format(std))
print("#FFT = {}".format(n_fft))
scaler = preprocessing.MinMaxScaler()
x_train = scaler.fit_transform(extract_features(x_train, n_fft=n_fft))
x_test = scaler.transform(extract_features(x_test, n_fft=n_fft))
x_train, y_train = shuffle(x_train, y_train)
if args.apply_pca:
pca = decomposition.PCA(2)
plt.scatter(*pca.fit_transform(x_train).T, c=y_train, s=100)
plt.show()
print("PCA explain {:.2%}".format(pca.explained_variance_ratio_.sum()))
print("\n> Train prediction")
skf = model_selection.StratifiedKFold(n_splits=5)
skf_iterator = skf.split(x_train, y_train)
scores = []
for i, (train_index, test_index) in enumerate(skf_iterator, start=1):
print("\nK-fold #{}".format(i))
pnnet = algorithms.PNN(std=std, verbose=False)
x_fold_train, x_fold_test = x_train[train_index], x_train[test_index]
y_fold_train, y_fold_test = y_train[train_index], y_train[test_index]
pnnet.fit(x_fold_train, y_fold_train)
y_predicted = pnnet.predict(x_fold_test)
score = metrics.roc_auc_score(y_predicted, y_fold_test)
    accuracy = metrics.accuracy_score(y_predicted, y_fold_test)
scores.append(score)
print("ROC AUC score: {:.4f}".format(score))
print("Accurucy: {:.2%}".format(accurucy))
print(metrics.confusion_matrix(y_predicted, y_fold_test))
print("Average ROC AUC score: {:.4f}".format(np.mean(scores)))
print("\n> Test prediction")
pnnet = algorithms.PNN(std=std, verbose=False)
pnnet.fit(x_train, y_train)
y_predicted = pnnet.predict(x_test)
test_auc = metrics.roc_auc_score(y_predicted, y_test)
print("Test data ROC AUC: {:.4f}".format(test_auc))
| 30.871287
| 75
| 0.710071
|
2b62fb7c9073ba840523b3f6050dd65f492f0e96
| 1,775
|
py
|
Python
|
tests/test_cookiecutter_generation.py
|
uisautomation/django-boilerplate
|
62a1162e38fba263021dc39d872cb071533d9cc1
|
[
"MIT"
] | null | null | null |
tests/test_cookiecutter_generation.py
|
uisautomation/django-boilerplate
|
62a1162e38fba263021dc39d872cb071533d9cc1
|
[
"MIT"
] | 26
|
2018-01-18T11:20:34.000Z
|
2019-03-10T13:46:52.000Z
|
tests/test_cookiecutter_generation.py
|
uisautomation/django-boilerplate
|
62a1162e38fba263021dc39d872cb071533d9cc1
|
[
"MIT"
] | 2
|
2018-01-11T20:55:44.000Z
|
2019-03-11T16:51:23.000Z
|
import os
import re
import sys
import sh
import pytest
from binaryornot.check import is_binary
PATTERN = "{{(\s?cookiecutter)[.](.*?)}}"
RE_OBJ = re.compile(PATTERN)
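# Illustrative match: the pattern flags unrendered template variables such as
# "{{ cookiecutter.project_slug }}" left behind in a generated file.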
@pytest.fixture
def context():
return {
"project_name": "My Test Project",
"project_slug": "test_project",
"application_name": "Test Application"
}
def build_files_list(root_dir):
"""Build a list containing absolute paths to the generated files."""
return [
os.path.join(dirpath, file_path)
for dirpath, subdirs, files in os.walk(root_dir)
for file_path in files
]
def check_paths(paths):
"""Method to check all paths have correct substitutions,
    used by other test cases
"""
# Assert that no match is found in any of the files
for path in paths:
if is_binary(path):
continue
for line in open(path, "r"):
match = RE_OBJ.search(line)
msg = "cookiecutter variable not replaced in {}"
assert match is None, msg.format(path)
def test_default_configuration(cookies, context):
result = cookies.bake(extra_context=context)
assert result.exit_code == 0
assert result.exception is None
assert result.project.basename == context["project_slug"]
assert result.project.isdir()
paths = build_files_list(str(result.project))
assert paths
check_paths(paths)
def test_tox(capsys, cookies):
"""generated project should pass the tox tests"""
result = cookies.bake()
try:
# The output from tox is of interest when running the test suite.
with capsys.disabled():
sh.tox(_cwd=str(result.project), _err_to_out=True, _out=sys.stdout)
except sh.ErrorReturnCode as e:
        pytest.fail(str(e))
| 26.102941
| 79
| 0.658028
|
c2fc2bf5543fbf922637b56db4d53193bdde8615
| 6,277
|
py
|
Python
|
src/cfnlint/rules/parameters/AllowedPattern.py
|
Adam-sHub/cfn-lint
|
4c501d01f87ec0ef9432dc407c5a9ac0025f00b6
|
[
"MIT-0"
] | 1,134
|
2019-03-02T14:58:34.000Z
|
2021-05-15T00:57:16.000Z
|
src/cfnlint/rules/parameters/AllowedPattern.py
|
Adam-sHub/cfn-lint
|
4c501d01f87ec0ef9432dc407c5a9ac0025f00b6
|
[
"MIT-0"
] | 1,122
|
2019-03-03T04:27:15.000Z
|
2021-05-14T20:51:16.000Z
|
src/cfnlint/rules/parameters/AllowedPattern.py
|
Adam-sHub/cfn-lint
|
4c501d01f87ec0ef9432dc407c5a9ac0025f00b6
|
[
"MIT-0"
] | 297
|
2019-03-11T09:56:57.000Z
|
2021-05-14T16:41:19.000Z
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import re
import six
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
from cfnlint.helpers import RESOURCE_SPECS
class AllowedPattern(CloudFormationLintRule):
"""Check if parameters have a valid value"""
id = 'W2031'
shortdesc = 'Check if parameters have a valid value based on an allowed pattern'
description = 'Check if parameters have a valid value in a pattern. The Parameter\'s allowed pattern is based on the usages in property (Ref)'
source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/cfn-resource-specification.md#allowedpattern'
tags = ['parameters', 'resources', 'property', 'allowed pattern']
def initialize(self, cfn):
"""Initialize the rule"""
for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
self.resource_property_types.append(resource_type_spec)
for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
self.resource_sub_property_types.append(property_type_spec)
def check_value_ref(self, value, path, **kwargs):
"""Check Ref"""
matches = []
cfn = kwargs.get('cfn')
if 'Fn::If' in path:
self.logger.debug(
                'Not able to guarantee that the default value hasn\'t been conditioned out')
return matches
if path[0] == 'Resources' and 'Condition' in cfn.template.get(
path[0], {}).get(path[1]):
self.logger.debug(
                'Not able to guarantee that the default value '
'hasn\'t been conditioned out')
return matches
allowed_pattern = kwargs.get('value_specs', {}).get('AllowedPattern', {})
allowed_pattern_regex = kwargs.get('value_specs', {}).get('AllowedPatternRegex', {})
allowed_pattern_description = kwargs.get('value_specs', {}).get('AllowedPatternDescription', {})
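        # Illustrative check (hypothetical values): with an AllowedPatternRegex of '^[0-9]+$',
        # a parameter whose Default is 'abc' would produce a rule match below.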
if allowed_pattern_regex:
if value in cfn.template.get('Parameters', {}):
param = cfn.template.get('Parameters').get(value, {})
parameter_values = param.get('AllowedValues')
default_value = param.get('Default')
parameter_type = param.get('Type')
if isinstance(parameter_type, six.string_types):
if ((not parameter_type.startswith('List<')) and
(not parameter_type.startswith('AWS::SSM::Parameter::Value<')) and
parameter_type not in ['CommaDelimitedList', 'List<String>']):
# Check Allowed Values
if parameter_values:
for index, allowed_value in enumerate(parameter_values):
if not re.match(allowed_pattern_regex, str(allowed_value)):
param_path = ['Parameters', value, 'AllowedValues', index]
description = allowed_pattern_description or 'Valid values must match pattern {0}'.format(allowed_pattern)
message = 'You must specify a valid allowed value for {0} ({1}). {2}'
matches.append(RuleMatch(param_path, message.format(
value, allowed_value, description)))
if default_value:
# Check Default, only if no allowed Values are specified in the parameter (that's covered by E2015)
if not re.match(allowed_pattern_regex, str(default_value)):
param_path = ['Parameters', value, 'Default']
description = allowed_pattern_description or 'Valid values must match pattern {0}'.format(allowed_pattern)
message = 'You must specify a valid Default value for {0} ({1}). {2}'
matches.append(RuleMatch(param_path, message.format(
value, default_value, description)))
return matches
def check(self, cfn, properties, value_specs, property_specs, path):
"""Check itself"""
matches = list()
for p_value, p_path in properties.items_safe(path[:]):
for prop in p_value:
if prop in value_specs:
value = value_specs.get(prop).get('Value', {})
if value:
value_type = value.get('ValueType', '')
property_type = property_specs.get('Properties').get(prop).get('Type')
matches.extend(
cfn.check_value(
p_value, prop, p_path,
check_ref=self.check_value_ref,
value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get(
'ValueTypes').get(value_type, {}),
cfn=cfn, property_type=property_type, property_name=prop
)
)
return matches
def match_resource_sub_properties(self, properties, property_type, path, cfn):
"""Match for sub properties"""
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get(
'PropertyTypes').get(property_type, {}).get('Properties', {})
property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
matches.extend(self.check(cfn, properties, specs, property_specs, path))
return matches
def match_resource_properties(self, properties, resource_type, path, cfn):
"""Check CloudFormation Properties"""
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get(
'ResourceTypes').get(resource_type, {}).get('Properties', {})
resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
matches.extend(self.check(cfn, properties, specs, resource_specs, path))
return matches
| 51.876033
| 146
| 0.58101
|
16c079dfa5e5ab3060fedab05f08bafd154d43a9
| 804
|
py
|
Python
|
charts/pagerank2.py
|
twmarshall/tbd
|
35de2a72515f5f1d0004c3d1ca896f5ef7cb4ce4
|
[
"Apache-2.0"
] | null | null | null |
charts/pagerank2.py
|
twmarshall/tbd
|
35de2a72515f5f1d0004c3d1ca896f5ef7cb4ce4
|
[
"Apache-2.0"
] | null | null | null |
charts/pagerank2.py
|
twmarshall/tbd
|
35de2a72515f5f1d0004c3d1ca896f5ef7cb4ce4
|
[
"Apache-2.0"
] | 2
|
2015-03-03T03:39:26.000Z
|
2015-04-13T14:34:11.000Z
|
import matplotlib
# prevents pyplot from trying to connect to x windowing
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import string
import sys
fig, ax = plt.subplots()
graph_dir = 'charts/'
def plotLine(title, start, inc, color):
x = []
y = []
val = start
for i in range(0, 40):
x.append(i)
y.append(val)
val = val + inc
plt.plot(x, y, label=title, color=color)
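# Worked example for the call below: plotLine("Update 10", 23291.67, 65.33, ...) plots the
# cumulative points (0, 23291.67), (1, 23357.00), (2, 23422.33), ... over 40 update batches.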
plotLine("Update 10", 23291.67, 65.33, '#6aa84f')
plotLine("Non-incremental", 2819.0, 2819.0, '#cc0000')
plt.xlabel("Update Batches")
plt.ylabel('Cumulative Time (ms)')
#ax.set_title("Effect of Input Size on Update Time")
plt.legend()
plt.savefig(graph_dir + 'pagerank2.png')
| 23.647059
| 158
| 0.575871
|
04bdee9a276d493d4c4c7377dfaeea14881e0ebc
| 1,509
|
py
|
Python
|
python/tests/test_ll_queue.py
|
mhendricks96/data-structures-and-algorithms
|
9c07d284fa8f54a0405a1fc5bda963b6150cc2ef
|
[
"MIT"
] | null | null | null |
python/tests/test_ll_queue.py
|
mhendricks96/data-structures-and-algorithms
|
9c07d284fa8f54a0405a1fc5bda963b6150cc2ef
|
[
"MIT"
] | 39
|
2021-06-08T04:19:00.000Z
|
2022-03-19T17:58:10.000Z
|
python/tests/test_ll_queue.py
|
mhendricks96/data-structures-and-algorithms
|
9c07d284fa8f54a0405a1fc5bda963b6150cc2ef
|
[
"MIT"
] | null | null | null |
from ll_queue.ll_queue import LL_Queue
from linked_list.linked_list import LinkedList, Node
# Linked List Queue Tests
def test_create_empty_queue():
my_queue = LL_Queue()
actual = len(my_queue)
expected = 0
assert actual == expected
def test_enqueue_one():
my_queue = LL_Queue()
my_queue.enqueue("yea, mike")
actual = len(my_queue)
expected = 1
assert actual == expected
def test_enqueue_three():
my_queue = LL_Queue()
my_queue.enqueue("yea, mike")
my_queue.enqueue("hola, miguel")
my_queue.enqueue("bonjuer, mike")
actual = len(my_queue)
expected = 3
assert actual == expected
def test_dequeue_one():
my_queue = LL_Queue()
my_queue.enqueue(22)
my_queue.enqueue(77)
my_queue.dequeue()
actual = len(my_queue)
expected = 1
assert actual == expected
def test_dequeue_all():
my_queue = LL_Queue()
my_queue.enqueue(22)
my_queue.enqueue(77)
my_queue.enqueue("codefellows")
my_queue.dequeue()
my_queue.dequeue()
my_queue.dequeue()
actual = len(my_queue)
expected = 0
assert actual == expected
def test_peek_into_queue():
my_queue = LL_Queue()
my_queue.enqueue(22)
my_queue.enqueue(77)
my_queue.enqueue("codefellows")
actual = my_queue.peek()
expected = 22
assert actual == expected
def test_empty_exception_in_queue():
my_queue = LL_Queue()
actual = my_queue.dequeue()
expected = "Sorry, the queue is empty"
assert actual == expected
| 24.737705
| 52
| 0.680583
|
cd182e14524a2ea405595787e2778aa80d5675b9
| 507
|
py
|
Python
|
pontoon/tags/models.py
|
oberd/pontoon
|
8366905e0e44eabd1f49cf4c57572792d5f2365a
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/tags/models.py
|
oberd/pontoon
|
8366905e0e44eabd1f49cf4c57572792d5f2365a
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/tags/models.py
|
oberd/pontoon
|
8366905e0e44eabd1f49cf4c57572792d5f2365a
|
[
"BSD-3-Clause"
] | 1
|
2019-07-17T21:21:41.000Z
|
2019-07-17T21:21:41.000Z
|
from django.db import models
from pontoon.base.models import PRIORITY_CHOICES, Project, Resource
class Tag(models.Model):
slug = models.CharField(max_length=20)
name = models.CharField(max_length=30)
project = models.ForeignKey(Project, blank=True, null=True)
resources = models.ManyToManyField(Resource)
priority = models.IntegerField(
blank=True,
null=True,
choices=PRIORITY_CHOICES)
class Meta(object):
unique_together = [['slug', 'project']]
| 26.684211
| 67
| 0.700197
|
54a2ff0a5a559224427ff503472079019770d2cf
| 134
|
py
|
Python
|
raspy/board_revision.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
raspy/board_revision.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
raspy/board_revision.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
"""This module contains the Raspberry Pi board revisions."""
REV1 = 1
"""Revision 1.0 board."""
REV2 = 2
"""Revision 2.x board."""
| 14.888889
| 60
| 0.641791
|
b33c0daf8f6ce981bfdd76918c6ff8ca2e75c322
| 724
|
py
|
Python
|
feeds/migrations/0001_initial.py
|
ralphqq/rss-apifier
|
cd056654abf24fd178f1e5d8661cafcb3cc1236b
|
[
"MIT"
] | null | null | null |
feeds/migrations/0001_initial.py
|
ralphqq/rss-apifier
|
cd056654abf24fd178f1e5d8661cafcb3cc1236b
|
[
"MIT"
] | 5
|
2020-06-06T01:01:48.000Z
|
2021-09-22T18:16:22.000Z
|
feeds/migrations/0001_initial.py
|
ralphqq/rss-apifier
|
cd056654abf24fd178f1e5d8661cafcb3cc1236b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-10-23 10:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Feed',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1024)),
('description', models.CharField(default='', max_length=2048)),
('link', models.URLField(max_length=400, unique=True)),
('version', models.CharField(max_length=64)),
],
),
]
| 28.96
| 115
| 0.553867
|
e551464369acf97f21f0b9b698761a8fec2f1270
| 12,469
|
py
|
Python
|
code/python/FundsAPIforDigitalPortals/v2/fds/sdk/FundsAPIforDigitalPortals/model/inline_response2004_share_class_registration_country.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/FundsAPIforDigitalPortals/v2/fds/sdk/FundsAPIforDigitalPortals/model/inline_response2004_share_class_registration_country.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/FundsAPIforDigitalPortals/v2/fds/sdk/FundsAPIforDigitalPortals/model/inline_response2004_share_class_registration_country.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
Funds API For Digital Portals
Search for mutual funds and ETFs using one single consolidated API, including a criteria-based screener. The API provides also base data, key figures, and holdings. A separate endpoint returns the possible values and value range for the parameters that the endpoint /fund/notation/screener/search accepts: Application developers can request the values and value range only for a restricted set of notations that match predefined parameters. This functionality may be used to pre-fill the values and value ranges of the parameters of the /fund/notation/screener/search endpoint so that performing a search always leads to a non-empty set of notations. This API is fully integrated with the corresponding Quotes API, allowing access to detailed price and performance information of instruments, as well as basic security identifier cross-reference. For direct access to price histories, please refer to the Time Series API for Digital Portals. Similar criteria based screener APIs exist for equity instruments and securitized derivatives: See the Stocks API and the Securitized Derivatives API for details. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FundsAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FundsAPIforDigitalPortals.exceptions import ApiAttributeError
class InlineResponse2004ShareClassRegistrationCountry(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'id': (float,), # noqa: E501
'name': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'name': 'name', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse2004ShareClassRegistrationCountry - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (float): Identifier of the country.. [optional] # noqa: E501
name (str): Name of the country.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse2004ShareClassRegistrationCountry - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (float): Identifier of the country.. [optional] # noqa: E501
name (str): Name of the country.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 47.957692
| 1,125
| 0.594274
|
3c1f6e6f27f361df9eb27fc2d39edd9ab4c05eee
| 2,561
|
py
|
Python
|
c2cgeoportal/scaffolds/update/CONST_alembic/main/versions/5109242131ce_add_column_time_widget.py
|
craxxkid/c2cgeoportal
|
60ca7d5d014d69b0a938f858271c911a30da77c3
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
c2cgeoportal/scaffolds/update/CONST_alembic/main/versions/5109242131ce_add_column_time_widget.py
|
craxxkid/c2cgeoportal
|
60ca7d5d014d69b0a938f858271c911a30da77c3
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
c2cgeoportal/scaffolds/update/CONST_alembic/main/versions/5109242131ce_add_column_time_widget.py
|
craxxkid/c2cgeoportal
|
60ca7d5d014d69b0a938f858271c911a30da77c3
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""add column time_widget
Revision ID: 5109242131ce
Revises: 164ac0819a61
Create Date: 2015-04-27 17:31:41.760977
"""
from alembic import op, context
from sqlalchemy import Column
from sqlalchemy.types import Unicode
# revision identifiers, used by Alembic.
revision = '5109242131ce'
down_revision = '164ac0819a61'
def upgrade():
schema = context.get_context().config.get_main_option('schema')
# Instructions
for table in ['layerv1', 'layer_internal_wms', 'layer_external_wms']:
op.add_column(table, Column('time_widget', Unicode(10), default=u'slider'), schema=schema)
op.execute("UPDATE %(schema)s.%(table)s SET time_widget = 'slider'" % {
'schema': schema, 'table': table
})
def downgrade():
schema = context.get_context().config.get_main_option('schema')
# Instructions
for table in ['layerv1', 'layer_internal_wms', 'layer_external_wms']:
op.drop_column(table, 'time_widget', schema=schema)
| 40.650794
| 98
| 0.754393
|
8607704f0d23c096c4ced60c4c6d0771f9084509
| 483
|
py
|
Python
|
visualizedata.py
|
scuruchima1/Chicago-COVID19-Forecaster
|
c83bd71f749b5ca13caed002544cf518f873372f
|
[
"MIT"
] | null | null | null |
visualizedata.py
|
scuruchima1/Chicago-COVID19-Forecaster
|
c83bd71f749b5ca13caed002544cf518f873372f
|
[
"MIT"
] | null | null | null |
visualizedata.py
|
scuruchima1/Chicago-COVID19-Forecaster
|
c83bd71f749b5ca13caed002544cf518f873372f
|
[
"MIT"
] | null | null | null |
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
import numpy as np
#Get 7-day moving average data set
dataset = read_csv(r'data/avg.csv')
data = np.genfromtxt(r"data/avg.csv", delimiter=",")
data = list(filter(([]).__ne__, data))
#Plot data, make scatter matrix, and a box and whisker plot
pyplot.plot(data)
scatter_matrix(dataset)
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
pyplot.show()
| 30.1875
| 81
| 0.759834
|
d9e23d7f6c7f869ad7080053248ad39d0614e99d
| 1,965
|
py
|
Python
|
daiquiri/metadata/serializers/dublincore.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 14
|
2018-12-23T18:35:02.000Z
|
2021-12-15T04:55:12.000Z
|
daiquiri/metadata/serializers/dublincore.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 40
|
2018-12-20T12:44:05.000Z
|
2022-03-21T11:35:20.000Z
|
daiquiri/metadata/serializers/dublincore.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 5
|
2019-05-16T08:03:35.000Z
|
2021-08-23T20:03:11.000Z
|
from django.conf import settings
from rest_framework import serializers
from daiquiri.core.serializers import JSONListField
from daiquiri.metadata.models import Schema, Table
class DublincoreSerializer(serializers.ModelSerializer):
title = serializers.SerializerMethodField()
description = serializers.SerializerMethodField()
creators = JSONListField(default=[])
contributors = JSONListField(default=[])
subjects = serializers.ReadOnlyField(default=settings.SITE_SUBJECTS)
publisher = serializers.ReadOnlyField(default=settings.SITE_PUBLISHER)
date = serializers.ReadOnlyField(source='published')
type = serializers.SerializerMethodField()
identifier = serializers.SerializerMethodField()
rights = serializers.ReadOnlyField(source='license')
def get_title(self, obj):
return obj.title or obj.name
def get_description(self, obj):
return obj.long_description or obj.description
def get_type(self, obj):
return 'Dataset'
def get_identifier(self, obj):
raise NotImplementedError()
class DublincoreSchemaSerializer(DublincoreSerializer):
class Meta:
model = Schema
fields = (
'title',
'description',
'creators',
'contributors',
'subjects',
'publisher',
'date',
'type',
'identifier',
'rights'
)
def get_identifier(self, obj):
return obj.doi or 'schemas/%i' % obj.pk
class DublincoreTableSerializer(DublincoreSerializer):
class Meta:
model = Table
fields = (
'title',
'description',
'creators',
'contributors',
'subjects',
'publisher',
'date',
'type',
'identifier',
'rights'
)
def get_identifier(self, obj):
return obj.doi or 'tables/%i' % obj.pk
| 26.2
| 74
| 0.622392
|
e1936433afc40b68b04707a035438554bb375806
| 5,315
|
py
|
Python
|
tests/test_etl.py
|
x14119641/small-projects
|
20acb97bdbb7ef3e4946821e3476ea451e548e28
|
[
"MIT"
] | null | null | null |
tests/test_etl.py
|
x14119641/small-projects
|
20acb97bdbb7ef3e4946821e3476ea451e548e28
|
[
"MIT"
] | null | null | null |
tests/test_etl.py
|
x14119641/small-projects
|
20acb97bdbb7ef3e4946821e3476ea451e548e28
|
[
"MIT"
] | null | null | null |
from app import etl
import pandas as pd
from app.extensions import mongo
import pytest
doc_1 = {"_id": 2141219,
"iswc": "T0420889173",
"titles": [
{"title": "MALA YERBA",
"type": "OriginalTitle"}
],
"right_owners": [
{"name": "RAFAEL MENDIZABAL ITURAIN",
"role": "Autor",
"ipi": "00200703727"},
{"name": "JOSE CARPENA SORIANO",
"role": "Autor",
"ipi": "00222061816"},
{"name": "FRANCISCO MARTINEZ SOCIAS",
"role": "Compositor",
"ipi": "00222084113"}
]
}
last_doc = {"_id": 611321,
"iswc": "T0421644792",
"titles": [
{"title": "CHA CHA CHA DE BAHIA",
"type": "OriginalTitle"},
{"title": "CHACHACHA EN BAHIA",
"type": "AlternativeType"},
{"title": "CHA CHACHA EN BAHÍA",
"type": "AlternativeType"},
{"title": "CHA CHACHA EN BAHIA",
"type": "AlternativeType"},
],
"right_owners": [
{"name": "ENRIQUE JESUS JORRIN Y OLEAGA",
"role": "Compositor/Autor",
"ipi": "00015546498"},
{"name": "EDMOND DAVID BACRI",
"role": "Adaptador",
"ipi": "00001772516"}
]
}
series_cols = "ISWC,ORIGINAL TITLE,ALTERNATIVE TITLE 1,ALTERNATIVE TITLE 2,ALTERNATIVE TITLE 3,RIGHT OWNER,ROLE,IPI NUMBER,ID SOCIETY".split(",")
serie_1 = pd.Series(
["T-042088917-3","MALA YERBA","","","","RAFAEL MENDIZABAL ITURAIN","Autor","200703727",2141219],
index=series_cols
)
last_serie = pd.Series(
["T-042164479-2","CHA CHA CHA DE BAHIA","CHACHACHA EN BAHIA","CHA CHACHA EN BAHÍA","CHA CHACHA EN BAHIA","EDMOND DAVID BACRI","Adaptador","1772516",611321],
index=series_cols
)
@pytest.fixture
def df():
yield etl.read_excel()
def test_read_excel(df):
print(df)
assert df.shape == (14, 9)
assert 'ROLE' in df.columns
assert any(item.startswith(" ") for item in df.columns) is False, "Some column name starts with ' '"
def test_extract_ipi():
ipis = ["200703727","222084113", "159586128", "68238360", "00555"]
for ipi in ipis:
assert len(etl.extract_ipi(ipi)) ==11
assert etl.extract_ipi('') ==''
def test_extract_titles():
assert len(etl.extract_titles(serie_1[1:5].values)) ==1
assert len(etl.extract_titles(last_serie[1:5].values)) ==4
def test_create_schema(df):
schema_1 = etl.create_schema(df.iloc[1,:])
assert isinstance(schema_1, dict)
assert schema_1["_id"] == doc_1["_id"]
assert schema_1["iswc"] == doc_1["iswc"]
assert schema_1["titles"][0]["title"] == doc_1["titles"][0]["title"]
assert schema_1["right_owners"][0]["name"] in doc_1["right_owners"][1]["name"]
### Test Load data ###
def test_insert_data(df):
"""Inserts 3 first rows to see if maps the doc_1"""
for (i,row) in df.iterrows():
if i > 2:
break
obj = etl.create_schema(row)
etl.insert_data(obj, True)
query = mongo.test_music_dataset.music_collection.find_one({"_id": obj['_id']})
assert isinstance(query, dict)
assert query["_id"] == doc_1["_id"]
assert query["iswc"] == doc_1["iswc"]
assert len(query["titles"]) == len(doc_1["titles"])
assert len(query["right_owners"]) == len(doc_1["right_owners"])
owners_name_query = [item["name"] for item in query["right_owners"]]
owners_name_doc_1 = [item["name"] for item in doc_1["right_owners"]]
assert set(owners_name_query) == set(owners_name_doc_1)
owners_ipi_query = [item["ipi"] for item in query["right_owners"]]
owners_ipi_doc_1 = [item["ipi"] for item in doc_1["right_owners"]]
assert set(owners_ipi_query) == set(owners_ipi_doc_1)
mongo.test_music_dataset.music_collection.drop()
assert "music_collection" not in mongo.test_music_dataset.list_collection_names()
def test_load_data(df):
"""Same loop of load_data in etl, testes last document"""
mongo.test_music_dataset.music_collection.drop()
for (_,row) in df.iterrows():
obj = etl.create_schema(row)
etl.insert_data(obj, True)
all_items = mongo.test_music_dataset.music_collection.find({})
assert len(list(all_items))==5
# last item
query = mongo.test_music_dataset.music_collection.find_one({"_id": last_doc['_id']})
assert isinstance(query, dict)
assert query["_id"] == last_doc["_id"]
assert query["iswc"] == last_doc["iswc"]
assert len(query["titles"]) == len(last_doc["titles"])
assert len(query["right_owners"]) == len(last_doc["right_owners"])
owners_name_query = [item["name"] for item in query["right_owners"]]
owners_name_last_doc = [item["name"] for item in last_doc["right_owners"]]
assert set(owners_name_query) == set(owners_name_last_doc)
owners_ipi_query = [item["ipi"] for item in query["right_owners"]]
owners_ipi_last_doc = [item["ipi"] for item in last_doc["right_owners"]]
assert set(owners_ipi_query) == set(owners_ipi_last_doc)
mongo.test_music_dataset.music_collection.drop()
assert "music_collection" not in mongo.test_music_dataset.list_collection_names()
| 36.909722
| 160
| 0.614864
|
900c0e215c20d510c7cda1b2afb05136f055cc23
| 8,915
|
py
|
Python
|
sphinx_markdown_parser/commonmark_parser.py
|
codejamninja/recommonmark
|
c5c9e79574ac3521a9357b86238f1d1c88f5859a
|
[
"MIT"
] | null | null | null |
sphinx_markdown_parser/commonmark_parser.py
|
codejamninja/recommonmark
|
c5c9e79574ac3521a9357b86238f1d1c88f5859a
|
[
"MIT"
] | null | null | null |
sphinx_markdown_parser/commonmark_parser.py
|
codejamninja/recommonmark
|
c5c9e79574ac3521a9357b86238f1d1c88f5859a
|
[
"MIT"
] | null | null | null |
"""Docutils CommonMark parser"""
import sys
from os.path import splitext
from docutils import parsers, nodes
from sphinx import addnodes
from commonmark import Parser
from warnings import warn
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
__all__ = ['CommonMarkParser']
class CommonMarkParser(parsers.Parser):
"""Docutils parser for CommonMark"""
supported = ('md', 'markdown')
translate_section_name = None
level = 0
def __init__(self):
self._level_to_elem = {}
def parse(self, inputstring, document):
self.document = document
self.current_node = document
self.setup_parse(inputstring, document)
self.setup_sections()
parser = Parser()
ast = parser.parse(inputstring + '\n')
self.convert_ast(ast)
self.finish_parse()
def convert_ast(self, ast):
for (node, entering) in ast.walker():
fn_prefix = "visit" if entering else "depart"
fn_name = "{0}_{1}".format(fn_prefix, node.t.lower())
fn_default = "default_{0}".format(fn_prefix)
fn = getattr(self, fn_name, None)
if fn is None:
fn = getattr(self, fn_default)
fn(node)
# Node type enter/exit handlers
def default_visit(self, mdnode):
pass
def default_depart(self, mdnode):
"""Default node depart handler
If there is a matching ``visit_<type>`` method for a container node,
        then we should make sure to back up to its parent element when the node
is exited.
"""
if mdnode.is_container():
fn_name = 'visit_{0}'.format(mdnode.t)
if not hasattr(self, fn_name):
warn("Container node skipped: type={0}".format(mdnode.t))
else:
self.current_node = self.current_node.parent
def visit_heading(self, mdnode):
# Test if we're replacing a section level first
if isinstance(self.current_node, nodes.section):
if self.is_section_level(mdnode.level, self.current_node):
self.current_node = self.current_node.parent
title_node = nodes.title()
title_node.line = mdnode.sourcepos[0][0]
new_section = nodes.section()
new_section.line = mdnode.sourcepos[0][0]
new_section.append(title_node)
self.add_section(new_section, mdnode.level)
# Set the current node to the title node to accumulate text children/etc
# for heading.
self.current_node = title_node
def depart_heading(self, _):
"""Finish establishing section
Wrap up title node, but stick in the section node. Add the section names
based on all the text nodes added to the title.
"""
assert isinstance(self.current_node, nodes.title)
# The title node has a tree of text nodes, use the whole thing to
# determine the section id and names
text = self.current_node.astext()
if self.translate_section_name:
text = self.translate_section_name(text)
name = nodes.fully_normalize_name(text)
section = self.current_node.parent
section['names'].append(name)
self.document.note_implicit_target(section, section)
self.current_node = section
def visit_text(self, mdnode):
self.current_node.append(nodes.Text(mdnode.literal, mdnode.literal))
def visit_softbreak(self, _):
self.current_node.append(nodes.Text('\n'))
def visit_paragraph(self, mdnode):
p = nodes.paragraph(mdnode.literal)
p.line = mdnode.sourcepos[0][0]
self.current_node.append(p)
self.current_node = p
def visit_emph(self, _):
n = nodes.emphasis()
self.current_node.append(n)
self.current_node = n
def visit_strong(self, _):
n = nodes.strong()
self.current_node.append(n)
self.current_node = n
def visit_code(self, mdnode):
n = nodes.literal(mdnode.literal, mdnode.literal)
self.current_node.append(n)
def visit_link(self, mdnode):
ref_node = nodes.reference()
# Check destination is supported for cross-linking and remove extension
destination = mdnode.destination
_, ext = splitext(destination)
# TODO check for other supported extensions, such as those specified in
# the Sphinx conf.py file but how to access this information?
# TODO this should probably only remove the extension for local paths,
# i.e. not uri's starting with http or other external prefix.
if ext.replace('.', '') in self.supported:
destination = destination.replace(ext, '')
ref_node['refuri'] = destination
        # TODO okay, so this is actually not always the right line number, but
# these mdnodes won't have sourcepos on them for whatever reason. This
# is better than 0 though.
ref_node.line = self._get_line(mdnode)
if mdnode.title:
ref_node['title'] = mdnode.title
next_node = ref_node
url_check = urlparse(destination)
if not url_check.scheme and not url_check.fragment:
wrap_node = addnodes.pending_xref(
reftarget=destination,
reftype='any',
refdomain=None, # Added to enable cross-linking
refexplicit=True,
refwarn=True
)
# TODO also not correct sourcepos
wrap_node.line = self._get_line(mdnode)
if mdnode.title:
wrap_node['title'] = mdnode.title
wrap_node.append(ref_node)
next_node = wrap_node
self.current_node.append(next_node)
self.current_node = ref_node
def depart_link(self, mdnode):
if isinstance(self.current_node.parent, addnodes.pending_xref):
self.current_node = self.current_node.parent.parent
else:
self.current_node = self.current_node.parent
def visit_image(self, mdnode):
img_node = nodes.image()
img_node['uri'] = mdnode.destination
if mdnode.title:
img_node['alt'] = mdnode.title
self.current_node.append(img_node)
self.current_node = img_node
def visit_list(self, mdnode):
list_node = None
if (mdnode.list_data['type'] == "bullet"):
list_node_cls = nodes.bullet_list
else:
list_node_cls = nodes.enumerated_list
list_node = list_node_cls()
list_node.line = mdnode.sourcepos[0][0]
self.current_node.append(list_node)
self.current_node = list_node
def visit_item(self, mdnode):
node = nodes.list_item()
node.line = mdnode.sourcepos[0][0]
self.current_node.append(node)
self.current_node = node
def visit_code_block(self, mdnode):
kwargs = {}
if mdnode.is_fenced and mdnode.info:
kwargs['language'] = mdnode.info
text = ''.join(mdnode.literal)
if text.endswith('\n'):
text = text[:-1]
node = nodes.literal_block(text, text, **kwargs)
self.current_node.append(node)
def visit_block_quote(self, mdnode):
q = nodes.block_quote()
q.line = mdnode.sourcepos[0][0]
self.current_node.append(q)
self.current_node = q
def visit_html(self, mdnode):
raw_node = nodes.raw(mdnode.literal,
mdnode.literal, format='html')
if mdnode.sourcepos is not None:
raw_node.line = mdnode.sourcepos[0][0]
self.current_node.append(raw_node)
def visit_html_inline(self, mdnode):
self.visit_html(mdnode)
def visit_html_block(self, mdnode):
self.visit_html(mdnode)
def visit_thematic_break(self, _):
self.current_node.append(nodes.transition())
# Section handling
def setup_sections(self):
self._level_to_elem = {0: self.document}
def add_section(self, section, level):
parent_level = max(
section_level for section_level in self._level_to_elem
if level > section_level
)
parent = self._level_to_elem[parent_level]
parent.append(section)
self._level_to_elem[level] = section
# Prune level to limit
self._level_to_elem = dict(
(section_level, section)
for section_level, section in self._level_to_elem.items()
if section_level <= level
)
def is_section_level(self, level, section):
return self._level_to_elem.get(level, None) == section
def _get_line(self, mdnode):
while mdnode:
if mdnode.sourcepos:
return mdnode.sourcepos[0][0]
mdnode = mdnode.parent
return 0
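# A minimal, hedged usage sketch (not part of the original module): it drives
# the parser directly with docutils instead of through Sphinx. The
# OptionParser/new_document plumbing is assumed to be the usual docutils way
# to obtain a settings object and an empty document.
if __name__ == '__main__':  # pragma: no cover
    from docutils.frontend import OptionParser
    from docutils.utils import new_document

    settings = OptionParser(components=(CommonMarkParser,)).get_default_values()
    document = new_document('<markdown sample>', settings)
    CommonMarkParser().parse('# Title\n\nSome *emphasised* text.\n', document)
    # The heading becomes a section with a title node; the paragraph follows it.
    print(document.pformat())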
| 33.389513
| 80
| 0.622658
|
207d583efd6d17cb9ab20dd1c28fa666b5099bb5
| 1,939
|
py
|
Python
|
src/utils.py
|
kod3r/GraRep
|
ac42b59513993e37d29389d4dc23fc758207e96a
|
[
"MIT"
] | 1
|
2019-12-15T19:33:00.000Z
|
2019-12-15T19:33:00.000Z
|
src/utils.py
|
kod3r/GraRep
|
ac42b59513993e37d29389d4dc23fc758207e96a
|
[
"MIT"
] | null | null | null |
src/utils.py
|
kod3r/GraRep
|
ac42b59513993e37d29389d4dc23fc758207e96a
|
[
"MIT"
] | null | null | null |
"""Dataset reading utilities."""
import numpy as np
import pandas as pd
import networkx as nx
from scipy import sparse
from texttable import Texttable
def create_inverse_degree_matrix(edges):
"""
Creating an inverse degree matrix from an edge list.
:param edges: Edge list.
:return D_1: Inverse degree matrix.
"""
graph = nx.from_edgelist(edges)
ind = range(len(graph.nodes()))
degs = [1.0/graph.degree(node) for node in range(graph.number_of_nodes())]
D_1 = sparse.coo_matrix((degs, (ind, ind)),
shape=(graph.number_of_nodes(),
graph.number_of_nodes()),
dtype=np.float32)
return D_1
def normalize_adjacency(edges):
"""
Method to calculate a sparse degree normalized adjacency matrix.
:param edges: Edge list of graph.
:return A: Normalized adjacency matrix.
"""
D_1 = create_inverse_degree_matrix(edges)
index_1 = [edge[0] for edge in edges] + [edge[1] for edge in edges]
index_2 = [edge[1] for edge in edges] + [edge[0] for edge in edges]
values = [1.0 for edge in edges] + [1.0 for edge in edges]
A = sparse.coo_matrix((values, (index_1, index_2)),
shape=D_1.shape,
dtype=np.float32)
A = A.dot(D_1)
return A
def read_graph(edge_path):
"""
Method to read graph and create a target matrix.
    :param edge_path: Path to the edge list.
:return A: Target matrix.
"""
edges = pd.read_csv(edge_path).values.tolist()
A = normalize_adjacency(edges)
return A
def tab_printer(args):
"""
Function to print the logs in a nice tabular format.
:param args: Parameters used for the model.
"""
args = vars(args)
t = Texttable()
t.add_rows([["Parameter", "Value"]])
t.add_rows([[k.replace("_", " ").capitalize(), v] for k, v in args.items()])
print(t.draw())
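# A small, hedged illustration (example only, not part of the upstream code):
# normalize_adjacency on a triangle graph. Because A = adjacency * D^-1, each
# column of A sums to one, since every edge weight is divided by the degree of
# the column's node. The edge list below is made up for the demo.
if __name__ == "__main__":
    example_edges = [[0, 1], [1, 2], [0, 2]]
    A = normalize_adjacency(example_edges)
    print(A.todense())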
| 31.274194
| 80
| 0.617844
|
7ea7a64258baeceb17c5190974767da7c90347d4
| 1,840
|
py
|
Python
|
software/logReader/intersect.py
|
Extent421/bladeBench
|
0eb01b88e21e66a7897b3094041196790848b3e0
|
[
"MIT"
] | 8
|
2018-10-30T22:04:56.000Z
|
2020-07-17T12:50:56.000Z
|
software/logReader/intersect.py
|
Extent421/bladeBench
|
0eb01b88e21e66a7897b3094041196790848b3e0
|
[
"MIT"
] | null | null | null |
software/logReader/intersect.py
|
Extent421/bladeBench
|
0eb01b88e21e66a7897b3094041196790848b3e0
|
[
"MIT"
] | null | null | null |
#
# line segment intersection using vectors
# see Computer Graphics by F.S. Hill
#
from numpy import *
import sys
def perp( a ) :
b = empty_like(a)
b[0] = -a[1]
b[1] = a[0]
return b
# line segment a given by endpoints a1, a2
# line segment b given by endpoints b1, b2
# return: the intersection point of the two (infinite) lines, or None if they are parallel
def intersectPoint( a1,a2, b1,b2 ) :
a1 = array(a1)
a2 = array(a2)
b1 = array(b1)
b2 = array(b2)
da = a2-a1
db = b2-b1
dp = a1-b1
dap = perp(da)
denom = dot( dap, db)
if not denom : return None
num = dot( dap, dp )
return (num / denom.astype(float))*db + b1
def isBetween( a, b, c):
a = array(a)
b = array(b)
c = array(c)
cma = c-a
bma = b-a
crossproduct = cma[1]*bma[0] - cma[0]*bma[1]
if abs(crossproduct) > finfo(float32).eps:
#print 'crossFail', abs(crossproduct), finfo(float32).eps
return False
dotproduct = cma[0]*bma[0] + cma[1]*bma[1]
if dotproduct < 0:
#print 'dotFail', dotProduct
return False
squaredLength = pow( bma[0], 2) + pow( bma[1], 2 )
if dotproduct > squaredLength:
#print 'squareFail',dotproduct, squaredLength
return False
return True
def intersect( a1,a2, b1,b2 ) :
point = intersectPoint( a1,a2, b1,b2 )
#print 'point', point
if point is None: return None
#print 'a', isBetween(a1, a2, point)
#print 'b', isBetween(b1, b2, point)
if not isBetween(a1, a2, point): return None
if not isBetween(b1, b2, point): return None
return point
def dist( a,b, c ) :
a = array(a)
b = array(b)
c = array(c)
lineA = a-b
lineADist = sqrt( pow(lineA[0], 2) + pow(lineA[1], 2) )
lineB = a-c
lineBDist = sqrt( pow(lineB[0], 2) + pow(lineB[1], 2) )
    print('dists', lineADist, lineBDist)
return lineBDist/lineADist
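# Hedged usage sketch (example only, not part of the original tool): two unit
# segments crossing at (0.5, 0.5). intersect() returns the crossing point as a
# numpy array, or None when the segments are parallel or do not overlap.
if __name__ == '__main__':
    crossing = intersect((0.0, 0.0), (1.0, 1.0), (0.0, 1.0), (1.0, 0.0))
    print('example crossing point:', crossing)  # expected: [0.5 0.5]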
| 23.896104
| 65
| 0.582065
|
fdda3d8796054ae73cfb79c7261280933007a087
| 11,580
|
py
|
Python
|
cirq/google/api/v1/programs.py
|
exAClior/Cirq
|
0701327bc66c988428f302dd1e4bed1eef1535a6
|
[
"Apache-2.0"
] | 1
|
2021-01-05T19:47:55.000Z
|
2021-01-05T19:47:55.000Z
|
cirq/google/api/v1/programs.py
|
rohitvuppala/Cirq
|
0ff2894e053e4ce3bb1b54e9b9de1cc4345d10b3
|
[
"Apache-2.0"
] | 4
|
2021-01-11T10:35:37.000Z
|
2021-01-28T19:17:02.000Z
|
cirq/google/api/v1/programs.py
|
rohitvuppala/Cirq
|
0ff2894e053e4ce3bb1b54e9b9de1cc4345d10b3
|
[
"Apache-2.0"
] | 1
|
2021-12-30T21:50:00.000Z
|
2021-12-30T21:50:00.000Z
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Any, cast, Dict, Iterable, Optional, Sequence, Tuple, TYPE_CHECKING, Iterator
import numpy as np
import sympy
from cirq import devices, ops, protocols, value, circuits
from cirq.google.api.v1 import operations_pb2
if TYPE_CHECKING:
import cirq
def _load_json_bool(b: Any):
"""Converts a json field to bool. If already a bool, pass through."""
if isinstance(b, bool):
return b
return json.loads(b)
def gate_to_proto(
gate: 'cirq.Gate', qubits: Tuple['cirq.Qid', ...], delay: int
) -> operations_pb2.Operation:
if isinstance(gate, ops.MeasurementGate):
return operations_pb2.Operation(
incremental_delay_picoseconds=delay, measurement=_measure_to_proto(gate, qubits)
)
if isinstance(gate, ops.XPowGate):
if len(qubits) != 1:
# coverage: ignore
raise ValueError('Wrong number of qubits.')
return operations_pb2.Operation(
incremental_delay_picoseconds=delay, exp_w=_x_to_proto(gate, qubits[0])
)
if isinstance(gate, ops.YPowGate):
if len(qubits) != 1:
# coverage: ignore
raise ValueError('Wrong number of qubits.')
return operations_pb2.Operation(
incremental_delay_picoseconds=delay, exp_w=_y_to_proto(gate, qubits[0])
)
if isinstance(gate, ops.PhasedXPowGate):
if len(qubits) != 1:
# coverage: ignore
raise ValueError('Wrong number of qubits.')
return operations_pb2.Operation(
incremental_delay_picoseconds=delay, exp_w=_phased_x_to_proto(gate, qubits[0])
)
if isinstance(gate, ops.ZPowGate):
if len(qubits) != 1:
# coverage: ignore
raise ValueError('Wrong number of qubits.')
return operations_pb2.Operation(
incremental_delay_picoseconds=delay, exp_z=_z_to_proto(gate, qubits[0])
)
if isinstance(gate, ops.CZPowGate):
if len(qubits) != 2:
# coverage: ignore
raise ValueError('Wrong number of qubits.')
return operations_pb2.Operation(
incremental_delay_picoseconds=delay, exp_11=_cz_to_proto(gate, *qubits)
)
raise ValueError("Don't know how to serialize this gate: {!r}".format(gate))
def _x_to_proto(gate: 'cirq.XPowGate', q: 'cirq.Qid') -> operations_pb2.ExpW:
return operations_pb2.ExpW(
target=_qubit_to_proto(q),
axis_half_turns=_parameterized_value_to_proto(0),
half_turns=_parameterized_value_to_proto(gate.exponent),
)
def _y_to_proto(gate: 'cirq.YPowGate', q: 'cirq.Qid') -> operations_pb2.ExpW:
return operations_pb2.ExpW(
target=_qubit_to_proto(q),
axis_half_turns=_parameterized_value_to_proto(0.5),
half_turns=_parameterized_value_to_proto(gate.exponent),
)
def _phased_x_to_proto(gate: 'cirq.PhasedXPowGate', q: 'cirq.Qid') -> operations_pb2.ExpW:
return operations_pb2.ExpW(
target=_qubit_to_proto(q),
axis_half_turns=_parameterized_value_to_proto(gate.phase_exponent),
half_turns=_parameterized_value_to_proto(gate.exponent),
)
def _z_to_proto(gate: 'cirq.ZPowGate', q: 'cirq.Qid') -> operations_pb2.ExpZ:
return operations_pb2.ExpZ(
target=_qubit_to_proto(q), half_turns=_parameterized_value_to_proto(gate.exponent)
)
def _cz_to_proto(gate: 'cirq.CZPowGate', p: 'cirq.Qid', q: 'cirq.Qid') -> operations_pb2.Exp11:
return operations_pb2.Exp11(
target1=_qubit_to_proto(p),
target2=_qubit_to_proto(q),
half_turns=_parameterized_value_to_proto(gate.exponent),
)
def _qubit_to_proto(qubit):
return operations_pb2.Qubit(row=qubit.row, col=qubit.col)
def _measure_to_proto(gate: 'cirq.MeasurementGate', qubits: Sequence['cirq.Qid']):
if len(qubits) == 0:
raise ValueError('Measurement gate on no qubits.')
invert_mask = None
if gate.invert_mask:
invert_mask = gate.invert_mask + (False,) * (gate.num_qubits() - len(gate.invert_mask))
if invert_mask and len(invert_mask) != len(qubits):
raise ValueError(
'Measurement gate had invert mask of length '
'different than number of qubits it acts on.'
)
return operations_pb2.Measurement(
targets=[_qubit_to_proto(q) for q in qubits],
key=protocols.measurement_key(gate),
invert_mask=invert_mask,
)
def circuit_as_schedule_to_protos(circuit: 'cirq.Circuit') -> Iterator[operations_pb2.Operation]:
"""Convert a circuit into an iterable of protos.
Args:
circuit: The circuit to convert to a proto. Must contain only
gates that can be cast to xmon gates.
Yields:
An Operation proto.
"""
last_picos: Optional[int] = None
time_picos = 0
for op in circuit.all_operations():
if last_picos is None:
delay = time_picos
else:
delay = time_picos - last_picos
op_proto = gate_to_proto(cast(ops.Gate, op.gate), op.qubits, delay)
time_picos += 1
last_picos = time_picos
yield op_proto
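# Hedged usage sketch (comment only; assumes a working cirq install): a short
# circuit made of native xmon gates can be serialized with the generator above.
#
#     import cirq
#     q = cirq.GridQubit(0, 0)
#     circuit = cirq.Circuit([cirq.X(q) ** 0.5, cirq.measure(q, key='m')])
#     protos = list(circuit_as_schedule_to_protos(circuit))
#     # protos[0].exp_w and protos[1].measurement mirror the two operations.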
def circuit_from_schedule_from_protos(
device: 'cirq.google.XmonDevice',
ops: Iterable[operations_pb2.Operation],
) -> 'cirq.Circuit':
"""Convert protos into a Circuit for the given device."""
result = []
for op in ops:
xmon_op = xmon_op_from_proto(op)
result.append(xmon_op)
return circuits.Circuit(result, device=device)
def pack_results(measurements: Sequence[Tuple[str, np.ndarray]]) -> bytes:
"""Pack measurement results into a byte string.
Args:
measurements: A sequence of tuples, one for each measurement, consisting
of a string key and an array of boolean data. The data should be
a 2-D array indexed by (repetition, qubit_index). All data for all
measurements must have the same number of repetitions.
Returns:
Packed bytes, as described in the unpack_results docstring below.
Raises:
        ValueError if the measurement data do not have compatible shapes.
"""
if not measurements:
return b''
shapes = [(key, np.shape(data)) for key, data in measurements]
if not all(len(shape) == 2 for _, shape in shapes):
raise ValueError("Expected 2-D data: shapes={}".format(shapes))
reps = shapes[0][1][0]
if not all(shape[0] == reps for _, shape in shapes):
raise ValueError("Expected same reps for all keys: shapes={}".format(shapes))
bits = np.hstack([np.asarray(data, dtype=bool) for _, data in measurements])
bits = bits.reshape(-1)
# Pad length to multiple of 8 if needed.
remainder = len(bits) % 8
if remainder:
bits = np.pad(bits, (0, 8 - remainder), 'constant')
# Pack in little-endian bit order.
bits = bits.reshape((-1, 8))[:, ::-1]
byte_arr = np.packbits(bits, axis=1).reshape(-1)
return byte_arr.tobytes()
def unpack_results(
data: bytes, repetitions: int, key_sizes: Sequence[Tuple[str, int]]
) -> Dict[str, np.ndarray]:
"""Unpack data from a bitstring into individual measurement results.
Args:
data: Packed measurement results, in the form <rep0><rep1>...
where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...
with bits packed in little-endian order in each byte.
repetitions: number of repetitions.
key_sizes: Keys and sizes of the measurements in the data.
Returns:
Dict mapping measurement key to a 2D array of boolean results. Each
array has shape (repetitions, size) with size for that measurement.
"""
bits_per_rep = sum(size for _, size in key_sizes)
total_bits = repetitions * bits_per_rep
byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))
bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)
bits = bits[:total_bits].reshape((repetitions, bits_per_rep))
results = {}
ofs = 0
for key, size in key_sizes:
results[key] = bits[:, ofs : ofs + size]
ofs += size
return results
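# Hedged round-trip sketch (comment only): pack_results and unpack_results are
# inverses for well-formed input, e.g.
#
#     measurements = [('m', np.array([[True, False], [False, True]]))]
#     packed = pack_results(measurements)
#     unpacked = unpack_results(packed, repetitions=2, key_sizes=[('m', 2)])
#     # unpacked['m'] equals the original boolean array.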
def is_native_xmon_op(op: 'cirq.Operation') -> bool:
"""Check if the gate corresponding to an operation is a native xmon gate.
Args:
op: Input operation.
Returns:
True if the operation is native to the xmon, false otherwise.
"""
return isinstance(op, ops.GateOperation) and is_native_xmon_gate(op.gate)
def is_native_xmon_gate(gate: 'cirq.Gate') -> bool:
"""Check if a gate is a native xmon gate.
Args:
gate: Input gate.
Returns:
True if the gate is native to the xmon, false otherwise.
"""
return isinstance(
gate,
(
ops.CZPowGate,
ops.MeasurementGate,
ops.PhasedXPowGate,
ops.XPowGate,
ops.YPowGate,
ops.ZPowGate,
),
)
def xmon_op_from_proto(proto: operations_pb2.Operation) -> 'cirq.Operation':
"""Convert the proto to the corresponding operation.
See protos in api/google/v1 for specification of the protos.
Args:
proto: Operation proto.
Returns:
The operation.
"""
param = _parameterized_value_from_proto
qubit = _qubit_from_proto
if proto.HasField('exp_w'):
exp_w = proto.exp_w
return ops.PhasedXPowGate(
exponent=param(exp_w.half_turns),
phase_exponent=param(exp_w.axis_half_turns),
).on(qubit(exp_w.target))
if proto.HasField('exp_z'):
exp_z = proto.exp_z
return ops.Z(qubit(exp_z.target)) ** param(exp_z.half_turns)
if proto.HasField('exp_11'):
exp_11 = proto.exp_11
return ops.CZ(qubit(exp_11.target1), qubit(exp_11.target2)) ** param(exp_11.half_turns)
if proto.HasField('measurement'):
meas = proto.measurement
return ops.MeasurementGate(
num_qubits=len(meas.targets), key=meas.key, invert_mask=tuple(meas.invert_mask)
).on(*[qubit(q) for q in meas.targets])
raise ValueError('invalid operation: {}'.format(proto))
def _qubit_from_proto(proto: operations_pb2.Qubit):
return devices.GridQubit(row=proto.row, col=proto.col)
def _parameterized_value_from_proto(proto: operations_pb2.ParameterizedFloat) -> value.TParamVal:
if proto.HasField('parameter_key'):
return sympy.Symbol(proto.parameter_key)
if proto.HasField('raw'):
return proto.raw
raise ValueError(
'No value specified for parameterized float. '
'Expected "raw" or "parameter_key" to be set. '
'proto: {!r}'.format(proto)
)
def _parameterized_value_to_proto(param: value.TParamVal) -> operations_pb2.ParameterizedFloat:
if isinstance(param, sympy.Symbol):
return operations_pb2.ParameterizedFloat(parameter_key=str(param.free_symbols.pop()))
else:
return operations_pb2.ParameterizedFloat(raw=float(param))
| 33.760933
| 97
| 0.66563
|
e4943d35b3262a5bcf1d5e62067e8ccd576a992e
| 8,419
|
py
|
Python
|
ucsmsdk/mometa/fc/FcStats.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 78
|
2015-11-30T14:10:05.000Z
|
2022-02-13T00:29:08.000Z
|
ucsmsdk/mometa/fc/FcStats.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 113
|
2015-11-20T09:42:46.000Z
|
2022-03-16T16:53:29.000Z
|
ucsmsdk/mometa/fc/FcStats.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 86
|
2015-12-12T08:22:18.000Z
|
2022-01-23T03:56:34.000Z
|
"""This module contains the general information for FcStats ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FcStatsConsts:
SUSPECT_FALSE = "false"
SUSPECT_NO = "no"
SUSPECT_TRUE = "true"
SUSPECT_YES = "yes"
class FcStats(ManagedObject):
"""This is FcStats class."""
consts = FcStatsConsts()
naming_props = set([])
mo_meta = MoMeta("FcStats", "fcStats", "stats", VersionMeta.Version111j, "OutputOnly", 0xf, [], ["admin", "operations", "read-only"], ['fabricFcSanPc', 'fcPIo'], ['fcStatsHist'], ["Get"])
prop_meta = {
"bytes_rx": MoPropertyMeta("bytes_rx", "bytesRx", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"bytes_rx_delta": MoPropertyMeta("bytes_rx_delta", "bytesRxDelta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"bytes_rx_delta_avg": MoPropertyMeta("bytes_rx_delta_avg", "bytesRxDeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"bytes_rx_delta_max": MoPropertyMeta("bytes_rx_delta_max", "bytesRxDeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"bytes_rx_delta_min": MoPropertyMeta("bytes_rx_delta_min", "bytesRxDeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"bytes_tx": MoPropertyMeta("bytes_tx", "bytesTx", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"bytes_tx_delta": MoPropertyMeta("bytes_tx_delta", "bytesTxDelta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"bytes_tx_delta_avg": MoPropertyMeta("bytes_tx_delta_avg", "bytesTxDeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"bytes_tx_delta_max": MoPropertyMeta("bytes_tx_delta_max", "bytesTxDeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"bytes_tx_delta_min": MoPropertyMeta("bytes_tx_delta_min", "bytesTxDeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"intervals": MoPropertyMeta("intervals", "intervals", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"packets_rx": MoPropertyMeta("packets_rx", "packetsRx", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"packets_rx_delta": MoPropertyMeta("packets_rx_delta", "packetsRxDelta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"packets_rx_delta_avg": MoPropertyMeta("packets_rx_delta_avg", "packetsRxDeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"packets_rx_delta_max": MoPropertyMeta("packets_rx_delta_max", "packetsRxDeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"packets_rx_delta_min": MoPropertyMeta("packets_rx_delta_min", "packetsRxDeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"packets_tx": MoPropertyMeta("packets_tx", "packetsTx", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"packets_tx_delta": MoPropertyMeta("packets_tx_delta", "packetsTxDelta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"packets_tx_delta_avg": MoPropertyMeta("packets_tx_delta_avg", "packetsTxDeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"packets_tx_delta_max": MoPropertyMeta("packets_tx_delta_max", "packetsTxDeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"packets_tx_delta_min": MoPropertyMeta("packets_tx_delta_min", "packetsTxDeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"suspect": MoPropertyMeta("suspect", "suspect", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"thresholded": MoPropertyMeta("thresholded", "thresholded", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"time_collected": MoPropertyMeta("time_collected", "timeCollected", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
"update": MoPropertyMeta("update", "update", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
}
prop_map = {
"bytesRx": "bytes_rx",
"bytesRxDelta": "bytes_rx_delta",
"bytesRxDeltaAvg": "bytes_rx_delta_avg",
"bytesRxDeltaMax": "bytes_rx_delta_max",
"bytesRxDeltaMin": "bytes_rx_delta_min",
"bytesTx": "bytes_tx",
"bytesTxDelta": "bytes_tx_delta",
"bytesTxDeltaAvg": "bytes_tx_delta_avg",
"bytesTxDeltaMax": "bytes_tx_delta_max",
"bytesTxDeltaMin": "bytes_tx_delta_min",
"childAction": "child_action",
"dn": "dn",
"intervals": "intervals",
"packetsRx": "packets_rx",
"packetsRxDelta": "packets_rx_delta",
"packetsRxDeltaAvg": "packets_rx_delta_avg",
"packetsRxDeltaMax": "packets_rx_delta_max",
"packetsRxDeltaMin": "packets_rx_delta_min",
"packetsTx": "packets_tx",
"packetsTxDelta": "packets_tx_delta",
"packetsTxDeltaAvg": "packets_tx_delta_avg",
"packetsTxDeltaMax": "packets_tx_delta_max",
"packetsTxDeltaMin": "packets_tx_delta_min",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"suspect": "suspect",
"thresholded": "thresholded",
"timeCollected": "time_collected",
"update": "update",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.bytes_rx = None
self.bytes_rx_delta = None
self.bytes_rx_delta_avg = None
self.bytes_rx_delta_max = None
self.bytes_rx_delta_min = None
self.bytes_tx = None
self.bytes_tx_delta = None
self.bytes_tx_delta_avg = None
self.bytes_tx_delta_max = None
self.bytes_tx_delta_min = None
self.child_action = None
self.intervals = None
self.packets_rx = None
self.packets_rx_delta = None
self.packets_rx_delta_avg = None
self.packets_rx_delta_max = None
self.packets_rx_delta_min = None
self.packets_tx = None
self.packets_tx_delta = None
self.packets_tx_delta_avg = None
self.packets_tx_delta_max = None
self.packets_tx_delta_min = None
self.sacl = None
self.status = None
self.suspect = None
self.thresholded = None
self.time_collected = None
self.update = None
ManagedObject.__init__(self, "FcStats", parent_mo_or_dn, **kwargs)
| 69.578512
| 258
| 0.672645
|
48c3e8f6e551ecd77aa66983aa2d678734995b68
| 1,861
|
py
|
Python
|
predict.py
|
romanbas/Image_Classifier
|
74a675015796b1f6fea0d3439d1007cec2cbe61e
|
[
"MIT"
] | null | null | null |
predict.py
|
romanbas/Image_Classifier
|
74a675015796b1f6fea0d3439d1007cec2cbe61e
|
[
"MIT"
] | null | null | null |
predict.py
|
romanbas/Image_Classifier
|
74a675015796b1f6fea0d3439d1007cec2cbe61e
|
[
"MIT"
] | null | null | null |
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_hub as hub
import logging
import argparse
import sys
import json
from PIL import Image
#==================================
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
#==================================
parser = argparse.ArgumentParser ()
parser.add_argument('Image_path', help = 'Image Path', type = str)
parser.add_argument('model', help = 'DL Model to use ', type = str, default='my_model.h5')
parser.add_argument('--top_k', help = 'Number of K to display', type = int, default=5 )
parser.add_argument('--category_names', help = 'List of the categories in json file', default='label_map.json')
args=parser.parse_args()
#==================================
with open(args.category_names, 'r') as f:
class_names = json.load(f)
reloaded = tf.keras.models.load_model(args.model,custom_objects={'KerasLayer': hub.KerasLayer})
#==================================
def process_image(P_Image):
ts_img=tf.convert_to_tensor(P_Image, dtype=tf.float32)
ts_img=tf.image.resize(ts_img, (224,224))
ts_img/= 255
np_img=ts_img.numpy()
return np_img
def predict(Image,Model,Top_K):
prc_img=process_image(Image)
predict=Model.predict(prc_img)
predict = predict[0].tolist()
Prob, Class= tf.math.top_k(predict, k=Top_K)
Prob=Prob.numpy().tolist()#[0]
Class=Class.numpy().tolist()#[0]
Labeled_Class = [class_names[str(x)] for x in Class]
return Prob,Labeled_Class
#==================================
image_path = args.Image_path
img = Image.open(image_path)
test_image =np.expand_dims(np.asarray(img), axis = 0)
probs, classes = predict(test_image,reloaded, args.top_k)
print("The top K probabilities",probs)
print("The top K Classes",classes)
| 27.367647
| 111
| 0.65986
|
ffd8dd1ac30847023f8c964bd02dc624c2b674a1
| 3,528
|
py
|
Python
|
lib/proxy/tests.py
|
muffinresearch/solitude
|
6cb37f591956111b54e5c4098602be21c8f4b438
|
[
"BSD-3-Clause"
] | null | null | null |
lib/proxy/tests.py
|
muffinresearch/solitude
|
6cb37f591956111b54e5c4098602be21c8f4b438
|
[
"BSD-3-Clause"
] | null | null | null |
lib/proxy/tests.py
|
muffinresearch/solitude
|
6cb37f591956111b54e5c4098602be21c8f4b438
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
from django.core.urlresolvers import reverse
import mock
from nose.tools import eq_
import requests
import test_utils
from lib.bango.constants import HEADERS_SERVICE_GET
from lib.bango.tests import samples
from lib.paypal.constants import HEADERS_URL_GET, HEADERS_TOKEN_GET
from lib.paypal.map import urls
@mock.patch.object(settings, 'SOLITUDE_PROXY', True)
@mock.patch('lib.proxy.views.requests.post')
class TestProxy(test_utils.TestCase):
def setUp(self):
self.url = reverse('paypal.proxy')
def test_proxy(self, post):
post.return_value.status_code = 200
post.return_value.text = 'some-text'
res = self.client.post(self.url, **{HEADERS_URL_GET: 'get-pay-key'})
eq_(post.call_args[0][0], urls['get-pay-key'])
eq_(res.status_code, 200)
eq_(res.content, 'some-text')
def test_not_present(self, post):
with self.assertRaises(KeyError):
self.client.post(self.url)
def test_proxy_auth(self, post):
post.return_value.status_code = 200
self.client.get(self.url, **{HEADERS_URL_GET: 'get-pay-key',
HEADERS_TOKEN_GET: 'token=b&secret=f'})
assert 'X-PAYPAL-AUTHORIZATION' in post.call_args[1]['headers']
def test_status_code(self, post):
post.return_value.status_code = 123
res = self.client.post(self.url, **{HEADERS_URL_GET: 'get-pay-key'})
eq_(res.status_code, 123)
def test_result(self, post):
post.side_effect = requests.exceptions.ConnectionError
res = self.client.post(self.url, **{HEADERS_URL_GET: 'get-pay-key'})
eq_(res.status_code, 500)
def test_not_enabled(self, post):
with self.settings(SOLITUDE_PROXY=False):
eq_(self.client.post(self.url).status_code, 404)
@mock.patch.object(settings, 'SOLITUDE_PROXY', True)
@mock.patch.object(settings, 'BANGO_AUTH', {'USER': 'me', 'PASSWORD': 'shh'})
@mock.patch('lib.proxy.views.requests.post')
class TestBango(test_utils.TestCase):
def setUp(self):
self.url = reverse('bango.proxy')
def test_not_present(self, post):
with self.assertRaises(KeyError):
self.client.post(self.url, samples.sample_request,
**{'content_type': 'text/xml'})
def test_good(self, post):
self.client.post(self.url,
samples.sample_request,
**{'content_type': 'text/xml',
HEADERS_SERVICE_GET: 'http://url.com/b'})
body = post.call_args[1]['data']
assert '<ns0:username>me</ns0:username>' in body
assert '<ns0:password>shh</ns0:password>' in body
def test_billing(self, post):
self.client.post(self.url,
samples.billing_request,
**{'content_type': 'text/xml',
HEADERS_SERVICE_GET: 'http://url.com/b'})
body = post.call_args[1]['data']
assert '<ns1:username>me</ns1:username>' in body
assert '<ns1:password>shh</ns1:password>' in body
def test_refund(self, post):
self.client.post(self.url,
samples.refund_request,
**{'content_type': 'text/xml',
HEADERS_SERVICE_GET: 'http://url.com/b'})
body = post.call_args[1]['data']
assert '<ns0:username>me</ns0:username>' in body
assert '<ns0:password>shh</ns0:password>' in body
| 37.136842
| 77
| 0.619331
|
cca53c33de7869782daa1f9c17454a29fcf073ca
| 1,736
|
py
|
Python
|
src/drovirt/models/tasks.py
|
Storware/drovirt
|
4726b237e2f8940a652e2b773ace0742bb788192
|
[
"MIT"
] | 22
|
2018-10-19T13:40:56.000Z
|
2021-05-11T08:39:37.000Z
|
src/drovirt/models/tasks.py
|
Storware/drovirt
|
4726b237e2f8940a652e2b773ace0742bb788192
|
[
"MIT"
] | 12
|
2019-02-11T15:07:53.000Z
|
2019-02-11T15:21:33.000Z
|
src/drovirt/models/tasks.py
|
Storware/drovirt
|
4726b237e2f8940a652e2b773ace0742bb788192
|
[
"MIT"
] | 7
|
2018-12-10T14:17:49.000Z
|
2022-02-03T23:43:51.000Z
|
import enum
import logging
from sqlalchemy.sql import func
from drovirt.models.base import db, SerializerMixin
from drovirt.models.node import Node
logger = logging.getLogger(__name__)
class TaskStatus(enum.Enum):
QUEUED = "QUEUED"
ACTIVE = "ACTIVE"
COMPLETED = "COMPLETED"
FAILED = "FAILED"
class Task(SerializerMixin, db.Model):
__tablename__ = "task"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(256), nullable=False, default='')
created = db.Column(db.DateTime, nullable=False, server_default=func.now())
updated = db.Column(db.DateTime, onupdate=func.now())
started = db.Column(db.DateTime)
finished = db.Column(db.DateTime)
node_id = db.Column(db.Integer, db.ForeignKey('node.id'), nullable=True)
node = db.relationship("Node", backref=db.backref("tasks", lazy=True), uselist=False)
task_group_id = db.Column(db.Integer, db.ForeignKey('task_group.id'), nullable=False)
task_group = db.relationship("TaskGroup", backref=db.backref("tasks", lazy=True))
task_type = db.Column(db.String(16), nullable=False, default='')
status = db.Column(db.Enum(TaskStatus), nullable=False, default=TaskStatus.QUEUED)
order = db.Column(db.Integer, default=1)
message = db.Column(db.String(4096), nullable=True, default='')
class TaskGroup(SerializerMixin, db.Model):
__tablename__ = "task_group"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(256), nullable=False, default='')
created = db.Column(db.DateTime, nullable=False, server_default=func.now())
updated = db.Column(db.DateTime, onupdate=func.now())
#tasks = db.relationship("Task", backref=db.backref("task_group", lazy=True))
| 36.93617
| 89
| 0.710829
|
5f68493d3a0fd0f5eaedcdea581e338d2e96c391
| 1,497
|
py
|
Python
|
separator.py
|
AFakeman/HLM-tools
|
9a0de2094b6426a1b28c1898f0a3e4d210cb3a81
|
[
"Unlicense"
] | null | null | null |
separator.py
|
AFakeman/HLM-tools
|
9a0de2094b6426a1b28c1898f0a3e4d210cb3a81
|
[
"Unlicense"
] | null | null | null |
separator.py
|
AFakeman/HLM-tools
|
9a0de2094b6426a1b28c1898f0a3e4d210cb3a81
|
[
"Unlicense"
] | null | null | null |
import sys
import os
import struct
from PIL import Image
def read_int32(file):
return int(struct.unpack('<i',file.read(4))[0])
def read_int64(file):
return int(struct.unpack('<q',file.read(8))[0])
def read_string(file, length):
return file.read(length).decode('ASCII')
def read_byte(file):
return int(struct.unpack('<b',file.read(1))[0])
def write_int32(file, int):
file.write(struct.pack('<i',int))
def write_int64(file, int):
file.write(struct.pack('<q',int))
def write_string(file, str):
file.write(str)
def create_directories(name):
if not os.path.exists(name):
os.makedirs(name)
name = sys.argv[1]
output_directory = sys.argv[2]
img = Image.open(name+'.png')
pixels = img.load()
file = open(name+'.meta','rb')
length = os.stat(name+'.meta').st_size
file.seek(28)
create_directories(output_directory)
os.chdir(output_directory)
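# Layout of the .meta file as inferred from the reads below (an assumption,
# not an official format description): after a 28-byte header that is skipped,
# the file is a sequence of sprite groups, each being
#   1 byte   name length
#   N bytes  ASCII name
#   int32    sprite count
# followed, per sprite, by four int32 values (width, height, x, y into the
# atlas PNG) and 16 bytes that this script ignores.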
while file.tell()<length:
name_length = read_byte(file)
name = read_string(file,name_length)
sprites = read_int32(file)
for i in range(sprites):
size_x = read_int32(file)
size_y = read_int32(file)
pos_x = read_int32(file)
pos_y = read_int32(file)
file.read(16)
new_crop = img.crop((pos_x, pos_y, pos_x+size_x, pos_y+size_y))
new_crop.save("{name}_{number}.png".format(name=name, number=i))
#os.system('convert -delay 10 -dispose previous -loop 0 {name}*.png {name}.gif'.format(name=name))
#os.system('rm {name}*.png'.format(name=name))
| 27.722222
| 102
| 0.671343
|
106a9980026e70b39491501daae17c89c19d74c4
| 6,046
|
py
|
Python
|
turbinia/pubsub.py
|
sa3eed3ed/turbinia
|
1eb4db37813f2bd44dcc2c3764e9411f6a2f9d97
|
[
"Apache-2.0"
] | null | null | null |
turbinia/pubsub.py
|
sa3eed3ed/turbinia
|
1eb4db37813f2bd44dcc2c3764e9411f6a2f9d97
|
[
"Apache-2.0"
] | null | null | null |
turbinia/pubsub.py
|
sa3eed3ed/turbinia
|
1eb4db37813f2bd44dcc2c3764e9411f6a2f9d97
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google PubSub Listener for requests to Turbinia to process evidence."""
from __future__ import unicode_literals
import base64
import codecs
import logging
from six.moves import queue
from six.moves import xrange
from google.cloud import exceptions
from google.cloud import pubsub
from googleapiclient.errors import HttpError
import libcloudforensics.providers.gcp.internal.common as gcp_common
from turbinia import config
from turbinia import TurbiniaException
from turbinia.message import TurbiniaMessageBase
log = logging.getLogger('turbinia')
class TurbiniaPubSub(TurbiniaMessageBase):
"""PubSub client object for Google Cloud.
Attributes:
_queue: A Queue object for storing pubsub messages
pubsub_api_client: The pubsub API client object
subscriber: The pubsub subscriber client object
subscription: The pubsub subscription object
topic_name (str): The pubsub topic name
topic_path (str): The full path of the pubsub topic
"""
def __init__(self, topic_name):
"""Initialization for PubSubClient."""
self._queue = queue.Queue()
self.pubsub_api_client = None
self.subscriber = None
self.subscription = None
self.topic_name = topic_name
self.topic_path = None
def setup(self):
"""Set up the pubsub clients."""
self.setup_publisher()
self.setup_subscriber()
def setup_publisher(self):
"""Set up the pubsub publisher."""
config.LoadConfig()
# Configure the pubsub client in googleapiclient.discovery
# for more information on using the API, see
# https://cloud.google.com/pubsub/docs/reference/rest
self.pubsub_api_client = gcp_common.CreateService('pubsub', 'v1')
self.topic_path = 'projects/{0:s}/topics/{1:s}'.format(
config.TURBINIA_PROJECT, self.topic_name)
try:
log.debug('Trying to create pubsub topic {0:s}'.format(self.topic_path))
topics_client = self.pubsub_api_client.projects().topics()
# the ExecuteRequest takes API URI, method name as string and parameters
# as a dict, it executes the API call, handles paging and return response.
gcp_common.ExecuteRequest(
topics_client, 'create', {'name': self.topic_path})
except HttpError as exception:
if exception.resp.status == 409:
log.debug('PubSub topic {0:s} already exists.'.format(self.topic_path))
else:
raise TurbiniaException(
'Unknown error occurred when creating Topic:'
' {0!s}'.format(exception), __name__) from exception
log.debug('Setup PubSub publisher at {0:s}'.format(self.topic_path))
def setup_subscriber(self):
"""Set up the pubsub subscriber."""
config.LoadConfig()
self.subscriber = pubsub.SubscriberClient()
subscription_path = self.subscriber.subscription_path(
config.TURBINIA_PROJECT, self.topic_name)
if not self.topic_path:
self.topic_path = self.subscriber.topic_path(
config.TURBINIA_PROJECT, self.topic_name)
try:
log.debug(
'Trying to create subscription {0:s} on topic {1:s}'.format(
subscription_path, self.topic_path))
self.subscriber.create_subscription(subscription_path, self.topic_path)
except exceptions.Conflict:
log.debug('Subscription {0:s} already exists.'.format(subscription_path))
log.debug('Setup PubSub Subscription {0:s}'.format(subscription_path))
self.subscription = self.subscriber.subscribe(
subscription_path, self._callback)
def _callback(self, message):
"""Callback function that places messages in the queue.
Args:
message: A pubsub message object
"""
data = codecs.decode(message.data, 'utf-8')
log.debug('Received pubsub message: {0:s}'.format(data))
message.ack()
self._queue.put(message)
def check_messages(self):
"""Checks for pubsub messages.
Returns:
A list of any TurbiniaRequest objects received, else an empty list
"""
requests = []
for _ in xrange(self._queue.qsize()):
message = self._queue.get()
data = message.data
log.info('Processing PubSub message {0:s}'.format(message.message_id))
request = self._validate_message(data)
if request:
requests.append(request)
else:
log.error('Error processing PubSub message: {0:s}'.format(data))
return requests
def send_message(self, message):
"""Send a pubsub message.
message: The message to send.
"""
base64_data = base64.b64encode(message.encode('utf-8'))
request_body = {
"messages": [{
"data":
base64_data.decode('utf-8') # base64 encoded string
}]
}
publish_client = self.pubsub_api_client.projects().topics()
response = gcp_common.ExecuteRequest(
publish_client, 'publish', {
'topic': self.topic_path,
'body': request_body
})
# Safe to unpack since response is unpaged.
if not response[0]['messageIds']:
raise TurbiniaException(
'Message {0:s} was not published to topic {1:s}'.format(
message, self.topic_path))
msg_id = response[0]['messageIds'][0]
log.info(
'Published message {0!s} to topic {1!s}'.format(
msg_id, self.topic_name))
def send_request(self, request):
"""Sends a TurbiniaRequest message.
Args:
request: A TurbiniaRequest object.
"""
self.send_message(request.to_json())
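# Hedged usage sketch (comment only; requires Google Cloud credentials and a
# loaded Turbinia config, so it is not runnable as-is; the topic name is an
# assumption):
#
#     client = TurbiniaPubSub('turbinia-requests')
#     client.setup()                          # create/attach topic and subscription
#     client.send_request(turbinia_request)   # turbinia_request: a TurbiniaRequest
#     incoming = client.check_messages()      # any TurbiniaRequest objects received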
| 34.352273
| 80
| 0.691035
|
3c69b2136d2bea8a0c391f0c1beeae0a384cab00
| 1,431
|
py
|
Python
|
surface_coatings/chains/alkylsilane.py
|
daico007/surface_coatings
|
fde9086931c7612a74b59975479f76c6b35b2812
|
[
"MIT"
] | null | null | null |
surface_coatings/chains/alkylsilane.py
|
daico007/surface_coatings
|
fde9086931c7612a74b59975479f76c6b35b2812
|
[
"MIT"
] | 1
|
2021-10-07T20:38:41.000Z
|
2021-10-07T20:38:41.000Z
|
surface_coatings/chains/alkylsilane.py
|
daico007/surface_coatings
|
fde9086931c7612a74b59975479f76c6b35b2812
|
[
"MIT"
] | null | null | null |
"""Routine to create alkylsilane chain."""
import mbuild as mb
from mbuild.lib.recipes import Alkane
from mbuild.lib.moieties import Silane
from surface_coatings.molecules.one_port import OnePort
class Alkylsilane(mb.Compound):
"""A terminal-functionalized alkylsilane chain.
    An alkylsilane chain with a user-specified functional group at one
    terminus and a silane group (featuring an open port for attachment to a
    surface) at the other terminus.
Parameters
----------
chain_length : int
Length of the chain (number of carbons)
terminal_group : str
        Functional group to attach to the chain terminus. The only option
        currently shipped with this repository is `methyl`, but more can be
        added by providing the appropriate supplemental structure files.
"""
def __init__(self, chain_length=17, terminal_group="methyl"):
super(Alkylsilane, self).__init__()
tgroup = OnePort(terminal_group)
alkane = Alkane(chain_length, cap_front=False, cap_end=False)
self.add(alkane, 'alkane')
self.add(tgroup, 'terminal_group')
mb.force_overlap(self['alkane'], self['alkane']['up'],
self['terminal_group']['down'])
silane = Silane()
self.add(silane, 'silane')
mb.force_overlap(self['silane'], self['silane']['up'], self['alkane']['down'])
self.add(silane['down'], 'down', containment=False)
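# --- Editor's usage sketch (not part of the original module) --------------
# Builds a short methyl-terminated chain and writes it out for inspection.
# The chain length and the output filename below are illustrative only.
if __name__ == "__main__":
    chain = Alkylsilane(chain_length=10, terminal_group="methyl")
    print(chain)  # mBuild prints a summary of particles and bonds
    chain.save("alkylsilane.mol2", overwrite=True)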
| 35.775
| 86
| 0.672956
|
e8fe1fb646df1eb6c525bdcaadeba4d6f8db5d8e
| 738
|
py
|
Python
|
src/omniglot/cli.py
|
j340m3/omniglot
|
0290dba19288479905e9603bfe6a9daf92e5b7b4
|
[
"BSD-2-Clause"
] | null | null | null |
src/omniglot/cli.py
|
j340m3/omniglot
|
0290dba19288479905e9603bfe6a9daf92e5b7b4
|
[
"BSD-2-Clause"
] | 15
|
2016-12-30T14:25:20.000Z
|
2022-03-20T16:47:43.000Z
|
src/omniglot/cli.py
|
j340m3/omniglot
|
0290dba19288479905e9603bfe6a9daf92e5b7b4
|
[
"BSD-2-Clause"
] | 1
|
2016-12-30T14:25:42.000Z
|
2016-12-30T14:25:42.000Z
|
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -momniglot` python will execute
``__main__.py`` as a script. That means there won't be any
``omniglot.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``omniglot.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import click
@click.command()
@click.argument('names', nargs=-1)
def main(names=""):
click.echo(repr(names))
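# --- Editor's sketch (not part of the original module) ---------------------
# The companion ``__main__.py`` implied by the docstring above would simply
# delegate to this module, e.g.:
#
#     from omniglot.cli import main
#
#     if __name__ == "__main__":
#         main()
#
# so that ``python -momniglot NAME ...`` and a console-script entry point
# both end up in the same code path.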
| 30.75
| 80
| 0.711382
|
00a44825f6dabeca7cce793d38caca55cab3c644
| 6,827
|
py
|
Python
|
ros/src/tl_detector/light_classification/tl_classifier_object_detect.py
|
stela/CarND-Capstone
|
d7bdbdef35519df97775a1407ab65c91ff6eba46
|
[
"MIT"
] | 1
|
2018-05-10T16:46:11.000Z
|
2018-05-10T16:46:11.000Z
|
ros/src/tl_detector/light_classification/tl_classifier_object_detect.py
|
stela/CarND-Capstone
|
d7bdbdef35519df97775a1407ab65c91ff6eba46
|
[
"MIT"
] | 7
|
2018-06-20T20:42:53.000Z
|
2018-08-21T13:51:30.000Z
|
ros/src/tl_detector/light_classification/tl_classifier_object_detect.py
|
stela/CarND-Capstone
|
d7bdbdef35519df97775a1407ab65c91ff6eba46
|
[
"MIT"
] | 2
|
2018-07-02T21:48:08.000Z
|
2018-07-06T20:03:29.000Z
|
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import datetime
from PIL import Image
class TLClassifierObjDetect(object):
"""
This class uses the trained model from the TensorFlow Object Detection API.
https://github.com/tensorflow/models/tree/master/research/object_detection
    For the Capstone Project, a Single Shot Detector with a lightweight MobileNet
    backbone was trained on the Bosch Traffic Light Dataset, the LISA Traffic
    Light Dataset, and Udacity simulator and site images.
The Inference Code was adapted from:
https://github.com/tensorflow/models/blob/master/research/object_detection/inference/detection_inference.py
"""
def __init__(self, path_to_tensorflow_graph, confidence_thresh):
# Threshold for detections
self.detection_threshold = confidence_thresh
# Create the TensorFlow session in which the graph is loaded
self.session = tf.Session()
# Create Tensors for results
self.num_detections_tensor = None
self.detected_boxes_tensor = None
self.detected_scores_tensor = None
self.detected_labels_tensor = None
self.image_tensor = None
# Load the trained and frozen model graph with respective weights
with self.session.graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(path_to_tensorflow_graph, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
g = tf.get_default_graph()
# Remember all the tensors we will need for inference
# Most important: input image tensor:
self.image_tensor = g.get_tensor_by_name('image_tensor:0')
self.num_detections_tensor = tf.squeeze(g.get_tensor_by_name('num_detections:0'), 0)
self.num_detections_tensor = tf.cast(self.num_detections_tensor, tf.int32)
self.detected_boxes_tensor = tf.squeeze(g.get_tensor_by_name('detection_boxes:0'), 0)
self.detected_boxes_tensor = self.detected_boxes_tensor[:self.num_detections_tensor]
self.detected_scores_tensor = tf.squeeze(g.get_tensor_by_name('detection_scores:0'), 0)
self.detected_scores_tensor = self.detected_scores_tensor[:self.num_detections_tensor]
self.detected_labels_tensor = tf.squeeze(g.get_tensor_by_name('detection_classes:0'), 0)
self.detected_labels_tensor = tf.cast(self.detected_labels_tensor, tf.int64)
self.detected_labels_tensor = self.detected_labels_tensor[:self.num_detections_tensor]
def get_classification(self, image):
"""
Determines the color of the traffic light in the image by
using the TensorFlow graph.
We run the operations that will give us the boxes, scores and labels.
Then we filter out the most probable scores (> threshold) and use the
biggest box, since this will be the nearest traffic light.
        The graph will give us the following IDs:
4: NA
3: green
2: yellow
1: red
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
        traffic_light_id = 4  # ID 4 corresponds to UNKNOWN
id_mapping = {4: TrafficLight.UNKNOWN,
3: TrafficLight.GREEN,
2: TrafficLight.YELLOW,
1: TrafficLight.RED}
results = []
with self.session.graph.as_default():
boxes, scores, labels = self.session.run([self.detected_boxes_tensor,
self.detected_scores_tensor,
self.detected_labels_tensor],
feed_dict={self.image_tensor: image})
# Filter for probability (score) and classification
for i, score in enumerate(scores):
if score > self.detection_threshold and labels[i] != traffic_light_id:
results.append({'box': boxes[i],
'score': score,
'id': labels[i]})
if len(results) > 0:
# print('Nums: '+str(len(results))+' '+str(results[0]['score'])+ ' ' + str(results[0]['id']))
# The boxes are encoded as xmin, xmax, ymin, ymax with normalized coordinates [0..1].
# So lets find just the biggest box and take the traffic light state from it.
# max_sized_result = max(results, key=lambda bb: (bb['box'][1] - bb['box'][0]) * (bb['box'][3] - bb['box'][2]))
# traffic_light_id = max_sized_result['id']
# Better take the best score than the biggest box !
max_score_result = max(results, key=lambda bb: bb['score'])
traffic_light_id = max_score_result['id']
return id_mapping[traffic_light_id]
class TestTLClassifier(object):
    def __init__(self, path_to_tensorflow_graph, confidence_thresh=0.5):
        # Build the detector defined above; it needs the path to a frozen
        # inference graph and a detection confidence threshold (0.5 is an
        # assumed default).
        self.detector = TLClassifierObjDetect(
            path_to_tensorflow_graph, confidence_thresh)
def test_classification(self):
# Load image
image_path_green = ('light_classification/test_images/green.jpg', TrafficLight.GREEN)
image_path_yellow = ('light_classification/test_images/yellow.jpg', TrafficLight.YELLOW)
image_path_red = ('light_classification/test_images/red.jpg', TrafficLight.RED)
image_path_na = ('light_classification/test_images/NA.jpg', TrafficLight.UNKNOWN)
for image_path in [image_path_green, image_path_yellow, image_path_red, image_path_na]:
image = np.asarray(Image.open(image_path[0]))
image = np.expand_dims(image, 0)
gt_result = image_path[1]
pred_result = self.detector.get_classification(image)
print(image_path[0])
print('Prediction success: ' + str(gt_result == pred_result))
if gt_result != pred_result:
raise Exception('Prediction error.')
def measure_time(self):
# Load image
image_path = 'light_classification/test_images/green.jpg'
image = np.asarray(Image.open(image_path))
image = np.expand_dims(image, 0)
repeats = 25
t0 = datetime.datetime.now()
for i in range(repeats):
_ = self.detector.get_classification(image)
delta = datetime.datetime.now() - t0
        print('Time per image in ms: ' + str(delta.total_seconds() * 1000.0 / float(repeats)))
if __name__ == '__main__':
    import sys
    # Expects the path to the frozen inference graph as the first argument.
    tester = TestTLClassifier(sys.argv[1])
    tester.measure_time()
    tester.test_classification()
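# --- Editor's worked example (not part of the original module) -------------
# Demonstrates, with made-up numbers, the selection rule used in
# get_classification(): drop detections below the confidence threshold and
# detections labelled UNKNOWN (id 4), then keep the highest-scoring one.
def _select_best_label(scores, labels, threshold=0.5, unknown_id=4):
    candidates = [(score, label) for score, label in zip(scores, labels)
                  if score > threshold and label != unknown_id]
    if not candidates:
        return unknown_id
    return max(candidates, key=lambda c: c[0])[1]


if __name__ == '__main__':
    # The 0.95 detection is UNKNOWN and gets dropped, so the 0.9 red wins.
    assert _select_best_label([0.9, 0.3, 0.95], [1, 3, 4]) == 1
    # No detection clears the threshold, so the result stays UNKNOWN.
    assert _select_best_label([0.2, 0.1], [2, 3]) == 4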
| 41.375758
| 123
| 0.630877
|