Dataset columns (the ⌀ marker denotes nullable columns):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

The records below list these fields in this order, pipe-separated, with the file `content` inlined between the metadata fields and the three derived statistics.
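The last three columns (`avg_line_length`, `max_line_length`, `alphanum_fraction`) are statistics derived from `content`. A minimal sketch of how they can be recomputed is shown below; the exact formulas used when the dataset was built are not given here, so details such as the handling of empty files or trailing newlines are assumptions.

```python
# Sketch: recomputing the derived per-file columns from `content`.
# The dataset's exact definitions are not shown above, so the handling of
# empty files and whitespace here is an assumption.
def derived_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": (
            sum(ch.isalnum() for ch in content) / len(content) if content else 0.0
        ),
    }

if __name__ == "__main__":
    sample = '"""WSGI config."""\nimport os\n'
    print(derived_stats(sample))
```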
6cccae7fad14badcd9d04577d27f28d557fe726e
| 212
|
py
|
Python
|
visualizing_russian_tools/wsgi.py
|
arthurian/visualizing_russian_tools
|
65fd37839dc0650bb25d1f98904da5b79ae1a754
|
[
"BSD-3-Clause"
] | 2
|
2020-07-10T14:17:03.000Z
|
2020-11-17T09:18:26.000Z
|
visualizing_russian_tools/wsgi.py
|
eelegiap/visualizing_russian_tools
|
9c36baebc384133c7c27d7a7c4e0cedc8cb84e74
|
[
"BSD-3-Clause"
] | 13
|
2019-03-17T13:27:31.000Z
|
2022-01-18T17:03:14.000Z
|
visualizing_russian_tools/wsgi.py
|
eelegiap/visualizing_russian_tools
|
9c36baebc384133c7c27d7a7c4e0cedc8cb84e74
|
[
"BSD-3-Clause"
] | 2
|
2019-10-19T16:37:44.000Z
|
2020-06-22T13:30:20.000Z
|
"""WSGI config."""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'visualizing_russian_tools.settings.local')
application = get_wsgi_application()
| 21.2
| 91
| 0.811321
|
e6dc044dbc933e427b91aab0bc5b1f1fa4dc6c14
| 6,750
|
py
|
Python
|
examples/librispeech/evaluation/eval_ensemble8_ctc.py
|
sundogrd/tensorflow_end2end_speech_recognition
|
61e4a65fb5c9f3d9f690d713dcd77a48b1de0a14
|
[
"MIT"
] | 351
|
2017-05-27T08:31:27.000Z
|
2022-03-03T16:47:27.000Z
|
examples/librispeech/evaluation/eval_ensemble8_ctc.py
|
eLavin11/tensorflow_end2end_speech_recognition
|
65b9728089d5e92b25b92384a67419d970399a64
|
[
"MIT"
] | 19
|
2017-07-19T13:12:18.000Z
|
2019-06-12T06:07:13.000Z
|
examples/librispeech/evaluation/eval_ensemble8_ctc.py
|
eLavin11/tensorflow_end2end_speech_recognition
|
65b9728089d5e92b25b92384a67419d970399a64
|
[
"MIT"
] | 127
|
2017-06-12T16:27:21.000Z
|
2021-12-29T02:22:34.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Evaluate the ensemble of 8 CTC models (Librispeech corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, abspath
import sys
import yaml
import argparse
sys.path.append(abspath('../../../'))
from experiments.librispeech.data.load_dataset_ctc import Dataset
from experiments.librispeech.evaluation.eval_ensemble4_ctc import do_eval_cer
from utils.io.labels.character import Idx2char, Char2idx
from utils.evaluation.edit_distance import compute_cer, compute_wer, wer_align
from models.ctc.decoders.beam_search_decoder import BeamSearchDecoder
parser = argparse.ArgumentParser()
parser.add_argument('--result_save_path', type=str, default=None,
help='path to save results of ensemble')
parser.add_argument('--model1_path', type=str,
help='path to the 1st model to evaluate')
parser.add_argument('--model2_path', type=str,
help='path to the 2nd model to evaluate')
parser.add_argument('--model3_path', type=str,
help='path to the 3rd model to evaluate')
parser.add_argument('--model4_path', type=str,
help='path to the 4th model to evaluate')
parser.add_argument('--model5_path', type=str,
help='path to the 5th model to evaluate')
parser.add_argument('--model6_path', type=str,
help='path to the 6th model to evaluate')
parser.add_argument('--model7_path', type=str,
help='path to the 7th model to evaluate')
parser.add_argument('--model8_path', type=str,
help='path to the 8th model to evaluate')
parser.add_argument('--epoch_model1', type=int, default=-1,
help='the epoch of 1st model to restore')
parser.add_argument('--epoch_model2', type=int, default=-1,
help='the epoch of 2nd model to restore')
parser.add_argument('--epoch_model3', type=int, default=-1,
help='the epoch of 3rd model to restore')
parser.add_argument('--epoch_model4', type=int, default=-1,
help='the epoch of 4th model to restore')
parser.add_argument('--epoch_model5', type=int, default=-1,
help='the epoch of 5th model to restore')
parser.add_argument('--epoch_model6', type=int, default=-1,
help='the epoch of 6th model to restore')
parser.add_argument('--epoch_model7', type=int, default=-1,
help='the epoch of 7th model to restore')
parser.add_argument('--epoch_model8', type=int, default=-1,
help='the epoch of 8th model to restore')
parser.add_argument('--beam_width', type=int, default=20,
help='beam_width (int, optional): beam width for beam search.' +
                    ' 1 disables beam search, which means greedy decoding.')
parser.add_argument('--temperature_infer', type=int, default=1,
help='temperature parameter in the inference stage')
def do_eval(save_paths, params, beam_width, temperature_infer,
result_save_path):
"""Evaluate the model.
Args:
save_paths (list):
params (dict): A dictionary of parameters
epoch_list (list): list of the epoch to restore
beam_width (int): beam width for beam search.
1 disables beam search, which mean greedy decoding.
eval_batch_size (int): the size of mini-batch when evaluation
temperature_infer (int): temperature in the inference stage
result_save_path (string, optional):
"""
if 'temp1' in save_paths[0]:
temperature_train = 1
elif 'temp2' in save_paths[0]:
temperature_train = 2
else:
raise ValueError
if result_save_path is not None:
sys.stdout = open(join(result_save_path,
'8models_traintemp' + str(temperature_train) +
'_inftemp' + str(temperature_infer) + '.log'), 'w')
print('=' * 30)
print(' frame stack %d' % int(params['num_stack']))
print(' beam width: %d' % beam_width)
print(' ensemble: %d' % len(save_paths))
print(' temperature (training): %d' % temperature_train)
print(' temperature (inference): %d' % temperature_infer)
print('=' * 30)
# Load dataset
test_clean_data = Dataset(
data_type='test_clean', train_data_size=params['train_data_size'],
label_type=params['label_type'],
batch_size=1, splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=True)
test_other_data = Dataset(
data_type='test_other', train_data_size=params['train_data_size'],
label_type=params['label_type'],
batch_size=1, splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=True)
print('Test Data Evaluation:')
cer_clean_test, wer_clean_test = do_eval_cer(
save_paths=save_paths,
dataset=test_clean_data,
data_type='test_clean',
label_type=params['label_type'],
num_classes=params['num_classes'] + 1,
beam_width=beam_width,
temperature_infer=temperature_infer,
is_test=True,
progressbar=True)
print(' CER (clean): %f %%' % (cer_clean_test * 100))
print(' WER (clean): %f %%' % (wer_clean_test * 100))
cer_other_test, wer_other_test = do_eval_cer(
save_paths=save_paths,
dataset=test_other_data,
data_type='test_other',
label_type=params['label_type'],
num_classes=params['num_classes'] + 1,
beam_width=beam_width,
temperature_infer=temperature_infer,
is_test=True,
progressbar=True)
print(' CER (other): %f %%' % (cer_other_test * 100))
print(' WER (other): %f %%' % (wer_other_test * 100))
def main():
args = parser.parse_args()
# Load config file
with open(join(args.model1_path, 'config.yml'), "r") as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)
params = config['param']
# Except for a blank class
if params['label_type'] == 'character':
params['num_classes'] = 28
else:
raise TypeError
save_paths = [args.model1_path, args.model2_path,
args.model3_path, args.model4_path,
args.model5_path, args.model6_path,
args.model7_path, args.model8_path]
do_eval(save_paths=save_paths, params=params,
beam_width=args.beam_width,
temperature_infer=args.temperature_infer,
result_save_path=args.result_save_path)
if __name__ == '__main__':
args = sys.argv
main()
| 39.244186
| 84
| 0.642519
|
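The script above evaluates an ensemble of eight CTC models, with a temperature parameter applied at inference time. A minimal sketch of the underlying idea, averaging temperature-scaled posteriors across models before decoding, is given below; the array shapes and helper names are illustrative and are not the repository's actual API.

```python
# Illustrative only: average temperature-scaled CTC posteriors over models.
# `logits_per_model` is a list of (time, num_classes) arrays for one utterance;
# the averaged posteriors would then go to greedy or beam-search decoding.
import numpy as np

def softmax(logits, temperature=1.0, axis=-1):
    z = logits / temperature
    z = z - z.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

def ensemble_posteriors(logits_per_model, temperature=1.0):
    probs = [softmax(logits, temperature) for logits in logits_per_model]
    return np.mean(probs, axis=0)

rng = np.random.default_rng(0)
avg = ensemble_posteriors([rng.normal(size=(3, 4)) for _ in range(8)], temperature=2)
print(avg.sum(axis=-1))  # each frame's probabilities sum to 1
```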
d4c18a4ba4f803a3f7db76ec1574b182e3a27b0f
| 1,654
|
py
|
Python
|
PollsProjectRest/urls.py
|
Parth-Shah-99/Polling-Project-API
|
6ce7b45d8518a63451ec5cc72ead37a0602eb5f5
|
[
"MIT"
] | 1
|
2021-06-29T10:07:13.000Z
|
2021-06-29T10:07:13.000Z
|
PollsProjectRest/urls.py
|
Parth-Shah-99/Polling-Project-API
|
6ce7b45d8518a63451ec5cc72ead37a0602eb5f5
|
[
"MIT"
] | null | null | null |
PollsProjectRest/urls.py
|
Parth-Shah-99/Polling-Project-API
|
6ce7b45d8518a63451ec5cc72ead37a0602eb5f5
|
[
"MIT"
] | null | null | null |
"""PollsProjectRest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="Question, Answer, Comment API",
default_version='v1',
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="example@gmail.com"),
license=openapi.License(name="Test License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('polls_api.urls')),
url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
| 36.755556
| 108
| 0.713422
|
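The URLconf above mounts `polls_api.urls` at the site root via `include()`, following the pattern described in its own docstring. A minimal sketch of what such an included app-level URLconf could look like is below; the view and route names are hypothetical, only the `path()`/`include()` pattern mirrors the docstring.

```python
# polls_api/urls.py -- hypothetical app-level URLconf; the view names are made up.
from django.urls import path

from . import views

urlpatterns = [
    path("questions/", views.QuestionListView.as_view(), name="question-list"),
    path("questions/<int:pk>/", views.QuestionDetailView.as_view(), name="question-detail"),
]
```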
45603116a29e49121ecf8ed8104d09c6e28caacc
| 1,379
|
py
|
Python
|
ceilometer/api/hooks.py
|
CiscoSystems/ceilometer
|
a9267fd94e7854afa0720d761fbe75d946e7167d
|
[
"Apache-2.0"
] | 1
|
2021-11-22T11:00:53.000Z
|
2021-11-22T11:00:53.000Z
|
ceilometer/api/hooks.py
|
CiscoSystems/ceilometer
|
a9267fd94e7854afa0720d761fbe75d946e7167d
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/api/hooks.py
|
CiscoSystems/ceilometer
|
a9267fd94e7854afa0720d761fbe75d946e7167d
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from pecan import hooks
from ceilometer import storage
class ConfigHook(hooks.PecanHook):
"""Attach the configuration object to the request
so controllers can get to it.
"""
def before(self, state):
state.request.cfg = cfg.CONF
class DBHook(hooks.PecanHook):
def before(self, state):
storage_engine = storage.get_engine(state.request.cfg)
state.request.storage_engine = storage_engine
state.request.storage_conn = storage_engine.get_connection(
state.request.cfg)
# def after(self, state):
# print 'method:', state.request.method
# print 'response:', state.response.status
| 30.644444
| 75
| 0.715011
|
619e8c6d280c06b5306f0f76801e66eed1511057
| 882
|
py
|
Python
|
coding/learn_docker/client_demo.py
|
yatao91/learning_road
|
e88dc43de98e35922bfc71c222ec71766851e618
|
[
"MIT"
] | 3
|
2021-05-25T16:58:52.000Z
|
2022-02-05T09:37:17.000Z
|
coding/learn_docker/client_demo.py
|
yataosu/learning_road
|
e88dc43de98e35922bfc71c222ec71766851e618
|
[
"MIT"
] | null | null | null |
coding/learn_docker/client_demo.py
|
yataosu/learning_road
|
e88dc43de98e35922bfc71c222ec71766851e618
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import docker
# Connect to the Docker daemon using the default socket
# client = docker.from_env()
client = docker.DockerClient(base_url='tcp://127.0.0.1:2375')
# Run a specified container
# resp = client.containers.run('hello-world')
# Run the container in the background (detached)
# resp = client.containers.run('hello-world', detach=True)
#
# print(resp)
# Get the list of containers
container_list = client.containers.list()
print(container_list)
print("-"*30)
# Get a container
container = client.containers.get('5cbe57a9a7')
print(container)
print("-"*30)
print(dir(container))
print("-"*30)
print(container.attrs)
print("-"*30)
print(container.attrs['Config']['Image'])
print("-"*30)
print(container.logs(tail=20))
# Stop the container
# container.stop()
print("-"*30)
# Stream the logs
# for line in container.logs(stream=True):
# print(line.strip())
# Pull an image
# resp = client.images.pull('nginx')
# print(resp)
# Get the list of images
image_list = client.images.list()
print(image_list)
| 16.961538
| 61
| 0.695011
|
4791e1de44c8386c2884dee3285db0bf5aa246bb
| 2,057
|
py
|
Python
|
tests/st/ops/ascend/vector/test_invert_001.py
|
tianjiashuo/akg
|
a9cbf642063fb1086a93e8bc6be6feb145689817
|
[
"Apache-2.0"
] | 286
|
2020-06-23T06:40:44.000Z
|
2022-03-30T01:27:49.000Z
|
tests/st/ops/ascend/vector/test_invert_001.py
|
tianjiashuo/akg
|
a9cbf642063fb1086a93e8bc6be6feb145689817
|
[
"Apache-2.0"
] | 10
|
2020-07-31T03:26:59.000Z
|
2021-12-27T15:00:54.000Z
|
tests/st/ops/ascend/vector/test_invert_001.py
|
tianjiashuo/akg
|
a9cbf642063fb1086a93e8bc6be6feb145689817
|
[
"Apache-2.0"
] | 30
|
2020-07-17T01:04:14.000Z
|
2021-12-27T14:05:19.000Z
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.invert_run import invert_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_auto_invert_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
#caseflag,testfuncname,testRunArgs, dimArgs
("invert_001", invert_run, ((1, 128), "uint16", "cce_invert_fp16"), ((128, 128), (128, 128))),
("invert_002", invert_run, ((128, 128), "uint16", "cce_invert_fp16"), ((0, 0), (128, 128))),
("invert_003", invert_run, ((128, 256), "uint16", "cce_invert_fp16"), ((0, 0), (128, 128))),
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
self.common_run(self.testarg)
def teardown(self):
self._log.info("============= {0} Teardown============".format(self.casename))
return
| 33.177419
| 106
| 0.609626
|
9b55d0f02de68b6a1ac653a8647b455b61b62c48
| 1,251
|
py
|
Python
|
src/pip/_internal/main.py
|
Nta1e/pip
|
828cbba70fffc3ad1e4eed40c6c47248ca07a827
|
[
"MIT"
] | null | null | null |
src/pip/_internal/main.py
|
Nta1e/pip
|
828cbba70fffc3ad1e4eed40c6c47248ca07a827
|
[
"MIT"
] | null | null | null |
src/pip/_internal/main.py
|
Nta1e/pip
|
828cbba70fffc3ad1e4eed40c6c47248ca07a827
|
[
"MIT"
] | null | null | null |
"""Primary application entrypoint.
"""
from __future__ import absolute_import
import locale
import logging
import os
import sys
from pip._internal.cli.autocompletion import autocomplete
from pip._internal.cli.main_parser import parse_command
from pip._internal.commands import create_command
from pip._internal.exceptions import PipError
from pip._internal.utils import deprecation
logger = logging.getLogger(__name__)
def main(args=None):
if args is None:
args = sys.argv[1:]
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parse_command(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
# Needed for locale.getpreferredencoding(False) to work
# in pip._internal.utils.encoding.auto_decode
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
# setlocale can apparently crash if locale are uninitialized
logger.debug("Ignoring error %s when setting locale", e)
command = create_command(cmd_name, isolated=("--isolated" in cmd_args))
return command.main(cmd_args)
| 27.8
| 75
| 0.728217
|
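`main()` above takes an explicit argument list (defaulting to `sys.argv[1:]`) and returns the command's exit status, so a console-script wrapper can simply exit with its return value. A minimal sketch under that assumption:

```python
# Sketch: driving the entrypoint defined above with an explicit argv list,
# the same way a console-script wrapper would (sys.exit(main())).
import sys

from pip._internal.main import main

if __name__ == "__main__":
    sys.exit(main(["help"]))
```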
1b33f02720af0faae4b903c50db4103e684b639c
| 1,577
|
py
|
Python
|
inceptor/compilers/CscCompiler.py
|
pwnmeow/inceptor
|
4c1d17b1f29777aaae634c07ad6c4dd8ed61aa01
|
[
"BSD-4-Clause"
] | 743
|
2021-08-02T16:27:27.000Z
|
2022-03-31T16:34:16.000Z
|
inceptor/compilers/CscCompiler.py
|
pwnmeow/inceptor
|
4c1d17b1f29777aaae634c07ad6c4dd8ed61aa01
|
[
"BSD-4-Clause"
] | 32
|
2021-08-03T04:47:20.000Z
|
2022-03-28T23:15:45.000Z
|
inceptor/compilers/CscCompiler.py
|
pwnmeow/inceptor
|
4c1d17b1f29777aaae634c07ad6c4dd8ed61aa01
|
[
"BSD-4-Clause"
] | 138
|
2021-08-02T16:27:28.000Z
|
2022-03-31T02:47:20.000Z
|
from compilers.Compiler import Compiler
from compilers.CompilerExceptions import OperationNotSupported
from config.Config import Config
class CscCompiler(Compiler):
def add_include_directory(self, directory):
pass
def __init__(self, args=None, aargs=None, arch="x64"):
self.config = Config()
super().__init__(None, args=args, aargs=aargs, sep=":", arch=arch)
if not self.args:
self.args = {}
def default_exe_args(self, outfile):
self.args = {
"/target": "exe",
"/platform": self.arch,
"/unsafe": None,
"/out": f'"{outfile}"'
}
def default_dll_args(self, outfile):
self.args = {
"/target": "library",
"/platform": self.arch,
"/unsafe": None,
"/out": f'"{outfile}"'
}
# "/optimize-": None,
def hide_window(self):
if self.args["/target"] == "exe":
self.args["/target"] = "winexe"
else:
raise OperationNotSupported(
"DLLs don't support hidden windows at compiler level. Consider using SW_HIDE in the template"
)
return True
def set_outfile(self, outfile):
self.args["/out"] = f'"{outfile}"'
def set_architecture(self, arch):
self.args["/platform"] = arch
def set_libraries(self, libs: list):
if len(libs) > 0:
for lib in libs:
self.args[f'/res:"{lib}"'] = None
self.args["/r"] = ",".join([f'"{lib}"' for lib in libs])
| 28.672727
| 109
| 0.541535
|
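`CscCompiler` only fills an argument dictionary; assembling the actual `csc` command line is left to the `Compiler` base class, which is not shown here. The sketch below shows what that assembly could look like, assuming the base class joins each key and value with the `sep=":"` passed to `super().__init__()` and emits `None`-valued flags bare; the output file name is illustrative.

```python
# Assumption: the unshown Compiler base class joins key/value pairs with sep=":"
# and emits flags whose value is None without a value.
def build_flags(args: dict, sep: str = ":") -> str:
    parts = []
    for key, value in args.items():
        parts.append(key if value is None else f"{key}{sep}{value}")
    return " ".join(parts)

exe_args = {"/target": "exe", "/platform": "x64", "/unsafe": None, "/out": '"out.exe"'}
print(build_flags(exe_args))  # /target:exe /platform:x64 /unsafe /out:"out.exe"
```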
108186af5724d4980050bddf8cfa1ff17bbbbf9b
| 1,089
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEPlogiLsAcc_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEPlogiLsAcc_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEPlogiLsAcc_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FCoE_PLOGI_LS_ACC(Base):
__slots__ = ()
_SDM_NAME = 'fCoEPlogiLsAcc'
_SDM_ATT_MAP = {
'FCoE Header': 'fCoEPlogiLsAcc.header.fcoeHeader',
'FC Header': 'fCoEPlogiLsAcc.header.fcHeader',
'FC ELS': 'fCoEPlogiLsAcc.header.FcEls',
}
def __init__(self, parent):
super(FCoE_PLOGI_LS_ACC, self).__init__(parent)
@property
def FCoE_Header(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCoE Header']))
@property
def FC_Header(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FC Header']))
@property
def FC_ELS(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FC ELS']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 32.029412
| 86
| 0.702479
|
b59fa5f55b6c96f2a1d796ad0fc9bc435dc0d9d7
| 2,674
|
py
|
Python
|
codes/000_visualization.py
|
baiyuang/Tensorflow-Computer-Vision-Tutorial
|
09db553dc23f0edeb8a9b0d51c13d0a27290016a
|
[
"MIT"
] | 1
|
2021-06-02T01:09:32.000Z
|
2021-06-02T01:09:32.000Z
|
codes/000_visualization.py
|
Rookie-Eric/Tensorflow-Computer-Vision-Tutorial
|
09db553dc23f0edeb8a9b0d51c13d0a27290016a
|
[
"MIT"
] | null | null | null |
codes/000_visualization.py
|
Rookie-Eric/Tensorflow-Computer-Vision-Tutorial
|
09db553dc23f0edeb8a9b0d51c13d0a27290016a
|
[
"MIT"
] | 1
|
2020-07-23T08:17:20.000Z
|
2020-07-23T08:17:20.000Z
|
import numpy as np
import matplotlib.pyplot as plt
f = np.load('../mnist.npz')
image, label = f['x_train'][7], f['y_train'][7]
def show_conv():
filter = np.array([
[1, 1, 1],
[0, 0, 0],
[-1, -1, -1]])
plt.figure(0, figsize=(9, 5))
ax1 = plt.subplot(121)
ax1.imshow(image, cmap='gray')
plt.xticks(())
plt.yticks(())
ax2 = plt.subplot(122)
plt.ion()
texts = []
feature_map = np.zeros((26, 26))
flip_filter = np.flipud(np.fliplr(filter)) # flip both sides of the filter
for i in range(26):
for j in range(26):
if texts:
fm.remove()
for n in range(3):
for m in range(3):
if len(texts) != 9:
texts.append(ax1.text(j+m, i+n, filter[n, m], color='w', size=8, ha='center', va='center',))
else:
texts[n*3+m].set_position((j+m, i+n))
feature_map[i, j] = np.sum(flip_filter * image[i:i+3, j:j+3])
fm = ax2.imshow(feature_map, cmap='gray', vmax=255*3, vmin=-255*3)
plt.xticks(())
plt.yticks(())
plt.pause(0.001)
plt.ioff()
plt.show()
def show_result():
filters = [
np.array([
[1, 1, 1],
[0, 0, 0],
[-1, -1, -1]]),
np.array([
[-1, -1, -1],
[0, 0, 0],
[1, 1, 1]]),
np.array([
[1, 0, -1],
[1, 0, -1],
[1, 0, -1]]),
np.array([
[-1, 0, 1],
[-1, 0, 1],
[-1, 0, 1]])
]
plt.figure(0)
plt.title('Original image')
plt.imshow(image, cmap='gray')
plt.xticks(())
plt.yticks(())
plt.figure(1)
for n in range(4):
feature_map = np.zeros((26, 26))
flip_filter = np.flipud(np.fliplr(filters[n]))
for i in range(26):
for j in range(26):
feature_map[i, j] = np.sum(image[i:i + 3, j:j + 3] * flip_filter)
plt.subplot(3, 4, 1 + n)
plt.title('Filter%i' % n)
plt.imshow(filters[n], cmap='gray')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 5 + n)
plt.title('Conv%i' % n)
plt.imshow(feature_map, cmap='gray')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 9 + n)
plt.title('ReLU%i' % n)
feature_map = np.maximum(0, feature_map)
plt.imshow(feature_map, cmap='gray')
plt.xticks(())
plt.yticks(())
plt.tight_layout()
plt.show()
if __name__ == "__main__":
show_conv()
show_result()
| 25.466667
| 116
| 0.450636
|
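`show_conv()` and `show_result()` flip the filter before the sliding-window sum, which turns the cross-correlation loop into a true convolution. The short check below verifies that equivalence against `scipy.signal.convolve2d`; SciPy is an extra dependency used only for this comparison, not by the script above.

```python
import numpy as np
from scipy.signal import convolve2d

rng = np.random.default_rng(0)
image = rng.integers(0, 255, size=(28, 28)).astype(float)
filt = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]], dtype=float)

# Manual loop as in show_conv(): flip the filter, then slide and sum.
flipped = np.flipud(np.fliplr(filt))
manual = np.zeros((26, 26))
for i in range(26):
    for j in range(26):
        manual[i, j] = np.sum(flipped * image[i:i + 3, j:j + 3])

assert np.allclose(manual, convolve2d(image, filt, mode="valid"))
print("manual flipped-filter loop == scipy convolution")
```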
106c140036dc1733b8eea8ed4eccd1bf0e4f8210
| 4,000
|
py
|
Python
|
server/app/commits/views.py
|
jialin-wu-02/aimde
|
7a8795df7f9df3be3bb607e5f7888731da837fde
|
[
"MIT"
] | null | null | null |
server/app/commits/views.py
|
jialin-wu-02/aimde
|
7a8795df7f9df3be3bb607e5f7888731da837fde
|
[
"MIT"
] | null | null | null |
server/app/commits/views.py
|
jialin-wu-02/aimde
|
7a8795df7f9df3be3bb607e5f7888731da837fde
|
[
"MIT"
] | null | null | null |
import json
import os
import time
from flask import Blueprint, jsonify, request, \
abort, make_response, send_from_directory
from flask_restful import Api, Resource
from app import App
from app.commits.models import Commit, Tag
from app.commits.utils import get_commits
from services.executables.action import Action
from app.db import db
commits_bp = Blueprint('commits', __name__)
commits_api = Api(commits_bp)
@commits_api.resource('/search')
class CommitSearchApi(Resource):
def get(self):
metric = ''
tag = None
experiment = None
query = request.args.get('q').strip()
sub_queries = query.split(' ')
for sub_query in sub_queries:
if 'metric' in sub_query:
_, _, metric = sub_query.rpartition(':')
metric = metric.strip()
if 'tag' in sub_query:
_, _, tag = sub_query.rpartition(':')
tag = tag.strip()
if 'experiment' in sub_query:
_, _, experiment = sub_query.rpartition(':')
experiment = experiment.strip()
commits = get_commits(metric, tag, experiment)
return jsonify(commits)
@commits_api.resource('/tags/<commit_hash>')
class CommitTagApi(Resource):
def get(self, commit_hash):
commit = Commit.query.filter(Commit.hash == commit_hash).first()
if not commit:
return make_response(jsonify({}), 404)
commit_tags = []
for t in commit.tags:
commit_tags.append({
'id': t.uuid,
'name': t.name,
'color': t.color,
})
return jsonify(commit_tags)
@commits_api.resource('/tags/update')
class CommitTagUpdateApi(Resource):
def post(self):
form = request.form
commit_hash = form.get('commit_hash')
experiment_name = form.get('experiment_name')
tag_id = form.get('tag_id')
commit = Commit.query.filter((Commit.hash == commit_hash) &
(Commit.experiment_name == experiment_name)
).first()
if not commit:
commit = Commit(commit_hash, experiment_name)
db.session.add(commit)
db.session.commit()
tag = Tag.query.filter(Tag.uuid == tag_id).first()
if not tag:
return make_response(jsonify({}), 404)
if tag in commit.tags:
commit.tags.remove(tag)
else:
for t in commit.tags:
commit.tags.remove(t)
commit.tags.append(tag)
db.session.commit()
return {
'tag': list(map(lambda t: t.uuid, commit.tags)),
}
@commits_api.resource('/info/<experiment>/<commit_hash>')
class CommitInfoApi(Resource):
def get(self, experiment, commit_hash):
commit_path = os.path.join('/store', experiment, commit_hash)
if not os.path.isdir(commit_path):
return make_response(jsonify({}), 404)
commit_config_file_path = os.path.join(commit_path, 'config.json')
info = {}
try:
with open(commit_config_file_path, 'r+') as commit_config_file:
info = json.loads(commit_config_file.read())
except:
pass
process = info.get('process')
if process:
if not process['finish']:
action = Action(Action.SELECT, {
'experiment': experiment,
'commit_hash': commit_hash,
})
processes_res = App.executables_manager.add(action, 30)
if processes_res is not None and 'processes' in processes_res:
processes = json.loads(processes_res)['processes']
if len(processes):
process['pid'] = processes[0]['pid']
process['time'] = time.time() - info['start_date']
return jsonify(info)
| 30.075188
| 80
| 0.5675
|
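`CommitSearchApi` parses its free-text `q` parameter by splitting on spaces and taking whatever follows the last `:` in any fragment containing `metric`, `tag`, or `experiment`. A small standalone illustration of that parsing (the query values are made up):

```python
def parse_query(query: str):
    metric, tag, experiment = '', None, None
    for sub_query in query.strip().split(' '):
        if 'metric' in sub_query:
            metric = sub_query.rpartition(':')[2].strip()
        if 'tag' in sub_query:
            tag = sub_query.rpartition(':')[2].strip()
        if 'experiment' in sub_query:
            experiment = sub_query.rpartition(':')[2].strip()
    return metric, tag, experiment

print(parse_query('metric:loss tag:baseline experiment:default'))
# ('loss', 'baseline', 'default')
```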
fe0db5ca8accc2f8bb6b38f728c0f269233657e5
| 1,802
|
py
|
Python
|
chapter/six_exercises.py
|
sloscal1/ML-Prob-Perspective
|
3474faf8559cc2229426ab773460000c6c40fbb3
|
[
"MIT"
] | null | null | null |
chapter/six_exercises.py
|
sloscal1/ML-Prob-Perspective
|
3474faf8559cc2229426ab773460000c6c40fbb3
|
[
"MIT"
] | null | null | null |
chapter/six_exercises.py
|
sloscal1/ML-Prob-Perspective
|
3474faf8559cc2229426ab773460000c6c40fbb3
|
[
"MIT"
] | null | null | null |
def question_1():
r""" Pessimism of LOOCV. I didn't like the question because I thought it was a little too open...
You want to show that LOOCV can be a really bad estimate of error in some situations. For example, if you
have a random class label that you're trying to predict (binary, equal proportions), LOOCV can report
a really bad error bound. You're asked what the best classifier is for this data and what happens when you
do its LOOCV estimate.
The question is leading (though not prescribing) you towards a simple majority classifier. It will be right the
maximum number of times (0.5), and its LOOCV error rate will be extremely pessimistic, at 100%. The reason is
simple enough: if you remove a single sample, then the other class becomes the majority, so you'll predict that,
but it's only the majority because the test sample belonged to the other class, and so you'll be wrong every time.
But this isn't the only optimal classifier specification for this problem. You can also have a probabilistic
classifier that outputs a label in proportion to the input. In this case you'll again achieve an accuracy of 0.5,
but the LOOCV will essentially also be 0.5 as the training data grows. For small amounts of training data the
process will give you something closer to a 0.49 error rate, but it's still not that bad.
In short, the LOOCV is pessimistic, but to get the wild swing desired by the question it should have been explicit
and asked the reader to investigate the majority classifier (as is done in the source discussion referred to in the
question: Witten, Frank, Hall, p. 154 in Data Mining, 3rd edition).
See ``chapter.six.demo_loocv`` for simulation study of this question.
Returns:
None.
"""
| 64.357143
| 119
| 0.746948
|
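A quick simulation of the argument in the docstring above: on perfectly balanced random labels, a majority-vote classifier is right half the time overall, yet its leave-one-out estimate is 0% accuracy, because removing one sample always makes the opposite class the strict majority of the remaining fold. This is only a sketch of the idea, not the `chapter.six.demo_loocv` code it refers to.

```python
import random

random.seed(0)
n = 100  # even number, perfectly balanced binary labels
labels = [0] * (n // 2) + [1] * (n // 2)
random.shuffle(labels)

correct = 0
for i, held_out in enumerate(labels):
    rest = labels[:i] + labels[i + 1:]
    prediction = max(set(rest), key=rest.count)  # majority vote on the fold
    correct += int(prediction == held_out)

print("LOOCV accuracy of the majority classifier:", correct / n)  # 0.0
```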
40355fe20bdfab65f42f2f5a3e63016466e69c50
| 6,645
|
py
|
Python
|
mars/dataframe/datastore/to_vineyard.py
|
perfumescent/mars
|
9bf9bb990587cb9f091d108ed7f725fb429a80e8
|
[
"Apache-2.0"
] | null | null | null |
mars/dataframe/datastore/to_vineyard.py
|
perfumescent/mars
|
9bf9bb990587cb9f091d108ed7f725fb429a80e8
|
[
"Apache-2.0"
] | null | null | null |
mars/dataframe/datastore/to_vineyard.py
|
perfumescent/mars
|
9bf9bb990587cb9f091d108ed7f725fb429a80e8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from ... import opcodes as OperandDef
from ...core import OutputType
from ...core.operand.base import SchedulingHint
from ...serialization.serializables import StringField
from ...tensor.datastore.to_vineyard import resolve_vineyard_socket
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..utils import parse_index
try:
import vineyard
from vineyard.data.dataframe import make_global_dataframe
from vineyard.data.utils import to_json
except ImportError:
vineyard = None
class DataFrameToVineyardChunk(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.DATAFRAME_STORE_VINEYARD_CHUNK
# vineyard ipc socket
vineyard_socket = StringField("vineyard_socket")
def __init__(self, vineyard_socket=None, dtypes=None, **kw):
super().__init__(
vineyard_socket=vineyard_socket,
_dtypes=dtypes,
_output_types=[OutputType.dataframe],
**kw
)
def __call__(self, df):
return self.new_dataframe(
[df],
shape=(0, 0),
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
@classmethod
def _process_out_chunks(cls, op, out_chunks):
dtypes = pd.Series([np.dtype("O")], index=pd.Index([0]))
merge_op = DataFrameToVinyardStoreMeta(
vineyard_socket=op.vineyard_socket,
chunk_shape=op.inputs[0].chunk_shape,
shape=(1, 1),
dtypes=dtypes,
)
return merge_op.new_chunks(
out_chunks, shape=(1, 1), dtypes=dtypes, index=(0, 0)
)
@classmethod
def tile(cls, op):
out_chunks = []
scheduling_hint = SchedulingHint(fuseable=False)
dtypes = pd.Series([np.dtype("O")], index=pd.Index([0]))
for idx, chunk in enumerate(op.inputs[0].chunks):
chunk_op = op.copy().reset_key()
chunk_op.scheduling_hint = scheduling_hint
out_chunk = chunk_op.new_chunk(
[chunk],
shape=(1, 1),
dtypes=dtypes,
index_value=chunk.index_value,
columns_value=chunk.columns_value,
index=(idx, 0),
)
out_chunks.append(out_chunk)
out_chunks = cls._process_out_chunks(op, out_chunks)
in_df = op.inputs[0]
new_op = op.copy().reset_key()
return new_op.new_dataframes(
op.inputs,
shape=(len(out_chunks), 1),
dtypes=dtypes,
index_value=in_df.index_value,
columns_value=in_df.columns_value,
chunks=out_chunks,
nsplits=((np.prod(op.inputs[0].chunk_shape),),),
)
@classmethod
def execute(cls, ctx, op):
if vineyard is None:
raise RuntimeError("vineyard is not available")
socket, needs_put = resolve_vineyard_socket(ctx, op)
client = vineyard.connect(socket)
# some op might be fused and executed twice on different workers
if not needs_put:
# might be fused
try: # pragma: no cover
meta = ctx.get_chunks_meta([op.inputs[0].key])[0]
df_id = vineyard.ObjectID(meta["object_ref"])
if not client.exists(df_id):
needs_put = True
except KeyError:
needs_put = True
if needs_put:
df_id = client.put(
ctx[op.inputs[0].key], partition_index=op.inputs[0].index
)
else: # pragma: no cover
meta = client.get_meta(df_id)
new_meta = vineyard.ObjectMeta()
for k, v in meta.items():
if k not in ["id", "signature", "instance_id"]:
if isinstance(v, vineyard.ObjectMeta):
new_meta.add_member(k, v)
else:
new_meta[k] = v
new_meta["partition_index_"] = to_json(op.inputs[0].index)
df_id = client.create_metadata(new_meta).id
client.persist(df_id)
ctx[op.outputs[0].key] = pd.DataFrame({0: [df_id]})
class DataFrameToVinyardStoreMeta(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.DATAFRAME_STORE_VINEYARD_META
# vineyard ipc socket
vineyard_socket = StringField("vineyard_socket")
def __init__(self, vineyard_socket=None, dtypes=None, **kw):
super().__init__(
vineyard_socket=vineyard_socket,
dtypes=dtypes,
_output_types=[OutputType.dataframe],
**kw
)
@classmethod
def tile(cls, op):
dtypes = pd.Series([np.dtype("O")], index=pd.Index([0]))
chunk_op = op.copy().reset_key()
out_chunk = chunk_op.new_chunk(
op.inputs[0].chunks,
shape=(1, 1),
dtypes=dtypes,
index_value=parse_index(pd.Index([-1])),
columns_value=parse_index(pd.Index([0])),
index=(0, 0),
)
new_op = op.copy().reset_key()
return new_op.new_dataframes(
op.inputs,
shape=(1, 1),
dtypes=dtypes,
index_value=parse_index(pd.Index([0])),
columns_value=parse_index(pd.Index([0])),
chunks=[out_chunk],
nsplits=((1,), (1,)),
)
@classmethod
def execute(cls, ctx, op):
if vineyard is None:
raise RuntimeError("vineyard is not available")
socket, _ = resolve_vineyard_socket(ctx, op)
client = vineyard.connect(socket)
# # store the result object id to execution context
chunks = [ctx[chunk.key][0][0] for chunk in op.inputs]
ctx[op.outputs[0].key] = pd.DataFrame(
{0: [make_global_dataframe(client, chunks).id]}
)
def to_vineyard(df, vineyard_socket=None):
op = DataFrameToVineyardChunk(vineyard_socket=vineyard_socket)
return op(df)
| 34.252577
| 75
| 0.60301
|
23090a7d9da1f9820b4f38d4a685aa98dc6a70c3
| 2,966
|
py
|
Python
|
tests/trainer/dynamic_args/test_multiple_optimizers.py
|
nahidgrid/pytorch-lightning
|
60bf48d3e0ee26ebafd7a98ad96c86ba304d4533
|
[
"Apache-2.0"
] | null | null | null |
tests/trainer/dynamic_args/test_multiple_optimizers.py
|
nahidgrid/pytorch-lightning
|
60bf48d3e0ee26ebafd7a98ad96c86ba304d4533
|
[
"Apache-2.0"
] | null | null | null |
tests/trainer/dynamic_args/test_multiple_optimizers.py
|
nahidgrid/pytorch-lightning
|
60bf48d3e0ee26ebafd7a98ad96c86ba304d4533
|
[
"Apache-2.0"
] | null | null | null |
from pytorch_lightning import Trainer
from tests.base.boring_model import BoringModel
import torch
def test_multiple_optimizers(tmpdir):
"""
Tests that only training_step can be used
"""
class TestModel(BoringModel):
def on_train_epoch_start(self) -> None:
self.opt_0_seen = False
self.opt_1_seen = False
def training_step(self, batch, batch_idx, optimizer_idx):
if optimizer_idx == 0:
self.opt_0_seen = True
elif optimizer_idx == 1:
self.opt_1_seen = True
else:
raise Exception('should only have two optimizers')
self.training_step_called = True
loss = self.step(batch[0])
return loss
def training_epoch_end(self, outputs) -> None:
# outputs should be an array with an entry per optimizer
assert len(outputs) == 2
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
return optimizer, optimizer_2
model = TestModel()
model.val_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=1,
log_every_n_steps=1,
weights_summary=None,
)
trainer.fit(model)
assert model.opt_0_seen
assert model.opt_1_seen
def test_multiple_optimizers_manual(tmpdir):
"""
Tests that only training_step can be used
"""
class TestModel(BoringModel):
def on_train_epoch_start(self) -> None:
self.opt_0_seen = False
self.opt_1_seen = False
def training_step(self, batch, batch_idx, optimizer_idx):
# manual
(opt_a, opt_b) = self.optimizers()
loss_1 = self.step(batch[0])
# fake generator
self.manual_backward(loss_1, opt_a)
opt_a.step()
opt_a.zero_grad()
# fake discriminator
loss_2 = self.step(batch[0])
self.manual_backward(loss_2, opt_b)
opt_b.step()
opt_b.zero_grad()
def training_epoch_end(self, outputs) -> None:
# outputs should be an array with an entry per optimizer
assert len(outputs) == 2
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
return optimizer, optimizer_2
model = TestModel()
model.val_dataloader = None
trainer = Trainer(
automatic_optimization=False,
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=1,
log_every_n_steps=1,
weights_summary=None,
)
trainer.fit(model)
| 29.366337
| 74
| 0.604181
|
08797a4125d0a3a087bc36e4ce13a299670b843a
| 3,374
|
py
|
Python
|
third_party/logilab/common/urllib2ext.py
|
yetu/repotools
|
1f52004a33ee27f539bb4c831b8e8a37751550a8
|
[
"BSD-3-Clause"
] | 45
|
2015-10-12T10:02:11.000Z
|
2021-12-14T07:10:47.000Z
|
pylibs/logilab/common/urllib2ext.py
|
teranex/python-mode
|
51d0c3f341b5d0cd68300e16cc29a97a5783ce02
|
[
"Vim"
] | 7
|
2021-02-08T20:22:15.000Z
|
2022-03-11T23:19:41.000Z
|
pylibs/logilab/common/urllib2ext.py
|
teranex/python-mode
|
51d0c3f341b5d0cd68300e16cc29a97a5783ce02
|
[
"Vim"
] | 38
|
2016-06-25T05:57:35.000Z
|
2021-12-30T04:58:10.000Z
|
import logging
import urllib2
import kerberos as krb
class GssapiAuthError(Exception):
"""raised on error during authentication process"""
import re
RGX = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
def get_negociate_value(headers):
for authreq in headers.getheaders('www-authenticate'):
match = RGX.search(authreq)
if match:
return match.group(1)
class HTTPGssapiAuthHandler(urllib2.BaseHandler):
"""Negotiate HTTP authentication using context from GSSAPI"""
handler_order = 400 # before Digest Auth
def __init__(self):
self._reset()
def _reset(self):
self._retried = 0
self._context = None
def clean_context(self):
if self._context is not None:
krb.authGSSClientClean(self._context)
def http_error_401(self, req, fp, code, msg, headers):
try:
if self._retried > 5:
raise urllib2.HTTPError(req.get_full_url(), 401,
"negotiate auth failed", headers, None)
self._retried += 1
logging.debug('gssapi handler, try %s' % self._retried)
negotiate = get_negociate_value(headers)
if negotiate is None:
logging.debug('no negociate found in a www-authenticate header')
return None
logging.debug('HTTPGssapiAuthHandler: negotiate 1 is %r' % negotiate)
result, self._context = krb.authGSSClientInit("HTTP@%s" % req.get_host())
if result < 1:
raise GssapiAuthError("HTTPGssapiAuthHandler: init failed with %d" % result)
result = krb.authGSSClientStep(self._context, negotiate)
if result < 0:
raise GssapiAuthError("HTTPGssapiAuthHandler: step 1 failed with %d" % result)
client_response = krb.authGSSClientResponse(self._context)
logging.debug('HTTPGssapiAuthHandler: client response is %s...' % client_response[:10])
req.add_unredirected_header('Authorization', "Negotiate %s" % client_response)
server_response = self.parent.open(req)
negotiate = get_negociate_value(server_response.info())
if negotiate is None:
logging.warning('HTTPGssapiAuthHandler: failed to authenticate server')
else:
logging.debug('HTTPGssapiAuthHandler negotiate 2: %s' % negotiate)
result = krb.authGSSClientStep(self._context, negotiate)
if result < 1:
raise GssapiAuthError("HTTPGssapiAuthHandler: step 2 failed with %d" % result)
return server_response
except GssapiAuthError, exc:
logging.error(repr(exc))
finally:
self.clean_context()
self._reset()
if __name__ == '__main__':
import sys
# debug
import httplib
httplib.HTTPConnection.debuglevel = 1
httplib.HTTPSConnection.debuglevel = 1
# debug
import logging
logging.basicConfig(level=logging.DEBUG)
# handle cookies
import cookielib
cj = cookielib.CookieJar()
ch = urllib2.HTTPCookieProcessor(cj)
# test with url sys.argv[1]
h = HTTPGssapiAuthHandler()
response = urllib2.build_opener(h, ch).open(sys.argv[1])
print '\nresponse: %s\n--------------\n' % response.code, response.info()
| 38.340909
| 99
| 0.624185
|
dce1a6530be3c7b4d2c0d9c63a7ec68a30f351fa
| 411
|
py
|
Python
|
inventory/urls.py
|
kissops/ERPv
|
e5772f5e254d178bb29661d22401c9edcb6067ce
|
[
"MIT"
] | 2
|
2019-07-25T10:58:59.000Z
|
2020-11-08T02:32:44.000Z
|
inventory/urls.py
|
kissops/ERPv-server
|
e5772f5e254d178bb29661d22401c9edcb6067ce
|
[
"MIT"
] | 21
|
2021-01-01T14:08:41.000Z
|
2021-09-01T16:04:20.000Z
|
inventory/urls.py
|
kissops/ERPv-server
|
e5772f5e254d178bb29661d22401c9edcb6067ce
|
[
"MIT"
] | 1
|
2020-10-05T03:35:28.000Z
|
2020-10-05T03:35:28.000Z
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
router.register(r"warehouses", views.WarehouseViewSet)
router.register(r"products", views.ProductViewSet)
router.register(r"locations", views.LocationViewSet)
router.register(r"location_quantities", views.LocationQuantityViewSet)
urlpatterns = [path("", include(router.urls))]
| 34.25
| 70
| 0.815085
|
e2fd6fc17aa88d1b6bd2ef10480a8924bcec4d39
| 679
|
py
|
Python
|
models/load_pretrained_crnn.py
|
Armyke/crnn.pytorch
|
3cba2d626d29c299f0204fc50f0c80b7bbc6315d
|
[
"MIT"
] | null | null | null |
models/load_pretrained_crnn.py
|
Armyke/crnn.pytorch
|
3cba2d626d29c299f0204fc50f0c80b7bbc6315d
|
[
"MIT"
] | null | null | null |
models/load_pretrained_crnn.py
|
Armyke/crnn.pytorch
|
3cba2d626d29c299f0204fc50f0c80b7bbc6315d
|
[
"MIT"
] | null | null | null |
import torch
from models import crnn as crnn
def load_model(model_path, nh, alphabet='abcdefghijklmnopqrstuvwxyz'):
# initialize crnn model
model = crnn.CRNN(32, 1, len(alphabet) + 1, nh)
    # Load the weights. Checkpoints saved from a torch.nn.DataParallel model have
    # every key prefixed with "module.", so if the plain load fails, strip that
    # prefix (the first 7 characters of each key) and retry.
    try:
        model.load_state_dict(torch.load(model_path, map_location='cpu'))
    except Exception:
        from collections import OrderedDict
        state_dict = torch.load(model_path, map_location='cpu')
        new_state_dict = OrderedDict()
        for key, layer in state_dict.items():
            new_key = key[7:]  # drop the "module." prefix
            new_state_dict[new_key] = layer
        model.load_state_dict(new_state_dict)
model.eval()
return model
| 24.25
| 73
| 0.661267
|
20734fbb8ff295d38c83047b642ddcd5f6c4f2d2
| 1,997
|
py
|
Python
|
model/model.py
|
AnuroopKeshav/NaiveBayesClassifier
|
f7f3a032426ede778cb4382dfd7a19465994a6b9
|
[
"MIT"
] | 1
|
2022-01-22T07:07:52.000Z
|
2022-01-22T07:07:52.000Z
|
model/model.py
|
AnuroopKeshav/NaiveBayesClassifier
|
f7f3a032426ede778cb4382dfd7a19465994a6b9
|
[
"MIT"
] | null | null | null |
model/model.py
|
AnuroopKeshav/NaiveBayesClassifier
|
f7f3a032426ede778cb4382dfd7a19465994a6b9
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.stats as sp
class NaiveBayesClassifier:
# Grouping entries based on different discrete resultant values
def groupClasses(self) -> None:
self.classes = {}
for entry in range(len(self.resultant_vector_train)):
if self.resultant_vector_train[entry] not in self.classes:
self.classes[self.resultant_vector_train[entry]] = []
self.classes[self.resultant_vector_train[entry]].append(self.feature_matrix_train[entry])
# Calculating the mean and standard deviation of all the classes
    # Used for the Gaussian Probability Density function
def defineClassMeanAndStd(self) -> None:
self.class_mean_std = {}
for output_class, feature_class in self.classes.items():
self.class_mean_std[output_class] = [(np.mean(class_attr), np.std(class_attr)) for class_attr in zip(*feature_class)]
    # Function that iteratively finds the product of probabilities for all the classes and returns the max
def findMaxProba(self, entry) -> np.float64:
proba_dict = {}
for output, class_info in self.class_mean_std.items():
probability = 1
for attr in range(len(class_info)):
probability *= sp.norm(class_info[attr][0], class_info[attr][1]).pdf(entry[attr])
proba_dict[output] = probability
return max(zip(proba_dict.values(), proba_dict.keys()))[1]
# Fitting the model with the data
def fit(self, feature_matrix: np.array, resultant_vector: np.array) -> None:
self.feature_matrix_train = feature_matrix
self.resultant_vector_train = resultant_vector
self.groupClasses()
self.defineClassMeanAndStd()
# Predicting for test_data
def predict(self, feature_matrix_test: np.array) -> np.array:
predictions = [self.findMaxProba(feature_matrix_test[entry]) for entry in range(len(feature_matrix_test))]
return np.array(predictions)
| 40.755102
| 129
| 0.685528
|
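The classifier above groups training rows by label, stores per-feature Gaussian parameters, and predicts the label whose product of per-feature densities is largest. A tiny usage sketch on synthetic data, assuming the class definition above is in scope (the cluster locations are made up):

```python
import numpy as np

rng = np.random.default_rng(0)
X0 = rng.normal(loc=0.0, scale=1.0, size=(50, 2))  # class 0 cluster
X1 = rng.normal(loc=5.0, scale=1.0, size=(50, 2))  # class 1 cluster
X = np.vstack([X0, X1])
y = np.array([0] * 50 + [1] * 50)

clf = NaiveBayesClassifier()  # defined above
clf.fit(X, y)
print(clf.predict(np.array([[0.2, -0.1], [4.8, 5.3]])))  # expected: [0 1]
```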
0bcec87e02b054687b0e775bf9f09d112847f7ae
| 1,433
|
py
|
Python
|
disk/tests/conftest.py
|
glasser/integrations-core
|
1dd515d49b1690a1369ee5195713605b1b072b1f
|
[
"BSD-3-Clause"
] | null | null | null |
disk/tests/conftest.py
|
glasser/integrations-core
|
1dd515d49b1690a1369ee5195713605b1b072b1f
|
[
"BSD-3-Clause"
] | 4
|
2019-07-03T02:53:19.000Z
|
2019-07-10T14:52:14.000Z
|
disk/tests/conftest.py
|
glasser/integrations-core
|
1dd515d49b1690a1369ee5195713605b1b072b1f
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T13:35:17.000Z
|
2019-12-23T13:35:17.000Z
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import mock
import pytest
from datadog_checks.dev.utils import ON_WINDOWS, mock_context_manager
from .metrics import CORE_GAUGES, CORE_RATES, UNIX_GAUGES
from .mocks import MockDiskIOMetrics, MockDiskMetrics, MockInodesMetrics, MockPart
@pytest.fixture
def psutil_mocks():
if ON_WINDOWS:
mock_statvfs = mock_context_manager()
else:
mock_statvfs = mock.patch('os.statvfs', return_value=MockInodesMetrics(), __name__='statvfs')
with mock.patch('psutil.disk_partitions', return_value=[MockPart()], __name__='disk_partitions'), mock.patch(
'psutil.disk_usage', return_value=MockDiskMetrics(), __name__='disk_usage'
), mock.patch('psutil.disk_io_counters', return_value=MockDiskIOMetrics()), mock_statvfs:
yield
@pytest.fixture(scope='session')
def dd_environment(instance_basic_volume):
yield instance_basic_volume
@pytest.fixture(scope='session')
def instance_basic_volume():
return {'use_mount': 'false', 'tag_by_label': False}
@pytest.fixture(scope='session')
def instance_basic_mount():
return {'use_mount': 'true', 'tag_by_label': False}
@pytest.fixture(scope='session')
def gauge_metrics():
if ON_WINDOWS:
return CORE_GAUGES
else:
return UNIX_GAUGES
@pytest.fixture(scope='session')
def rate_metrics():
return CORE_RATES
| 27.557692
| 113
| 0.7418
|
7ac57f4ac112977ef18454787f72d5ebd91f6405
| 918
|
py
|
Python
|
dtlpy/services/__init__.py
|
dataloop-ai/dtlpy
|
2c73831da54686e047ab6aefd8f12a8e53ea97c2
|
[
"Apache-2.0"
] | 10
|
2020-05-21T06:25:35.000Z
|
2022-01-07T20:34:03.000Z
|
dtlpy/services/__init__.py
|
dataloop-ai/dtlpy
|
2c73831da54686e047ab6aefd8f12a8e53ea97c2
|
[
"Apache-2.0"
] | 22
|
2019-11-17T17:25:16.000Z
|
2022-03-10T15:14:28.000Z
|
dtlpy/services/__init__.py
|
dataloop-ai/dtlpy
|
2c73831da54686e047ab6aefd8f12a8e53ea97c2
|
[
"Apache-2.0"
] | 8
|
2020-03-05T16:23:55.000Z
|
2021-12-27T11:10:42.000Z
|
#! /usr/bin/env python3
# This file is part of DTLPY.
#
# DTLPY is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DTLPY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DTLPY. If not, see <http://www.gnu.org/licenses/>.
from .api_client import ApiClient, VerboseLoggingLevel
from .async_utils import AsyncResponse, AsyncThreadEventLoop
from .cookie import CookieIO
from .create_logger import DataloopLogger
from .reporter import Reporter
from . import service_defaults
| 41.727273
| 70
| 0.784314
|
dbfc0777dccec83e3541b173c7c4aef3a48f85df
| 5,385
|
py
|
Python
|
nchs_mortality/delphi_nchs_mortality/run.py
|
JedGrabman/covidcast-indicators
|
d2a5a232431c8392c54bfc301dcb9beecc541b97
|
[
"MIT"
] | null | null | null |
nchs_mortality/delphi_nchs_mortality/run.py
|
JedGrabman/covidcast-indicators
|
d2a5a232431c8392c54bfc301dcb9beecc541b97
|
[
"MIT"
] | null | null | null |
nchs_mortality/delphi_nchs_mortality/run.py
|
JedGrabman/covidcast-indicators
|
d2a5a232431c8392c54bfc301dcb9beecc541b97
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Functions to call when running the function.
This module should contain a function called `run_module`, that is executed
when the module is run with `python -m MODULE_NAME`.
"""
from datetime import datetime, date, timedelta
from os.path import join
from os import remove, listdir
from shutil import copy
import numpy as np
import pandas as pd
from delphi_utils import read_params, S3ArchiveDiffer
from .pull import pull_nchs_mortality_data
from .export import export_csv
# global constants
METRICS = [
'covid_deaths', 'total_deaths', 'percent_of_expected_deaths',
'pneumonia_deaths', 'pneumonia_and_covid_deaths', 'influenza_deaths',
'pneumonia_influenza_or_covid_19_deaths'
]
SENSORS = [
"num",
"prop"
]
INCIDENCE_BASE = 100000
geo_res = "state"
def run_module():
"""Run module for processing NCHS mortality data."""
params = read_params()
export_start_date = params["export_start_date"]
if export_start_date == "latest": # Find the previous Saturday
export_start_date = date.today() - timedelta(
days=date.today().weekday() + 2)
export_start_date = export_start_date.strftime('%Y-%m-%d')
export_dir = params["export_dir"]
daily_export_dir = params["daily_export_dir"]
cache_dir = params["cache_dir"]
daily_cache_dir = params["daily_cache_dir"]
static_file_dir = params["static_file_dir"]
token = params["token"]
test_mode = params["mode"]
daily_arch_diff = S3ArchiveDiffer(
daily_cache_dir, daily_export_dir,
params["bucket_name"], "nchs_mortality",
params["aws_credentials"])
daily_arch_diff.update_cache()
map_df = pd.read_csv(
join(static_file_dir, "state_pop.csv"), dtype={"fips": int}
)
df = pull_nchs_mortality_data(token, map_df, test_mode)
for metric in METRICS:
if metric == 'percent_of_expected_deaths':
print(metric)
df["val"] = df[metric]
df["se"] = np.nan
df["sample_size"] = np.nan
sensor_name = "_".join(["wip", metric])
export_csv(
df,
geo_name=geo_res,
export_dir=daily_export_dir,
start_date=datetime.strptime(export_start_date, "%Y-%m-%d"),
sensor=sensor_name,
)
else:
for sensor in SENSORS:
print(metric, sensor)
if sensor == "num":
df["val"] = df[metric]
else:
df["val"] = df[metric] / df["population"] * INCIDENCE_BASE
df["se"] = np.nan
df["sample_size"] = np.nan
sensor_name = "_".join(["wip", metric, sensor])
export_csv(
df,
geo_name=geo_res,
export_dir=daily_export_dir,
start_date=datetime.strptime(export_start_date, "%Y-%m-%d"),
sensor=sensor_name,
)
# Weekly run of archive utility on Monday
# - Does not upload to S3, that is handled by daily run of archive utility
# - Exports issues into receiving for the API
if datetime.today().weekday() == 0:
# Copy todays raw output to receiving
for output_file in listdir(daily_export_dir):
copy(
join(daily_export_dir, output_file),
join(export_dir, output_file))
weekly_arch_diff = S3ArchiveDiffer(
cache_dir, export_dir,
params["bucket_name"], "nchs_mortality",
params["aws_credentials"])
        # Don't update the cache from S3 (it has daily files), only simulate an update_cache() call
weekly_arch_diff._cache_updated = True
# Diff exports, and make incremental versions
_, common_diffs, new_files = weekly_arch_diff.diff_exports()
# Archive changed and new files only
to_archive = [f for f, diff in common_diffs.items() if diff is not None]
to_archive += new_files
_, fails = weekly_arch_diff.archive_exports(to_archive, update_s3=False)
# Filter existing exports to exclude those that failed to archive
succ_common_diffs = {f: diff for f, diff in common_diffs.items() if f not in fails}
weekly_arch_diff.filter_exports(succ_common_diffs)
# Report failures: someone should probably look at them
for exported_file in fails:
print(f"Failed to archive (weekly) '{exported_file}'")
# Daily run of archiving utility
# - Uploads changed files to S3
# - Does not export any issues into receiving
# Diff exports, and make incremental versions
_, common_diffs, new_files = daily_arch_diff.diff_exports()
# Archive changed and new files only
to_archive = [f for f, diff in common_diffs.items() if diff is not None]
to_archive += new_files
_, fails = daily_arch_diff.archive_exports(to_archive)
# Daily output not needed anymore, remove them
for exported_file in new_files:
remove(exported_file)
for exported_file, diff_file in common_diffs.items():
remove(exported_file)
remove(diff_file)
# Report failures: someone should probably look at them
for exported_file in fails:
print(f"Failed to archive (daily) '{exported_file}'")
| 36.632653
| 91
| 0.633612
|
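For the `prop` sensor the module converts raw counts to a rate per `INCIDENCE_BASE` (100,000) residents with `val = count / population * INCIDENCE_BASE`. A one-line worked example with made-up numbers:

```python
INCIDENCE_BASE = 100_000
covid_deaths, population = 250, 5_000_000
print(covid_deaths / population * INCIDENCE_BASE)  # 5.0 deaths per 100,000 residents
```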
4be7961643c539131be63a5072d52ff7052a40d1
| 6,832
|
py
|
Python
|
ekorpkit/models/tokenizer/trainer.py
|
entelecheia/eKorpKit
|
9521ae4c4749419fa2b088d1b9e518e5927b7cb8
|
[
"CC-BY-4.0"
] | 4
|
2022-02-26T10:54:16.000Z
|
2022-02-26T11:01:56.000Z
|
ekorpkit/models/tokenizer/trainer.py
|
entelecheia/eKorpKit
|
9521ae4c4749419fa2b088d1b9e518e5927b7cb8
|
[
"CC-BY-4.0"
] | 1
|
2022-03-25T06:37:12.000Z
|
2022-03-25T06:45:53.000Z
|
ekorpkit/models/tokenizer/trainer.py
|
entelecheia/eKorpKit
|
9521ae4c4749419fa2b088d1b9e518e5927b7cb8
|
[
"CC-BY-4.0"
] | null | null | null |
import os
import logging
from tokenizers import BertWordPieceTokenizer, ByteLevelBPETokenizer
from transformers import (
WEIGHTS_NAME,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
BertConfig,
BertForMaskedLM,
BertTokenizer,
BigBirdConfig,
BigBirdForMaskedLM,
BigBirdTokenizer,
CamembertConfig,
CamembertForMaskedLM,
CamembertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
ElectraConfig,
ElectraForMaskedLM,
ElectraForPreTraining,
ElectraTokenizer,
GPT2Config,
GPT2LMHeadModel,
GPT2Tokenizer,
LongformerConfig,
LongformerForMaskedLM,
LongformerTokenizer,
OpenAIGPTConfig,
OpenAIGPTLMHeadModel,
OpenAIGPTTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
XLMRobertaConfig,
XLMRobertaForMaskedLM,
XLMRobertaTokenizer,
)
log = logging.getLogger(__name__)
MODEL_CLASSES = {
"auto": (AutoConfig, AutoModelWithLMHead, AutoTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"electra": (ElectraConfig, ElectraForMaskedLM, ElectraTokenizer),
"gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def train_tokenizer(
train_files,
args,
tokenizer_name=None,
output_dir=None,
):
"""
Train a new tokenizer on `train_files`.
Args:
- train_files: List of files to be used when training the tokenizer.
- tokenizer_name: Name of a pretrained tokenizer or a path to a directory containing a tokenizer.
- output_dir (optional): The directory where model files will be saved. If not given, args.output_dir
will be used.
Returns: None
"""
if not args.vocab_size:
raise AttributeError(
"Cannot train a new tokenizer as vocab_size is not specified in args dict. "
"Either provide a tokenizer or specify vocab_size."
)
if not isinstance(train_files, list):
train_files = [train_files]
if not output_dir:
output_dir = args.output_dir
if args.model_type in ["bert", "electra"]:
tokenizer = BertWordPieceTokenizer(
clean_text=args.clean_text,
handle_chinese_chars=args.handle_chinese_chars,
strip_accents=args.strip_accents, # Must be False if cased model
lowercase=args.do_lower_case,
)
# args.special_tokens = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
# args.wordpieces_prefix = "##"
unused_tokens = [f"[unused{n}]" for n in range(args.unused_token_num)]
special_tokens = list(args.special_tokens) + unused_tokens
tokenizer.train(
files=train_files,
vocab_size=args.vocab_size,
limit_alphabet=args.limit_alphabet,
min_frequency=args.min_frequency,
special_tokens=special_tokens,
wordpieces_prefix=args.wordpieces_prefix,
)
elif args.model_type in ["bigbird", "xlmroberta"]:
        # The Google BigBird approach: the `tokenizers` library's sentencepiece
        # implementation does not build a BigBird-compatible vocabulary model,
        # so fall back to the sentencepiece package directly.
import sentencepiece as spm
import shutil
os.makedirs(output_dir, exist_ok=True)
files = ",".join(train_files)
if args.model_type in ["xlmroberta"]:
# </s>,<s>,<unk>,<pad> are built in -- leave as default
# XLMRoberta uses sentencepiece.bpe as a vocab model prefix
prefix = "sentencepiece.bpe"
spm.SentencePieceTrainer.Train(
f"--input={files} --user_defined_symbols='<mask>,<s>NOTUSED,</s>NOTUSED' --model_prefix={prefix} --vocab_size={args.vocab_size - 2}"
)
else:
# </s>,<s>,<unk>,<pad> are built in -- leave as default
# BigBird uses spiece as a vocab model prefix
prefix = "spiece"
spm.SentencePieceTrainer.Train(
f"--input={files} --user_defined_symbols='[SEP],[CLS],[MASK]' --model_prefix=spiece --vocab_size={args.vocab_size - 3}"
)
        # SentencePiece has no option to set the output path, so move the artifacts afterwards: https://github.com/google/sentencepiece/blob/master/doc/options.md
if os.path.exists(output_dir + "/" + f"{prefix}.model"):
os.remove(output_dir + "/" + f"{prefix}.model")
shutil.move(src=f"{prefix}.model", dst=output_dir)
if os.path.exists(output_dir + "/" + f"{prefix}.vocab"):
os.remove(output_dir + "/" + f"{prefix}.vocab")
shutil.move(src=f"{prefix}.vocab", dst=output_dir)
else:
tokenizer = ByteLevelBPETokenizer(lowercase=args.do_lower_case)
tokenizer.train(
files=train_files,
vocab_size=args.vocab_size,
min_frequency=args.min_frequency,
special_tokens=list(args.special_tokens),
)
if args.model_type not in ["bigbird", "xlmroberta"]:
os.makedirs(output_dir, exist_ok=True)
tokenizer.save_model(output_dir)
log.info(
" Training of {} tokenizer complete. Saved to {}.".format(
tokenizer_name, output_dir
)
)
# _, _, tokenizer_class = MODEL_CLASSES[args.model_type]
# tokenizer = tokenizer_class.from_pretrained(output_dir)
def train_spm(
train_files,
output_dir=None,
train_args=None,
**kwargs,
):
    import sentencepiece as spm
    import shutil
    args = train_args  # the body below reads model_type and vocab_size from this namespace
    os.makedirs(output_dir, exist_ok=True)
    files = ",".join(train_files)
if args.model_type in ["xlmroberta"]:
# </s>,<s>,<unk>,<pad> are built in -- leave as default
# XLMRoberta uses sentencepiece.bpe as a vocab model prefix
prefix = "sentencepiece.bpe"
spm.SentencePieceTrainer.Train(
f"--input={files} --user_defined_symbols='<mask>,<s>NOTUSED,</s>NOTUSED' --model_prefix={prefix} --vocab_size={args.vocab_size - 2}"
)
else:
# </s>,<s>,<unk>,<pad> are built in -- leave as default
# BigBird uses spiece as a vocab model prefix
prefix = "spiece"
spm.SentencePieceTrainer.Train(
f"--input={files} --user_defined_symbols='[SEP],[CLS],[MASK]' --model_prefix=spiece --vocab_size={args.vocab_size - 3}"
)
    # SentencePiece has no option to set the output path, so move the artifacts afterwards: https://github.com/google/sentencepiece/blob/master/doc/options.md
if os.path.exists(output_dir + "/" + f"{prefix}.model"):
os.remove(output_dir + "/" + f"{prefix}.model")
shutil.move(src=f"{prefix}.model", dst=output_dir)
if os.path.exists(output_dir + "/" + f"{prefix}.vocab"):
os.remove(output_dir + "/" + f"{prefix}.vocab")
shutil.move(src=f"{prefix}.vocab", dst=output_dir)
| 34.331658
| 148
| 0.645785
|
2f560c671d6d1bc64e20ec81b6f2361e10920c4a
| 5,078
|
py
|
Python
|
mewgram/settings.py
|
glasnt/mewgram
|
cd31df84d313c0edb452336a6b014428c110a2fc
|
[
"BSD-3-Clause"
] | null | null | null |
mewgram/settings.py
|
glasnt/mewgram
|
cd31df84d313c0edb452336a6b014428c110a2fc
|
[
"BSD-3-Clause"
] | 2
|
2020-06-28T04:12:52.000Z
|
2021-06-09T19:13:32.000Z
|
mewgram/settings.py
|
glasnt/mewgram
|
cd31df84d313c0edb452336a6b014428c110a2fc
|
[
"BSD-3-Clause"
] | 2
|
2020-07-01T04:30:32.000Z
|
2021-05-14T12:09:46.000Z
|
import io
import logging
import os
import sys
from pathlib import Path
import environ
from django.core.exceptions import ImproperlyConfigured
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Two modes: local dev, and prod.
# Local dev: have a .env file, use local settings.
# Prod: Google auth, use stored settings.
#
# You can interact with prod on your local machine by being
# authenticated to gcloud and not having a local .env file.
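# A minimal illustration of the local .env this expects (placeholder values only;
# the keys mirror the env() lookups later in this file):
#
#     SECRET_KEY=change-me
#     DEBUG=True
#     DATABASE_URL=postgres://mewgram:mewgram@localhost:5432/mewgram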
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
if os.path.isfile(".env"):
env.read_env(env_file)
logging.debug("Loaded env from local filesystem")
LOCAL_DEVELOPMENT = True
else:
import google.auth
try:
_, project = google.auth.default()
except google.auth.exceptions.DefaultCredentialsError as e:
raise ImproperlyConfigured(
"If you want to run in local development mode, define a .env file"
)
# Load settings from Secret Manager
from google.cloud import secretmanager as sm
client = sm.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = f"projects/{project}/secrets/{settings_name}/versions/latest"
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
logging.debug("Loaded env from Secret Manager")
LOCAL_DEVELOPMENT = False
SECRET_KEY = env("SECRET_KEY")
DEBUG = env("DEBUG", default=False)
LOGIN_REDIRECT_URL = "/"
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"gcloudc",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.humanize",
"users",
"purr",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "mewgram.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "mewgram.wsgi.application"
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
if LOCAL_DEVELOPMENT:
if "DATABASE_URL" in os.environ.keys():
DATABASES = {"default": env.db()}
else:
raise ImproperlyConfigured("DATABASE_URL is not defined in .env")
else:
DATABASES = {
"default": {
"ENGINE": "gcloudc.db.backends.datastore",
"PROJECT": project,
"INDEXES_FILE": "djangaeidx.yaml",
"NAMESPACE": "mewgram",
}
}
logging.debug(f"Using {DATABASES['default']['ENGINE']} as database engine")
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_USER_MODEL = "users.CustomUser"
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATICFILES_DIRS = ["purr/static"]
STATIC_URL = "/static/"
if LOCAL_DEVELOPMENT:
DEFAULT_FILE_STORAGE = "django.core.files.storage.FileSystemStorage"
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
STATIC_URL = STATIC_ROOT
MEDIA_ROOT = "media/" # where files are stored on the local filesystem
MEDIA_URL = "/media/" # what is prepended to the image URL
else:
GS_BUCKET_NAME = env("GS_BUCKET_NAME", default=None)
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
INSTALLED_APPS += ["storages"]
else:
logging.error("No GS_BUCKET_NAME defined in settings")
sys.exit(1)
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
| 27.597826
| 91
| 0.687672
|
75ef17677c2d50afe7b66b96c5f056f973a3cbc5
| 11,203
|
py
|
Python
|
api_1.4/containerd/services/images/v1/images_pb2_grpc.py
|
englandbaron/pycontainerd
|
9e5fea6e182a80508ce8b5725f407e50beba3cfe
|
[
"Apache-2.0"
] | null | null | null |
api_1.4/containerd/services/images/v1/images_pb2_grpc.py
|
englandbaron/pycontainerd
|
9e5fea6e182a80508ce8b5725f407e50beba3cfe
|
[
"Apache-2.0"
] | null | null | null |
api_1.4/containerd/services/images/v1/images_pb2_grpc.py
|
englandbaron/pycontainerd
|
9e5fea6e182a80508ce8b5725f407e50beba3cfe
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from containerd.services.images.v1 import images_pb2 as containerd_dot_services_dot_images_dot_v1_dot_images__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ImagesStub(object):
"""Images is a service that allows one to register images with containerd.
In containerd, an image is merely the mapping of a name to a content root,
described by a descriptor. The behavior and state of image is purely
dictated by the type of the descriptor.
From the perspective of this service, these references are mostly shallow,
in that the existence of the required content won't be validated until
required by consuming services.
As such, this can really be considered a "metadata service".
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/containerd.services.images.v1.Images/Get',
request_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageResponse.FromString,
)
self.List = channel.unary_unary(
'/containerd.services.images.v1.Images/List',
request_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesResponse.FromString,
)
self.Create = channel.unary_unary(
'/containerd.services.images.v1.Images/Create',
request_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageResponse.FromString,
)
self.Update = channel.unary_unary(
'/containerd.services.images.v1.Images/Update',
request_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageResponse.FromString,
)
self.Delete = channel.unary_unary(
'/containerd.services.images.v1.Images/Delete',
request_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.DeleteImageRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class ImagesServicer(object):
"""Images is a service that allows one to register images with containerd.
In containerd, an image is merely the mapping of a name to a content root,
described by a descriptor. The behavior and state of image is purely
dictated by the type of the descriptor.
From the perspective of this service, these references are mostly shallow,
in that the existence of the required content won't be validated until
required by consuming services.
As such, this can really be considered a "metadata service".
"""
def Get(self, request, context):
"""Get returns an image by name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""List returns a list of all images known to containerd.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Create(self, request, context):
"""Create an image record in the metadata store.
The name of the image must be unique.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Update assigns the name to a given target image based on the provided
image.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Delete deletes the image by name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ImagesServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageRequest.FromString,
response_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageResponse.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesRequest.FromString,
response_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesResponse.SerializeToString,
),
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageRequest.FromString,
response_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageResponse.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageRequest.FromString,
response_serializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageResponse.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=containerd_dot_services_dot_images_dot_v1_dot_images__pb2.DeleteImageRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'containerd.services.images.v1.Images', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Images(object):
"""Images is a service that allows one to register images with containerd.
In containerd, an image is merely the mapping of a name to a content root,
described by a descriptor. The behavior and state of image is purely
dictated by the type of the descriptor.
From the perspective of this service, these references are mostly shallow,
in that the existence of the required content won't be validated until
required by consuming services.
As such, this can really be considered a "metadata service".
"""
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.images.v1.Images/Get',
containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageRequest.SerializeToString,
containerd_dot_services_dot_images_dot_v1_dot_images__pb2.GetImageResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.images.v1.Images/List',
containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesRequest.SerializeToString,
containerd_dot_services_dot_images_dot_v1_dot_images__pb2.ListImagesResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.images.v1.Images/Create',
containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageRequest.SerializeToString,
containerd_dot_services_dot_images_dot_v1_dot_images__pb2.CreateImageResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.images.v1.Images/Update',
containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageRequest.SerializeToString,
containerd_dot_services_dot_images_dot_v1_dot_images__pb2.UpdateImageResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.images.v1.Images/Delete',
containerd_dot_services_dot_images_dot_v1_dot_images__pb2.DeleteImageRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
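# --- Hedged usage sketch (not part of the generated module) ---
# Illustrates how ImagesStub above can be driven. The socket path and the
# "containerd-namespace" metadata value ("default") are assumptions about a
# typical containerd deployment, not something defined in this file.
if __name__ == "__main__":
    images_pb2 = containerd_dot_services_dot_images_dot_v1_dot_images__pb2
    channel = grpc.insecure_channel("unix:///run/containerd/containerd.sock")
    stub = ImagesStub(channel)
    reply = stub.List(
        images_pb2.ListImagesRequest(),
        metadata=(("containerd-namespace", "default"),),
    )
    for image in reply.images:
        print(image.name)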
| 47.470339
| 136
| 0.701241
|
3309d468eaa83324b274f4269e5a43fe04d84794
| 8,824
|
py
|
Python
|
singa_auto/container/docker_swarm.py
|
pinpom/singa-auto
|
e5b80c4728ee65fd0bd7ab15d59ab5621ccc50b1
|
[
"Apache-2.0"
] | 1
|
2020-10-26T11:37:26.000Z
|
2020-10-26T11:37:26.000Z
|
singa_auto/container/docker_swarm.py
|
pinpom/singa-auto
|
e5b80c4728ee65fd0bd7ab15d59ab5621ccc50b1
|
[
"Apache-2.0"
] | null | null | null |
singa_auto/container/docker_swarm.py
|
pinpom/singa-auto
|
e5b80c4728ee65fd0bd7ab15d59ab5621ccc50b1
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import time
import docker
import logging
import traceback
from collections import namedtuple
from functools import wraps
from .container_manager import ContainerManager, InvalidServiceRequestError, ContainerService
LABEL_AVAILBLE_GPUS = 'available_gpus'
LABEL_NUM_SERVICES = 'num_services'
RETRY_WAIT_SECS = 2
RETRY_TIMES = 5
logger = logging.getLogger(__name__)
_Node = namedtuple('_Node', ['id', 'available_gpus', 'num_services'])
_Deployment = namedtuple('_Deployment', ['node_id', 'gpu_nos'])
class DockerSwarmContainerManager(ContainerManager):
def __init__(
self,
network=os.environ.get('DOCKER_NETWORK', 'singa_auto'),
label_num_services=os.environ.get('DOCKER_NODE_LABEL_NUM_SERVICES',
'num_services'),
label_available_gpus=os.environ.get('DOCKER_NODE_LABEL_AVAILABLE_GPUS',
'available_gpus')):
self._network = network
self._client = docker.from_env()
self._label_num_services = label_num_services
self._label_available_gpus = label_available_gpus
def create_service(self,
service_name,
docker_image,
replicas,
args,
environment_vars,
mounts={},
publish_port=None,
gpus=0) -> ContainerService:
deployment = self._get_deployment(gpus)
(service_id, hostname, port) \
= self._create_service(deployment, service_name, docker_image, replicas,
args, environment_vars, mounts, publish_port)
info = {
'node_id': deployment.node_id,
'gpu_nos': deployment.gpu_nos,
'service_name': service_name,
'replicas': replicas
}
service = ContainerService(service_id, hostname, port, info)
self._mark_deployment(deployment)
logger.info('Created service of ID "{}" with info {}'.format(
service.id, service.info))
return service
def destroy_service(self, service: ContainerService):
self._destroy_sevice(service.id)
node_id = service.info['node_id']
gpu_nos = service.info['gpu_nos']
deployment = _Deployment(node_id, gpu_nos)
self._unmark_deployment(deployment)
logger.info('Deleted service of ID "{}"'.format(service.id))
def _get_deployment(self, gpus) -> _Deployment:
nodes = self._get_nodes()
# Filter nodes with GPU if required
if gpus > 0:
nodes = [x for x in nodes if len(x.available_gpus) >= gpus]
if len(nodes) == 0:
raise InvalidServiceRequestError(
'Insufficient GPUs to deploy service')
# Choose the node with fewest services
(_, node) = sorted([(x.num_services, x) for x in nodes])[0]
# Assign GPUs
gpu_nos = node.available_gpus[:gpus]
deployment = _Deployment(node.id, gpu_nos)
return deployment
def _mark_deployment(self, deployment):
node_id = deployment.node_id
# Update num services and GPUs available for node
node = self._get_node(node_id)
num_services = node.num_services + 1
available_gpus = [
x for x in node.available_gpus if x not in deployment.gpu_nos
]
self._update_node(node_id, num_services, available_gpus)
def _unmark_deployment(self, deployment):
node_id = deployment.node_id
# Update num services and GPUs available for node
node = self._get_node(node_id)
num_services = max(0, node.num_services - 1)
available_gpus = list(set(node.available_gpus + deployment.gpu_nos))
self._update_node(node_id, num_services, available_gpus)
def _destroy_sevice(self, service_id):
service = self._client.services.get(service_id)
_retry(service.remove)()
def _create_service(self, deployment, service_name, docker_image, replicas,
args, environment_vars, mounts, publish_port):
env = ['{}={}'.format(k, v) for (k, v) in environment_vars.items()]
mounts_list = ['{}:{}:rw'.format(k, v) for (k, v) in mounts.items()]
constraints = []
ports_list = []
container_port = None
published_port = None
hostname = service_name
if publish_port is not None:
# Host of Docker Swarm service = service's name at the container port
published_port = int(publish_port[0])
container_port = int(publish_port[1])
ports_list = [{
'PublishedPort': published_port,
'TargetPort': container_port
}]
# Modify service based on deployment info
constraints.append('node.id=={}'.format(
deployment.node_id)) # Add node constraint
if len(deployment.gpu_nos) > 0:
env.append('CUDA_VISIBLE_DEVICES={}'.format(','.join(
[str(x) for x in deployment.gpu_nos]))) # GPU nos
else:
env.append('CUDA_VISIBLE_DEVICES=-1') # No GPU
docker_service = _retry(self._client.services.create)(
image=docker_image,
args=args,
networks=[self._network],
name=service_name,
env=env,
mounts=mounts_list,
# Restart replicas when they exit with error
restart_policy={
'Condition': 'on-failure'
},
constraints=constraints,
endpoint_spec={
'Ports': ports_list
},
mode={
'Replicated': {
'Replicas': replicas
}
})
# Host of Docker Swarm service = service's name at the container port
return (docker_service.id, hostname, container_port)
def _get_nodes(self):
docker_nodes = self._client.nodes.list()
nodes = [self._parse_node(x) for x in docker_nodes]
return nodes
def _get_node(self, node_id):
docker_node = self._client.nodes.get(node_id)
node = self._parse_node(docker_node)
return node
def _parse_node(self, docker_node):
spec = docker_node.attrs.get('Spec', {})
spec_labels = spec.get('Labels', {})
available_gpus_str = spec_labels.get(self._label_available_gpus, '')
available_gpus = [
int(x) for x in available_gpus_str.split(',') if len(x) > 0
]
num_services = int(spec_labels.get(self._label_num_services, 0))
return _Node(docker_node.id, available_gpus, num_services)
def _update_node(self, node_id, num_services, available_gpus):
docker_node = self._client.nodes.get(node_id)
spec = docker_node.attrs.get('Spec', {})
spec_labels = spec.get('Labels', {})
_retry(docker_node.update)({
**spec, 'Labels': {
**spec_labels, self._label_num_services:
str(num_services),
self._label_available_gpus:
','.join([str(x) for x in available_gpus])
}
})
# Decorator that retries a method call a number of times
def _retry(func):
wait_secs = RETRY_WAIT_SECS
@wraps(func)
def retried_func(*args, **kwargs):
for no in range(RETRY_TIMES + 1):
try:
return func(*args, **kwargs)
except Exception as e:
logger.error(f'Error when calling `{func}`:')
logger.error(traceback.format_exc())
# Retried so many times but still errors - raise exception
if no == RETRY_TIMES:
                    logger.info(f'Giving up on `{func}` call...')
raise e
logger.info(f'Retrying {func} after {wait_secs}s...')
time.sleep(wait_secs)
return retried_func
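# --- Hedged usage sketch (not part of the original module) ---
# Mirrors the create_service signature above; the image, ports and environment
# values are placeholders, and a reachable Docker Swarm with the expected node
# labels is assumed. Attribute access on `service` follows the
# ContainerService(service_id, hostname, port, info) construction used above.
if __name__ == "__main__":
    manager = DockerSwarmContainerManager()
    service = manager.create_service(
        service_name="demo-worker",
        docker_image="alpine:latest",
        replicas=1,
        args=["sleep", "3600"],
        environment_vars={"MODE": "demo"},
        publish_port=(8080, 80),
        gpus=0,
    )
    print(service.id, service.info)
    manager.destroy_service(service)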
| 35.869919
| 93
| 0.608908
|
0b882adafd99179b25f0ff63d29baf0ead2ff130
| 835
|
py
|
Python
|
src/templates/gRPC-api/tests/test_server.py
|
pyansys/create-python-project
|
e289b5042fd95ca99c6d29e107e2114a5b9a7b15
|
[
"MIT"
] | 4
|
2022-02-08T12:37:49.000Z
|
2022-03-29T04:45:10.000Z
|
src/templates/gRPC-api/tests/test_server.py
|
pyansys/create-python-project
|
e289b5042fd95ca99c6d29e107e2114a5b9a7b15
|
[
"MIT"
] | 26
|
2022-02-08T12:48:52.000Z
|
2022-03-28T08:58:47.000Z
|
src/templates/gRPC-api/tests/test_server.py
|
pyansys/create-python-project
|
e289b5042fd95ca99c6d29e107e2114a5b9a7b15
|
[
"MIT"
] | 1
|
2022-03-16T12:57:24.000Z
|
2022-03-16T12:57:24.000Z
|
# Copyright (c) 2022, Ansys Inc. Unauthorised use, distribution or duplication is prohibited
from src.stubs.pingserver_pb2 import UserRequest, EmptyRequest
class TestServer:
def test_first_ping(self, grpc_stub):
request = EmptyRequest()
response = grpc_stub.PingServer(request)
assert response.message == 'Hello, the server is healthy and it had been pinged 1 times!'
def test_who_ping(self, grpc_stub):
request = UserRequest(name='you')
response = grpc_stub.WhoPing(request)
assert response.message == f'Hello, the server is pinged by {request.name}!'
def test_third_ping(self, grpc_stub):
request = EmptyRequest()
response = grpc_stub.PingServer(request)
assert response.message == 'Hello, the server is healthy and it had been pinged 3 times!'
| 41.75
| 97
| 0.706587
|
5b824a5c29663b245ee53b29325811b5d2bf48ae
| 16,001
|
py
|
Python
|
fitbenchmarking/parsing/tests/test_parsers.py
|
fitbenchmarking/fitbenchmarking
|
ea398efa61f071dc64fe7c3b484d5bb4e1897856
|
[
"BSD-3-Clause"
] | 6
|
2019-07-22T01:56:10.000Z
|
2021-12-10T05:29:30.000Z
|
fitbenchmarking/parsing/tests/test_parsers.py
|
fitbenchmarking/fitbenchmarking
|
ea398efa61f071dc64fe7c3b484d5bb4e1897856
|
[
"BSD-3-Clause"
] | 677
|
2019-04-29T10:23:49.000Z
|
2022-03-22T12:01:30.000Z
|
fitbenchmarking/parsing/tests/test_parsers.py
|
fitbenchmarking/fitbenchmarking
|
ea398efa61f071dc64fe7c3b484d5bb4e1897856
|
[
"BSD-3-Clause"
] | 8
|
2019-06-13T10:32:17.000Z
|
2020-12-09T15:08:40.000Z
|
"""
This file contains tests for the parsers.
"""
from importlib import import_module
from inspect import isclass, isabstract, getmembers
from json import load
import os
from unittest import TestCase
from pytest import test_type as TEST_TYPE # pylint: disable=no-name-in-module
import numpy as np
from fitbenchmarking.parsing.base_parser import Parser
from fitbenchmarking.parsing.fitting_problem import FittingProblem
from fitbenchmarking.parsing.parser_factory import \
ParserFactory, parse_problem_file
from fitbenchmarking.utils import exceptions
from fitbenchmarking.utils.options import Options
OPTIONS = Options()
JACOBIAN_ENABLED_PARSERS = ['cutest', 'nist']
HESSIAN_ENABLED_PARSERS = ['nist']
BOUNDS_ENABLED_PARSERS = ['cutest', 'fitbenchmark']
# pylint: disable=no-self-use
def pytest_generate_tests(metafunc):
"""
Function used by pytest to parametrize tests.
This will create a set of tests for each function in the class where
the parameters are given in a 'params' dict in the class.
"""
# called once per each test function
funcarglist = metafunc.cls.params[metafunc.function.__name__]
argnames = sorted(funcarglist[0])
argvals = [[funcargs[name]
for name in argnames]
for funcargs in funcarglist]
metafunc.parametrize(argnames,
argvals)
def generate_test_cases():
"""
Utility function to create the params dict for parametrising the tests.
:return: params dictionary with a function name as a key, and a list of
parameter dictionaries for the value
:rtype: dict
"""
params = {'test_parsers': [],
'test_factory': [],
'test_function_evaluation': [],
'test_jacobian_evaluation': [],
'test_hessian_evaluation': []}
# get all parsers
test_dir = os.path.dirname(__file__)
if TEST_TYPE == "all":
formats = ['cutest', 'nist', 'mantid', 'ivp', 'sasview']
elif TEST_TYPE == "default":
formats = ['nist']
else:
formats = ['nist']
# create list of test_cases
expected_dir = os.listdir(os.path.join(test_dir, 'expected'))
for file_format in formats:
format_dir = os.listdir(os.path.join(test_dir, file_format))
for expected_file in expected_dir:
expected_path = os.path.join(test_dir, 'expected', expected_file)
test_name = os.path.splitext(expected_file)[0]
test_name_with_ext = [f for f in format_dir
if f.startswith(test_name)]
if not test_name_with_ext:
test_file = None
elif len(test_name_with_ext) == 1:
test_file = os.path.join(test_dir,
file_format,
test_name_with_ext[0])
else:
raise RuntimeError(
'Too many "{}" files found for "{}" test'.format(
file_format, test_name))
test_parsers = {}
test_parsers['file_format'] = file_format
test_parsers['test_file'] = test_file
test_parsers['expected'] = load_expectation(expected_path)
params['test_parsers'].append(test_parsers)
test_factory = {}
test_factory['file_format'] = file_format
test_factory['test_file'] = test_file
params['test_factory'].append(test_factory)
func_eval = os.path.join(test_dir,
file_format,
'function_evaluations.json')
test_func_eval = {}
test_func_eval['file_format'] = file_format
test_func_eval['evaluations_file'] = func_eval
params['test_function_evaluation'].append(test_func_eval)
jac_eval = os.path.join(test_dir,
file_format,
'jacobian_evaluations.json')
test_jac_eval = {}
test_jac_eval['file_format'] = file_format
test_jac_eval['evaluations_file'] = jac_eval
params['test_jacobian_evaluation'].append(test_jac_eval)
hes_eval = os.path.join(test_dir,
file_format,
'hessian_evaluations.json')
test_hes_eval = {}
test_hes_eval['file_format'] = file_format
test_hes_eval['evaluations_file'] = hes_eval
params['test_hessian_evaluation'].append(test_hes_eval)
return params
def load_expectation(filename):
"""
Load an expected fitting problem from a json file.
:param filename: The path to the expectation file
:type filename: string
:return: A fitting problem to test against
:rtype: fitbenchmarking.parsing.FittingProblem
"""
with open(filename, 'r') as f:
expectation_dict = load(f)
expectation = FittingProblem(OPTIONS)
expectation.name = expectation_dict['name']
expectation.start_x = expectation_dict['start_x']
expectation.end_x = expectation_dict['end_x']
expectation.data_x = np.array(expectation_dict['data_x'])
expectation.data_y = np.array(expectation_dict['data_y'])
expectation.data_e = expectation_dict['data_e']
if expectation.data_e is not None:
expectation.data_e = np.array(expectation.data_e)
expectation.starting_values = expectation_dict['starting_values']
expectation.value_ranges = expectation_dict['value_ranges']
return expectation
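# Hedged illustration of the expectation-file shape consumed above, inferred only
# from the keys read in load_expectation (all values are placeholders):
#     {
#         "name": "basic",
#         "start_x": 0.0,
#         "end_x": 1.0,
#         "data_x": [0.0, 0.5, 1.0],
#         "data_y": [1.0, 2.0, 3.0],
#         "data_e": null,
#         "starting_values": [{"a": 1.0, "b": 0.0}],
#         "value_ranges": null
#     }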
class TestParsers:
"""
A class to hold the tests for parametrized testing of parsers.
"""
params = generate_test_cases()
def test_parsers(self, file_format, test_file, expected):
"""
Test that the parser correctly parses the specified input file.
:param file_format: The name of the file format
:type file_format: string
:param test_file: The path to the test file
:type test_file: string
"""
assert (test_file is not None), \
'No test file for {}'.format(file_format)
with open(test_file) as f:
if f.readline() == 'NA':
# Test File cannot be written
return
# Test import
module = import_module(name='.{}_parser'.format(file_format),
package='fitbenchmarking.parsing')
parser = getmembers(module, lambda m: (isclass(m)
and not isabstract(m)
and issubclass(m, Parser)
and m is not Parser
and file_format.lower()
in str(m.__name__.lower())
))[0][1]
# Test parse
with parser(test_file, OPTIONS) as p:
fitting_problem = p.parse()
# Allow for problems not supporting certain test cases
# (e.g. value_ranges)
if fitting_problem.name == 'NA':
return
# Check against expected
fitting_problem.verify()
        # functions and equation need to be done separately as they can't be
# generic across problem types.
# similarly starting_values uses the param name so must be checked
# separately
for attr in ['name', 'data_x', 'data_y', 'data_e', 'start_x', 'end_x']:
parsed_attr = getattr(fitting_problem, attr)
expected_attr = getattr(expected, attr)
equal = (parsed_attr == expected_attr)
if isinstance(equal, np.ndarray):
equal = equal.all()
assert (equal), '{} was parsed incorrectly.'.format(attr) \
+ '{} != {}'.format(parsed_attr, expected_attr)
# Check starting_values
for a, e in zip(fitting_problem.starting_values,
expected.starting_values):
loaded_as_set = set(a.values())
expected_as_set = set(e.values())
assert (loaded_as_set == expected_as_set), \
'starting_values were parsed incorrectly.'
# check value ranges
if file_format in BOUNDS_ENABLED_PARSERS:
if fitting_problem.value_ranges is not None:
act_val = str(fitting_problem.value_ranges)
else:
act_val = fitting_problem.value_ranges
assert (act_val == expected.value_ranges), \
'value_ranges were parsed incorrectly.'
# Check that the function is callable
assert callable(fitting_problem.function)
if file_format in JACOBIAN_ENABLED_PARSERS:
# Check that the Jacobian is callable
assert callable(fitting_problem.jacobian)
if file_format in HESSIAN_ENABLED_PARSERS:
            # Check that the Hessian is callable
assert callable(fitting_problem.hessian)
def test_function_evaluation(self, file_format, evaluations_file):
"""
Tests that the function evaluation is consistent with what would be
expected by comparing to some precomputed values with fixed params and
x values.
:param file_format: The name of the file format
:type file_format: string
:param evaluations_file: Path to a json file containing tests and
results
in the following format:
{"test_file1": [[x1, params1, results1],
[x2, params2, results2],
...],
"test_file2": ...}
:type evaluations_file: string
"""
assert (evaluations_file is not None), \
'No function evaluations provided to test against for {}'.format(
file_format)
with open(evaluations_file, 'r') as ef:
results = load(ef)
format_dir = os.path.dirname(evaluations_file)
for f, tests in results.items():
f = os.path.join(format_dir, f)
parser = ParserFactory.create_parser(f)
with parser(f, OPTIONS) as p:
fitting_problem = p.parse()
for r in tests:
x = np.array(r[0])
actual = fitting_problem.eval_model(x=x, params=r[1])
assert np.isclose(actual, r[2]).all()
def test_jacobian_evaluation(self, file_format, evaluations_file):
"""
Tests that the Jacobian evaluation is consistent with what would be
expected by comparing to some precomputed values with fixed params and
x values.
:param file_format: The name of the file format
:type file_format: string
:param evaluations_file: Path to a json file containing tests and
results
in the following format:
{"test_file1": [[x1, params1, results1],
[x2, params2, results2],
...],
"test_file2": ...}
:type evaluations_file: string
"""
# Note that this test is optional so will only run if the file_format
# is added to the JACOBIAN_ENABLED_PARSERS list.
if file_format in JACOBIAN_ENABLED_PARSERS:
message = 'No function evaluations provided to test ' \
'against for {}'.format(file_format)
assert (evaluations_file is not None), message
with open(evaluations_file, 'r') as ef:
results = load(ef)
format_dir = os.path.dirname(evaluations_file)
for f, tests in results.items():
f = os.path.join(format_dir, f)
parser = ParserFactory.create_parser(f)
with parser(f, OPTIONS) as p:
fitting_problem = p.parse()
for r in tests:
x = np.array(r[0])
actual = fitting_problem.jacobian(x, r[1])
assert np.isclose(actual, r[2]).all()
def test_hessian_evaluation(self, file_format, evaluations_file):
"""
Tests that the Hessian evaluation is consistent with what would be
expected by comparing to some precomputed values with fixed params and
x values.
:param file_format: The name of the file format
:type file_format: string
:param evaluations_file: Path to a json file containing tests and
results
in the following format:
{"test_file1": [[x1, params1, results1],
[x2, params2, results2],
...],
"test_file2": ...}
:type evaluations_file: string
"""
# Note that this test is optional so will only run if the file_format
# is added to the HESSIAN_ENABLED_PARSERS list.
if file_format in HESSIAN_ENABLED_PARSERS:
message = 'No function evaluations provided to test ' \
'against for {}'.format(file_format)
assert (evaluations_file is not None), message
with open(evaluations_file, 'r') as ef:
results = load(ef)
format_dir = os.path.dirname(evaluations_file)
for f, tests in results.items():
f = os.path.join(format_dir, f)
parser = ParserFactory.create_parser(f)
with parser(f, OPTIONS) as p:
fitting_problem = p.parse()
for r in tests:
x = np.array(r[0])
actual = fitting_problem.hessian(x, r[1])
assert np.isclose(actual, r[2]).all()
def test_factory(self, file_format, test_file):
"""
Tests that the factory selects the correct parser
:param file_format: The name of the file format
:type file_format: string
:param test_file: The path to the test file
:type test_file: string
"""
with open(test_file) as f:
if f.readline() == 'NA':
# Skip the test files with no data
return
parser = ParserFactory.create_parser(test_file)
        assert (parser.__name__.lower().startswith(file_format.lower())), \
            'Factory failed to get associated parser for {0}: got {1}, ' \
            'required starting with {2}'.format(test_file,
                                                parser.__name__.lower(),
                                                file_format)
class TestParserFactory(TestCase):
"""
A class to hold the tests for the parser factory.
Note: testing that the parser factory works for all new parsers is left to
the factory tests in TestParsers
"""
def test_unknown_parser(self):
"""
Tests that the parser factory raises a NoParserError when an erroneous
parser is requested.
"""
filename = os.path.join(os.getcwd(), 'this_is_a_fake_parser.txt')
with open(filename, 'w') as f:
f.write('this_is_a_fake_parser')
factory = ParserFactory()
with self.assertRaises(exceptions.NoParserError):
_ = factory.create_parser(filename)
os.remove(filename)
def test_parse_problem_file(self):
"""
Tests the parse_problem_file method
"""
filename = os.path.join(os.path.dirname(__file__),
'nist',
'basic.dat')
fitting_problem = parse_problem_file(filename, OPTIONS)
self.assertEqual(fitting_problem.name, 'basic')
| 38.743341
| 79
| 0.574277
|
d9ca2d113f84680a4c8893f5d1f0e67f3be2ef10
| 11,067
|
py
|
Python
|
galaxy/api/filters.py
|
smrutikanta/ansible-galaxy
|
2492e83a3551681a7b95bdfffb66d51a2a2f42ec
|
[
"Apache-2.0"
] | null | null | null |
galaxy/api/filters.py
|
smrutikanta/ansible-galaxy
|
2492e83a3551681a7b95bdfffb66d51a2a2f42ec
|
[
"Apache-2.0"
] | null | null | null |
galaxy/api/filters.py
|
smrutikanta/ansible-galaxy
|
2492e83a3551681a7b95bdfffb66d51a2a2f42ec
|
[
"Apache-2.0"
] | null | null | null |
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import re
# Django
from django.core.exceptions import FieldError, ValidationError, ObjectDoesNotExist
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import Q
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignObjectRel
# Django REST Framework
from rest_framework.exceptions import ParseError
from rest_framework.filters import BaseFilterBackend
# drf-haystack
from drf_haystack.filters import HaystackFilter
# Galaxy
from galaxy.main.models import UserAlias
GalaxyUser = get_user_model()
class ActiveOnlyBackend(BaseFilterBackend):
'''
Filter to show only objects where is_active/active is True.
'''
def filter_queryset(self, request, queryset, view):
for field in queryset.model._meta.fields:
if field.name == 'is_active':
queryset = queryset.filter(is_active=True)
elif field.name == 'active':
queryset = queryset.filter(active=True)
return queryset
class FieldLookupBackend(BaseFilterBackend):
'''
Filter using field lookups provided via query string parameters.
'''
RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by',
'search')
SUPPORTED_LOOKUPS = ('exact', 'iexact', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith',
'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'in',
'isnull')
def get_field_from_lookup(self, model, lookup):
field = None
parts = lookup.split('__')
if parts and parts[-1] not in self.SUPPORTED_LOOKUPS:
parts.append('exact')
# FIXME: Could build up a list of models used across relationships, use
# those lookups combined with request.user.get_queryset(Model) to make
# sure user cannot query using objects he could not view.
for n, name in enumerate(parts[:-1]):
if name == 'pk':
field = model._meta.pk
else:
field = model._meta.get_field_by_name(name)[0]
if n < (len(parts) - 2):
if getattr(field, 'rel', None):
model = field.rel.to
else:
model = field.model
return field
def to_python_boolean(self, value, allow_none=False):
value = unicode(value)
if value.lower() in ('true', '1'):
return True
elif value.lower() in ('false', '0'):
return False
elif allow_none and value.lower() in ('none', 'null'):
return None
else:
raise ValueError(u'Unable to convert "%s" to boolean' % unicode(value))
def to_python_related(self, value):
value = unicode(value)
if value.lower() in ('none', 'null'):
return None
else:
return int(value)
def value_to_python_for_field(self, field, value):
if isinstance(field, models.NullBooleanField):
return self.to_python_boolean(value, allow_none=True)
elif isinstance(field, models.BooleanField):
return self.to_python_boolean(value)
elif isinstance(field, ForeignObjectRel):
return self.to_python_related(value)
else:
return field.to_python(value)
def value_to_python(self, model, lookup, value):
field = self.get_field_from_lookup(model, lookup)
if lookup.endswith('__isnull'):
value = self.to_python_boolean(value)
elif lookup.endswith('__in'):
items = []
for item in value.split(','):
items.append(self.value_to_python_for_field(field, item))
value = items
elif lookup.endswith('__regex') or lookup.endswith('__iregex'):
try:
re.compile(value)
except re.error, e:
raise ValueError(e.args[0])
return value
else:
value = self.value_to_python_for_field(field, value)
return value
def filter_queryset(self, request, queryset, view):
# this is a hack to allow aliases on user names when
# filtering on owner__username. QUERY_PARAMS is supposed
# to be an alias for GET, however they appear to be distinct
# objects internally, and since there is no setter for the
# QUERY_PARAMS version we use GET instead directly
if 'owner__username' in request.GET:
try:
# try and lookup the user first, to see if it exists
GalaxyUser.objects.get(username=request.GET['owner__username'])
except ObjectDoesNotExist, e:
# if not, check to see if there's an alias for it
try:
alias_obj = UserAlias.objects.get(alias_name=request.GET['owner__username'])
# and override that query parameter with the actual username
qp = request.GET.copy()
qp['owner__username'] = alias_obj.alias_of.username
# Again, we use GET here because QUERY_PARAMS has no
# setter function, so trying to do so here results in
# an error. Furthermore, even when GET is set to the
# new QueryDict object, QUERY_PARAMS remains unchanged,
# meaning we have to use GET everywhere to ensure the
# same object is being used with the overridden param.
# This may be fixed in later DRF versions?
request.GET = qp
except Exception, e:
# if not, we don't care, the later filtering
# means an empty set will be returned for this
pass
try:
# Apply filters specified via GET/QUERY_PARAMS. Each
# entry in the lists below is (negate, field, value).
and_filters = []
or_filters = []
chain_filters = []
for key, values in request.GET.lists():
if key in self.RESERVED_NAMES:
continue
# Custom __int filter suffix (internal use only).
q_int = False
if key.endswith('__int'):
key = key[:-5]
q_int = True
# Custom chained filter
q_chain = False
if key.startswith('chain__'):
key = key[7:]
q_chain = True
# Custom or__ filter prefix (or__ can precede not__).
q_or = False
if key.startswith('or__'):
key = key[4:]
q_or = True
# Custom not__ filter prefix.
q_not = False
if key.startswith('not__'):
key = key[5:]
q_not = True
# Convert value(s) to python and add to the appropriate list.
for value in values:
if q_int:
value = int(value)
value = self.value_to_python(queryset.model, key, value)
if q_chain:
chain_filters.append((q_not, key, value))
elif q_or:
or_filters.append((q_not, key, value))
else:
and_filters.append((q_not, key, value))
# Now build Q objects for database query filter.
if and_filters or or_filters or chain_filters:
args = []
for n, k, v in and_filters:
if n:
args.append(~Q(**{k: v}))
else:
args.append(Q(**{k: v}))
if or_filters:
q = Q()
for n, k, v in or_filters:
if n:
q |= ~Q(**{k: v})
else:
q |= Q(**{k: v})
args.append(q)
queryset = queryset.filter(*args)
for n, k, v in chain_filters:
if n:
q = ~Q(**{k: v})
else:
q = Q(**{k: v})
queryset = queryset.filter(q)
return queryset.distinct()
except (FieldError, FieldDoesNotExist, ValueError), e:
raise ParseError(e.args[0])
except ValidationError, e:
raise ParseError(e.messages)
class OrderByBackend(BaseFilterBackend):
'''
Filter to apply ordering based on query string parameters.
'''
def filter_queryset(self, request, queryset, view):
try:
order_by = None
for key, value in request.GET.items():
if key in ('order', 'order_by'):
if ',' in value:
order_by = value.split(',')
else:
order_by = (value,)
if order_by:
queryset = queryset.order_by(*order_by)
# Fetch the first result to run the query, otherwise we don't
# always catch the FieldError for invalid field names.
try:
queryset[0]
except IndexError:
pass
return queryset
except FieldError, e:
# Return a 400 for invalid field names.
raise ParseError(*e.args)
class HaystackFilter(HaystackFilter):
def filter_queryset(self, request, queryset, view):
qs = super(HaystackFilter, self).filter_queryset(request, queryset, view)
try:
order_by = None
for key, value in request.GET.items():
if key in ('order', 'order_by'):
if ',' in value:
order_by = value.split(',')
else:
order_by = (value,)
if order_by:
qs = qs.order_by(*order_by)
return qs
except FieldError, e:
# Return a 400 for invalid field names.
raise ParseError(*e.args)
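# Hedged illustration of the query-string handling implemented above; the endpoint
# and field names are placeholders, not part of this module:
#     GET /api/v1/roles/?name__icontains=nginx&or__download_count__gte=100&not__active=true&order_by=-created
# Non-reserved keys become Q-object filters in FieldLookupBackend (plain keys are
# ANDed, or__-prefixed keys are ORed together as one group, not__ negates, and
# chain__ applies each filter in a separate .filter() call), while order/order_by
# are reserved and handled by OrderByBackend.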
| 39.106007
| 96
| 0.542514
|
0a011139b947d9b233edbd38b9487dff9a677e03
| 2,313
|
py
|
Python
|
src/foxdot/release/what_there_is/the_ashes.vr_001.py
|
Neko250/aisthesis
|
1d4a2c3070d10596c28b25ea2170523583e7eff0
|
[
"Apache-2.0"
] | 4
|
2018-06-29T18:39:34.000Z
|
2021-06-20T16:44:29.000Z
|
src/foxdot/release/what_there_is/the_ashes.vr_001.py
|
Neko250/aisthesis
|
1d4a2c3070d10596c28b25ea2170523583e7eff0
|
[
"Apache-2.0"
] | null | null | null |
src/foxdot/release/what_there_is/the_ashes.vr_001.py
|
Neko250/aisthesis
|
1d4a2c3070d10596c28b25ea2170523583e7eff0
|
[
"Apache-2.0"
] | null | null | null |
Scale.default = Scale.minor
Root.default = 0
Clock.bpm = 80
chords = P[5,0,3,0]
var.ch1 = var(chords,1)
var.ch2 = var(chords,2)
var.ch4 = var(chords,4)
var.ch8 = var(chords,8)
s1 >> charm(var.ch1, amp=.8, dur=PDur(3,8)*(1,2), chop=2, lpf=800, oct=4, formant=.25, room=1, mix=.25).spread()
s2 >> ambi(chords, amp=.8, dur=4, oct=(3,4), room=1, mix=.5)
s3 >> glass(chords, amp=.8, dur=4, oct=(2,3), room=1, mix=.5, lpf=1200)
p1 >> play('(n-)---', amp=.2, dur=.25, room=1, sample=0, formant=linvar([-.25,-1],16))
p2 >> play('v', amp=.25, dur=PDur(3,16)*4, dist=.1, room=1, rate=.25)
p1 >> play('(n-)---', amp=.2, dur=.25, room=1, sample=0, formant=linvar([-.25,-1],16)).often('stutter', 4, dur=3/2)
s4 >> noise([0], amp=PWhite(0,.4), dur=1, room=1, mix=.5, chop=PRand([1,2,4])*2, oct=4)
s4 >> noise(var.ch4, amp=PWhite(0,.4), dur=1, room=1, mix=.5, chop=PRand([1,2,3,4])*2, oct=4)
p3 >> play('V', amp=.4, dur=1, rate=.5, room=linvar([0,1],16), mix=linvar([0,.5],12), lpf=800)
s3.stop() # glass
s2.stop() # ambi
s1.every(4, 'degrade')
Clock.future(16, s1.stop) # charm
p3.stop() # kick
s4.stop() # noise
s5 >> bass(var.ch2, amp=.4, dur=PDur(3,8)*(1,2), dist=.05, oct=(4,5))
p1 >> play('(n-)---', amp=.5, dur=.25, room=1, sample=0, formant=linvar([-.25,-1],16), pan=PWhite(-1,1)).often('stutter', 4, dur=3/2)
p3 >> play('V', amp=.4, dur=1, rate=.5, room=linvar([0,1],16), mix=linvar([0,.5],12), lpf=800)
p4 >> play('i', amp=.4, dur=2, delay=1, sample=2, room=1, mix=[.1,.1,.1,.5], lpf=1500)
s2 >> klank(chords, amp=.8, dur=4, oct=(3,4), room=1, mix=.5)
s2 >> klank(chords, amp=.5, dur=4, oct=(3,4), room=1, mix=.5, formant=1)
s3 >> glass(chords, amp=.8, dur=4, oct=(2,3), room=1, mix=.5, lpf=1200)
s1 >> charm(var.ch1, amp=.8, dur=PDur(3,8)*(1,2), chop=2, lpf=800, oct=4, formant=.25, room=1, mix=.25).spread()
s5 >> bass(var.ch2, amp=.4, dur=PDur(3,8)*(1,2), oct=(4,5))
s1 >> charm(var.ch1, amp=.8, dur=PDur(3,8)*(1,2), chop=2, lpf=800, oct=4, formant=.25, room=1, mix=.25).spread().penta(0)
s5.stop() # bass
p3.stop() # kick
s2.stop() # klank
p4 >> play('i', amp=.4, dur=4, delay=1, sample=2, room=1, mix=[.1,.5], lpf=1500)
s1.every(4, 'degrade')
Clock.future(16, s1.stop) # charm
####
p1.stop() # hihat
s3.stop() # glass
p2.stop() # glitch
p4.stop() # snare
####
Clock.clear()
| 31.684932
| 133
| 0.573714
|
a09950f1cf1f8782d0a71cd639b762a87beb93a2
| 5,657
|
py
|
Python
|
qiskit/tools/jupyter/job_widgets.py
|
tareqdandachi/qiskit-terra
|
5221fe330adba5529bfa22dc25262ac8e6291aaf
|
[
"Apache-2.0"
] | 3
|
2019-05-19T17:39:38.000Z
|
2020-01-28T19:59:18.000Z
|
qiskit/tools/jupyter/job_widgets.py
|
tareqdandachi/qiskit-terra
|
5221fe330adba5529bfa22dc25262ac8e6291aaf
|
[
"Apache-2.0"
] | 4
|
2019-05-13T15:28:46.000Z
|
2019-12-19T20:47:02.000Z
|
qiskit/tools/jupyter/job_widgets.py
|
tareqdandachi/qiskit-terra
|
5221fe330adba5529bfa22dc25262ac8e6291aaf
|
[
"Apache-2.0"
] | 1
|
2021-07-07T16:55:41.000Z
|
2021-07-07T16:55:41.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A module of widgets for job tracking"""
import ipywidgets as widgets
from IPython.display import display, Javascript
def make_clear_button(watcher):
"""Makes the clear button
Args:
watcher (widget): The watcher widget instance.
Returns:
widget: The clear button widget.
"""
clear = widgets.Button(
description='Clear',
button_style='primary',
layout=widgets.Layout(width='70px',
grid_area='right',
padding="0px 0px 0px 0px"))
def on_clear_button_clicked(_):
watcher.clear_done()
clear.on_click(on_clear_button_clicked)
clear_button = widgets.GridBox(children=[clear],
layout=widgets.Layout(
width='100%',
grid_template_columns='20% 20% 20% 20% 20%',
grid_template_areas='''
". . . . right "
'''))
return clear_button
def make_labels():
"""Makes the labels widget.
Returns:
widget: The labels widget.
"""
labels0 = widgets.HTML(value="<h5>Job ID</h5>",
layout=widgets.Layout(width='190px'))
labels1 = widgets.HTML(value='<h5>Backend</h5>',
layout=widgets.Layout(width='145px'))
labels2 = widgets.HTML(value='<h5>Status</h5>',
layout=widgets.Layout(width='95px'))
labels3 = widgets.HTML(value='<h5>Queue</h5>',
layout=widgets.Layout(width='70px'))
labels4 = widgets.HTML(value='<h5>Message</h5>')
labels = widgets.HBox(children=[labels0, labels1, labels2, labels3, labels4],
layout=widgets.Layout(width='600px',
margin='0px 0px 0px 35px'))
return labels
def create_job_widget(watcher, job, backend, status='', queue_pos=None, msg=''):
"""Creates a widget corresponding to a particular job instance.
Args:
watcher (widget): The job watcher instance.
job (IBMQJob): The job.
backend (str): The backend the job is running on.
status (str): The job status.
queue_pos (int): Queue position, if any.
msg (str): Job message, if any.
Returns:
widget: The job widget
"""
job_id = job.job_id()
id_label = widgets.HTML(value="{}".format(job_id),
layout=widgets.Layout(width='190px'))
backend_label = widgets.HTML(value="{}".format(backend),
layout=widgets.Layout(width='145px'))
status_label = widgets.HTML(value="{}".format(status),
layout=widgets.Layout(width='95px'))
if queue_pos is None:
queue_pos = '-'
else:
queue_pos = str(queue_pos)
queue_label = widgets.HTML(value="{}".format(queue_pos),
layout=widgets.Layout(width='70px'))
msg_label = widgets.HTML(value="<p style=white-space:nowrap;>{}</p>".format(msg),
layout=widgets.Layout(overflow_x='scroll'))
close_button = widgets.Button(button_style='', icon='close',
layout=widgets.Layout(width='30px',
margin="0px 5px 0px 0px"))
close_button.style.button_color = 'white'
def cancel_on_click(_):
watcher.cancel_job(job_id)
close_button.on_click(cancel_on_click)
job_grid = widgets.HBox(children=[close_button, id_label, backend_label,
status_label, queue_label, msg_label],
layout=widgets.Layout(min_width='700px',
max_width='700px'))
job_grid.job_id = job_id
job_grid.job = job
return job_grid
def build_job_viewer():
"""Builds the job viewer widget
Returns:
widget: Job viewer.
"""
acc = widgets.Accordion(children=[widgets.VBox(layout=widgets.Layout(max_width='710px',
min_width='710px'))],
layout=widgets.Layout(width='auto',
max_width='750px',
max_height='500px',
overflow_y='scroll',
overflow_x='hidden'))
acc.set_title(0, 'IBMQ Jobs')
acc.selected_index = None
acc.layout.visibility = 'hidden'
display(acc)
acc._dom_classes = ['job_widget']
display(Javascript("""$('div.job_widget')
.detach()
.appendTo($('#header'))
.css({
'z-index': 999,
'position': 'fixed',
'box-shadow': '5px 5px 5px -3px black',
'opacity': 0.95,
'float': 'left,'
})
"""))
acc.layout.visibility = 'visible'
return acc
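# --- Hedged usage sketch (not part of the original module) ---
# Typical notebook-side wiring of the builders above; `watcher` and `job` are
# placeholders for the job-watcher and IBMQJob instances referenced in the
# docstrings and are not defined here, so the lines stay commented out.
# viewer = build_job_viewer()
# clear_button = make_clear_button(watcher)
# labels = make_labels()
# job_row = create_job_widget(watcher, job, backend='ibmq_qasm_simulator',
#                             status='QUEUED', queue_pos=3)
# viewer.children[0].children = (clear_button, labels, job_row)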
| 36.496774
| 94
| 0.53067
|
dab8ce1a09bdb9cdbbdaa2b76e15e41ad830863b
| 155
|
py
|
Python
|
delivery/tests/conftest.py
|
raiizoor/Python
|
9c1184cae21eb725a6ee2f0881e93527c888839a
|
[
"Unlicense"
] | 1
|
2020-07-21T05:17:20.000Z
|
2020-07-21T05:17:20.000Z
|
delivery/tests/conftest.py
|
es99/curso-flask
|
03446ccc89894a2c4f2d1c14d0040e80d2226d9e
|
[
"Unlicense"
] | 3
|
2021-02-11T02:31:31.000Z
|
2021-12-29T22:13:18.000Z
|
delivery/tests/conftest.py
|
es99/curso-flask
|
03446ccc89894a2c4f2d1c14d0040e80d2226d9e
|
[
"Unlicense"
] | 1
|
2021-01-20T20:23:12.000Z
|
2021-01-20T20:23:12.000Z
|
import pytest
from delivery.app import create_app
@pytest.fixture(scope="module")
def app():
"""Instance of Main flask app"""
return create_app()
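# Illustrative sketch of how a test module could consume the module-scoped
# `app` fixture above, assuming the returned object is a standard Flask app.
# The route used here is hypothetical; it only demonstrates the test_client
# pattern.
def test_app_responds(app):
    client = app.test_client()
    response = client.get("/")  # hypothetical route, adjust to the real app
    assert response.status_code in (200, 302, 404)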
| 19.375
| 36
| 0.716129
|
7fe4e9bb1eb8539840bd649bbc7be25b9b6c2ea5
| 44,094
|
py
|
Python
|
me2ushop/models.py
|
Dogechi/Me2U
|
0852600983dc1058ee347f4065ee801e16c1249e
|
[
"MIT"
] | null | null | null |
me2ushop/models.py
|
Dogechi/Me2U
|
0852600983dc1058ee347f4065ee801e16c1249e
|
[
"MIT"
] | 9
|
2020-06-06T01:16:25.000Z
|
2021-06-04T23:20:37.000Z
|
me2ushop/models.py
|
Dogechi/Me2U
|
0852600983dc1058ee347f4065ee801e16c1249e
|
[
"MIT"
] | null | null | null |
import collections
import decimal
import itertools
from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
from django.conf import settings
from django.core.cache import cache
from django.core.validators import MinValueValidator
from django.db import models
from django.shortcuts import reverse
from django.utils.text import slugify
from django_countries.fields import CountryField
from stdimage import StdImageField
from tagging.registry import register
from categories.models import Department
from users.models import STATUSES, UNDER_REVIEW
from utils.models import CreationModificationDateMixin
from djangorave.models import DRPaymentTypeModel
from django.utils.safestring import mark_safe
CATEGORY_CHOICES = (
('At', 'Arts, Crafts'),
('Bk', 'Books'),
('Bb', 'Baby Care'),
('Be', 'Beautiful 2'),
('Ca', 'Camera & Photo'),
('S', 'Shirt'),
('Sw', 'Sport wear'),
('Ow', 'Outwear'),
('Am', 'Automotive & Motorcycle'),
('Ca', 'Cell Phones & Accessories'),
('El', 'Electronics'),
('Fa', 'Fashion'),
('Fu', 'Furniture'),
('So', 'Sokoni'),
('Wo', 'Women Fashion')
)
LABEL_CHOICES = (
('P', 'primary'),
('S', 'secondary'),
('D', 'danger'),
)
ADDRESS_CHOICES = (
('B', 'Billing'),
('S', 'Shipping')
)
PAYMENT_CHOICES = {
('M', "M-Pesa"),
('P', "Paypal"),
('S', "Stripe"),
}
SUBSCRIPTION_TYPE_CHOICE = (
('Fr', 'Free'),
('St', 'Standard'),
('Bs', 'Basic'),
('Pr', 'Premium')
)
BUSINESS_TYPE_CHOICE = (
('Co', 'Company'),
('Sol', 'Sole Proprietorship/Personal')
)
CONDITION_CHOICES = {
('N', "New"),
('R', "Refurbished"),
('U', "Used"),
}
SHIPPING_CAPABILITY = (
('Cd', 'Can Ship Abroad and Deliver Locally'),
('Cl', 'Can Deliver Locally(Within your country)'),
('Co', 'Not Able to Deliver')
)
from django_countries import Countries
class AfrikanCountries(Countries):
only = [
'DZ', 'AO', 'BJ', 'BW', 'BF', 'BI', 'CM', 'CV', 'CF', 'TD',
'KM', 'CG', 'CD', 'CI', 'DJ', 'EG', 'GQ', 'ER', 'ET', 'GA',
'GM', 'GH', 'GN', 'GW', 'KE', 'LS', 'LR', 'LY', 'MG', 'ML',
'MW', 'MR', 'MU', 'YT', 'MA', 'MZ', 'NA', 'NE', 'NG', 'RE',
'RW', 'ST', 'SN', 'SC', 'SL', 'SO', 'ZA', 'SS', 'SD', 'SZ',
'TZ', 'TG', 'TN', 'UG', 'EH', 'ZM', 'ZW'
]
class ContactSupplier(CreationModificationDateMixin):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True)
brand = models.ForeignKey('Brand', on_delete=models.SET_NULL, blank=True, null=True)
product = models.ForeignKey('Product', on_delete=models.SET_NULL, blank=True, null=True)
message = models.TextField()
def __str__(self):
return self.user.username
class ActiveBrandManager(models.Manager):
def all(self):
return super(ActiveBrandManager, self).all().filter(is_active=True)
class Brand(CreationModificationDateMixin):
profile = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True)
slug = models.SlugField(default='',
editable=False,
blank=True,
null=True,
max_length=255)
title = models.CharField(max_length=200, unique=True, help_text='Unique business title to identify Your store and '
'your product line')
website_link = models.CharField(max_length=255, blank=True, null=True,
help_text='If you have a website by which buyers can find out more about your '
'services.e.g. https://www.facebook.com')
facebook = models.CharField(max_length=255, blank=True, null=True,
help_text='Do you have a facebook page. Copy paste your page link here '
'e.g..https://www.facebook.com/Me2UAfrika')
instagram = models.CharField(max_length=255, blank=True, null=True,
help_text='Do you have an Instagram page? Copy and paste your page link here '
'eg..https://www.instagram.com/me2u_afrika/')
twitter = models.CharField(max_length=255, blank=True, null=True,
help_text='Do you have a Telegram Channel. Copy paste your page link here. '
'e.g..https://t.me/me2uafrika')
business_phone = models.CharField(max_length=20, blank=True, null=True,
help_text='Business phone number, e.g. +250785....')
business_email = models.EmailField(blank=True, null=True, max_length=254,
help_text='Business email address')
contact_person = models.CharField(max_length=255, blank=True, null=True,
help_text="Contact person name who will receive inquiries")
business_description = models.TextField(help_text="Tell us what you do and the kind of products you sell")
business_type = models.CharField(choices=BUSINESS_TYPE_CHOICE, max_length=4)
country = CountryField(multiple=False)
address1 = models.CharField(max_length=100, blank=True, null=True)
address2 = models.CharField(max_length=100, blank=True, null=True)
zip_code = models.CharField(max_length=12, blank=True, null=True)
subscription_plan = models.ForeignKey(DRPaymentTypeModel, blank=True, null=True, on_delete=models.SET_NULL,
help_text='Select a monthly recurring subscription fees')
# subscription_type = models.CharField(max_length=2, choices=SUBSCRIPTION_TYPE_CHOICE,
# help_text='Select a monthly recurring subscription fees')
subscription_reference = models.CharField(max_length=200, blank=True, null=True)
subscription_status = models.BooleanField(default=True, blank=True, null=True)
# shipping_status = models.CharField(choices=SHIPPING_CAPABILITY, max_length=2, default='Cd', blank=True, null=True,
# help_text='Is Your company able to ship or deliver your products once they '
# 'buyers order online?')
valid_payment_method = models.BooleanField(default=False, null=True, blank=True)
is_active = models.BooleanField(editable=False, default=True)
is_featured = models.BooleanField(default=False, blank=True, null=True)
image = StdImageField(upload_to='images/brands/brand_background', blank=True, null=True,
help_text='Wallpaper for your store. Leave blank if you don\'t have one',
default='images/brands/brand_background/default.jpg', variations={
'large': (415, 470,), }, delete_orphans=True)
logo = StdImageField(upload_to='images/brands/brand_logo', blank=True, null=True,
help_text='logo for your store, Leave blank if you don\'t have one',
variations={
'medium': (150, 150, True),
}, delete_orphans=True)
application_status = models.IntegerField(choices=STATUSES, default=UNDER_REVIEW, blank=True, null=True)
def __str__(self):
return str(self.title)
def get_absolute_url(self):
return reverse('sellers:seller_home', kwargs={'slug': self.slug})
def get_backstore_url(self):
return reverse('sellers:seller_home', kwargs={'slug': self.slug})
def get_frontstore_url(self):
return reverse('me2ushop:seller_page', kwargs={'slug': self.slug})
def get_brandupdate_url(self):
return reverse('me2ushop:brand_update', kwargs={'pk': self.pk})
def _generate_slug(self):
value = self.title
slug_original = slugify(value, allow_unicode=True)
slug_candidate = '{}'.format(slug_original)
return slug_candidate
def save(self, *args, **kwargs):
if not self.pk or self.slug == '':
self.slug = self._generate_slug()
if self.subscription_status and 10 < self.application_status < 40:
self.is_active = True
else:
self.is_active = False
if self.product_set.all():
for product in self.product_set.all():
product.save()
cache.delete('brand-%s' % self.slug)
super().save(*args, **kwargs)
class ActiveProductManager(models.Manager):
def all(self):
return super(ActiveProductManager, self).all().filter(is_active=True, not_active=False).prefetch_related(
'productimage_set')
class ProductManager(models.Manager):
def get_by_natural_key(self, slug):
return self.get(slug=slug)
class Unitofmeasure(CreationModificationDateMixin):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Product(CreationModificationDateMixin):
title = models.CharField(max_length=300)
slug = models.SlugField(unique=True,
default='',
editable=False,
max_length=300
)
brand_name = models.ForeignKey('Brand', on_delete=models.SET_NULL, blank=True, null=True,
help_text='Your store name')
stock = models.IntegerField(default=1)
unit_measure = models.ForeignKey(Unitofmeasure, blank=True, null=True, on_delete=models.SET_NULL)
view_count = models.IntegerField(default=0, editable=False)
sku = models.CharField(max_length=120, default='',
editable=False, )
in_stock = models.BooleanField(default=True, editable=False)
min_amount = models.IntegerField(default=1, blank=True, help_text="What is the minimum order units required for "
"this item?")
max_amount = models.IntegerField(blank=True, null=True, help_text="What is the maximum order units required for "
"this item?")
price = models.DecimalField(max_digits=9, decimal_places=2, default=0,
help_text="What is the price of one piece of this item? "
"Please note that the default currency is "
"USD. Convert your product price to "
"US dollar before listing.")
discount_price = models.DecimalField(max_digits=9, decimal_places=2, validators=[MinValueValidator(1)],
blank=True, null=True,
help_text="Please note that the default currency is "
"USD. Convert your product price to "
"US Dollar before listing")
is_active = models.BooleanField(default=True, editable=False)
not_active = models.BooleanField(default=False)
is_bestseller = models.BooleanField(default=False)
is_featured = models.BooleanField(default=False)
is_bestrated = models.BooleanField(default=False)
description = RichTextField(max_length=400, config_name='Special')
additional_information = RichTextUploadingField(blank=True, null=True,
help_text='Provide additional information about '
'your product. Buyers mostly buy from'
' well detailed products and '
'specifications')
shipping_status = models.CharField(choices=SHIPPING_CAPABILITY, max_length=2, default='Cd', blank=True, null=True,
help_text='Is your company able to ship or deliver this product once '
'buyers order it online?')
meta_keywords = models.CharField("Meta Keywords",
max_length=100,
help_text='Comma-delimited set of SEO keywords that summarize the type of '
'product above (max 4 words). These keywords will help buyers find your '
'product easily. Read more https://ads.google.com/home/tools/keyword'
'-planner/')
meta_description = models.CharField("Meta Description",
max_length=255,
help_text='Give a simple short '
'description of the information you have provided on this '
'page, e.g. This product is used '
'for cleaning and cooking and it was recently released by its '
'manufacturer. Google uses these keywords and description to index '
'your product for it to be found easily '
)
product_categories = models.ManyToManyField(Department,
help_text='Check the box of the category where your product belongs. '
'Please note that different categories attract different Ad '
'charges. Be specific to one or two categories where your '
'product '
'belongs on the provided tree. Contact us for help')
objects = ProductManager()
active = ActiveProductManager()
def __str__(self):
return str(self.title)
def natural_key(self):
return (self.slug,)
class Meta:
db_table = 'Products'
ordering = ['-modified']
verbose_name_plural = 'Products'
def get_category(self):
pass
def sale_price(self):
if self.discount_price:
return self.discount_price
else:
return self.price
def total_items_ordered(self):
orders = self.orderitem_set.all()
total = 0
for order_item in orders:
if order_item.ordered:
total += order_item.quantity
return total
def total_discount(self):
# orders = self.orderitem_set.all()
if self.discount_price and self.price > 0:
diff = ((self.price - self.discount_price) / self.price) * 100
return round(diff)
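# Worked example (illustrative, not from the source): with price=50.00 and
# discount_price=40.00, diff = ((50 - 40) / 50) * 100 = 20.0, so
# total_discount() returns 20, i.e. a 20% markdown.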
def get_absolute_url(self):
return reverse('me2ushop:product', kwargs={'slug': self.slug})
def get_add_cart_url(self):
return reverse('me2ushop:add_cart', kwargs={'slug': self.slug})
def get_images(self):
return self.productimage_set.all()
def get_image_in_display(self):
image = self.productimage_set.filter(in_display=True)
if image:
return image[0]
else:
return self.get_images()[0]
def image_tag(self):
image = self.get_image_in_display()
if image:
return mark_safe('<img src="{}" height="50"/>'.format(image.image.thumbnail.url))
else:
return ""
def get_remove_cart_url(self):
return reverse('me2ushop:remove_cart', kwargs={'slug': self.slug})
def get_order_summary_url(self):
return reverse('me2ushop:order_summary')
def cross_sells(self):
orders = Order.objects.filter(items__item=self)
order_items = OrderItem.objects.filter(order__in=orders).exclude(item=self)
products = Product.active.filter(orderitem__in=order_items).filter().distinct()
return products
def cross_sells_user(self):
from users.models import User
users = User.objects.filter(order__items__item=self)
items = OrderItem.objects.filter(order__user__in=users).exclude(item=self)
products = Product.active.filter(orderitem__in=items).distinct()
return products
def cross_sells_sellers(self):
from search.search import _prepare_words
from django.db.models import Q
category = self.product_categories
name = self.title
# print('category:', category)
# print('name:', name)
# print('other sellers:', users)
# print('self', self.slug)
words = _prepare_words(self.title)
# print('words:', words)
for word in words:
products = Product.active.filter(
Q(title__icontains=self) |
Q(title__startswith=self) |
Q(title__icontains=word) |
Q(title__startswith=word) |
Q(title__endswith=self) |
Q(title__endswith=word)
).exclude(slug=self.slug)
# print('from sellers', products)
return products
def cross_sells_hybrid(self):
from users.models import User
from django.db.models import Q
orders = Order.objects.filter(items__item=self)
users = User.objects.filter(order__items__item=self)
items = OrderItem.objects.filter(Q(order__user__in=users) |
Q(order__in=orders)
).exclude(item=self)
products = Product.active.filter(orderitem__in=items)
matching = []
for product in products:
matching.append(product)
most_products = []
c = collections.Counter(matching)
for product, count in c.most_common(3):
most_products.append(product)
return most_products
def _generate_slug(self):
value = self.title
slug_candidate = slug_original = slugify(value, allow_unicode=True)
for i in itertools.count(1):
if not Product.objects.filter(slug=slug_candidate).exists():
break
slug_candidate = '{}-{}'.format(slug_original, i)
return slug_candidate
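# Illustrative example (hypothetical titles): if two products share the title
# "Blue Mug", the first save produces slug "blue-mug" and the loop above gives
# later ones "blue-mug-1", "blue-mug-2", and so on until a free slug is found.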
def _generate_sku(self):
brand = str(self.brand_name)[:3]
title = str(self.title)[:3]
created = str(self.created)
sku = '{}-{}-{}'.format(brand, title, created)
for i in itertools.count(1):
if not Product.objects.filter(sku=sku).exists():
break
sku = '{}-{}-{}-{}'.format(brand, title, created, i)
return sku
def save(self, *args, **kwargs):
if not self.pk:
self.slug = self._generate_slug()
self.sku = self._generate_sku()
if self.shipping_status == '':
self.shipping_status = 'Cd'
self.in_stock = True
self.is_active = True
if self.stock < self.min_amount:
# print('we came to check stock')
self.in_stock = False
self.is_active = False
if self.brand_name:
if self.brand_name.is_active and self.in_stock:
self.is_active = True
else:
self.is_active = False
else:
self.is_active = False
image = self.productimage_set.filter(in_display=True)
# print('image', image.exists())
if image.exists() and self.is_active:
self.is_active = True
else:
self.is_active = False
if self.not_active:
self.is_active = False
if self.discount_price and self.price < 1 or self.discount_price == self.price:
self.price = self.discount_price
self.discount_price = None
if not self.is_active:
if self.banner_set.all():
for banner in self.banner_set.all():
banner.save()
cache.delete('product-%s' % self.slug)
super().save(*args, **kwargs)
# REGISTER PRODUCT MODEL AS A TAG
register(Product)
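# --- Illustrative sketch, not part of the models -----------------------------
# The same "most frequently co-purchased" idea used in
# Product.cross_sells_hybrid(), shown with plain Python lists instead of
# querysets; all names below are hypothetical.
def _top_co_purchased(product_lists, limit=3):
    """Count how often each product appears across related orders and return
    the `limit` most frequent ones."""
    counter = collections.Counter()
    for products in product_lists:
        counter.update(products)
    return [product for product, _count in counter.most_common(limit)]
# _top_co_purchased([["mug", "spoon"], ["mug"], ["spoon", "mug", "plate"]])
# -> ['mug', 'spoon', 'plate']
# ------------------------------------------------------------------------------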
class ProductCustomizations(CreationModificationDateMixin):
product = models.ForeignKey(Product, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
min_order = models.IntegerField(default=1)
max_order = models.IntegerField(blank=True, null=True)
customization_price = models.DecimalField(max_digits=9, blank=True, null=True, decimal_places=2)
customization_discount_price = models.DecimalField(max_digits=9, blank=True, null=True, decimal_places=2)
def __str__(self):
return self.name
class DisplayImageManager(models.Manager):
def get_query_set(self):
return super(DisplayImageManager, self).get_query_set().filter(in_display=True)
class ProductImage(CreationModificationDateMixin):
item = models.ForeignKey(Product, on_delete=models.CASCADE)
title = models.CharField(max_length=255, blank=True, null=True, help_text="Image title")
image = StdImageField(upload_to='images/products', variations={
'thumbnail': (150, 150),
'large': (585, 585),
}, delete_orphans=True)
in_display = models.BooleanField(default=True)
objects = ProductManager()
displayed = DisplayImageManager()
class Meta:
ordering = ('-in_display',)
def __str__(self):
if self.title:
return str(self.title)
return str(self.item)
def natural_key(self):
return (self.item.slug,)
def get_absolute_url(self):
return reverse('me2ushop:product_images', kwargs={'slug': self.item.slug})
def image_tag(self):
if self.image:
return mark_safe('<img src="%s" height="80"/>' % self.image.thumbnail.url)
else:
return ""
# thumbnail_tag.short_description = "Thumbnail"
def save(self, *args, **kwargs):
cache.delete('productimage-%s' % self.pk)
super().save(*args, **kwargs)
#
# class VariationsManager(models.Manager):
# def all(self):
# return super(VariationsManager, self).filter(is_active=True)
#
# def sizes(self):
# return self.all().filter(variation_category='size')
#
# def colors(self):
# return self.all().filter(variation_category='color')
#
#
# VAR_CATEGORIES = (
# ('size', 'size'),
# ('color', 'color'),
# ('package', 'package'),
# )
#
# class VariationCategory(CreationModificationDateMixin):
# variation_name = models.CharField(max_length=200, unique=True)
#
# def __str__(self):
# return self.variation_name
#
#
# class Variation(CreationModificationDateMixin):
# product = models.ForeignKey(Product, on_delete=models.CASCADE, blank=True, null=True)
# title = models.CharField(max_length=100, blank=True, null=True,
# help_text="Title for this product variant")
# variation_category = models.ForeignKey(VariationCategory, on_delete=models.CASCADE)
# variation_value = models.CharField(max_length=200)
# price = models.DecimalField(max_digits=9, null=True, blank=True, decimal_places=2, default=0,
# help_text="Please note that the default currency is "
# "USD. Converty your product price to "
# "US dollar before listing")
#
# discount_price = models.DecimalField(max_digits=9, decimal_places=2, validators=[MinValueValidator(1)],
# blank=True, null=True,
# help_text="Please note that the default currency is "
# "USD. Converty your product price to "
# "US Dollar before listing")
# image = models.ForeignKey(ProductImage, on_delete=models.SET_NULL, blank=True, null=True)
#
# is_active = models.BooleanField(default=True)
#
# stock = models.IntegerField(default=1, blank=True, null=True)
#
# objects = VariationsManager()
#
# class Meta:
# unique_together = (
# ('variation_category', 'variation_value')
# )
#
# def __str__(self):
# return u'%s - %s' % (self.variation_category, self.variation_value,)
#
# def get_absolute_url(self):
# return reverse('me2ushop:product', kwargs={'slug': self.product.slug})
#
# def image_tag(self):
# if self.image:
# return mark_safe('<img src="{}" height="50"/>'.format(self.image.image.thumbnail.url))
# else:
# return ""
class Color(CreationModificationDateMixin):
name = models.CharField(max_length=20)
code = models.CharField(max_length=10, blank=True, null=True)
def __str__(self):
return self.name
class Size(CreationModificationDateMixin):
name = models.CharField(max_length=20)
code = models.CharField(max_length=10, blank=True, null=True)
def __str__(self):
return self.name
class ProductVariationsManager(models.Manager):
def all(self):
return super(ProductVariationsManager, self).all()
def active(self):
return self.all().filter(is_active=True)
class ProductVariationsQueryset(models.query.QuerySet):
def active(self):
return self.filter(is_active=True)
class ActiveProductVariationsManager(models.Manager):
def get_queryset(self):
return ProductVariationsQueryset(self.model, using=self._db).active()
class ProductVariations(CreationModificationDateMixin):
""" The ``ProductVariations`` model represents information unique to a
specific product. This is a generic design that can be used
to extend the information contained in the ``Product`` model with
specific, extra details.
"""
product = models.ForeignKey("Product", on_delete=models.CASCADE)
slug = models.SlugField(unique=True,
default='',
blank=True,
null=True,
editable=False,
max_length=300
)
title = models.CharField(max_length=100, blank=True, null=True,
help_text="Title for this product variant")
color = models.ForeignKey(Color, on_delete=models.CASCADE, blank=True, null=True,
help_text="Add if your product comes in different colors")
size = models.ForeignKey(Size, on_delete=models.CASCADE, blank=True, null=True,
help_text="Add if your product comes in different sizes")
sku = models.CharField(max_length=120, default='',
editable=False, )
in_stock = models.BooleanField(default=True, editable=False)
min_amount = models.IntegerField(default=1, blank=True, help_text="What is the minimum order units required for "
"this item?")
max_amount = models.IntegerField(blank=True, null=True, help_text="What is the maximum order units required for "
"this item?")
price = models.DecimalField(max_digits=9, null=True, blank=True, decimal_places=2, default=0,
help_text="If the above variables affect your original price, update price. "
"Note that the default currency is "
"USD. Convert your product price to US dollar before listing")
discount_price = models.DecimalField(max_digits=9, decimal_places=2, validators=[MinValueValidator(1)],
blank=True, null=True,
help_text="Note that the default currency is "
"USD. Convert your product price to "
"US Dollar before listing")
image = models.ForeignKey(ProductImage, on_delete=models.SET_NULL, blank=True, null=True)
stock = models.IntegerField(default=1, blank=True, null=True)
is_active = models.BooleanField(default=True, editable=False)
objects = ProductVariationsManager()
active = ActiveProductVariationsManager()
class Meta:
unique_together = ('product', 'size', 'color')
def __str__(self):
return u'%s - %s - %s' % (self.product, self.color, self.size,)
def get_absolute_url(self):
return reverse('me2ushop:product', kwargs={'slug': self.product.slug})
# def get_remove_cart_url(self):
# return reverse('me2ushop:remove_cart', kwargs={'slug': self.slug})
# def get_add_cart_url(self):
# return reverse('me2ushop:add_cart', kwargs={'slug': self.slug})
def get_image(self):
return self.image.image.thumbnail.url
def image_tag(self):
if self.image:
return mark_safe('<img src="{}" height="50"/>'.format(self.image.image.thumbnail.url))
else:
return ""
def sale_price(self):
if self.discount_price:
return self.discount_price
else:
return self.price
def _generate_sku(self):
variant = None
brand = str(self.product.brand_name)[:3]
title = str(self.product.title)[:3]
created = str(self.created)
if self.size and self.color:
variant = '{}-{}'.format(self.size, self.color)
elif self.size:
variant = '{}'.format(self.size)
elif self.color:
variant = '{}'.format(self.color)
sku = '{}-{}-{}-{}'.format(brand, title, created, variant)
for i in itertools.count(1):
if not ProductVariations.objects.filter(sku=sku).exists():
break
sku = '{}-{}-{}-{}-{}'.format(brand, title, created, variant, i)
return sku
def _generate_slug(self):
variant = self.product.title
if self.size and self.color:
variant = '{}-{}-{}'.format(self.product.title, self.size, self.color)
elif self.size:
variant = '{}-{}'.format(self.product.title, self.size)
elif self.color:
variant = '{}-{}'.format(self.product.title, self.color)
value = variant
slug_candidate = slug_original = slugify(value, allow_unicode=True)
for i in itertools.count(1):
if not Product.objects.filter(slug=slug_candidate).exists():
break
slug_candidate = '{}-{}'.format(slug_original, i)
return slug_candidate
def save(self, *args, **kwargs):
if not self.pk or not self.sku or not self.slug:
self.sku = self._generate_sku()
self.slug = self._generate_slug()
self.in_stock = True
if self.stock:
if self.stock < self.min_amount:
print('we came to check stock on product variant')
self.in_stock = False
self.is_active = False
else:
self.is_active = True
if self.discount_price and self.price < 1 or self.discount_price == self.price:
self.price = self.discount_price
self.discount_price = None
cache.delete('product-%s' % self.product.slug)
super().save(*args, **kwargs)
class ProductAttribute(CreationModificationDateMixin):
"""
The ``ProductAttribute`` model represents a class of feature found
across a set of products. It does not store any data values related to the attribute,
but only describes what kind of a product feature we are trying to capture. Possible attributes include things such
as materials, colors, sizes, and many, many more.
"""
name = models.CharField(max_length=300)
description = models.TextField(blank=True)
def __str__(self):
return u'%s' % self.name
class Rentals(CreationModificationDateMixin):
product = models.ForeignKey(Product, on_delete=models.CASCADE)
book_start = models.DateTimeField(auto_now_add=True, blank=True, null=True)
book_end = models.DateTimeField(auto_now_add=True, blank=True, null=True)
rental_price_day = models.DecimalField(max_digits=9, decimal_places=2)
discount_per_week = models.IntegerField(blank=True, null=True)
discount_per_month = models.IntegerField(blank=True, null=True)
def __str__(self):
return u'%s' % self.product
class WishList(CreationModificationDateMixin):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
def __str__(self):
return str(self.product.title)
class StatusCode(CreationModificationDateMixin):
"""
The StatusCode model represents the status of an order in the
system.
"""
NEW = 10
PAID = 20
PROCESSING = 30
SENT = 40
CANCELLED = 50
IN_TRANSIT = 60
DELIVERED = 70
STATUSES = ((NEW, "New"),
(PAID, "Paid"),
(PROCESSING, "Processing"),
(SENT, "Sent"),
(CANCELLED, "Cancelled"),
(IN_TRANSIT, "in_transit"),
(DELIVERED, "Delivered"),
)
short_name = models.IntegerField(choices=STATUSES, default=NEW)
name = models.CharField(max_length=300)
description = models.TextField()
def __str__(self):
return str(self.short_name)
class OrderItem(CreationModificationDateMixin):
NEW = 10
ACCEPTED = 20
SENT = 30
CANCELLED = 40
IN_TRANSIT = 45
DELIVERED = 50
STATUSES = ((NEW, "New"),
(ACCEPTED, "Accepted"),
(SENT, "Sent"),
(CANCELLED, "Cancelled"),
(IN_TRANSIT, "in_transit"),
(DELIVERED, "Delivered"),
)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True)
customer_order = models.ForeignKey('Order', blank=True, null=True, on_delete=models.SET_NULL)
cart_id = models.CharField(max_length=40, blank=True, null=True)
date_added = models.DateTimeField(auto_now_add=True)
date_ordered = models.DateTimeField(auto_now=True)
status = models.IntegerField(choices=STATUSES, default=NEW)
status_code = models.ForeignKey('StatusCode', on_delete=models.SET_NULL, blank=True, null=True)
order_received = models.BooleanField(default=False)
ordered = models.BooleanField(default=False)
item = models.ForeignKey(Product, on_delete=models.PROTECT, unique=False, blank=True, null=True)
variant = models.ForeignKey(ProductVariations, blank=True, null=True, on_delete=models.SET_NULL)
quantity = models.PositiveIntegerField(default=1, validators=[MinValueValidator(1)])
comments = models.TextField(blank=True)
delivered_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='dispatcher', on_delete=models.SET_NULL,
blank=True, null=True)
class Meta:
ordering = ['-date_added']
def __str__(self):
return f"{self.quantity} of {self.item.title}"
def get_absolute_url(self):
return reverse('users:order-details', kwargs={'order_id': self.id})
def get_total_price(self):
if self.variant:
return self.quantity * self.variant.price
else:
return self.quantity * self.item.price
def get_total_discount_price(self):
if self.variant:
return self.quantity * self.variant.discount_price
else:
return self.quantity * self.item.discount_price
def get_total_saved(self):
return self.get_total_price() - self.get_total_discount_price()
def get_final_price(self):
if self.variant:
if self.variant.discount_price:
return self.get_total_discount_price()
else:
return self.get_total_price()
else:
if self.item.discount_price:
return self.get_total_discount_price()
else:
return self.get_total_price()
def total_items_ordered(self):
# total = self.orderitem_set.all().count()
total = 0
for order_item in self.filter(ordered=True):
total += order_item.quantity
return total
@property
def mobile_thumb_url(self):
products = [self.item]
# print('products:', products)
if products:
img = products[0].productimage_set.first()
if img:
return img.image.thumbnail.url
@property
def summary(self):
pieces = ['%s x %s' % (self.quantity, self.item.title)]
return ",".join(pieces)
class Order(CreationModificationDateMixin):
NEW = 10
PAID = 20
DONE = 30
STATUSES = ((NEW, 'New'), (PAID, 'Paid'), (DONE, 'Done'))
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True,
db_index=True)
cart_id = models.CharField(max_length=40, blank=True, null=True)
status = models.IntegerField(choices=STATUSES, default=NEW)
status_code = models.ForeignKey('StatusCode', on_delete=models.SET_NULL, blank=True, null=True)
items = models.ManyToManyField('OrderItem')
ref_code = models.CharField(max_length=200)
start_date = models.DateTimeField(auto_now_add=True)
order_date = models.DateTimeField(auto_now=True)
ordered = models.BooleanField(default=False)
payment = models.CharField(max_length=2, blank=True, null=True)
coupon = models.ForeignKey('Coupon', on_delete=models.SET_NULL, blank=True, null=True)
being_delivered = models.BooleanField(default=False)
received = models.BooleanField(default=False)
refund_requested = models.BooleanField(blank=True, null=True)
refund_granted = models.BooleanField(blank=True, null=True)
email = models.EmailField(max_length=50, blank=True, null=True)
phone = models.CharField(max_length=20, blank=True, null=True)
name = models.CharField(max_length=60, blank=True, null=True)
billing_address1 = models.CharField(max_length=60)
billing_address2 = models.CharField(max_length=60, blank=True)
billing_zip_code = models.CharField(max_length=12)
billing_country = models.CharField(max_length=3)
billing_city = models.CharField(max_length=12, blank=True, null=True)
shipping_address1 = models.CharField(max_length=60)
shipping_address2 = models.CharField(max_length=60, blank=True)
shipping_zip_code = models.CharField(max_length=12)
shipping_country = models.CharField(max_length=3)
shipping_city = models.CharField(max_length=12, blank=True, null=True)
comments = models.TextField(blank=True)
last_spoken_to = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, related_name="cs_chats",
on_delete=models.SET_NULL)
class Meta:
ordering = ['-modified']
def __str__(self):
return str(self.id)
def get_absolute_url(self):
return reverse('users:order-details', kwargs={'order_id': self.id})
def get_coupon_total(self):
total = 0
for order_item in self.items.all():
total += order_item.get_final_price()
total -= self.coupon.amount
return total
def get_total(self):
total = 0
for order_item in self.items.all():
total += order_item.get_final_price()
return total
def total_items(self):
# total = self.orderitem_set.all().count()
total = 0
for order_item in self.items.all():
total += order_item.quantity
return total
def get_total_saved_coupon(self):
return self.get_total() - self.get_coupon_total()
@property
def mobile_thumb_url(self):
products = [i.item for i in self.items.all()]
# print('products:', products)
if products:
img = products[0].productimage_set.first()
if img:
return img.image.thumbnail.url
@property
def summary(self):
product_counts = self.items.values(
'quantity', 'item__title'
)
pieces = []
for pc in product_counts:
pieces.append(
'%s x %s' % (pc['quantity'], pc['item__title'])
)
return ",".join(pieces)
class ActiveProductReviewManager(models.Manager):
def all(self):
return super(ActiveProductReviewManager, self) \
.all().filter(is_approved=True)
class ProductReview(models.Model):
RATINGS = ((5, 5), (4, 4), (3, 3), (2, 2), (1, 1))
product = models.ForeignKey(Product, on_delete=models.CASCADE)
# ordered_by_user = models.ForeignKey(Order, ordered=True, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
title = models.CharField(max_length=50)
date = models.DateTimeField(auto_now_add=True)
rating = models.PositiveSmallIntegerField(default=5, choices=RATINGS)
is_approved = models.BooleanField(default=True)
content = models.TextField()
country = CountryField(multiple=False, blank=True, null=True)
objects = models.Manager()
approved = ActiveProductReviewManager()
def __str__(self):
return str(self.user.username)
def get_images(self):
return self.product.productimage_set.all()
class Address(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.CASCADE)
cart_id = models.CharField(max_length=40, blank=True, null=True)
street_address = models.CharField(max_length=100)
apartment_address = models.CharField(max_length=100)
country = CountryField(multiple=False)
city = models.CharField(max_length=60)
zip = models.CharField(max_length=10)
address_type = models.CharField(max_length=1, choices=ADDRESS_CHOICES)
payment_option = models.CharField(max_length=2, choices=PAYMENT_CHOICES)
default = models.BooleanField(default=False)
name = models.CharField(max_length=60, blank=True, null=True)
email = models.EmailField(max_length=50, blank=True, null=True)
phone = models.CharField(max_length=20, blank=True, null=True)
date_updated = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "%s, %s, %s, %s, %s" % (self.street_address, self.country, self.city, self.zip, self.phone)
class Meta:
verbose_name_plural = 'Addresses'
ordering = ['-date_updated']
class StripePayment(models.Model):
from stats.models import ProductView
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True)
cart_id = models.ForeignKey(ProductView, max_length=70, blank=True, null=True, on_delete=models.CASCADE)
stripe_charge_id = models.CharField(max_length=50)
amount = models.DecimalField(max_digits=9, decimal_places=2)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.stripe_charge_id
class Coupon(models.Model):
code = models.CharField(max_length=10)
amount = models.DecimalField(max_digits=9, decimal_places=2, default=20)
valid = models.BooleanField(default=True)
def __str__(self):
return str(self.code)
class RequestRefund(models.Model):
order = models.ForeignKey(Order, on_delete=models.CASCADE)
reason = models.TextField()
accepted = models.BooleanField(blank=True, null=True)
# email = models.EmailField()
ref_code = models.CharField(max_length=20)
def __str__(self):
return f"{self.pk}"
| 38.209705
| 121
| 0.608654
|
94c267fd288a2997cca3b6eabf2961f55ecc3863
| 655
|
py
|
Python
|
catalog/bindings/gmd/abstract_curve_segment_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/abstract_curve_segment_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/abstract_curve_segment_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class AbstractCurveSegmentType:
num_derivatives_at_start: int = field(
default=0,
metadata={
"name": "numDerivativesAtStart",
"type": "Attribute",
},
)
num_derivatives_at_end: int = field(
default=0,
metadata={
"name": "numDerivativesAtEnd",
"type": "Attribute",
},
)
num_derivative_interior: int = field(
default=0,
metadata={
"name": "numDerivativeInterior",
"type": "Attribute",
},
)
| 22.586207
| 44
| 0.548092
|
9c527fa820d2c49bd2542b669b26abd377c7d033
| 1,512
|
py
|
Python
|
apps/monitor/views/newsblur_feeds.py
|
louis-pre/NewsBlur
|
b4e9a56041ff187ef77b38dfd0778daf41b53f4f
|
[
"MIT"
] | 3,073
|
2015-01-01T07:20:18.000Z
|
2022-03-31T20:33:41.000Z
|
apps/monitor/views/newsblur_feeds.py
|
louis-pre/NewsBlur
|
b4e9a56041ff187ef77b38dfd0778daf41b53f4f
|
[
"MIT"
] | 1,054
|
2015-01-02T13:32:35.000Z
|
2022-03-30T04:21:21.000Z
|
apps/monitor/views/newsblur_feeds.py
|
louis-pre/NewsBlur
|
b4e9a56041ff187ef77b38dfd0778daf41b53f4f
|
[
"MIT"
] | 676
|
2015-01-03T16:40:29.000Z
|
2022-03-30T14:00:40.000Z
|
from django.views import View
from django.shortcuts import render
from apps.rss_feeds.models import Feed
from apps.reader.models import UserSubscription
from apps.social.models import MSocialProfile, MSocialSubscription
from apps.statistics.models import MStatistics
class Feeds(View):
def get(self, request):
feeds_count = MStatistics.get('munin:feeds_count')
if not feeds_count:
feeds_count = Feed.objects.all().count()
MStatistics.set('munin:feeds_count', feeds_count, 60*60*12)
subscriptions_count = MStatistics.get('munin:subscriptions_count')
if not subscriptions_count:
subscriptions_count = UserSubscription.objects.all().count()
MStatistics.set('munin:subscriptions_count', subscriptions_count, 60*60*12)
data = {
'feeds': feeds_count,
'subscriptions': subscriptions_count,
'profiles': MSocialProfile.objects._collection.count(),
'social_subscriptions': MSocialSubscription.objects._collection.count(),
}
chart_name = "feeds"
chart_type = "counter"
formatted_data = {}
for k, v in data.items():
formatted_data[k] = f'{chart_name}{{category="{k}"}} {v}'
context = {
"data": formatted_data,
"chart_name": chart_name,
"chart_type": chart_type,
}
return render(request, 'monitor/prometheus_data.html', context, content_type="text/plain")
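# Illustrative sketch (hypothetical numbers): the exposition lines the loop
# above builds for a sample data dict.
if __name__ == "__main__":
    sample = {"feeds": 12345, "subscriptions": 67890}
    for key, value in sample.items():
        print(f'feeds{{category="{key}"}} {value}')
    # prints:
    # feeds{category="feeds"} 12345
    # feeds{category="subscriptions"} 67890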
| 35.162791
| 98
| 0.651455
|
5ae2788d0eecd613e821cc8b8a91d6d65c92af05
| 155,951
|
py
|
Python
|
salt/states/file.py
|
lyft/salt
|
2715908423a412f736253d0e5d3cfe185a0179a2
|
[
"Apache-2.0"
] | 3
|
2015-04-16T18:42:35.000Z
|
2017-10-30T16:57:49.000Z
|
salt/states/file.py
|
lyft/salt
|
2715908423a412f736253d0e5d3cfe185a0179a2
|
[
"Apache-2.0"
] | 16
|
2015-11-18T00:44:03.000Z
|
2018-10-29T20:48:27.000Z
|
salt/states/file.py
|
lyft/salt
|
2715908423a412f736253d0e5d3cfe185a0179a2
|
[
"Apache-2.0"
] | 1
|
2017-01-27T21:33:36.000Z
|
2017-01-27T21:33:36.000Z
|
# -*- coding: utf-8 -*-
'''
Operations on regular files, special files, directories, and symlinks
=====================================================================
Salt States can aggressively manipulate files on a system. There are a number
of ways in which files can be managed.
Regular files can be enforced with the :mod:`file.managed
<salt.states.file.managed>` state. This state downloads files from the salt
master and places them on the target system. Managed files can be rendered as a
jinja, mako, or wempy template, adding a dynamic component to file management.
An example of :mod:`file.managed <salt.states.file.managed>` which makes use of
the jinja templating system would look like this:
.. code-block:: yaml
/etc/http/conf/http.conf:
file.managed:
- source: salt://apache/http.conf
- user: root
- group: root
- mode: 644
- template: jinja
- defaults:
custom_var: "default value"
other_var: 123
{% if grains['os'] == 'Ubuntu' %}
- context:
custom_var: "override"
{% endif %}
It is also possible to use the :mod:`py renderer <salt.renderers.py>` as a
templating option. The template would be a Python script which would need to
contain a function called ``run()``, which returns a string. All arguments
to the state will be made available to the Python script as globals. The
returned string will be the contents of the managed file. For example:
.. code-block:: python
def run():
lines = ['foo', 'bar', 'baz']
lines.extend([source, name, user, context]) # Arguments as globals
return '\\n\\n'.join(lines)
.. note::
The ``defaults`` and ``context`` arguments require extra indentation (four
spaces instead of the normal two) in order to create a nested dictionary.
:ref:`More information <nested-dict-indentation>`.
If using a template, any user-defined template variables in the file defined in
``source`` must be passed in using the ``defaults`` and/or ``context``
arguments. The general best practice is to place default values in
``defaults``, with conditional overrides going into ``context``, as seen above.
The template will receive a variable ``custom_var``, which would be accessed in
the template using ``{{ custom_var }}``. If the operating system is Ubuntu, the
value of the variable ``custom_var`` would be *override*; otherwise it falls
back to the default, *default value*.
The ``source`` parameter can be specified as a list. If this is done, then the
first file to be matched will be the one that is used. This allows you to have
a default file on which to fall back if the desired file does not exist on the
salt fileserver. Here's an example:
.. code-block:: yaml
/etc/foo.conf:
file.managed:
- source:
- salt://foo.conf.{{ grains['fqdn'] }}
- salt://foo.conf.fallback
- user: foo
- group: users
- mode: 644
- backup: minion
.. note::
Salt supports backing up managed files via the backup option. For more
details on this functionality please review the
:doc:`backup_mode documentation </ref/states/backup_mode>`.
The ``source`` parameter can also specify a file in another Salt environment.
In this example ``foo.conf`` in the ``dev`` environment will be used instead.
.. code-block:: yaml
/etc/foo.conf:
file.managed:
- source:
- salt://foo.conf?saltenv=dev
- user: foo
- group: users
- mode: '0644'
.. warning::
When using a mode that includes a leading zero you must wrap the
value in single quotes. If the value is not wrapped in quotes it
will be read by YAML as an integer and evaluated as an octal.
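For illustration, a YAML 1.1 loader such as PyYAML reads the two forms
differently (unquoted octal becomes an integer, quoted stays a string):
.. code-block:: python
>>> import yaml
>>> yaml.safe_load('mode: 0644')
{'mode': 420}
>>> yaml.safe_load("mode: '0644'")
{'mode': '0644'}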
Special files can be managed via the ``mknod`` function. This function will
create and enforce the permissions on a special file. The function supports the
creation of character devices, block devices, and fifo pipes. The function will
create the directory structure up to the special file if it is needed on the
minion. The function will not overwrite or operate on (change major/minor
numbers) existing special files with the exception of user, group, and
permissions. In most cases the creation of some special files requires root
permissions on the minion. This would require that the minion be run as the
root user. Here is an example of a character device:
.. code-block:: yaml
/var/named/chroot/dev/random:
file.mknod:
- ntype: c
- major: 1
- minor: 8
- user: named
- group: named
- mode: 660
Here is an example of a block device:
.. code-block:: yaml
/var/named/chroot/dev/loop0:
file.mknod:
- ntype: b
- major: 7
- minor: 0
- user: named
- group: named
- mode: 660
Here is an example of a fifo pipe:
.. code-block:: yaml
/var/named/chroot/var/log/logfifo:
file.mknod:
- ntype: p
- user: named
- group: named
- mode: 660
Directories can be managed via the ``directory`` function. This function can
create and enforce the permissions on a directory. A directory statement will
look like this:
.. code-block:: yaml
/srv/stuff/substuf:
file.directory:
- user: fred
- group: users
- mode: 755
- makedirs: True
If you need to enforce user and/or group ownership or permissions recursively
on the directory's contents, you can do so by adding a ``recurse`` directive:
.. code-block:: yaml
/srv/stuff/substuf:
file.directory:
- user: fred
- group: users
- mode: 755
- makedirs: True
- recurse:
- user
- group
- mode
By default, ``mode`` resolves to both ``dir_mode`` and ``file_mode``. To
specify directory and file permissions separately, use this form:
.. code-block:: yaml
/srv/stuff/substuf:
file.directory:
- user: fred
- group: users
- file_mode: 744
- dir_mode: 755
- makedirs: True
- recurse:
- user
- group
- mode
Symlinks can be easily created; the symlink function is very simple and only
takes a few arguments:
.. code-block:: yaml
/etc/grub.conf:
file.symlink:
- target: /boot/grub/grub.conf
Recursive directory management can also be set via the ``recurse``
function. Recursive directory management allows for a directory on the salt
master to be recursively copied down to the minion. This is a great tool for
deploying large code and configuration systems. A state using ``recurse``
would look something like this:
.. code-block:: yaml
/opt/code/flask:
file.recurse:
- source: salt://code/flask
- include_empty: True
A more complex ``recurse`` example:
.. code-block:: yaml
{% set site_user = 'testuser' %}
{% set site_name = 'test_site' %}
{% set project_name = 'test_proj' %}
{% set sites_dir = 'test_dir' %}
django-project:
file.recurse:
- name: {{ sites_dir }}/{{ site_name }}/{{ project_name }}
- user: {{ site_user }}
- dir_mode: 2775
- file_mode: '0644'
- template: jinja
- source: salt://project/templates_dir
- include_empty: True
'''
# Import python libs
from __future__ import absolute_import
import difflib
import itertools
import json
import logging
import os
import pprint
import shutil
import traceback
import yaml
from collections import Iterable, Mapping
# Import salt libs
import salt.payload
import salt.utils
import salt.utils.templates
import salt.utils.url
from salt.exceptions import CommandExecutionError
from salt.serializers import yaml as yaml_serializer
from salt.serializers import json as json_serializer
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import zip_longest
log = logging.getLogger(__name__)
COMMENT_REGEX = r'^([[:space:]]*){0}[[:space:]]?'
def _get_accumulator_filepath():
'''
Return accumulator data path.
'''
return os.path.join(salt.utils.get_accumulator_dir(__opts__['cachedir']),
__instance_id__)
def _load_accumulators():
def _deserialize(path):
serial = salt.payload.Serial(__opts__)
ret = {'accumulators': {}, 'accumulators_deps': {}}
try:
with salt.utils.fopen(path, 'rb') as f:
loaded = serial.load(f)
return loaded if loaded else ret
except (IOError, NameError):
# NameError is a msgpack error from salt-ssh
return ret
loaded = _deserialize(_get_accumulator_filepath())
return loaded['accumulators'], loaded['accumulators_deps']
def _persist_accummulators(accumulators, accumulators_deps):
accumm_data = {'accumulators': accumulators,
'accumulators_deps': accumulators_deps}
serial = salt.payload.Serial(__opts__)
try:
with salt.utils.fopen(_get_accumulator_filepath(), 'w+b') as f:
serial.dump(accumm_data, f)
except NameError:
# msgpack error from salt-ssh
pass
def _check_user(user, group):
'''
Checks if the named user and group are present on the minion
'''
err = ''
if user:
uid = __salt__['file.user_to_uid'](user)
if uid == '':
err += 'User {0} is not available '.format(user)
if group:
gid = __salt__['file.group_to_gid'](group)
if gid == '':
err += 'Group {0} is not available'.format(group)
return err
def _gen_keep_files(name, require, walk_d=None):
'''
Generate the list of files that need to be kept when a dir based function
like directory or recurse has a clean.
'''
def _is_child(path, directory):
'''
Check whether ``path`` is child of ``directory``
'''
path = os.path.abspath(path)
directory = os.path.abspath(directory)
relative = os.path.relpath(path, directory)
return not relative.startswith(os.pardir)
def _add_current_path(path):
_ret = set()
if os.path.isdir(path):
dirs, files = walk_d.get(path, ((), ()))
_ret.add(path)
for _name in files:
_ret.add(os.path.join(path, _name))
for _name in dirs:
_ret.add(os.path.join(path, _name))
return _ret
def _process_by_walk_d(name, ret):
if os.path.isdir(name):
walk_ret.update(_add_current_path(name))
dirs, _ = walk_d.get(name, ((), ()))
for _d in dirs:
p = os.path.join(name, _d)
walk_ret.update(_add_current_path(p))
_process_by_walk_d(p, ret)
def _process(name):
ret = set()
if os.path.isdir(name):
for root, dirs, files in os.walk(name):
ret.add(name)
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return ret
keep = set()
if isinstance(require, list):
required_files = [comp for comp in require if 'file' in comp]
for comp in required_files:
for low in __lowstate__:
if low['name'] == comp['file']:
fn = low['name']
if os.path.isdir(comp['file']):
if _is_child(comp['file'], name):
if walk_d:
walk_ret = set()
_process_by_walk_d(fn, walk_ret)
keep.update(walk_ret)
else:
keep.update(_process(fn))
else:
keep.add(fn)
return list(keep)
def _check_file(name):
ret = True
msg = ''
if not os.path.isabs(name):
ret = False
msg = 'Specified file {0} is not an absolute path'.format(name)
elif not os.path.exists(name):
ret = False
msg = '{0}: file not found'.format(name)
return ret, msg
def _clean_dir(root, keep, exclude_pat):
'''
Clean out all of the files and directories in a directory (root) while
preserving the files in a list (keep) and part of exclude_pat
'''
removed = set()
real_keep = set()
real_keep.add(root)
if isinstance(keep, list):
for fn_ in keep:
if not os.path.isabs(fn_):
continue
real_keep.add(fn_)
while True:
fn_ = os.path.dirname(fn_)
real_keep.add(fn_)
if fn_ in ['/', ''.join([os.path.splitdrive(fn_)[0], '\\'])]:
break
def _delete_not_kept(nfn):
if nfn not in real_keep:
# -- check if this is a part of exclude_pat(only). No need to
# check include_pat
if not salt.utils.check_include_exclude(
os.path.relpath(nfn, root), None, exclude_pat):
return
removed.add(nfn)
if not __opts__['test']:
try:
os.remove(nfn)
except OSError:
shutil.rmtree(nfn)
for roots, dirs, files in os.walk(root):
for name in itertools.chain(dirs, files):
_delete_not_kept(os.path.join(roots, name))
return list(removed)
def _error(ret, err_msg):
ret['result'] = False
ret['comment'] = err_msg
return ret
def _check_directory(name,
user,
group,
recurse,
mode,
clean,
require,
exclude_pat):
'''
Check what changes need to be made on a directory
'''
changes = {}
if recurse or clean:
walk_l = list(os.walk(name)) # walk path only once and store the result
# root: (dirs, files) structure, compatible for python2.6
walk_d = {}
for i in walk_l:
walk_d[i[0]] = (i[1], i[2])
if recurse:
if not set(['user', 'group', 'mode']) >= set(recurse):
return False, 'Types for "recurse" limited to "user", ' \
'"group" and "mode"'
if 'user' not in recurse:
user = None
if 'group' not in recurse:
group = None
if 'mode' not in recurse:
mode = None
for root, dirs, files in walk_l:
for fname in files:
fchange = {}
path = os.path.join(root, fname)
stats = __salt__['file.stats'](
path, None, follow_symlinks=False
)
if user is not None and user != stats.get('user'):
fchange['user'] = user
if group is not None and group != stats.get('group'):
fchange['group'] = group
if fchange:
changes[path] = fchange
for name_ in dirs:
path = os.path.join(root, name_)
fchange = _check_dir_meta(path, user, group, mode)
if fchange:
changes[path] = fchange
else:
fchange = _check_dir_meta(name, user, group, mode)
if fchange:
changes[name] = fchange
if clean:
keep = _gen_keep_files(name, require, walk_d)
def _check_changes(fname):
path = os.path.join(root, fname)
if path in keep:
return {}
else:
if not salt.utils.check_include_exclude(
os.path.relpath(path, name), None, exclude_pat):
return {}
else:
return {path: {'removed': 'Removed due to clean'}}
for root, dirs, files in walk_l:
for fname in files:
changes.update(_check_changes(fname))
for name_ in dirs:
changes.update(_check_changes(name_))
if not os.path.isdir(name):
changes[name] = {'directory': 'new'}
if changes:
comments = ['The following files will be changed:\n']
for fn_ in changes:
for key, val in six.iteritems(changes[fn_]):
comments.append('{0}: {1} - {2}\n'.format(fn_, key, val))
return None, ''.join(comments)
return True, 'The directory {0} is in the correct state'.format(name)
def _check_dir_meta(name,
user,
group,
mode):
'''
Check the changes in directory metadata
'''
stats = __salt__['file.stats'](name, follow_symlinks=False)
changes = {}
if not stats:
changes['directory'] = 'new'
return changes
if user is not None and user != stats['user']:
changes['user'] = user
if group is not None and group != stats['group']:
changes['group'] = group
# Normalize the dir mode
smode = __salt__['config.manage_mode'](stats['mode'])
mode = __salt__['config.manage_mode'](mode)
if mode is not None and mode != smode:
changes['mode'] = mode
return changes
def _check_touch(name, atime, mtime):
'''
Check to see if a file needs to be updated or created
'''
if not os.path.exists(name):
return None, 'File {0} is set to be created'.format(name)
stats = __salt__['file.stats'](name, follow_symlinks=False)
if atime is not None:
if str(atime) != str(stats['atime']):
return None, 'Times set to be updated on file {0}'.format(name)
if mtime is not None:
if str(mtime) != str(stats['mtime']):
return None, 'Times set to be updated on file {0}'.format(name)
return True, 'File {0} exists and has the correct times'.format(name)
def _get_symlink_ownership(path):
return (
__salt__['file.get_user'](path, follow_symlinks=False),
__salt__['file.get_group'](path, follow_symlinks=False)
)
def _check_symlink_ownership(path, user, group):
'''
Check if the symlink ownership matches the specified user and group
'''
cur_user, cur_group = _get_symlink_ownership(path)
return (cur_user == user) and (cur_group == group)
def _set_symlink_ownership(path, user, group):
'''
Set the ownership of a symlink and return a boolean indicating
success/failure
'''
try:
__salt__['file.lchown'](path, user, group)
except OSError:
pass
return _check_symlink_ownership(path, user, group)
def _symlink_check(name, target, force, user, group):
'''
Check the symlink function
'''
if not os.path.exists(name) and not __salt__['file.is_link'](name):
return None, 'Symlink {0} to {1} is set for creation'.format(
name, target
)
if __salt__['file.is_link'](name):
if __salt__['file.readlink'](name) != target:
return None, 'Link {0} target is set to be changed to {1}'.format(
name, target
)
else:
result = True
msg = 'The symlink {0} is present'.format(name)
if not _check_symlink_ownership(name, user, group):
result = None
msg += (
', but the ownership of the symlink would be changed '
'from {2}:{3} to {0}:{1}'
).format(user, group, *_get_symlink_ownership(name))
return result, msg
else:
if force:
return None, ('The file or directory {0} is set for removal to '
'make way for a new symlink targeting {1}'
.format(name, target))
return False, ('File or directory exists where the symlink {0} '
'should be. Did you mean to use force?'.format(name))
def _test_owner(kwargs, user=None):
'''
Convert owner to user, since other config management tools use owner,
no need to punish people coming from other systems.
PLEASE DO NOT DOCUMENT THIS! WE USE USER, NOT OWNER!!!!
'''
if user:
return user
if 'owner' in kwargs:
log.warning(
'Use of argument owner found, "owner" is invalid, please '
'use "user"'
)
return kwargs['owner']
return user
def _unify_sources_and_hashes(source=None, source_hash=None,
sources=None, source_hashes=None):
'''
Silly little function to give us a standard tuple list for sources and
source_hashes
'''
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
if source and sources:
return (False,
"source and sources are mutually exclusive", [])
if source_hash and source_hashes:
return (False,
"source_hash and source_hashes are mutually exclusive", [])
if source:
return (True, '', [(source, source_hash)])
# Make a nice neat list of tuples exactly len(sources) long..
return True, '', list(zip_longest(sources, source_hashes[:len(sources)]))
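# Illustrative examples (comments only) of what the helper above returns,
# assuming no mutually exclusive arguments are passed; paths are hypothetical:
#   _unify_sources_and_hashes(source='salt://a.conf', source_hash='md5=abc')
#       -> (True, '', [('salt://a.conf', 'md5=abc')])
#   _unify_sources_and_hashes(sources=['salt://a', 'salt://b'],
#                             source_hashes=['md5=abc'])
#       -> (True, '', [('salt://a', 'md5=abc'), ('salt://b', None)])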
def _get_template_texts(source_list=None,
template='jinja',
defaults=None,
context=None,
**kwargs):
'''
Iterate a list of sources and process them as templates.
Returns a list of 'chunks' containing the rendered templates.
'''
ret = {'name': '_get_template_texts',
'changes': {},
'result': True,
'comment': '',
'data': []}
if source_list is None:
return _error(ret,
'_get_template_texts called with empty source_list')
txtl = []
for (source, source_hash) in source_list:
tmpctx = defaults if defaults else {}
if context:
tmpctx.update(context)
rndrd_templ_fn = __salt__['cp.get_template'](
source,
'',
template=template,
saltenv=__env__,
context=tmpctx,
**kwargs
)
msg = 'cp.get_template returned {0} (Called with: {1})'
log.debug(msg.format(rndrd_templ_fn, source))
if rndrd_templ_fn:
tmplines = None
with salt.utils.fopen(rndrd_templ_fn, 'rb') as fp_:
tmplines = fp_.readlines()
if not tmplines:
msg = 'Failed to read rendered template file {0} ({1})'
log.debug(msg.format(rndrd_templ_fn, source))
ret['name'] = source
return _error(ret, msg.format(rndrd_templ_fn, source))
txtl.append(''.join(tmplines))
else:
msg = 'Failed to load template file {0}'.format(source)
log.debug(msg)
ret['name'] = source
return _error(ret, msg)
ret['data'] = txtl
return ret
def _validate_str_list(arg):
'''
ensure ``arg`` is a list of strings
'''
if isinstance(arg, six.string_types):
return [arg]
elif isinstance(arg, Iterable) and not isinstance(arg, Mapping):
return [str(item) for item in arg]
else:
return [str(arg)]
def symlink(
name,
target,
force=False,
backupname=None,
makedirs=False,
user=None,
group=None,
mode=None,
**kwargs):
'''
Create a symlink
If the file already exists and is a symlink pointing to any location other
than the specified target, the symlink will be replaced. If the symlink is
a regular file or directory then the state will return False. If the
regular file or directory is desired to be replaced with a symlink pass
force: True, if it is to be renamed, pass a backupname.
name
The location of the symlink to create
target
The location that the symlink points to
force
If the name of the symlink exists and is not a symlink and
force is set to False, the state will fail. If force is set to
True, the file or directory in the way of the symlink file
will be deleted to make room for the symlink, unless
backupname is set, when it will be renamed
backupname
If the name of the symlink exists and is not a symlink, it will be
renamed to the backupname. If the backupname already
exists and force is False, the state will fail. Otherwise, the
backupname will be removed first.
makedirs
If the location of the symlink does not already have a parent directory
then the state will fail, setting makedirs to True will allow Salt to
create the parent directory
user
The user to own the file, this defaults to the user salt is running as
on the minion
group
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored
mode
The permissions to set on this file, aka 644, 0775, 4664. Not supported
on Windows
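A minimal usage sketch (the path, target, and ownership values below are
placeholders, not values taken from this module):
.. code-block:: yaml
    /usr/local/bin/app:
      file.symlink:
        - target: /opt/app/bin/app
        - user: root
        - group: root
        - makedirs: True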
'''
name = os.path.expanduser(name)
# Make sure that leading zeros stripped by YAML loader are added back
mode = __salt__['config.manage_mode'](mode)
user = _test_owner(kwargs, user=user)
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.symlink')
if user is None:
user = __opts__['user']
if salt.utils.is_windows():
# Make sure the user exists in Windows
# Salt default is 'root'
if not __salt__['user.info'](user):
# User not found, use the account salt is running under
# If username not found, use System
user = __salt__['user.current']()
if not user:
user = 'SYSTEM'
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this '
'is a Windows system.'.format(name)
)
group = user
if group is None:
group = __salt__['file.gid_to_group'](
__salt__['user.info'](user).get('gid', 0)
)
preflight_errors = []
uid = __salt__['file.user_to_uid'](user)
gid = __salt__['file.group_to_gid'](group)
if uid == '':
preflight_errors.append('User {0} does not exist'.format(user))
if gid == '':
preflight_errors.append('Group {0} does not exist'.format(group))
if not os.path.isabs(name):
preflight_errors.append(
'Specified file {0} is not an absolute path'.format(name)
)
if preflight_errors:
msg = '. '.join(preflight_errors)
if len(preflight_errors) > 1:
msg += '.'
return _error(ret, msg)
if __opts__['test']:
ret['result'], ret['comment'] = _symlink_check(name, target, force,
user, group)
return ret
if not os.path.isdir(os.path.dirname(name)):
if makedirs:
__salt__['file.makedirs'](
name,
user=user,
group=group,
mode=mode)
else:
return _error(
ret,
'Directory {0} for symlink is not present'.format(
os.path.dirname(name)
)
)
if __salt__['file.is_link'](name):
# The link exists, verify that it matches the target
if __salt__['file.readlink'](name) != target:
# The target is wrong, delete the link
os.remove(name)
else:
if _check_symlink_ownership(name, user, group):
# The link looks good!
ret['comment'] = ('Symlink {0} is present and owned by '
'{1}:{2}'.format(name, user, group))
else:
if _set_symlink_ownership(name, user, group):
ret['comment'] = ('Set ownership of symlink {0} to '
'{1}:{2}'.format(name, user, group))
ret['changes']['ownership'] = '{0}:{1}'.format(user, group)
else:
ret['result'] = False
ret['comment'] += (
'Failed to set ownership of symlink {0} to '
'{1}:{2}'.format(name, user, group)
)
return ret
elif os.path.isfile(name) or os.path.isdir(name):
# It is not a link, but a file or dir
if backupname is not None:
# Make a backup first
if os.path.lexists(backupname):
if not force:
return _error(ret, ((
'File exists where the backup target {0} should go'
).format(backupname)))
elif os.path.isfile(backupname):
os.remove(backupname)
elif os.path.isdir(backupname):
shutil.rmtree(backupname)
else:
return _error(ret, ((
'Something exists where the backup target {0} '
'should go'
).format(backupname)))
os.rename(name, backupname)
elif force:
# Remove whatever is in the way
if os.path.isfile(name):
os.remove(name)
ret['changes']['forced'] = 'Symlink was forcibly replaced'
else:
shutil.rmtree(name)
else:
# Otherwise throw an error
if os.path.isfile(name):
return _error(ret,
('File exists where the symlink {0} should be'
.format(name)))
else:
return _error(ret, ((
'Directory exists where the symlink {0} should be'
).format(name)))
if not os.path.exists(name):
# The link is not present, make it
try:
__salt__['file.symlink'](target, name)
except OSError as exc:
ret['result'] = False
ret['comment'] = ('Unable to create new symlink {0} -> '
'{1}: {2}'.format(name, target, exc))
return ret
else:
ret['comment'] = ('Created new symlink {0} -> '
'{1}'.format(name, target))
ret['changes']['new'] = name
if not _check_symlink_ownership(name, user, group):
if not _set_symlink_ownership(name, user, group):
ret['result'] = False
ret['comment'] += (', but was unable to set ownership to '
'{0}:{1}'.format(user, group))
return ret
def absent(name):
'''
Make sure that the named file or directory is absent. If it exists, it will
be deleted. This will work to reverse any of the functions in the file
state module.
name
The path which should be deleted
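A minimal usage sketch (the path is a placeholder):
.. code-block:: yaml
    /tmp/obsolete.conf:
      file.absent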
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.absent')
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name)
)
if name == '/':
return _error(ret, 'Refusing to make "/" absent')
if os.path.isfile(name) or os.path.islink(name):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'File {0} is set for removal'.format(name)
return ret
try:
__salt__['file.remove'](name)
ret['comment'] = 'Removed file {0}'.format(name)
ret['changes']['removed'] = name
return ret
except CommandExecutionError as exc:
return _error(ret, '{0}'.format(exc))
elif os.path.isdir(name):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Directory {0} is set for removal'.format(name)
return ret
try:
shutil.rmtree(name)
ret['comment'] = 'Removed directory {0}'.format(name)
ret['changes']['removed'] = name
return ret
except (OSError, IOError):
return _error(ret, 'Failed to remove directory {0}'.format(name))
ret['comment'] = 'File {0} is not present'.format(name)
return ret
def exists(name):
'''
Verify that the named file or directory is present or exists.
Ensures pre-requisites outside of Salt's purview
(e.g., keytabs, private keys, etc.) have been previously satisfied before
deployment.
name
Absolute path which must exist
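A minimal usage sketch (the keytab path is a placeholder):
.. code-block:: yaml
    /etc/krb5.keytab:
      file.exists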
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.exists')
if not os.path.exists(name):
return _error(ret, 'Specified path {0} does not exist'.format(name))
ret['comment'] = 'Path {0} exists'.format(name)
return ret
def missing(name):
'''
Verify that the named file or directory is missing, this returns True only
if the named file is missing but does not remove the file if it is present.
name
Absolute path which must NOT exist
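A minimal usage sketch (the path is a placeholder):
.. code-block:: yaml
    /etc/nologin:
      file.missing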
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.missing')
if os.path.exists(name):
return _error(ret, 'Specified path {0} exists'.format(name))
ret['comment'] = 'Path {0} is missing'.format(name)
return ret
def managed(name,
source=None,
source_hash='',
user=None,
group=None,
mode=None,
template=None,
makedirs=False,
dir_mode=None,
context=None,
replace=True,
defaults=None,
env=None,
backup='',
show_diff=True,
create=True,
contents=None,
contents_pillar=None,
contents_grains=None,
contents_newline=True,
follow_symlinks=True,
check_cmd=None,
**kwargs):
'''
Manage a given file, this function allows for a file to be downloaded from
the salt master and potentially run through a templating system.
name
The location of the file to manage
source
The source file to download to the minion, this source file can be
hosted on either the salt master server, or on an HTTP or FTP server.
Both HTTPS and HTTP are supported as well as downloading directly
from Amazon S3 compatible URLs with both pre-configured and automatic
IAM credentials. (see s3.get state documentation)
File retrieval from Openstack Swift object storage is supported via
swift://container/object_path URLs, see swift.get documentation.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs. If source is left blank or None
(use ~ in YAML), the file will be created as an empty file and
the content will not be managed
If the file is hosted on an HTTP or FTP server then the source_hash
argument is also required
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
will be used and subsequent entries in the list will be ignored. Source
list functionality only supports local files and remote files hosted on
the salt master server or retrievable via HTTP, HTTPS, or FTP.
.. code-block:: yaml
file_override_example:
file.managed:
- source:
- salt://file_that_does_not_exist
- salt://file_that_exists
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
**Using a Source Hash File**
The file can contain several checksums for several files. Each line
must contain both the file name and the hash. If no file name is
matched, the first hash encountered will be used, otherwise the most
secure hash with the correct source file name will be used.
When using a source hash file, the source_hash argument needs to be a
URL; the standard download URLs (ftp, http, salt, etc.) are supported:
Example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.hash
The following is an example of the supported source_hash format:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
ead48423703509d37c4a90e6a0d53e143b6fc268
Debian file type ``*.dsc`` files are also supported.
**Inserting the Source Hash in the sls Data**
Examples:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: md5=79eef25f9b0b2c642c62b7f737d4f53f
Known issues:
If the remote server URL has the hash file as an apparent
sub-directory of the source file, the module will discover that it
has already cached a directory where a file should be cached. For
example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz/+md5
user
The user to own the file, this defaults to the user salt is running as
on the minion
group
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored
mode
The permissions to set on this file, aka 644, 0775, 4664. Not supported
on Windows
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
makedirs
If the file is located in a path without a parent directory, then
the state will fail. If makedirs is set to True, then the parent
directories will be created to facilitate the creation of the named
file.
dir_mode
If directories are to be created, passing this option specifies the
permissions for those directories. If this is not set, directories
will be assigned permissions from the 'mode' argument.
replace
If this file should be replaced. If false, this command will
not overwrite file contents but will enforce permissions if the file
exists already. Default is True.
context
Overrides default context variables passed to the template.
defaults
Default context passed to the template.
backup
Overrides the default backup mode for this specific file.
show_diff
If set to False, the diff will not be shown.
create
Default is True, if create is set to False then the file will only be
managed if the file already exists on the system.
contents
Default is None. If specified, will use the given string as the
contents of the file. Should not be used in conjunction with a source
file of any kind. Ignores hashes and does not use a templating engine.
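A short sketch (the path and text are placeholders):
.. code-block:: yaml
    /etc/motd:
      file.managed:
        - contents: 'Managed by Salt. Local edits will be overwritten.'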
contents_pillar
.. versionadded:: 0.17.0
Operates like ``contents``, but draws from a value stored in pillar,
using the pillar path syntax used in :mod:`pillar.get
<salt.modules.pillar.get>`. This is useful when the pillar value
contains newlines, as referencing a pillar variable using a jinja/mako
template can result in YAML formatting issues due to the newlines
causing indentation mismatches.
For example, the following could be used to deploy an SSH private key:
.. code-block:: yaml
/home/deployer/.ssh/id_rsa:
file.managed:
- user: deployer
- group: deployer
- mode: 600
- contents_pillar: userdata:deployer:id_rsa
This would populate ``/home/deployer/.ssh/id_rsa`` with the contents of
``pillar['userdata']['deployer']['id_rsa']``. An example of this pillar
setup would be like so:
.. code-block:: yaml
userdata:
deployer:
id_rsa: |
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAoQiwO3JhBquPAalQF9qP1lLZNXVjYMIswrMe2HcWUVBgh+vY
U7sCwx/dH6+VvNwmCoqmNnP+8gTPKGl1vgAObJAnMT623dMXjVKwnEagZPRJIxDy
B/HaAre9euNiY3LvIzBTWRSeMfT+rWvIKVBpvwlgGrfgz70m0pqxu+UyFbAGLin+
GpxzZAMaFpZw4sSbIlRuissXZj/sHpQb8p9M5IeO4Z3rjkCP1cxI
-----END RSA PRIVATE KEY-----
.. note::
The private key above is shortened to keep the example brief, but
shows how to do multiline string in YAML. The key is followed by a
pipe character, and the multiline string is indented two more
spaces.
contents_grains
.. versionadded:: 2014.7.0
Same as contents_pillar, but with grains
contents_newline
.. versionadded:: 2014.7.0
When using contents, contents_pillar, or contents_grains, this option
ensures the file will have a newline at the end.
When loading some data this newline is better left off. Setting
contents_newline to False will omit this final newline.
follow_symlinks : True
.. versionadded:: 2014.7.0
If the desired path is a symlink follow it and make changes to the
file to which the symlink points.
check_cmd
.. versionadded:: 2014.7.0
The specified command will be run with the managed file as an argument.
If the command exits with a nonzero exit code, the state will fail and
the managed file will not be changed.
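A sketch of ``check_cmd`` usage (the source path is a placeholder;
``visudo -c -f`` is simply one common validator that accepts the file
to check as its final argument):
.. code-block:: yaml
    /etc/sudoers:
      file.managed:
        - source: salt://sudoers/files/sudoers.jinja
        - template: jinja
        - check_cmd: /usr/sbin/visudo -c -f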
'''
name = os.path.expanduser(name)
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
# If no source is specified, set replace to False, as there is nothing
# to replace the file with.
src_defined = source or contents or contents_pillar or contents_grains
if not src_defined and replace:
replace = False
log.warning(
'Neither \'source\' nor \'contents\' nor \'contents_pillar\' nor \'contents_grains\' '
'was defined, yet \'replace\' was set to \'True\'. As there is '
'no source to replace the file with, \'replace\' has been set '
'to \'False\' to avoid reading the file unnecessarily'
)
if len([_f for _f in [contents, contents_pillar, contents_grains] if _f]) > 1:
return _error(
ret, 'Only one of contents, contents_pillar, and contents_grains is permitted')
if contents_pillar:
contents = __salt__['pillar.get'](contents_pillar)
if contents_grains:
contents = __salt__['grains.get'](contents_grains)
# ensure contents is a string
if contents:
validated_contents = _validate_str_list(contents)
if not validated_contents:
return _error(ret, '"contents" is not a string or list of strings')
if isinstance(validated_contents, list):
contents = os.linesep.join(validated_contents)
if contents_newline:
# Make sure file ends in newline
if contents and not contents.endswith(os.linesep):
contents += os.linesep
# Make sure that leading zeros stripped by YAML loader are added back
mode = __salt__['config.manage_mode'](mode)
if not name:
return _error(ret, 'Must provide name to file.exists')
user = _test_owner(kwargs, user=user)
if salt.utils.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this '
'is a Windows system.'.format(name)
)
group = user
if not create:
if not os.path.isfile(name):
# Don't create a file that is not already present
ret['comment'] = ('File {0} is not present and is not set for '
'creation').format(name)
return ret
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if isinstance(env, six.string_types):
msg = (
'Passing a salt environment should be done using \'saltenv\' not '
'\'env\'. This warning will go away in Salt Boron and this '
'will be the default and expected behavior. Please update your '
'state files.'
)
salt.utils.warn_until('Boron', msg)
ret.setdefault('warnings', []).append(msg)
# No need to set __env__ = env since that's done in the state machinery
if os.path.isdir(name):
ret['comment'] = 'Specified target {0} is a directory'.format(name)
ret['result'] = False
return ret
if context is None:
context = {}
elif not isinstance(context, dict):
return _error(
ret, 'Context must be formed as a dict')
if defaults and not isinstance(defaults, dict):
return _error(
ret, 'Defaults must be formed as a dict')
if not replace and os.path.exists(name):
# Check and set the permissions if necessary
ret, _ = __salt__['file.check_perms'](name, ret, user, group, mode,
follow_symlinks)
if __opts__['test']:
ret['comment'] = 'File {0} not updated'.format(name)
elif not ret['changes'] and ret['result']:
ret['comment'] = ('File {0} exists with proper permissions. '
'No changes made.'.format(name))
return ret
accum_data, _ = _load_accumulators()
if name in accum_data:
if not context:
context = {}
context['accumulator'] = accum_data[name]
try:
if __opts__['test']:
ret['changes'] = __salt__['file.check_managed_changes'](
name,
source,
source_hash,
user,
group,
mode,
template,
context,
defaults,
__env__,
contents,
**kwargs
)
if ret['changes']:
ret['result'] = None
ret['comment'] = 'The file {0} is set to be changed'.format(name)
if not show_diff:
ret['changes']['diff'] = '<show_diff=False>'
else:
ret['result'] = True
ret['comment'] = 'The file {0} is in the correct state'.format(name)
return ret
# If the source is a list then find which file exists
source, source_hash = __salt__['file.source_list'](
source,
source_hash,
__env__
)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = 'Unable to manage file: {0}'.format(exc)
return ret
# Gather the source file from the server
try:
sfn, source_sum, comment_ = __salt__['file.get_managed'](
name,
template,
source,
source_hash,
user,
group,
mode,
__env__,
context,
defaults,
**kwargs
)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
return _error(ret, 'Unable to manage file: {0}'.format(exc))
tmp_filename = None
if check_cmd:
tmp_filename = salt.utils.mkstemp()
# if exists copy existing file to tmp to compare
if __salt__['file.file_exists'](name):
try:
__salt__['file.copy'](name, tmp_filename)
except Exception as exc:
return _error(ret, 'Unable to copy file {0} to {1}: {2}'.format(name, tmp_filename, exc))
try:
ret = __salt__['file.manage_file'](
tmp_filename,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
__env__,
backup,
makedirs,
template,
show_diff,
contents,
dir_mode,
follow_symlinks)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
if os.path.isfile(tmp_filename):
os.remove(tmp_filename)
return _error(ret, 'Unable to check_cmd file: {0}'.format(exc))
# If the managed file would change, validate the tempfile with check_cmd
if ret['changes']:
# Reset ret
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
check_cmd_opts = {}
if 'shell' in __grains__:
check_cmd_opts['shell'] = __grains__['shell']
cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts)
if isinstance(cret, dict):
ret.update(cret)
if os.path.isfile(tmp_filename):
os.remove(tmp_filename)
return ret
# Since we generated a new tempfile and we are not returning here
# let's change the original sfn to the new tempfile or else we will
# get file not found
sfn = tmp_filename
else:
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if comment_ and contents is None:
return _error(ret, comment_)
else:
try:
return __salt__['file.manage_file'](
name,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
__env__,
backup,
makedirs,
template,
show_diff,
contents,
dir_mode,
follow_symlinks)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
return _error(ret, 'Unable to manage file: {0}'.format(exc))
finally:
if tmp_filename and os.path.isfile(tmp_filename):
os.remove(tmp_filename)
def directory(name,
user=None,
group=None,
recurse=None,
dir_mode=None,
file_mode=None,
makedirs=False,
clean=False,
require=None,
exclude_pat=None,
follow_symlinks=False,
force=False,
backupname=None,
allow_symlink=True,
**kwargs):
'''
Ensure that a named directory is present and has the right perms
name
The location to create or manage a directory
user
The user to own the directory; this defaults to the user salt is
running as on the minion
group
The group ownership set for the directory; this defaults to the group
salt is running as on the minion. On Windows, this is ignored
recurse
Enforce user/group ownership and mode of directory recursively. Accepts
a list of strings representing what you would like to recurse. If
``mode`` is defined, will recurse on both ``file_mode`` and ``dir_mode`` if
they are defined. If ``ignore_files`` or ``ignore_dirs`` is included, files or
directories will be left unchanged respectively.
Example:
.. code-block:: yaml
/var/log/httpd:
file.directory:
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
- recurse:
- user
- group
- mode
Leave files or directories unchanged:
.. code-block:: yaml
/var/log/httpd:
file.directory:
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
- recurse:
- user
- group
- mode
- ignore_files
/var/log/httpd:
file.directory:
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
- recurse:
- user
- group
- mode
- ignore_dirs
.. versionadded:: 2015.5.0
dir_mode / mode
The permissions mode to set any directories created. Not supported on
Windows
file_mode
The permissions mode to set any files created if 'mode' is run in
'recurse'. This defaults to dir_mode. Not supported on Windows
makedirs
If the directory is located in a path without a parent directory, then
the state will fail. If makedirs is set to True, then the parent
directories will be created to facilitate the creation of the named
file.
clean
Make sure that only files that are set up by salt and required by this
function are kept. If this option is set then everything in this
directory will be deleted unless it is required.
require
Require other resources such as packages or files
exclude_pat
When 'clean' is set to True, exclude this pattern from removal list
and preserve in the destination.
follow_symlinks : False
If the desired path is a symlink (or ``recurse`` is defined and a
symlink is encountered while recursing), follow it and check the
permissions of the directory/file to which the symlink points.
.. versionadded:: 2014.1.4
force
If the name of the directory exists and is not a directory and
force is set to False, the state will fail. If force is set to
True, the file in the way of the directory will be deleted to
make room for the directory, unless backupname is set,
then it will be renamed.
.. versionadded:: 2014.7.0
backupname
If the name of the directory exists and is not a directory, it will be
renamed to the backupname. If the backupname already
exists and force is False, the state will fail. Otherwise, the
backupname will be removed first.
.. versionadded:: 2014.7.0
allow_symlink : True
If allow_symlink is True and the specified path is a symlink, it will be
allowed to remain if it points to a directory. If allow_symlink is False
then the state will fail, unless force is also set to True, in which case
it will be removed or renamed, depending on the value of the backupname
argument.
.. versionadded:: 2014.7.0
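A minimal usage sketch (the path, ownership, and mode are placeholders):
.. code-block:: yaml
    /srv/app/data:
      file.directory:
        - user: root
        - group: root
        - dir_mode: 750
        - makedirs: True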
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.directory')
# Remove trailing slash, if present
if name[-1] == '/':
name = name[:-1]
user = _test_owner(kwargs, user=user)
if salt.utils.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this is '
'a Windows system.'.format(name)
)
group = user
if 'mode' in kwargs and not dir_mode:
dir_mode = kwargs.get('mode', [])
if not file_mode:
file_mode = dir_mode
# Make sure that leading zeros stripped by YAML loader are added back
dir_mode = __salt__['config.manage_mode'](dir_mode)
file_mode = __salt__['config.manage_mode'](file_mode)
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if os.path.isfile(name) or (not allow_symlink and os.path.islink(name)):
if backupname is not None:
# Make a backup first
if os.path.lexists(backupname):
if not force:
return _error(ret, ((
'File exists where the backup target {0} should go'
).format(backupname)))
elif os.path.isfile(backupname):
os.remove(backupname)
elif os.path.islink(backupname):
os.remove(backupname)
elif os.path.isdir(backupname):
shutil.rmtree(backupname)
else:
return _error(ret, ((
'Something exists where the backup target {0} '
'should go'
).format(backupname)))
os.rename(name, backupname)
elif force:
# Remove whatever is in the way
if os.path.isfile(name):
os.remove(name)
ret['changes']['forced'] = 'File was forcibly replaced'
elif os.path.islink(name):
os.remove(name)
ret['changes']['forced'] = 'Symlink was forcibly replaced'
else:
shutil.rmtree(name)
else:
if os.path.isfile(name):
return _error(
ret,
'Specified location {0} exists and is a file'.format(name))
elif os.path.islink(name):
return _error(
ret,
'Specified location {0} exists and is a symlink'.format(name))
if __opts__['test']:
ret['result'], ret['comment'] = _check_directory(
name,
user,
group,
recurse or [],
dir_mode,
clean,
require,
exclude_pat)
return ret
if not os.path.isdir(name):
# The dir does not exist, make it
if not os.path.isdir(os.path.dirname(name)):
# The parent directory does not exist, create them
if makedirs:
# Make sure the drive is mapped before trying to create the
# path in windows
if salt.utils.is_windows():
drive, path = os.path.splitdrive(name)
if not os.path.isdir(drive):
return _error(
ret, 'Drive {0} is not mapped'.format(drive))
# Everything's good, create the path
__salt__['file.makedirs'](
name, user=user, group=group, mode=dir_mode
)
else:
return _error(
ret, 'No directory to create {0} in'.format(name))
__salt__['file.mkdir'](
name, user=user, group=group, mode=dir_mode
)
ret['changes'][name] = 'New Dir'
if not os.path.isdir(name):
return _error(ret, 'Failed to create directory {0}'.format(name))
# Check permissions
ret, perms = __salt__['file.check_perms'](name,
ret,
user,
group,
dir_mode,
follow_symlinks)
if recurse or clean:
walk_l = list(os.walk(name)) # walk path only once and store the result
# root: (dirs, files) structure, compatible with python2.6
walk_d = {}
for i in walk_l:
walk_d[i[0]] = (i[1], i[2])
if recurse:
if not isinstance(recurse, list):
ret['result'] = False
ret['comment'] = '"recurse" must be formed as a list of strings'
elif not set(['user', 'group', 'mode', 'ignore_files',
'ignore_dirs']) >= set(recurse):
ret['result'] = False
ret['comment'] = 'Types for "recurse" limited to "user", ' \
'"group", "mode", "ignore_files, and "ignore_dirs"'
else:
if 'ignore_files' in recurse and 'ignore_dirs' in recurse:
ret['result'] = False
ret['comment'] = 'Cannot specify "recurse" options "ignore_files" ' \
'and "ignore_dirs" at the same time.'
return ret
if 'user' in recurse:
if user:
uid = __salt__['file.user_to_uid'](user)
# file.user_to_uid returns '' if user does not exist. Above
# check for user is not fatal, so we need to be sure user
# exists.
if isinstance(uid, six.string_types):
ret['result'] = False
ret['comment'] = 'Failed to enforce ownership for ' \
'user {0} (user does not ' \
'exist)'.format(user)
else:
ret['result'] = False
ret['comment'] = 'user not specified, but configured as ' \
'a target for recursive ownership ' \
'management'
else:
user = None
if 'group' in recurse:
if group:
gid = __salt__['file.group_to_gid'](group)
# As above with user, we need to make sure group exists.
if isinstance(gid, six.string_types):
ret['result'] = False
ret['comment'] = 'Failed to enforce group ownership ' \
'for group {0}'.format(group)
else:
ret['result'] = False
ret['comment'] = 'group not specified, but configured ' \
'as a target for recursive ownership ' \
'management'
else:
group = None
if 'mode' not in recurse:
file_mode = None
dir_mode = None
if 'ignore_files' in recurse:
ignore_files = True
else:
ignore_files = False
if 'ignore_dirs' in recurse:
ignore_dirs = True
else:
ignore_dirs = False
for root, dirs, files in walk_l:
if not ignore_files:
for fn_ in files:
full = os.path.join(root, fn_)
ret, perms = __salt__['file.check_perms'](
full,
ret,
user,
group,
file_mode,
follow_symlinks)
if not ignore_dirs:
for dir_ in dirs:
full = os.path.join(root, dir_)
ret, perms = __salt__['file.check_perms'](
full,
ret,
user,
group,
dir_mode,
follow_symlinks)
if clean:
keep = _gen_keep_files(name, require, walk_d)
log.debug('List of kept files when use file.directory with clean: %s',
keep)
removed = _clean_dir(name, list(keep), exclude_pat)
if removed:
ret['changes']['removed'] = removed
ret['comment'] = 'Files cleaned from directory {0}'.format(name)
if not ret['comment']:
ret['comment'] = 'Directory {0} updated'.format(name)
if __opts__['test']:
ret['comment'] = 'Directory {0} not updated'.format(name)
elif not ret['changes'] and ret['result']:
ret['comment'] = 'Directory {0} is in the correct state'.format(name)
return ret
def recurse(name,
source,
clean=False,
require=None,
user=None,
group=None,
dir_mode=None,
file_mode=None,
sym_mode=None,
template=None,
context=None,
defaults=None,
env=None,
include_empty=False,
backup='',
include_pat=None,
exclude_pat=None,
maxdepth=None,
keep_symlinks=False,
force_symlinks=False,
**kwargs):
'''
Recurse through a subdirectory on the master and copy said subdirectory
over to the specified path.
name
The directory to set the recursion in
source
The source directory, this directory is located on the salt master file
server and is specified with the salt:// protocol. If the directory is
located on the master in the directory named spam, and is called eggs,
the source string is salt://spam/eggs
clean
Make sure that only files that are set up by salt and required by this
function are kept. If this option is set then everything in this
directory will be deleted unless it is required.
require
Require other resources such as packages or files
user
The user to own the directory. This defaults to the user salt is
running as on the minion
group
The group ownership set for the directory. This defaults to the group
salt is running as on the minion. On Windows, this is ignored
dir_mode
The permissions mode to set on any directories created. Not supported on
Windows
file_mode
The permissions mode to set on any files created. Not supported on
Windows
sym_mode
The permissions mode to set on any symlink created. Not supported on
Windows
template
If this setting is applied then the named templating engine will be
used to render the downloaded file. Supported templates are:
`jinja`, `mako` and `wempy`.
.. note::
The template option is required when recursively applying templates.
context
Overrides default context variables passed to the template.
defaults
Default context passed to the template.
include_empty
Set this to True if empty directories should also be created
(default is False)
include_pat
When copying, include only this pattern from the source. Default
is glob match; if prefixed with 'E@', then regexp match.
Example:
.. code-block:: yaml
- include_pat: hello* :: glob matches 'hello01', 'hello02'
... but not 'otherhello'
- include_pat: E@hello :: regexp matches 'otherhello',
'hello01' ...
exclude_pat
Exclude this pattern from the source when copying. If both
`include_pat` and `exclude_pat` are supplied, then it will apply
conditions cumulatively. i.e. first select based on include_pat, and
then within that result apply exclude_pat.
Also, when 'clean=True', exclude this pattern from the removal
list and preserve in the destination.
Example:
.. code-block:: yaml
- exclude_pat: APPDATA* :: glob matches APPDATA.01,
APPDATA.02,.. for exclusion
- exclude_pat: E@(APPDATA)|(TEMPDATA) :: regexp matches APPDATA
or TEMPDATA for exclusion
maxdepth
When copying, only copy paths which are of depth `maxdepth` from the
source path.
Example:
.. code-block:: yaml
- maxdepth: 0 :: Only include files located in the source
directory
- maxdepth: 1 :: Only include files located in the source
or immediate subdirectories
keep_symlinks
Keep symlinks when copying from the source. This option will cause
the copy operation to terminate at the symlink. If behavior similar
to rsync is desired, then set this to True.
force_symlinks
Force symlink creation. If a file or directory is obstructing symlink
creation, it will be recursively removed so that the symlink can be
created. This option is usually not needed except in special
circumstances.
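A minimal usage sketch (the target path and salt:// source are
placeholders):
.. code-block:: yaml
    /opt/code/flask:
      file.recurse:
        - source: salt://code/flask
        - user: root
        - group: root
        - dir_mode: 755
        - file_mode: 644
        - include_empty: True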
'''
name = os.path.expanduser(name)
user = _test_owner(kwargs, user=user)
if salt.utils.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this '
'is a Windows system.'.format(name)
)
group = user
ret = {
'name': name,
'changes': {},
'result': True,
'comment': {} # { path: [comment, ...] }
}
if 'mode' in kwargs:
ret['result'] = False
ret['comment'] = (
'\'mode\' is not allowed in \'file.recurse\'. Please use '
'\'file_mode\' and \'dir_mode\'.'
)
return ret
# Make sure that leading zeros stripped by YAML loader are added back
dir_mode = __salt__['config.manage_mode'](dir_mode)
file_mode = __salt__['config.manage_mode'](file_mode)
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if isinstance(env, six.string_types):
msg = (
'Passing a salt environment should be done using \'saltenv\' not '
'\'env\'. This warning will go away in Salt Boron and this '
'will be the default and expected behavior. Please update your '
'state files.'
)
salt.utils.warn_until('Boron', msg)
ret.setdefault('warnings', []).append(msg)
# No need to set __env__ = env since that's done in the state machinery
# expand source into source_list
source_list = _validate_str_list(source)
for idx, val in enumerate(source_list):
source_list[idx] = val.rstrip('/')
for precheck in source_list:
if not precheck.startswith('salt://'):
return _error(ret, ('Invalid source {0!r} '
'(must be a salt:// URI)'.format(precheck)))
# Select the first source in source_list that exists
try:
source, source_hash = __salt__['file.source_list'](source_list, '', __env__)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = 'Recurse failed: {0}'.format(exc)
return ret
# Check source path relative to fileserver root, make sure it is a
# directory
source_rel = source.partition('://')[2]
master_dirs = __salt__['cp.list_master_dirs'](__env__)
if source_rel not in master_dirs \
and not any((x for x in master_dirs
if x.startswith(source_rel + '/'))):
ret['result'] = False
ret['comment'] = (
'The directory {0!r} does not exist on the salt fileserver '
'in saltenv {1!r}'.format(source, __env__)
)
return ret
# Verify the target directory
if not os.path.isdir(name):
if os.path.exists(name):
# it is not a dir, but it exists - fail out
return _error(
ret, 'The path {0} exists and is not a directory'.format(name))
if not __opts__['test']:
__salt__['file.makedirs_perms'](
name, user, group, int(str(dir_mode), 8) if dir_mode else None)
def add_comment(path, comment):
comments = ret['comment'].setdefault(path, [])
if isinstance(comment, six.string_types):
comments.append(comment)
else:
comments.extend(comment)
def merge_ret(path, _ret):
# Use the most "negative" result code (out of True, None, False)
if _ret['result'] is False or ret['result'] is True:
ret['result'] = _ret['result']
# Only include comments about files that changed
if _ret['result'] is not True and _ret['comment']:
add_comment(path, _ret['comment'])
if _ret['changes']:
ret['changes'][path] = _ret['changes']
def manage_file(path, source):
source = salt.utils.url.escape(source)
if clean and os.path.exists(path) and os.path.isdir(path):
_ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
if __opts__['test']:
_ret['comment'] = 'Replacing directory {0} with a ' \
'file'.format(path)
_ret['result'] = None
merge_ret(path, _ret)
return
else:
shutil.rmtree(path)
_ret['changes'] = {'diff': 'Replaced directory with a '
'new file'}
merge_ret(path, _ret)
# Conflicts can occur if some kwargs are passed in here
pass_kwargs = {}
faults = ['mode', 'makedirs']
for key in kwargs:
if key not in faults:
pass_kwargs[key] = kwargs[key]
_ret = managed(
path,
source=source,
user=user,
group=group,
mode=file_mode,
template=template,
makedirs=True,
context=context,
defaults=defaults,
backup=backup,
**pass_kwargs)
merge_ret(path, _ret)
def manage_directory(path):
if os.path.basename(path) == '..':
return
if clean and os.path.exists(path) and not os.path.isdir(path):
_ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
if __opts__['test']:
_ret['comment'] = 'Replacing {0} with a directory'.format(path)
_ret['result'] = None
merge_ret(path, _ret)
return
else:
os.remove(path)
_ret['changes'] = {'diff': 'Replaced file with a directory'}
merge_ret(path, _ret)
_ret = directory(
path,
user=user,
group=group,
recurse=[],
dir_mode=dir_mode,
file_mode=None,
makedirs=True,
clean=False,
require=None)
merge_ret(path, _ret)
# Process symlinks and return the updated filenames list
def process_symlinks(filenames, symlinks):
for lname, ltarget in six.iteritems(symlinks):
if not salt.utils.check_include_exclude(
os.path.relpath(lname, srcpath), include_pat, exclude_pat):
continue
srelpath = os.path.relpath(lname, srcpath)
# Check for max depth
if maxdepth is not None:
srelpieces = srelpath.split('/')
if not srelpieces[-1]:
srelpieces = srelpieces[:-1]
if len(srelpieces) > maxdepth + 1:
continue
# Check for all paths that begin with the symlink
# and axe it leaving only the dirs/files below it.
# This needs to use list() otherwise they reference
# the same list.
_filenames = list(filenames)
for filename in _filenames:
if filename.startswith(lname):
log.debug('** skipping file ** {0}, it intersects a '
'symlink'.format(filename))
filenames.remove(filename)
# Create the symlink along with the necessary dirs.
# The dir perms/ownership will be adjusted later
# if needed
_ret = symlink(os.path.join(name, srelpath),
ltarget,
makedirs=True,
force=force_symlinks,
user=user,
group=group,
mode=sym_mode)
if not _ret:
continue
merge_ret(os.path.join(name, srelpath), _ret)
# Add the path to the keep set in case clean is set to True
keep.add(os.path.join(name, srelpath))
vdir.update(keep)
return filenames
keep = set()
vdir = set()
srcpath = salt.utils.url.parse(source)[0]
if not srcpath.endswith('/'):
# we're searching for things that start with this *directory*.
# use '/' since the master only runs on POSIX
srcpath = srcpath + '/'
fns_ = __salt__['cp.list_master'](__env__, srcpath)
# If we are instructed to keep symlinks, then process them.
if keep_symlinks:
# Make this global so that emptydirs can use it if needed.
symlinks = __salt__['cp.list_master_symlinks'](__env__, srcpath)
fns_ = process_symlinks(fns_, symlinks)
for fn_ in fns_:
if not fn_.strip():
continue
# fn_ here is the absolute (from file_roots) source path of
# the file to copy from; it is either a normal file or an
# empty dir (if include_empty==True).
relname = os.path.relpath(fn_, srcpath)
if relname.startswith('..'):
continue
# Check for maxdepth of the relative path
if maxdepth is not None:
# Since paths are all master, just use POSIX separator
relpieces = relname.split('/')
# Handle empty directories (include_empty==True) by removing
# the last piece if it is an empty string
if not relpieces[-1]:
relpieces = relpieces[:-1]
if len(relpieces) > maxdepth + 1:
continue
# Check if it is to be excluded. Match only part of the path
# relative to the target directory
if not salt.utils.check_include_exclude(
relname, include_pat, exclude_pat):
continue
dest = os.path.join(name, relname)
dirname = os.path.dirname(dest)
keep.add(dest)
if dirname not in vdir:
# verify the directory perms if they are set
manage_directory(dirname)
vdir.add(dirname)
src = salt.utils.url.create(fn_)
manage_file(dest, src)
if include_empty:
mdirs = __salt__['cp.list_master_dirs'](__env__, srcpath)
for mdir in mdirs:
if not salt.utils.check_include_exclude(
os.path.relpath(mdir, srcpath), include_pat, exclude_pat):
continue
mdest = os.path.join(name, os.path.relpath(mdir, srcpath))
# Check for symlinks that happen to point to an empty dir.
if keep_symlinks:
islink = False
for link in symlinks:
if mdir.startswith(link, 0):
log.debug('** skipping empty dir ** {0}, it intersects'
' a symlink'.format(mdir))
islink = True
break
if islink:
continue
manage_directory(mdest)
keep.add(mdest)
keep = list(keep)
if clean:
# TODO: Use directory(clean=True) instead
keep += _gen_keep_files(name, require)
removed = _clean_dir(name, list(keep), exclude_pat)
if removed:
if __opts__['test']:
if ret['result']:
ret['result'] = None
add_comment('removed', removed)
else:
ret['changes']['removed'] = removed
# Flatten comments until salt command line client learns
# to display structured comments in a readable fashion
ret['comment'] = '\n'.join(u'\n#### {0} ####\n{1}'.format(
k, v if isinstance(v, six.string_types) else '\n'.join(v)
) for (k, v) in six.iteritems(ret['comment'])).strip()
if not ret['comment']:
ret['comment'] = 'Recursively updated {0}'.format(name)
if not ret['changes'] and ret['result']:
ret['comment'] = 'The directory {0} is in the correct state'.format(
name
)
return ret
def line(name, content, match=None, mode=None, location=None,
before=None, after=None, show_changes=True, backup=False,
quiet=False, indent=True):
'''
Line-based editing of a file.
.. versionadded:: 2015.8.0
Params are identical to the remote execution function
:mod:`file.line <salt.modules.file.line>`.
.. code-block:: yaml
/etc/myconfig.conf:
file.line:
- mode: ensure
- content: my key = my value
- before: somekey.*?
'''
name = os.path.expanduser(name)
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.line')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
changes = __salt__['file.line'](name, content, match=match, mode=mode, location=location,
before=before, after=after, show_changes=show_changes,
backup=backup, quiet=quiet, indent=indent)
if changes:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes)
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
ret['changes'] = {'diff': changes}
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
def replace(name,
pattern,
repl,
count=0,
flags=0,
bufsize=1,
append_if_not_found=False,
prepend_if_not_found=False,
not_found_content=None,
backup='.bak',
show_changes=True):
r'''
Maintain an edit in a file.
.. versionadded:: 0.17.0
name
Filesystem path to the file to be edited.
pattern
Python's `regular expression search <https://docs.python.org/2/library/re.html>`_.
repl
The replacement text.
count
Maximum number of pattern occurrences to be replaced.
flags
A list of flags defined in the :ref:`re module documentation <contents-of-module-re>`.
Each list item should be a string that will correlate to the human-friendly flag name.
E.g., ``['IGNORECASE', 'MULTILINE']``. Note: multiline searches must specify ``file``
as the ``bufsize`` argument below. Defaults to 0 and can be a list or an int.
bufsize
How much of the file to buffer into memory at once. The default value ``1`` processes
one line at a time. The special value ``file`` may be specified which will read the
entire file into memory before processing. Note: multiline searches must specify ``file``
buffering. Can be an int or a str.
append_if_not_found
If pattern is not found and set to ``True`` then, the content will be appended to the file.
.. versionadded:: 2014.7.0
prepend_if_not_found
If pattern is not found and set to ``True`` then, the content will be prepended to the file.
.. versionadded:: 2014.7.0
not_found_content
Content to use for append/prepend if not found. If ``None`` (default), uses ``repl``. Useful
when ``repl`` uses references to groups in the pattern.
.. versionadded:: 2014.7.0
backup
The file extension to use for a backup of the file before editing. Set to ``False`` to skip
making a backup.
show_changes
Output a unified diff of the old file and the new file. If ``False`` return a boolean if any
changes were made. Returns a boolean or a string.
.. note::
Using this option will store two copies of the file in-memory (the original version and
the edited version) in order to generate the diff.
For complex regex patterns it can be useful to avoid the need for complex quoting and escape
sequences by making use of YAML's multiline string syntax.
.. code-block:: yaml
complex_search_and_replace:
file.replace:
# <...snip...>
- pattern: |
CentOS \(2.6.32[^\n]+\n\s+root[^\n]+\n\)+
.. note::
When using YAML multiline string syntax in ``pattern:``, make sure to
also use that syntax in the ``repl:`` part, or you might lose line
feeds.
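A simple usage sketch (the path, pattern, and replacement are
placeholders):
.. code-block:: yaml
    sshd_port:
      file.replace:
        - name: /etc/ssh/sshd_config
        - pattern: '^#?Port .*'
        - repl: 'Port 2222'
        - append_if_not_found: True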
'''
name = os.path.expanduser(name)
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.replace')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
changes = __salt__['file.replace'](name,
pattern,
repl,
count=count,
flags=flags,
bufsize=bufsize,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
not_found_content=not_found_content,
backup=backup,
dry_run=__opts__['test'],
show_changes=show_changes)
if changes:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes)
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
ret['changes'] = {'diff': changes}
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
def blockreplace(
name,
marker_start='#-- start managed zone --',
marker_end='#-- end managed zone --',
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None,
content='',
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
show_changes=True):
'''
Maintain an edit in a file in a zone delimited by two line markers
.. versionadded:: 2014.1.0
A block of content delimited by comments can help you manage several
entries without worrying about removing old ones. This can help you
maintain an otherwise unmanaged file that also contains manual edits.
Note: this function will store two copies of the file in-memory
(the original version and the edited version) in order to detect changes
and only edit the targeted file if necessary.
name
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying a line as the end of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output. Note: you can use file.accumulated and target this state.
All accumulated data dictionaries' content will be added as new lines in
the content
content
The content to be used between the two lines identified by
``marker_start`` and ``marker_end``
source
The source file to download to the minion, this source file can be
hosted on either the salt master server, or on an HTTP or FTP server.
Both HTTPS and HTTP are supported as well as downloading directly
from Amazon S3 compatible URLs with both pre-configured and automatic
IAM credentials. (see s3.get state documentation)
File retrieval from Openstack Swift object storage is supported via
swift://container/object_path URLs, see swift.get documentation.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs. If source is left blank or None
(use ~ in YAML), the file will be created as an empty file and
the content will not be managed
If the file is hosted on an HTTP or FTP server then the source_hash
argument is also required
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
will be used and subsequent entries in the list will be ignored.
.. code-block:: yaml
file_override_example:
file.managed:
- source:
- salt://file_that_does_not_exist
- salt://file_that_exists
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
**Using a Source Hash File**
The file can contain several checksums for several files. Each line
must contain both the file name and the hash. If no file name is
matched, the first hash encountered will be used, otherwise the most
secure hash with the correct source file name will be used.
When using a source hash file, the source_hash argument needs to be a
URL; the standard download URLs (ftp, http, salt, etc.) are supported:
Example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.hash
The following is an example of the supported source_hash format:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
ead48423703509d37c4a90e6a0d53e143b6fc268
Debian file type ``*.dsc`` files are also supported.
**Inserting the Source Hash in the sls Data**
Examples:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: md5=79eef25f9b0b2c642c62b7f737d4f53f
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
context
Overrides default context variables passed to the template.
defaults
Default context passed to the template.
append_if_not_found
If markers are not found and set to True then the markers and content
will be appended to the file. Default is ``False``
prepend_if_not_found
If markers are not found and set to True then the markers and content
will be prepended to the file. Default is ``False``
backup
The file extension to use for a backup of the file if any edit is made.
Set this to ``False`` to skip making a backup.
dry_run
Don't make any edits to the file
show_changes
Output a unified diff of the old file and the new file. If ``False``
return a boolean if any changes were made
Example of usage with an accumulator and with a variable:
.. code-block:: yaml
{% set myvar = 42 %}
hosts-config-block-{{ myvar }}:
file.blockreplace:
- name: /etc/hosts
- marker_start: "# START managed zone {{ myvar }} -DO-NOT-EDIT-"
- marker_end: "# END managed zone {{ myvar }} --"
- content: 'First line of content'
- append_if_not_found: True
- backup: '.bak'
- show_changes: True
hosts-config-block-{{ myvar }}-accumulated1:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: "text 2"
- require_in:
- file: hosts-config-block-{{ myvar }}
hosts-config-block-{{ myvar }}-accumulated2:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: |
text 3
text 4
- require_in:
- file: hosts-config-block-{{ myvar }}
will generate and maintain a block of content in ``/etc/hosts``:
.. code-block:: text
# START managed zone 42 -DO-NOT-EDIT-
First line of content
text 2
text 3
text 4
# END managed zone 42 --
'''
name = os.path.expanduser(name)
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.blockreplace')
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
accum_data, accum_deps = _load_accumulators()
if name in accum_data:
accumulator = accum_data[name]
# if we have multiple accumulators for a file, only apply the one
# required at a time
deps = accum_deps.get(name, [])
filtered = [a for a in deps if
__low__['__id__'] in deps[a] and a in accumulator]
if not filtered:
filtered = [a for a in accumulator]
for acc in filtered:
acc_content = accumulator[acc]
for line in acc_content:
if content == '':
content = line
else:
content += "\n" + line
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
for index, item in enumerate(text):
content += str(item)
changes = __salt__['file.blockreplace'](
name,
marker_start,
marker_end,
content=content,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
backup=backup,
dry_run=__opts__['test'],
show_changes=show_changes
)
if changes:
ret['changes'] = {'diff': changes}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would be made'
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
def comment(name, regex, char='#', backup='.bak'):
'''
Comment out specified lines in a file.
name
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be commented;
this pattern will be wrapped in parenthesis and will move any
preceding/trailing ``^`` or ``$`` characters outside the parenthesis
(e.g., the pattern ``^foo$`` will be rewritten as ``^(foo)$``)
Note that you _need_ the leading ^, otherwise each time you run
highstate, another comment char will be inserted.
char : ``#``
The character to be inserted at the beginning of a line in order to
comment it out
backup : ``.bak``
The file will be backed up before edit with this file extension
.. warning::
This backup will be overwritten each time ``sed`` / ``comment`` /
``uncomment`` is called. Meaning the backup will only be useful
after the first invocation.
Usage:
.. code-block:: yaml
/etc/fstab:
file.comment:
- regex: ^bind 127.0.0.1
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.comment')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
unanchor_regex = regex.lstrip('^').rstrip('$')
# Make sure the pattern appears in the file before continuing
if not __salt__['file.search'](name, regex, multiline=True):
if __salt__['file.search'](name, unanchor_regex, multiline=True):
ret['comment'] = 'Pattern already commented'
ret['result'] = True
return ret
else:
return _error(ret, '{0}: Pattern not found'.format(unanchor_regex))
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
return ret
with salt.utils.fopen(name, 'rb') as fp_:
slines = fp_.readlines()
# Perform the edit
__salt__['file.comment_line'](name, regex, char, True, backup)
with salt.utils.fopen(name, 'rb') as fp_:
nlines = fp_.readlines()
# Check the result
ret['result'] = __salt__['file.search'](name, unanchor_regex, multiline=True)
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if ret['result']:
ret['comment'] = 'Commented lines successfully'
else:
ret['comment'] = 'Expected commented lines not found'
return ret
def uncomment(name, regex, char='#', backup='.bak'):
'''
Uncomment specified commented lines in a file
name
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()). The regex will be searched for
from the beginning of the line, ignoring leading spaces (we prepend
'^[ \\t]*')
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
.. warning::
This backup will be overwritten each time ``sed`` / ``comment`` /
``uncomment`` is called. Meaning the backup will only be useful
after the first invocation.
Usage:
.. code-block:: yaml
/etc/adduser.conf:
file.uncomment:
- regex: EXTRA_GROUPS
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.uncomment')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
# Make sure the pattern appears in the file
if __salt__['file.search'](
name,
'^[ \t]*{0}'.format(regex.lstrip('^')),
multiline=True):
ret['comment'] = 'Pattern already uncommented'
ret['result'] = True
return ret
elif __salt__['file.search'](
name,
'{0}[ \t]*{1}'.format(char, regex.lstrip('^')),
multiline=True):
# Line exists and is commented
pass
else:
return _error(ret, '{0}: Pattern not found'.format(regex))
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
return ret
with salt.utils.fopen(name, 'rb') as fp_:
slines = fp_.readlines()
# Perform the edit
__salt__['file.comment_line'](name, regex, char, False, backup)
with salt.utils.fopen(name, 'rb') as fp_:
nlines = fp_.readlines()
# Check the result
ret['result'] = __salt__['file.search'](
name,
'^[ \t]*{0}'.format(regex.lstrip('^')),
multiline=True
)
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if ret['result']:
ret['comment'] = 'Uncommented lines successfully'
else:
ret['comment'] = 'Expected uncommented lines not found'
return ret
def append(name,
text=None,
makedirs=False,
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None):
'''
Ensure that some text appears at the end of a file.
The text will not be appended if it already exists in the file.
A single string of text or a list of strings may be appended.
name
The location of the file to append to.
text
The text to be appended, which can be a single string or a list
of strings.
makedirs
If the file is located in a path without a parent directory,
then the state will fail. If makedirs is set to True, then
the parent directories will be created to facilitate the
creation of the named file. Defaults to False.
source
A single source file to append. This source file can be hosted on either
the salt master server, or on an HTTP or FTP server. Both HTTPS and
HTTP are supported as well as downloading directly from Amazon S3
compatible URLs with both pre-configured and automatic IAM credentials
(see s3.get state documentation). File retrieval from Openstack Swift
object storage is supported via swift://container/object_path URLs
(see swift.get documentation).
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs.
If the file is hosted on an HTTP or FTP server, the source_hash argument
is also required.
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
The file can contain several checksums for several files. Each line
must contain both the file name and the hash. If no file name is
matched, the first hash encountered will be used, otherwise the most
secure hash with the correct source file name will be used.
Debian file type ``*.dsc`` is supported.
Examples:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
ead48423703509d37c4a90e6a0d53e143b6fc268
Known issues:
If the remote server URL has the hash file as an apparent
sub-directory of the source file, the module will discover that it
has already cached a directory where a file should be cached. For
example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz/+md5
template : ``jinja``
The named templating engine will be used to render the appended-to
file. Defaults to jinja.
sources
A list of source files to append. If the files are hosted on an HTTP or
FTP server, the source_hashes argument is also required.
source_hashes
A list of source_hashes corresponding to the sources list specified in
the sources argument.
defaults
Default context passed to the template.
context
Overrides default context variables passed to the template.
Multi-line example:
.. code-block:: yaml
/etc/motd:
file.append:
- text: |
Thou hadst better eat salt with the Philosophers of Greece,
than sugar with the Courtiers of Italy.
- Benjamin Franklin
Multiple lines of text:
.. code-block:: yaml
/etc/motd:
file.append:
- text:
- Trust no one unless you have eaten much salt with him.
- "Salt is born of the purest of parents: the sun and the sea."
Gather text from multiple template files:
.. code-block:: yaml
/etc/motd:
file:
- append
- template: jinja
- sources:
- salt://motd/devops-messages.tmpl
- salt://motd/hr-messages.tmpl
- salt://motd/general-messages.tmpl
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.append')
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
# Add sources and source_hashes with template support
# NOTE: FIX 'text' and any 'source' are mutually exclusive as 'text'
# is re-assigned in the original code.
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
if makedirs is True:
dirname = os.path.dirname(name)
if not __salt__['file.directory_exists'](dirname):
__salt__['file.makedirs'](name)
check_res, check_msg = _check_directory(
dirname, None, None, False, None, False, False, None
)
if not check_res:
return _error(ret, check_msg)
# Make sure that we have a file
__salt__['file.touch'](name)
check_res, check_msg = _check_file(name)
if not check_res:
touch(name, makedirs=makedirs)
retry_res, retry_msg = _check_file(name)
if not retry_res:
return _error(ret, check_msg)
# Follow the original logic and re-assign 'text' if using source(s)...
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
text = _validate_str_list(text)
with salt.utils.fopen(name, 'rb') as fp_:
slines = fp_.readlines()
count = 0
test_lines = []
try:
for chunk in text:
if __salt__['file.search'](
name,
salt.utils.build_whitespace_split_regex(chunk),
multiline=True):
continue
lines = chunk.splitlines()
for line in lines:
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
test_lines.append('{0}\n'.format(line))
else:
__salt__['file.append'](name, line)
count += 1
except TypeError:
return _error(ret, 'No text found to append. Nothing appended')
if __opts__['test']:
nlines = slines + test_lines
ret['result'] = None
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
with salt.utils.fopen(name, 'rb') as fp_:
nlines = fp_.readlines()
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if count:
ret['comment'] = 'Appended {0} lines'.format(count)
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
def prepend(name,
text=None,
makedirs=False,
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None):
'''
Ensure that some text appears at the beginning of a file
The text will not be prepended again if it already exists in the file. You
may specify a single line of text or a list of lines to prepend.
Multi-line example:
.. code-block:: yaml
/etc/motd:
file.prepend:
- text: |
Thou hadst better eat salt with the Philosophers of Greece,
than sugar with the Courtiers of Italy.
- Benjamin Franklin
Multiple lines of text:
.. code-block:: yaml
/etc/motd:
file.prepend:
- text:
- Trust no one unless you have eaten much salt with him.
- "Salt is born of the purest of parents: the sun and the sea."
Gather text from multiple template files:
.. code-block:: yaml
/etc/motd:
file:
- prepend
- template: jinja
- sources:
- salt://motd/devops-messages.tmpl
- salt://motd/hr-messages.tmpl
- salt://motd/general-messages.tmpl
.. versionadded:: 2014.7.0
'''
name = os.path.expanduser(name)
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.prepend')
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
# Add sources and source_hashes with template support
# NOTE: FIX 'text' and any 'source' are mutually exclusive as 'text'
# is re-assigned in the original code.
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
if makedirs is True:
dirname = os.path.dirname(name)
if not __salt__['file.directory_exists'](dirname):
__salt__['file.makedirs'](name)
check_res, check_msg = _check_directory(
dirname, None, None, False, None, False, False, None
)
if not check_res:
return _error(ret, check_msg)
# Make sure that we have a file
__salt__['file.touch'](name)
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
# Follow the original logic and re-assign 'text' if using source(s)...
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
text = _validate_str_list(text)
with salt.utils.fopen(name, 'rb') as fp_:
slines = fp_.readlines()
count = 0
test_lines = []
preface = []
for chunk in text:
if __salt__['file.search'](
name,
salt.utils.build_whitespace_split_regex(chunk),
multiline=True):
continue
lines = chunk.splitlines()
for line in lines:
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
test_lines.append('{0}\n'.format(line))
else:
preface.append(line)
count += 1
if __opts__['test']:
nlines = test_lines + slines
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
ret['result'] = None
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
__salt__['file.prepend'](name, *preface)
with salt.utils.fopen(name, 'rb') as fp_:
nlines = fp_.readlines()
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if count:
ret['comment'] = 'Prepended {0} lines'.format(count)
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
def patch(name,
source=None,
hash=None,
options='',
dry_run_first=True,
env=None,
**kwargs):
'''
Apply a patch to a file.
.. note::
A suitable ``patch`` executable must be available on the minion when
using this state function.
name
The file to which the patch will be applied.
source
The source patch to download to the minion, this source file must be
hosted on the salt master server. If the file is located in the
directory named spam, and is called eggs, the source string is
salt://spam/eggs. A source is required.
hash
Hash of the patched file. If the hash of the target file matches this
value then the patch is assumed to have been applied. The hash string
is the hash algorithm followed by the hash of the file:
md5=e138491e9d5b97023cea823fe17bac22
options
Extra options to pass to patch.
dry_run_first : ``True``
Run patch with ``--dry-run`` first to check if it will apply cleanly.
env
Specify the environment from which to retrieve the patch file indicated
by the ``source`` parameter. If not provided, this defaults to the
environment from which the state is being executed.
Usage:
.. code-block:: yaml
# Equivalent to ``patch --forward /opt/file.txt file.patch``
/opt/file.txt:
file.patch:
- source: salt://file.patch
- hash: md5=e138491e9d5b97023cea823fe17bac22
'''
name = os.path.expanduser(name)
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.patch')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
if not source:
return _error(ret, 'Source is required')
if hash is None:
return _error(ret, 'Hash is required')
if __salt__['file.check_hash'](name, hash):
ret.update(result=True, comment='Patch is already applied')
return ret
if isinstance(env, six.string_types):
msg = (
'Passing a salt environment should be done using \'saltenv\' not '
'\'env\'. This warning will go away in Salt Boron and this '
'will be the default and expected behavior. Please update your '
'state files.'
)
salt.utils.warn_until('Boron', msg)
ret.setdefault('warnings', []).append(msg)
# No need to set __env__ = env since that's done in the state machinery
# get cached file or copy it to cache
cached_source_path = __salt__['cp.cache_file'](source, __env__)
if not cached_source_path:
ret['comment'] = ('Unable to cache {0} from saltenv {1!r}'
.format(source, __env__))
return ret
log.debug(
'State patch.applied cached source {0} -> {1}'.format(
source, cached_source_path
)
)
if dry_run_first or __opts__['test']:
ret['changes'] = __salt__['file.patch'](
name, cached_source_path, options=options, dry_run=True
)
if __opts__['test']:
ret['comment'] = 'File {0} will be patched'.format(name)
ret['result'] = None
return ret
if ret['changes']['retcode']:
return ret
ret['changes'] = __salt__['file.patch'](
name, cached_source_path, options=options
)
ret['result'] = not ret['changes']['retcode']
if ret['result'] and not __salt__['file.check_hash'](name, hash):
ret.update(
result=False,
comment='File {0} hash mismatch after patch was applied'.format(
name
)
)
return ret
def touch(name, atime=None, mtime=None, makedirs=False):
'''
Replicate the 'nix "touch" command to create a new empty
file or update the atime and mtime of an existing file.
Note that if you just want to create a file and don't care about atime or
mtime, you should use ``file.managed`` instead, as it is more
feature-complete. (Just leave out the ``source``/``template``/``contents``
arguments, and it will just create the file and/or check its permissions,
without messing with contents)
name
name of the file
atime
atime of the file
mtime
mtime of the file
makedirs
whether we should create the parent directory/directories in order to
touch the file
Usage:
.. code-block:: yaml
/var/log/httpd/logrotate.empty:
file.touch
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {
'name': name,
'changes': {},
}
if not name:
return _error(ret, 'Must provide name to file.touch')
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name)
)
if __opts__['test']:
ret['result'], ret['comment'] = _check_touch(name, atime, mtime)
return ret
if makedirs:
__salt__['file.makedirs'](name)
if not os.path.isdir(os.path.dirname(name)):
return _error(
ret, 'Directory not present to touch file {0}'.format(name)
)
extant = os.path.exists(name)
ret['result'] = __salt__['file.touch'](name, atime, mtime)
if not extant and ret['result']:
ret['comment'] = 'Created empty file {0}'.format(name)
ret['changes']['new'] = name
elif extant and ret['result']:
ret['comment'] = 'Updated times on {0} {1}'.format(
'directory' if os.path.isdir(name) else 'file', name
)
ret['changes']['touched'] = name
return ret
def copy(
name,
source,
force=False,
makedirs=False,
preserve=False,
user=None,
group=None,
mode=None,
subdir=False,
**kwargs):
'''
If the source file exists on the system, copy it to the named file. The
named file will not be overwritten if it already exists unless the force
option is set to True.
name
The location of the file to copy to
source
The location of the file to copy to the location specified with name
force
If the target location is present then the file will not be copied over;
specify "force: True" to overwrite the target file
makedirs
If the target subdirectories don't exist create them
preserve
.. versionadded:: 2015.5.0
Set ``preserve: True`` to preserve user/group ownership and mode
after copying. Default is ``False``. If ``preserve`` is set to ``True``,
then user/group/mode attributes will be ignored.
user
.. versionadded:: 2015.5.0
The user to own the copied file, this defaults to the user salt is
running as on the minion. If ``preserve`` is set to ``True``, then
this will be ignored
group
.. versionadded:: 2015.5.0
The group to own the copied file, this defaults to the group salt is
running as on the minion. If ``preserve`` is set to ``True`` or on
Windows this will be ignored
mode
.. versionadded:: 2015.5.0
The permissions to set on the copied file, e.g. 644, '0775', '4664'.
If ``preserve`` is set to ``True``, then this will be ignored.
Not supported on Windows
subdir
.. versionadded:: 2015.5.0
If the name is a directory then place the file inside the named
directory
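Usage (an illustrative example; the paths below are hypothetical):
.. code-block:: yaml
/etc/profile.d/alias.sh:
file.copy:
- source: /srv/files/alias.sh
- force: True
- makedirs: True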
'''
name = os.path.expanduser(name)
source = os.path.expanduser(source)
ret = {
'name': name,
'changes': {},
'comment': 'Copied "{0}" to "{1}"'.format(source, name),
'result': True}
if not name:
return _error(ret, 'Must provide name to file.copy')
changed = True
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if not os.path.exists(source):
return _error(ret, 'Source file "{0}" is not present'.format(source))
if preserve:
user = __salt__['file.get_user'](source)
group = __salt__['file.get_group'](source)
mode = __salt__['file.get_mode'](source)
else:
user = _test_owner(kwargs, user=user)
if user is None:
user = __opts__['user']
if salt.utils.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this is '
'a Windows system.'.format(name)
)
group = user
if group is None:
group = __salt__['file.gid_to_group'](
__salt__['user.info'](user).get('gid', 0)
)
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if mode is None:
mode = __salt__['file.get_mode'](source)
if os.path.isdir(name) and subdir:
# If the target is a directory and subdir is True, copy into that directory
name = os.path.join(name, os.path.basename(source))
if os.path.lexists(source) and os.path.lexists(name):
# if this is a file which did not change, do not update
if force and os.path.isfile(name):
hash1 = salt.utils.get_hash(name)
hash2 = salt.utils.get_hash(source)
if hash1 == hash2:
changed = False
if not force:
changed = False
elif not __opts__['test'] and changed:
# Remove the destination to prevent problems later
try:
if os.path.islink(name):
os.unlink(name)
elif os.path.isfile(name):
os.remove(name)
else:
shutil.rmtree(name)
except (IOError, OSError):
return _error(
ret,
'Failed to delete "{0}" in preparation for '
'forced move'.format(name)
)
if __opts__['test']:
if changed:
ret['comment'] = 'File "{0}" is set to be copied to "{1}"'.format(
source,
name
)
ret['result'] = None
else:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = True
return ret
if not changed:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = True
return ret
# Run makedirs
dname = os.path.dirname(name)
if not os.path.isdir(dname):
if makedirs:
__salt__['file.makedirs'](name)
else:
return _error(
ret,
'The target directory {0} is not present'.format(dname))
# All tests pass, move the file into place
try:
shutil.copy(source, name)
ret['changes'] = {name: source}
# Preserve really means just keep the behavior of the cp command. If
# the filesystem we're copying to is squashed or doesn't support chown
# then we shouldn't be checking anything.
if not preserve:
__salt__['file.check_perms'](name, ret, user, group, mode)
except (IOError, OSError):
return _error(
ret, 'Failed to copy "{0}" to "{1}"'.format(source, name))
return ret
def rename(name, source, force=False, makedirs=False):
'''
If the source file exists on the system, rename it to the named file. The
named file will not be overwritten if it already exists unless the force
option is set to True.
name
The location of the file to rename to
source
The location of the file to move to the location specified with name
force
If the target location is present then the file will not be moved,
specify "force: True" to overwrite the target file
makedirs
If the target subdirectories don't exist create them
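Usage (an illustrative example; the paths below are hypothetical):
.. code-block:: yaml
/srv/archive/report.txt:
file.rename:
- source: /tmp/report.txt
- makedirs: True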
'''
name = os.path.expanduser(name)
source = os.path.expanduser(source)
ret = {
'name': name,
'changes': {},
'comment': '',
'result': True}
if not name:
return _error(ret, 'Must provide name to file.rename')
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if not os.path.lexists(source):
ret['comment'] = ('Source file "{0}" has already been moved out of '
'place').format(source)
return ret
if os.path.lexists(source) and os.path.lexists(name):
if not force:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = False
return ret
elif not __opts__['test']:
# Remove the destination to prevent problems later
try:
if os.path.islink(name):
os.unlink(name)
elif os.path.isfile(name):
os.remove(name)
else:
shutil.rmtree(name)
except (IOError, OSError):
return _error(
ret,
'Failed to delete "{0}" in preparation for '
'forced move'.format(name)
)
if __opts__['test']:
ret['comment'] = 'File "{0}" is set to be moved to "{1}"'.format(
source,
name
)
ret['result'] = None
return ret
# Run makedirs
dname = os.path.dirname(name)
if not os.path.isdir(dname):
if makedirs:
__salt__['file.makedirs'](name)
else:
return _error(
ret,
'The target directory {0} is not present'.format(dname))
# All tests pass, move the file into place
try:
if os.path.islink(source):
linkto = os.readlink(source)
os.symlink(linkto, name)
os.unlink(source)
else:
shutil.move(source, name)
except (IOError, OSError):
return _error(
ret, 'Failed to move "{0}" to "{1}"'.format(source, name))
ret['comment'] = 'Moved "{0}" to "{1}"'.format(source, name)
ret['changes'] = {name: source}
return ret
def accumulated(name, filename, text, **kwargs):
'''
Prepare accumulator which can be used in template in file.managed state.
Accumulator dictionary becomes available in template. It can also be used
in file.blockreplace.
name
Accumulator name
filename
Filename which would receive this accumulator (see file.managed state
documentation about ``name``)
text
String or list for adding in accumulator
require_in / watch_in
At least one of these is required to make sure the accumulator is filled
before the file is managed. It usually references the same file as filename
Example:
Given the following:
.. code-block:: yaml
animals_doing_things:
file.accumulated:
- filename: /tmp/animal_file.txt
- text: ' jumps over the lazy dog.'
- require_in:
- file: animal_file
animal_file:
file.managed:
- name: /tmp/animal_file.txt
- source: salt://animal_file.txt
- template: jinja
One might write a template for ``animal_file.txt`` like the following:
.. code-block:: jinja
The quick brown fox{% for animal in accumulator['animals_doing_things'] %}{{ animal }}{% endfor %}
Collectively, the above states and template file will produce:
.. code-block:: text
The quick brown fox jumps over the lazy dog.
Multiple accumulators can be "chained" together.
.. note::
The 'accumulator' data structure is a Python dictionary.
Do not expect any loop over the keys in a deterministic order!
'''
ret = {
'name': name,
'changes': {},
'result': True,
'comment': ''
}
if not name:
return _error(ret, 'Must provide name to file.accumulated')
if text is None:
ret['result'] = False
ret['comment'] = 'No text supplied for accumulator'
return ret
require_in = __low__.get('require_in', [])
watch_in = __low__.get('watch_in', [])
deps = require_in + watch_in
if not [x for x in deps if 'file' in x]:
ret['result'] = False
ret['comment'] = 'Orphaned accumulator {0} in {1}:{2}'.format(
name,
__low__['__sls__'],
__low__['__id__']
)
return ret
if isinstance(text, six.string_types):
text = (text,)
elif isinstance(text, dict):
text = (text,)
accum_data, accum_deps = _load_accumulators()
if filename not in accum_data:
accum_data[filename] = {}
if filename not in accum_deps:
accum_deps[filename] = {}
if name not in accum_deps[filename]:
accum_deps[filename][name] = []
for accumulator in deps:
accum_deps[filename][name].extend(six.itervalues(accumulator))
if name not in accum_data[filename]:
accum_data[filename][name] = []
for chunk in text:
if chunk not in accum_data[filename][name]:
accum_data[filename][name].append(chunk)
ret['comment'] = ('Accumulator {0} for file {1} '
'was charged by text'.format(name, filename))
_persist_accummulators(accum_data, accum_deps)
return ret
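# _merge_dict recursively merges the value ``v`` into ``obj[k]`` in place and
# returns a dict of the applied changes keyed by dotted paths (e.g. ``k.sub``):
# list values gain any missing items, nested dicts are merged key by key, and
# scalar values are overwritten when they differ.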
def _merge_dict(obj, k, v):
changes = {}
if k in obj:
if isinstance(obj[k], list):
if isinstance(v, list):
for a in v:
if a not in obj[k]:
changes[k] = a
obj[k].append(a)
else:
if obj[k] != v:
changes[k] = v
obj[k] = v
elif isinstance(obj[k], dict):
if isinstance(v, dict):
for a, b in six.iteritems(v):
if isinstance(b, dict) or isinstance(b, list):
updates = _merge_dict(obj[k], a, b)
for x, y in six.iteritems(updates):
changes[k + "." + x] = y
else:
if obj[k][a] != b:
changes[k + "." + a] = b
obj[k][a] = b
else:
if obj[k] != v:
changes[k] = v
obj[k] = v
else:
if obj[k] != v:
changes[k] = v
obj[k] = v
else:
changes[k] = v
obj[k] = v
return changes
def serialize(name,
dataset=None,
dataset_pillar=None,
user=None,
group=None,
mode=None,
env=None,
backup='',
makedirs=False,
show_diff=True,
create=True,
merge_if_exists=False,
**kwargs):
'''
Serializes a dataset and stores it in a managed file. Useful for sharing
simple configuration files.
name
The location of the file to create
dataset
The dataset that will be serialized
dataset_pillar
Operates like ``dataset``, but draws from a value stored in pillar,
using the pillar path syntax used in :mod:`pillar.get
<salt.modules.pillar.get>`. This is useful when the pillar value
contains newlines, as referencing a pillar variable using a jinja/mako
template can result in YAML formatting issues due to the newlines
causing indentation mismatches.
.. versionadded:: FIXME
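For example (the pillar key ``dummy:package_json`` below is hypothetical):
.. code-block:: yaml
/etc/dummy/package.json:
file.serialize:
- dataset_pillar: dummy:package_json
- formatter: json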
formatter
Write the data as this format. Supported output formats:
* JSON
* YAML
* Python (via pprint.pformat)
user
The user to own the directory, this defaults to the user salt is
running as on the minion
group
The group ownership set for the directory, this defaults to the group
salt is running as on the minion
mode
The permissions to set on this file, e.g. 644, 0775, 4664
backup
Overrides the default backup mode for this specific file.
makedirs
Create parent directories for destination file.
.. versionadded:: 2014.1.3
show_diff
If set to False, the diff will not be shown.
create
Default is True, if create is set to False then the file will only be
managed if the file already exists on the system.
merge_if_exists
Default is False, if merge_if_exists is True then the existing file will
be parsed and the dataset passed in will be merged with the existing
content
.. versionadded:: 2014.7.0
For example, this state:
.. code-block:: yaml
/etc/dummy/package.json:
file.serialize:
- dataset:
name: naive
description: A package using naive versioning
author: A confused individual <iam@confused.com>
dependencies:
express: >= 1.2.0
optimist: >= 0.1.0
engine: node 0.4.1
- formatter: json
will manage the file ``/etc/dummy/package.json``:
.. code-block:: json
{
"author": "A confused individual <iam@confused.com>",
"dependencies": {
"express": ">= 1.2.0",
"optimist": ">= 0.1.0"
},
"description": "A package using naive versioning",
"engine": "node 0.4.1",
"name": "naive"
}
'''
name = os.path.expanduser(name)
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if not name:
return _error(ret, 'Must provide name to file.serialize')
if isinstance(env, six.string_types):
msg = (
'Passing a salt environment should be done using \'saltenv\' not '
'\'env\'. This warning will go away in Salt Boron and this '
'will be the default and expected behavior. Please update your '
'state files.'
)
salt.utils.warn_until('Boron', msg)
ret.setdefault('warnings', []).append(msg)
# No need to set __env__ = env since that's done in the state machinery
if not create:
if not os.path.isfile(name):
# Don't create a file that is not already present
ret['comment'] = ('File {0} is not present and is not set for '
'creation').format(name)
return ret
formatter = kwargs.pop('formatter', 'yaml').lower()
if len([_f for _f in [dataset, dataset_pillar] if _f]) > 1:
return _error(
ret, 'Only one of \'dataset\' and \'dataset_pillar\' is permitted')
if dataset_pillar:
dataset = __salt__['pillar.get'](dataset_pillar)
if dataset is None:
return _error(
ret, 'Neither \'dataset\' nor \'dataset_pillar\' was defined')
if merge_if_exists:
if os.path.isfile(name):
if formatter == 'yaml':
with salt.utils.fopen(name, 'r') as fhr:
existing_data = yaml.safe_load(fhr)
elif formatter == 'json':
with salt.utils.fopen(name, 'r') as fhr:
existing_data = json.load(fhr)
else:
return {'changes': {},
'comment': ('{0} format is not supported for merging'
.format(formatter.capitalize())),
'name': name,
'result': False}
if existing_data is not None:
for k, v in six.iteritems(dataset):
if k in existing_data:
ret['changes'].update(_merge_dict(existing_data, k, v))
else:
ret['changes'][k] = v
existing_data[k] = v
dataset = existing_data
if formatter == 'yaml':
contents = yaml_serializer.serialize(dataset,
default_flow_style=False)
elif formatter == 'json':
contents = json_serializer.serialize(dataset,
indent=2,
separators=(',', ': '),
sort_keys=True)
elif formatter == 'python':
# round-trip this through JSON to avoid OrderedDict types
# there's probably a more performant way to do this...
# TODO remove the json round-trip once all datasets use
# serializers
contents = pprint.pformat(
json.loads(
json.dumps(dataset),
object_hook=salt.utils.decode_dict
)
)
else:
return {'changes': {},
'comment': '{0} format is not supported'.format(
formatter.capitalize()),
'name': name,
'result': False
}
if __opts__['test']:
ret['changes'] = __salt__['file.check_managed_changes'](
name=name,
source=None,
source_hash={},
user=user,
group=group,
mode=mode,
template=None,
context=None,
defaults=None,
saltenv=__env__,
contents=contents,
**kwargs
)
if ret['changes']:
ret['result'] = None
ret['comment'] = 'Dataset will be serialized and stored into {0}'.format(
name)
else:
ret['result'] = True
ret['comment'] = 'The file {0} is in the correct state'.format(name)
return ret
return __salt__['file.manage_file'](name=name,
sfn='',
ret=ret,
source=None,
source_sum={},
user=user,
group=group,
mode=mode,
saltenv=__env__,
backup=backup,
makedirs=makedirs,
template=None,
show_diff=show_diff,
contents=contents)
def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'):
'''
Create a special file similar to the 'nix mknod command. The supported
device types are ``p`` (fifo pipe), ``c`` (character device), and ``b``
(block device). Provide the major and minor numbers when specifying a
character device or block device. A fifo pipe does not require this
information. The command will create the necessary dirs if needed. If a
file of the same name exists but is not of the same type/major/minor, it
will not be overwritten or unlinked (deleted). This is in place as a safety
measure because you can really shoot yourself in the foot here, and it
matches the behavior of 'nix ``mknod``. It is also important to note that
not just anyone can create special devices; usually this is only done as
root. If the state is executed as a non-root user on a minion, you may
receive a permission error.
name
name of the file
ntype
node type 'p' (fifo pipe), 'c' (character device), or 'b'
(block device)
major
major number of the device
does not apply to a fifo pipe
minor
minor number of the device
does not apply to a fifo pipe
user
owning user of the device/pipe
group
owning group of the device/pipe
mode
permissions on the device/pipe
Usage:
.. code-block:: yaml
/dev/chr:
file.mknod:
- ntype: c
- major: 180
- minor: 31
- user: root
- group: root
- mode: 660
/dev/blk:
file.mknod:
- ntype: b
- major: 8
- minor: 999
- user: root
- group: root
- mode: 660
/dev/fifo:
file.mknod:
- ntype: p
- user: root
- group: root
- mode: 660
.. versionadded:: 0.17.0
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'comment': '',
'result': False}
if not name:
return _error(ret, 'Must provide name to file.mknod')
if ntype == 'c':
# Check for file existence
if __salt__['file.file_exists'](name):
ret['comment'] = (
'File exists and is not a character device {0}. Cowardly '
'refusing to continue'.format(name)
)
# Check if it is a character device
elif not __salt__['file.is_chrdev'](name):
if __opts__['test']:
ret['comment'] = (
'Character device {0} is set to be created'
).format(name)
ret['result'] = None
else:
ret = __salt__['file.mknod'](name,
ntype,
major,
minor,
user,
group,
mode)
# Check the major/minor
else:
devmaj, devmin = __salt__['file.get_devmm'](name)
if (major, minor) != (devmaj, devmin):
ret['comment'] = (
'Character device {0} exists and has a different '
'major/minor {1}/{2}. Cowardly refusing to continue'
.format(name, devmaj, devmin)
)
# Check the perms
else:
ret = __salt__['file.check_perms'](name,
None,
user,
group,
mode)[0]
if not ret['changes']:
ret['comment'] = (
'Character device {0} is in the correct state'.format(
name
)
)
elif ntype == 'b':
# Check for file existence
if __salt__['file.file_exists'](name):
ret['comment'] = (
'File exists and is not a block device {0}. Cowardly '
'refusing to continue'.format(name)
)
# Check if it is a block device
elif not __salt__['file.is_blkdev'](name):
if __opts__['test']:
ret['comment'] = (
'Block device {0} is set to be created'
).format(name)
ret['result'] = None
else:
ret = __salt__['file.mknod'](name,
ntype,
major,
minor,
user,
group,
mode)
# Check the major/minor
else:
devmaj, devmin = __salt__['file.get_devmm'](name)
if (major, minor) != (devmaj, devmin):
ret['comment'] = (
'Block device {0} exists and has a different major/minor '
'{1}/{2}. Cowardly refusing to continue'.format(
name, devmaj, devmin
)
)
# Check the perms
else:
ret = __salt__['file.check_perms'](name,
None,
user,
group,
mode)[0]
if not ret['changes']:
ret['comment'] = (
'Block device {0} is in the correct state'.format(name)
)
elif ntype == 'p':
# Check for file existence
if __salt__['file.file_exists'](name):
ret['comment'] = (
'File exists and is not a fifo pipe {0}. Cowardly refusing '
'to continue'.format(name)
)
# Check if it is a fifo
elif not __salt__['file.is_fifo'](name):
if __opts__['test']:
ret['comment'] = 'Fifo pipe {0} is set to be created'.format(
name
)
ret['result'] = None
else:
ret = __salt__['file.mknod'](name,
ntype,
major,
minor,
user,
group,
mode)
# Check the perms
else:
ret = __salt__['file.check_perms'](name,
None,
user,
group,
mode)[0]
if not ret['changes']:
ret['comment'] = (
'Fifo pipe {0} is in the correct state'.format(name)
)
else:
ret['comment'] = (
'Node type unavailable: {0!r}. Available node types are '
'character (\'c\'), block (\'b\'), and pipe (\'p\')'.format(ntype)
)
return ret
def mod_run_check_cmd(cmd, filename, **check_cmd_opts):
'''
Execute the check_cmd logic.
Return a result dict (with ``result: False``) if ``check_cmd`` fails
(non-zero return code), otherwise return True
'''
log.debug('running our check_cmd')
_cmd = '{0} {1}'.format(cmd, filename)
if __salt__['cmd.retcode'](_cmd, **check_cmd_opts) != 0:
return {'comment': 'check_cmd execution failed',
'skip_watch': True,
'result': False}
# No reason to stop, return True
return True
| 34.028148
| 117
| 0.546447
|
118f9ee70742aa18dd6993c5f0f11ed4f9624adf
| 1,878
|
py
|
Python
|
doujin-eromanga-com/utils/make_buynum_tag_url.py
|
GINK03/itmedia-scraping
|
5afbe06dd0aa12db1694a2b387aa2eeafb20e981
|
[
"MIT"
] | 16
|
2018-02-06T14:43:41.000Z
|
2021-01-23T05:07:33.000Z
|
doujin-eromanga-com/utils/make_buynum_tag_url.py
|
GINK03/itmedia-scraping
|
5afbe06dd0aa12db1694a2b387aa2eeafb20e981
|
[
"MIT"
] | null | null | null |
doujin-eromanga-com/utils/make_buynum_tag_url.py
|
GINK03/itmedia-scraping
|
5afbe06dd0aa12db1694a2b387aa2eeafb20e981
|
[
"MIT"
] | 4
|
2018-01-16T13:50:43.000Z
|
2019-12-16T19:45:54.000Z
|
import glob
import re
import json
import MeCab
import pandas as pd
import sys
tags = { tag for tag, hashs in json.load(open('tag_hashs.json')).items() }
m = MeCab.Tagger('-Owakati')
data = []
for fn in glob.glob('./data/*'):
#print(fn)
obj = json.load(open(fn))
#print(obj)
star = re.search(r'(\d|,){1,}', obj['star']).group(0).replace(',', '')
star = int(star)
text = obj['h1'] + obj['detail']
words = set(m.parse(text).strip().split())
words = { word for word in words if word in tags }
#print(star, words)
data.append( (star, words) )
# make dense
words_set = set()
for datum in data:
star, words = datum
for word in words:
words_set.add( word )
word_index = { word:index for index, word in enumerate(words_set) }
print(word_index)
if '--source' in sys.argv:
datasource = {'_stars_':[]}
for word in word_index.keys():
datasource[word] = []
for datum in data:
star, words = datum
datasource['_stars_'].append( star )
for word, index in word_index.items():
if word not in words:
datasource[word].append( 0 )
else:
datasource[word].append( 1 )
df = pd.DataFrame( datasource )
print( df.head() )
df.to_csv('source.csv', index=None)
if '--target' in sys.argv:
# test
datatarget = { '_hashval_':[] }
for word in word_index.keys():
datatarget[word] = []
for fn in sorted(glob.glob('../static_folder/*/*.json')):
print(fn)
hashval = fn.split('/')[2].replace('.json', '')
print(hashval)
datatarget['_hashval_'].append( hashval )
obj = json.load(open(fn))
_tags = set( obj['tags'] )
print(_tags)
for word, index in word_index.items():
if word not in _tags:
datatarget[word].append( 0 )
else:
datatarget[word].append( 1 )
dfT = pd.DataFrame( datatarget )
print( dfT.head() )
dfT.to_csv('target.csv', index=None)
| 26.828571
| 74
| 0.616613
|
8ff6eb20b1cffc94fc7c8c688ae611835c6de83e
| 6,037
|
py
|
Python
|
endorsement/dao/endorse.py
|
uw-it-aca/service-endorsement
|
a1ba3e4221bb3fe6c81c9f6947ad5e93f10a4a45
|
[
"Apache-2.0"
] | 3
|
2017-10-16T17:19:32.000Z
|
2019-07-31T22:31:48.000Z
|
endorsement/dao/endorse.py
|
uw-it-aca/service-endorsement
|
a1ba3e4221bb3fe6c81c9f6947ad5e93f10a4a45
|
[
"Apache-2.0"
] | 284
|
2016-06-17T18:21:31.000Z
|
2022-03-21T16:55:03.000Z
|
endorsement/dao/endorse.py
|
uw-it-aca/service-endorsement
|
a1ba3e4221bb3fe6c81c9f6947ad5e93f10a4a45
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.utils import timezone
from django.conf import settings
from endorsement.dao.uwnetid_categories import (
set_active_category, set_former_category)
from endorsement.dao.uwnetid_subscriptions import (
activate_subscriptions, active_subscriptions_for_netid)
from endorsement.models import EndorsementRecord
from endorsement.exceptions import NoEndorsementException
from restclients_core.exceptions import DataFailureException
import logging
logger = logging.getLogger(__name__)
def initiate_endorsement(endorser, endorsee, reason, category_code):
logger.info('initiate category {0} for {1} because {2} by {3}'.format(
category_code, endorsee.netid, reason, endorser.netid))
now = timezone.now()
try:
en = EndorsementRecord.objects.get(
endorser=endorser,
category_code=category_code,
endorsee=endorsee)
en.reason = reason
en.datetime_emailed = None
en.datetime_endorsed = None
en.datetime_renewed = now if en.is_deleted else None
en.datetime_expired = None
en.is_deleted = None
en.accept_id = None
en.accept_salt = None
en.save()
except EndorsementRecord.DoesNotExist:
en = EndorsementRecord.objects.create(
endorser=endorser,
category_code=category_code,
reason=reason,
endorsee=endorsee,
datetime_created=now,
datetime_emailed=None,
datetime_endorsed=None,
datetime_renewed=None,
datetime_expired=None,
is_deleted=None)
return en
def store_endorsement(endorser, endorsee, category_code,
subscription_codes, acted_as, reason):
"""Return with endorsee category active and subscribed
"""
logger.info('activate category {0} for {1}{2} because {3} by {4}'.format(
category_code, endorsee.netid,
" (by {0})".format(acted_as if acted_as else ""),
reason, endorser.netid))
set_active_category(endorsee.netid, category_code)
activate_subscriptions(
endorsee.netid, endorser.netid, subscription_codes)
return _store_endorsement(
endorser, endorsee, acted_as, reason, category_code)
def _store_endorsement(endorser, endorsee, acted_as, reason, category_code):
now = timezone.now()
try:
en = EndorsementRecord.objects.get(
endorser=endorser,
category_code=category_code,
endorsee=endorsee)
en.reason = reason
en.datetime_endorsed = now
en.acted_as = acted_as
en.datetime_emailed = None
en.datetime_notice_1_emailed = None
en.datetime_notice_2_emailed = None
en.datetime_notice_3_emailed = None
en.datetime_notice_4_emailed = None
en.datetime_renewed = now if en.is_deleted else None
en.datetime_expired = None
en.is_deleted = None
en.save()
except EndorsementRecord.DoesNotExist:
en = EndorsementRecord.objects.create(
endorser=endorser,
category_code=category_code,
endorsee=endorsee,
reason=reason,
datetime_endorsed=now,
acted_as=acted_as,
datetime_emailed=None,
datetime_notice_1_emailed=None,
datetime_notice_2_emailed=None,
datetime_notice_3_emailed=None,
datetime_notice_4_emailed=None,
datetime_renewed=None,
datetime_expired=None,
is_deleted=None)
return en
def clear_endorsement(endorsement):
if (endorsement.datetime_endorsed is not None and
EndorsementRecord.objects.get_endorsements_for_endorsee(
endorsement.endorsee, endorsement.category_code).count() <= 1):
set_former_category(
endorsement.endorsee.netid, endorsement.category_code)
logger.info('former category {0} for {1} by {2}'.format(
endorsement.category_code,
endorsement.endorsee.netid,
endorsement.endorser.netid))
logger.info('clearing record {0} for {1} by {2}'.format(
endorsement.category_code,
endorsement.endorsee.netid,
endorsement.endorser.netid))
endorsement.revoke()
return endorsement
def get_endorsement(endorser, endorsee, category_code):
try:
return EndorsementRecord.objects.get_endorsement(
endorser, endorsee, category_code)
except EndorsementRecord.DoesNotExist:
raise NoEndorsementException()
def get_endorsements_by_endorser(endorser):
return EndorsementRecord.objects.get_endorsements_for_endorser(endorser)
def get_all_endorsements_by_endorser(endorser):
return EndorsementRecord.objects.get_all_endorsements_for_endorser(
endorser)
def get_endorsements_for_endorsee(endorsee, category_code=None):
return EndorsementRecord.objects.get_endorsements_for_endorsee(
endorsee, category_code)
def get_endorsements_for_endorsee_re(endorsee_regex):
return EndorsementRecord.objects.get_endorsements_for_endorsee_re(
endorsee_regex)
def get_endorsement_records_for_endorsee_re(endorsee_regex):
return EndorsementRecord.objects.get_all_endorsements_for_endorsee_re(
endorsee_regex)
def is_permitted(endorser, endorsee, subscription_codes):
active = False
try:
active = active_subscriptions_for_netid(
endorsee.netid, subscription_codes)
except DataFailureException as ex:
if ex.status == 404:
active = False
# weirdness for testing with mock data
if getattr(settings, "RESTCLIENTS_DAO_CLASS", 'File') == 'File':
e = EndorsementRecord.objects.filter(endorsee=endorsee,
is_deleted__isnull=True)
active = len(e) > 0
else:
raise
return active
| 34.301136
| 77
| 0.680305
|
b561aa1c468ec292302d9d1ffdeb0c6545179155
| 2,916
|
py
|
Python
|
src/data_visualization.py
|
alu13/GCP_Exploration
|
872d38b133e246c0e28be985c3138db75b84496d
|
[
"MIT"
] | null | null | null |
src/data_visualization.py
|
alu13/GCP_Exploration
|
872d38b133e246c0e28be985c3138db75b84496d
|
[
"MIT"
] | null | null | null |
src/data_visualization.py
|
alu13/GCP_Exploration
|
872d38b133e246c0e28be985c3138db75b84496d
|
[
"MIT"
] | null | null | null |
from heapq import nlargest
import json
from matplotlib import pyplot as plt
import os
import statistics
from collections import Counter
import numpy as np
print(os.getcwd())
# This function converts a JSON, typically of model scores, to a bar chart
def json_to_bar(path, N):
with open(path) as json_file:
dic = json.load(json_file)
n_largest = nlargest(N, dic, key = dic.get)
keys = n_largest
values = [dic[n] for n in n_largest]
plt.bar(keys, values)
plt.show()
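# Example call (the path is hypothetical): plot the 10 highest-scoring entries
# json_to_bar("../data/jsons/model_scores.json", N=10)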
# This function converts a JSON of posts + associated subreddit into
# subreddit counts
def json_to_frequency(path):
with open(path) as json_file:
dic = json.load(json_file)
counts = Counter(dic.values())
tuples = [(key, val) for key, val in counts.items()]
sorted_tuples = sorted(tuples, key = lambda x:x[0], reverse = True)
x, y = zip(*sorted_tuples)
plt.yscale('log')
plt.xscale('log')
plt.plot(x, y)
plt.ylabel("# of subreddits")
plt.xlabel("subreddit size")
plt.show()
# This function prints the mean/median of a json file with keys + counts
# This was run on subreddit counts.
def json_stats(path):
with open(path) as json_file:
dic = json.load(json_file)
cleaned_dic = dic
# cleaned_dic = {key:val for key, val in dic.items() if val >= 10}
mean = sum(cleaned_dic.values()) / len(cleaned_dic)
print(mean)
median = statistics.median(cleaned_dic.values())
print(median)
# This function takes in a json of temps x scores
# It plots the avg SS/BLEU scores vs temp
def model_avg_graphs(path):
scores = None
with open(path, 'r') as f:
scores = json.load(f)
start_temp = 0.8
y = scores
x = (np.arange(len(y)) / 10) + start_temp
plt.plot(x, y)
plt.title("DialoGPT Temperature vs SS")
plt.ylabel("Average SS score")
plt.xlabel("Temperature")
plt.show()
# This function takes in a json of temps x scores
# It plots the distribution of SS/BLEU scores in a specific temperature
def model_distributions(path, outputs_path):
scores = None
volatile = []
temp = 0
with open(path, 'r') as f:
scores = json.load(f)
scores = scores[temp]
for i in range(len(scores)):
if scores[i] < 0.15:
print(i)
volatile.append(i)
print(len(volatile))
with open(outputs_path, 'r') as output_f:
outputs = json.load(output_f)
for i in volatile:
print(outputs[temp][i])
plt.hist(scores, bins = 50)
plt.title("Dialogpt SS distribution, Temp = 0.3")
plt.ylabel("# Samples")
plt.xlabel("SS score")
plt.show()
if __name__ == "__main__":
path = "../data/jsons/dialogpt_ss_pairwise_avgs.json"
outputs_path = "../data/jsons/dialogpt_pairwise_outputs.json"
model_distributions(path, outputs_path)
| 31.354839
| 75
| 0.640604
|
5da0df0e983e68993c5d59481715f7989823a5a5
| 1,446
|
py
|
Python
|
panda/examples/can_logger.py
|
Shorrts/raspberry-pilot
|
84bd2e7dc0c9142a420999ed527b8abbd14e0d34
|
[
"MIT"
] | 114
|
2020-02-24T14:18:01.000Z
|
2022-03-19T03:42:00.000Z
|
panda/examples/can_logger.py
|
Shorrts/raspberry-pilot
|
84bd2e7dc0c9142a420999ed527b8abbd14e0d34
|
[
"MIT"
] | 15
|
2020-02-25T03:37:44.000Z
|
2021-09-08T01:51:15.000Z
|
panda/examples/can_logger.py
|
Shorrts/raspberry-pilot
|
84bd2e7dc0c9142a420999ed527b8abbd14e0d34
|
[
"MIT"
] | 55
|
2020-02-24T09:43:04.000Z
|
2022-02-15T04:52:00.000Z
|
#!/usr/bin/env python3
import binascii
import csv
import sys
from panda import Panda
def can_logger():
try:
print("Trying to connect to Panda over USB...")
p = Panda()
except AssertionError:
print("USB connection failed. Trying WiFi...")
try:
p = Panda("WIFI")
except:
print("WiFi connection timed out. Please make sure your Panda is connected and try again.")
sys.exit(0)
try:
outputfile = open('output.csv', 'w', newline='')
csvwriter = csv.writer(outputfile)
#Write Header
csvwriter.writerow(['Bus', 'MessageID', 'Message', 'MessageLength'])
print("Writing csv file output.csv. Press Ctrl-C to exit...\n")
bus0_msg_cnt = 0
bus1_msg_cnt = 0
bus2_msg_cnt = 0
while True:
can_recv = p.can_recv()
for address, _, dat, src in can_recv:
csvwriter.writerow([str(src), str(hex(address)), "0x" + binascii.hexlify(dat).decode(), len(dat)])
if src == 0:
bus0_msg_cnt += 1
elif src == 1:
bus1_msg_cnt += 1
elif src == 2:
bus2_msg_cnt += 1
print("Message Counts... Bus 0: " + str(bus0_msg_cnt) + " Bus 1: " + str(bus1_msg_cnt) + " Bus 2: " + str(bus2_msg_cnt))
except KeyboardInterrupt:
print("\nNow exiting. Final message Counts... Bus 0: " + str(bus0_msg_cnt) + " Bus 1: " + str(bus1_msg_cnt) + " Bus 2: " + str(bus2_msg_cnt))
outputfile.close()
if __name__ == "__main__":
can_logger()
| 26.290909
| 145
| 0.612033
|
7b4d159445da6fe08af1094b9c28349dee1318ca
| 1,285
|
py
|
Python
|
release/stubs.min/System/Windows/Forms/__init___parts/WindowsFormsSection.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/WindowsFormsSection.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/WindowsFormsSection.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class WindowsFormsSection(ConfigurationSection):
"""
Defines a new System.Configuration.ConfigurationSection for parsing application settings. This class cannot be inherited.
WindowsFormsSection()
"""
ElementProperty = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the System.Configuration.ConfigurationElementProperty object that represents the System.Configuration.ConfigurationElement object itself.
"""
EvaluationContext = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the System.Configuration.ContextInformation object for the System.Configuration.ConfigurationElement object.
"""
HasContext = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
JitDebugging = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value indicating whether just-in-time (JIT) debugging is used.
Get: JitDebugging(self: WindowsFormsSection) -> bool
Set: JitDebugging(self: WindowsFormsSection)=value
"""
Properties = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
| 25.7
| 150
| 0.678599
|
714e8f9f594f0c1633b1be64c4b4bd52c00f0ab9
| 431
|
py
|
Python
|
deepmerge/strategy/fallback.py
|
toumorokoshi/deepmerge
|
ee73250516228013b5aa6be9f8b1d908099fcd56
|
[
"MIT"
] | 95
|
2017-02-04T21:14:03.000Z
|
2022-03-14T09:04:19.000Z
|
deepmerge/strategy/fallback.py
|
sthagen/deepmerge
|
4ac5ff666d06cb072ff200ff4255d86d950b71a4
|
[
"MIT"
] | 14
|
2018-03-20T05:38:38.000Z
|
2022-02-15T06:24:30.000Z
|
deepmerge/strategy/fallback.py
|
sthagen/deepmerge
|
4ac5ff666d06cb072ff200ff4255d86d950b71a4
|
[
"MIT"
] | 20
|
2017-02-03T08:29:59.000Z
|
2022-03-14T09:06:57.000Z
|
from .core import StrategyList
class FallbackStrategies(StrategyList):
"""
The StrategyList containing fallback strategies.
"""
NAME = "fallback"
@staticmethod
def strategy_override(config, path, base, nxt):
"""use nxt, and ignore base."""
return nxt
@staticmethod
def strategy_use_existing(config, path, base, nxt):
"""use base, and ignore next."""
return base
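# Illustrative usage sketch (not part of this module): these fallback
# strategies are normally selected through ``deepmerge.Merger``. Assuming
# deepmerge's documented Merger(type_strategies, fallback_strategies,
# type_conflict_strategies) constructor, an "override" fallback looks like:
#
#     from deepmerge import Merger
#     merger = Merger(
#         [(list, ["append"]), (dict, ["merge"])],  # per-type strategies
#         ["override"],  # fallback strategies provided by this class
#         ["override"],  # strategies when the two sides' types conflict
#     )
#     merger.merge({"a": 1, "b": [1]}, {"a": 2, "b": [2]})  # -> {"a": 2, "b": [1, 2]}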
| 21.55
| 55
| 0.640371
|
000888c62a546450a00b07c9c571e249ddedc0f2
| 1,031
|
py
|
Python
|
attalos/evaluation/densecap/scripts/pretty.py
|
millertime145/attalos
|
dbb7994928cfb5f3a338eb5faa4bd3c3fcda5e84
|
[
"Apache-2.0"
] | 101
|
2016-06-03T00:15:52.000Z
|
2021-11-12T11:57:46.000Z
|
attalos/evaluation/densecap/scripts/pretty.py
|
millertime145/attalos
|
dbb7994928cfb5f3a338eb5faa4bd3c3fcda5e84
|
[
"Apache-2.0"
] | 70
|
2016-05-27T21:05:33.000Z
|
2017-08-09T05:13:45.000Z
|
attalos/evaluation/densecap/scripts/pretty.py
|
millertime145/attalos
|
dbb7994928cfb5f3a338eb5faa4bd3c3fcda5e84
|
[
"Apache-2.0"
] | 46
|
2016-05-27T16:19:20.000Z
|
2019-06-24T12:37:22.000Z
|
def print_list_to_columns(words, items_per_row=5):
"""function to print a list of words in column format
Parameters
----------
words : list
list of words or terms to print
items_per_row : int
number of words in a row
"""
row = []
width = max(map(len, words)) + 2
for idx, word in enumerate(words):
row.append(word)
if (idx + 1) % items_per_row == 0:
print("".join(w.ljust(width) for w in row))
row = []
# print any words left over in a final, shorter row
if len(row) > 0:
print("".join(w.ljust(width) for w in row))
def sort_results(boxes):
    """Returns the boxes sorted by score (descending) given DenseCap
    results.json output
    Parameters
    ----------
    boxes : list
        output from load_output_json
    Returns
    -------
    sorted list of boxes
    """
    return sorted(boxes, key=lambda x: x['score'], reverse=True)
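# A minimal, hypothetical usage sketch of the two helpers above; `detections`
# and its 'caption'/'score' keys are illustrative stand-ins for entries from a
# DenseCap results.json, not actual output.
if __name__ == '__main__':
    detections = [
        {'caption': 'a brown dog', 'score': 0.91},
        {'caption': 'a red ball', 'score': 0.42},
        {'caption': 'green grass', 'score': 0.77},
    ]
    ranked = sort_results(detections)
    print_list_to_columns([d['caption'] for d in ranked], items_per_row=2)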
| 25.775
| 70
| 0.56741
|
2371488e67aed2ac2233713987949209739bae1a
| 1,506
|
py
|
Python
|
stellar_sdk/xdr/thresholds.py
|
Shaptic/py-stellar-base
|
f5fa47f4d96f215889d99249fb25c7be002f5cf3
|
[
"Apache-2.0"
] | null | null | null |
stellar_sdk/xdr/thresholds.py
|
Shaptic/py-stellar-base
|
f5fa47f4d96f215889d99249fb25c7be002f5cf3
|
[
"Apache-2.0"
] | 27
|
2022-01-12T10:55:38.000Z
|
2022-03-28T01:38:24.000Z
|
stellar_sdk/xdr/thresholds.py
|
Shaptic/py-stellar-base
|
f5fa47f4d96f215889d99249fb25c7be002f5cf3
|
[
"Apache-2.0"
] | 2
|
2021-12-02T12:42:03.000Z
|
2021-12-07T20:53:10.000Z
|
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .base import Opaque
__all__ = ["Thresholds"]
@type_checked
class Thresholds:
"""
XDR Source Code::
typedef opaque Thresholds[4];
"""
def __init__(self, thresholds: bytes) -> None:
self.thresholds = thresholds
def pack(self, packer: Packer) -> None:
Opaque(self.thresholds, 4, True).pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "Thresholds":
thresholds = Opaque.unpack(unpacker, 4, True)
return cls(thresholds)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "Thresholds":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "Thresholds":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return self.thresholds == other.thresholds
def __str__(self):
return f"<Thresholds [thresholds={self.thresholds}]>"
| 26.421053
| 61
| 0.65405
|
3068d9910a23a6982b477cd1fde50f89cce082af
| 350
|
py
|
Python
|
script/fix_questionstyle.py
|
xziyue/PRML_Project
|
989580d9db2101ccc8b6b2652291a2d3546b55ff
|
[
"MIT"
] | 1
|
2019-12-11T14:33:12.000Z
|
2019-12-11T14:33:12.000Z
|
script/fix_questionstyle.py
|
xziyue/PRML_Project
|
989580d9db2101ccc8b6b2652291a2d3546b55ff
|
[
"MIT"
] | null | null | null |
script/fix_questionstyle.py
|
xziyue/PRML_Project
|
989580d9db2101ccc8b6b2652291a2d3546b55ff
|
[
"MIT"
] | null | null | null |
from script.util import *
import os
import re
# this substitution will be used when all solutions are finished
root = os.path.split(os.getcwd())[0]
toc = GetAnswerTOC(root)
tex = toc['tex']
testString = r'123ji\prmlstyle{1.12} '
for key, val in tex.items():
text = None
with open(val, 'r') as infile:
text = infile.read()
print('read')
| 20.588235
| 64
| 0.66
|
682e0763aca2faee9a492d5e25bfb7582da36f94
| 2,724
|
py
|
Python
|
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/create_subnet_request_body.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/create_subnet_request_body.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/create_subnet_request_body.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
import pprint
import re
import six
class CreateSubnetRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'subnet': 'CreateSubnetOption'
}
attribute_map = {
'subnet': 'subnet'
}
def __init__(self, subnet=None):
"""CreateSubnetRequestBody - a model defined in huaweicloud sdk"""
self._subnet = None
self.discriminator = None
self.subnet = subnet
@property
def subnet(self):
"""Gets the subnet of this CreateSubnetRequestBody.
:return: The subnet of this CreateSubnetRequestBody.
:rtype: CreateSubnetOption
"""
return self._subnet
@subnet.setter
def subnet(self, subnet):
"""Sets the subnet of this CreateSubnetRequestBody.
:param subnet: The subnet of this CreateSubnetRequestBody.
:type: CreateSubnetOption
"""
self._subnet = subnet
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateSubnetRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
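# A hedged usage sketch, not part of the generated model: the request body just
# wraps a CreateSubnetOption. `None` below is a placeholder for a real option
# object, whose constructor arguments are not shown in this file.
if __name__ == "__main__":
    body = CreateSubnetRequestBody(subnet=None)
    print(body.to_dict())  # -> {'subnet': None}
    print(body)  # pprint-formatted output via to_str()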
| 25.222222
| 74
| 0.541116
|
41924ce1b40bbf88a00ed3e139cec7909606c77f
| 1,149
|
py
|
Python
|
allauth/socialaccount/providers/stackexchange_provider/provider.py
|
Fuzzwah/django-allauth
|
071cbef1388bb61a563d3e41197bd5b7c26664d2
|
[
"MIT"
] | null | null | null |
allauth/socialaccount/providers/stackexchange_provider/provider.py
|
Fuzzwah/django-allauth
|
071cbef1388bb61a563d3e41197bd5b7c26664d2
|
[
"MIT"
] | null | null | null |
allauth/socialaccount/providers/stackexchange_provider/provider.py
|
Fuzzwah/django-allauth
|
071cbef1388bb61a563d3e41197bd5b7c26664d2
|
[
"MIT"
] | null | null | null |
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2_provider.provider import OAuth2Provider
class StackExchangeAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get("html_url")
def get_avatar_url(self):
return self.account.extra_data.get("avatar_url")
def to_str(self):
dflt = super(StackExchangeAccount, self).to_str()
return self.account.extra_data.get("name", dflt)
class StackExchangeProvider(OAuth2Provider):
id = "stackexchange"
name = "Stack Exchange"
account_class = StackExchangeAccount
def get_site(self):
settings = self.get_settings()
return settings.get("SITE", "stackoverflow")
def extract_uid(self, data):
# `user_id` varies if you use the same account for
# e.g. StackOverflow and ServerFault. Therefore, we pick
# `account_id`.
uid = str(data["account_id"])
return uid
def extract_common_fields(self, data):
return dict(username=data.get("display_name"))
provider_classes = [StackExchangeProvider]
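# A hedged configuration sketch (not part of this module): django-allauth reads
# per-provider options from SOCIALACCOUNT_PROVIDERS in the Django settings,
# which is where the "SITE" key used by get_site() above would come from.
# SOCIALACCOUNT_PROVIDERS = {
#     "stackexchange": {
#         "SITE": "stackoverflow",
#     },
# }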
| 30.236842
| 83
| 0.706701
|
a91961e5831d289a7e9a6bb813845c0177c013fd
| 509
|
py
|
Python
|
csv__examples/writer_list_of_dict.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 117
|
2015-12-18T07:18:27.000Z
|
2022-03-28T00:25:54.000Z
|
csv__examples/writer_list_of_dict.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 8
|
2018-10-03T09:38:46.000Z
|
2021-12-13T19:51:09.000Z
|
csv__examples/writer_list_of_dict.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 28
|
2016-08-02T17:43:47.000Z
|
2022-03-21T08:31:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://docs.python.org/3/library/csv.html#csv.DictWriter
# SOURCE: https://stackoverflow.com/a/3087011/5909792
import csv
items = [
{'name': 'bob', 'age': 25, 'weight': 200},
{'name': 'jim', 'age': 31, 'weight': 180}
]
keys = items[0].keys()
with open('people.csv', 'w', encoding='utf-8', newline='') as f:
dict_writer = csv.DictWriter(f, keys)
dict_writer.writeheader()
dict_writer.writerows(items)
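# A complementary read-back sketch, assuming the people.csv written above:
# csv.DictReader yields one dict per row, with every value read back as a string.
with open('people.csv', 'r', encoding='utf-8', newline='') as f:
    for row in csv.DictReader(f):
        print(row)  # e.g. {'name': 'bob', 'age': '25', 'weight': '200'}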
| 22.130435
| 67
| 0.636542
|
7bbf10f44d8d5ced3a55a8c59dcaf3d2ddcff8c7
| 570
|
py
|
Python
|
Dataset_cancer.py
|
Kiminwoo/Machine-Running-Exercises
|
eff025564a6895f3a060e278599129026f6204d5
|
[
"MIT"
] | 1
|
2019-03-23T07:00:37.000Z
|
2019-03-23T07:00:37.000Z
|
Dataset_cancer.py
|
Kiminwoo/Machine-Running-Exercises
|
eff025564a6895f3a060e278599129026f6204d5
|
[
"MIT"
] | null | null | null |
Dataset_cancer.py
|
Kiminwoo/Machine-Running-Exercises
|
eff025564a6895f3a060e278599129026f6204d5
|
[
"MIT"
] | 1
|
2019-07-12T18:15:11.000Z
|
2019-07-12T18:15:11.000Z
|
# * cancer dataset: the Wisconsin breast cancer dataset, a real-world dataset of clinical measurements of breast cancer tumors.
# It contains 569 samples with 30 features; of these, 212 are malignant and 357 are benign.
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import mglearn
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
print("cancer.keys():\n{}".format(cancer.keys()))
print("데이터의 형태: {}".format(cancer.data.shape))
print("클래스별 샘플 개수:\n{}".format(
{n: v for n, v in zip(cancer.target_names, np.bincount(cancer.target))}))
print("특성 이름:\n{}".format(cancer.feature_names))
| 35.625
| 78
| 0.710526
|
bfcfff5f7918c8215575dc91d82361cfea396ba3
| 4,008
|
py
|
Python
|
tests/test_serializer.py
|
mariocesar/quill-delta-py
|
06dda55bfa91d4bded0f904eb7e7bbc56aee072c
|
[
"MIT"
] | 2
|
2018-02-14T19:04:23.000Z
|
2018-02-15T12:15:26.000Z
|
tests/test_serializer.py
|
mariocesar/quill-delta-py
|
06dda55bfa91d4bded0f904eb7e7bbc56aee072c
|
[
"MIT"
] | 1
|
2018-12-12T13:10:01.000Z
|
2018-12-13T13:54:22.000Z
|
tests/test_serializer.py
|
mariocesar/quill-delta-py
|
06dda55bfa91d4bded0f904eb7e7bbc56aee072c
|
[
"MIT"
] | null | null | null |
import unittest
import pytest
from quilldelta import Delete, Delta, Insert, Retain
from quilldelta.types import load_operation
class TestParser:
def test_insert_from_dict(self):
assert load_operation({'insert': 1}) == Insert(1, None)
assert load_operation({'insert': 'foo'}) == Insert('foo', None)
assert load_operation({
'insert': 'foo',
'attributes': {'bold': True}
}) == Insert('foo', {'bold': True})
def test_retain_from_dict(self):
assert load_operation({'retain': 1}) == Retain(1, None)
assert load_operation({
'retain': 1,
'attributes': {'bold': True}
        }) == Retain(1, {'bold': True})
def test_delete_from_dict(self):
assert load_operation({'delete': 1}) == Delete(1)
def test_unknown_operation(self):
with pytest.raises(ValueError) as error:
assert load_operation({'emotion': 1})
assert error.match('Unknown operation')
class TestData(unittest.TestCase):
def test_insert_as_data(self):
assert Insert('foo', None).as_data() == {'insert': 'foo'}
assert Insert('foo', {}).as_data() == {'insert': 'foo'}
assert Insert('foo', {'bold': True}).as_data() == {
'insert': 'foo',
'attributes': {'bold': True}
}
assert Insert(1, {'img': 'image.jpg'}).as_data() == {
'insert': 1,
'attributes': {'img': 'image.jpg'}
}
def test_retain_as_data(self):
assert Retain(1, None).as_data() == {'retain': 1}
assert Retain(1, {}).as_data() == {'retain': 1}
assert Retain(1, {'bold': True}).as_data() == {
'retain': 1,
'attributes': {'bold': True}
}
def test_delete_as_data(self):
assert Delete(1).as_data() == {'delete': 1}
assert Delete(2).as_data() == {'delete': 2}
def test_delta_as_data(self):
delta = Delta(ops=[
Insert('abc', None),
Retain(1, {'color': 'red'}),
Delete(4),
Insert('def', {'bold': True}),
Retain(6, None)
])
expected = [
{'insert': 'abc'},
{'retain': 1, 'attributes': {'color': 'red'}},
{'delete': 4},
{'insert': 'def', 'attributes': {'bold': True}},
{'retain': 6}
]
self.assertListEqual(delta.as_data(), expected)
class TestAsStringJson(unittest.TestCase):
def test_insert_str_json(self):
assert str(Insert('foo', None)) == '{"insert": "foo"}'
assert str(Insert('foo', {})) == '{"insert": "foo"}'
assert str(Insert('foo', {
'bold': True})) == '{"insert": "foo", "attributes": {"bold": true}}'
def test_retain_str_json(self):
assert str(Retain('foo', None)) == '{"retain": "foo"}'
assert str(Retain('foo', {})) == '{"retain": "foo"}'
assert str(Retain('foo', {
'bold': True})) == '{"retain": "foo", "attributes": {"bold": true}}'
def test_delete_str_json(self):
assert str(Delete(1)) == '{"delete": 1}'
def test_delta_as_data(self):
delta = Delta(ops=[
Insert('abc', None),
Retain(1, {'color': 'red'}),
Delete(4),
Insert('def', {'bold': True}),
Retain(6, None)
])
expected = [
{'insert': 'abc'},
{'retain': 1, 'attributes': {'color': 'red'}},
{'delete': 4},
{'insert': 'def', 'attributes': {'bold': True}},
{'retain': 6}
]
self.assertListEqual(delta.as_data(), expected)
def test_delta_as_string(self):
delta = Delta(ops=[
Insert('abc', None),
Retain(1, {'color': 'red'}),
Delete(3),
])
assert str(delta) == ('[{"insert": "abc"}, '
'{"retain": 1, "attributes": {"color": "red"}}, '
'{"delete": 3}]')
| 32.064
| 80
| 0.498503
|
6caf9b66059aa4d9d963c3224053c8f23375751d
| 2,263
|
py
|
Python
|
densephrases/scripts/dump/save_meta.py
|
Pranav174/DensePhrases
|
0f3ad63107d7c09b81218b1e77dcc31325b61450
|
[
"Apache-2.0"
] | 331
|
2021-03-31T11:14:55.000Z
|
2022-03-28T04:18:52.000Z
|
densephrases/scripts/dump/save_meta.py
|
ss756/DensePhrases
|
5c5465100112ec5267a7ba06eea5765ea494b365
|
[
"Apache-2.0"
] | 17
|
2021-06-04T19:14:53.000Z
|
2021-12-22T06:10:15.000Z
|
densephrases/scripts/dump/save_meta.py
|
ss756/DensePhrases
|
5c5465100112ec5267a7ba06eea5765ea494b365
|
[
"Apache-2.0"
] | 40
|
2021-04-12T09:53:00.000Z
|
2022-03-23T10:56:54.000Z
|
import argparse
import os
import h5py
import torch
from tqdm import tqdm
def get_range(name):
# name = name.replace('_tfidf', '')
return list(map(int, os.path.splitext(name)[0].split('-')))
def find_name(names, pos):
for name in names:
start, end = get_range(name)
assert start != end, 'you have self-looping at %s' % name
if start == pos:
return name, end
    raise Exception('hdf5 file starting with %d not found.' % pos)
def check_dump(args):
print('checking dir contiguity...')
names = os.listdir(args.dump_dir)
pos = args.start
while pos < args.end:
name, pos = find_name(names, pos)
assert pos == args.end, 'reached %d, which is different from the specified end %d' % (pos, args.end)
print('dir contiguity test passed!')
print('checking file corruption...')
pos = args.start
corrupted_paths = []
metadata = {}
keys_to_save = ['f2o_end', 'f2o_start', 'span_logits', 'start2end', 'word2char_end', 'word2char_start']
while pos < args.end:
name, pos = find_name(names, pos)
path = os.path.join(args.dump_dir, name)
try:
with h5py.File(path, 'r') as f:
print('checking %s...' % path)
for dk, group in tqdm(f.items()):
# keys = list(group.keys())
metadata[dk] = {save_key: group[save_key][:] for save_key in keys_to_save}
metadata[dk]['context'] = group.attrs['context']
metadata[dk]['title'] = group.attrs['title']
except Exception as e:
print(e)
print('%s corrupted!' % path)
corrupted_paths.append(path)
break
torch.save(metadata, 'tmp.bin')
if len(corrupted_paths) > 0:
print('following files are corrupted:')
for path in corrupted_paths:
print(path)
else:
print('file corruption test passed!')
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('dump_dir')
parser.add_argument('start', type=int)
parser.add_argument('end', type=int)
return parser.parse_args()
def main():
args = get_args()
check_dump(args)
if __name__ == '__main__':
main()
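# Example invocation (hypothetical paths), matching the argparse above:
#   python save_meta.py /path/to/phrase_dump 0 1000
# This checks that dump files named "<start>-<end>.*" cover [0, 1000)
# contiguously, verifies they open cleanly, and saves the collected
# metadata to tmp.bin.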
| 29.012821
| 107
| 0.590809
|
b38d553c850fdcbfc6ca9d112524269b92d0a8c0
| 4,689
|
py
|
Python
|
test/integration/025_duplicate_model_test/test_duplicate_model.py
|
tjengel/dbt
|
f985902a002fba36f6f709c6aacf9ae20778e58c
|
[
"Apache-2.0"
] | 1
|
2021-09-01T20:50:52.000Z
|
2021-09-01T20:50:52.000Z
|
test/integration/025_duplicate_model_test/test_duplicate_model.py
|
tjengel/dbt
|
f985902a002fba36f6f709c6aacf9ae20778e58c
|
[
"Apache-2.0"
] | 1
|
2019-10-28T15:33:04.000Z
|
2019-10-28T15:33:04.000Z
|
test/integration/025_duplicate_model_test/test_duplicate_model.py
|
tjengel/dbt
|
f985902a002fba36f6f709c6aacf9ae20778e58c
|
[
"Apache-2.0"
] | 2
|
2019-05-10T21:23:08.000Z
|
2021-06-09T01:28:37.000Z
|
from dbt.exceptions import CompilationException
from test.integration.base import DBTIntegrationTest, use_profile
class TestDuplicateModelEnabled(DBTIntegrationTest):
@property
def schema(self):
return "duplicate_model_025"
@property
def models(self):
return "models-1"
@property
def profile_config(self):
return {
"test": {
"outputs": {
"dev": {
"type": "postgres",
"threads": 1,
"host": self.database_host,
"port": 5432,
"user": "root",
"pass": "password",
"dbname": "dbt",
"schema": self.unique_schema()
},
},
"target": "dev"
}
}
@use_profile("postgres")
def test_postgres_duplicate_model_enabled(self):
message = "dbt found two resources with the name"
try:
self.run_dbt(["run"])
self.assertTrue(False, "dbt did not throw for duplicate models")
except CompilationException as e:
self.assertTrue(message in str(e), "dbt did not throw the correct error message")
class TestDuplicateModelDisabled(DBTIntegrationTest):
@property
def schema(self):
return "duplicate_model_025"
@property
def models(self):
return "models-2"
@property
def profile_config(self):
return {
"test": {
"outputs": {
"dev": {
"type": "postgres",
"threads": 1,
"host": self.database_host,
"port": 5432,
"user": "root",
"pass": "password",
"dbname": "dbt",
"schema": self.unique_schema()
},
},
"target": "dev"
}
}
@use_profile("postgres")
def test_postgres_duplicate_model_disabled(self):
try:
results = self.run_dbt(["run"])
except CompilationException:
self.fail(
"Compilation Exception raised on disabled model")
self.assertEqual(len(results), 1)
query = "select value from {schema}.model" \
.format(schema=self.unique_schema())
result = self.run_sql(query, fetch="one")[0]
assert result == 1
class TestDuplicateModelEnabledAcrossPackages(DBTIntegrationTest):
@property
def schema(self):
return "duplicate_model_025"
@property
def models(self):
return "models-3"
@property
def packages_config(self):
return {
"packages": [
{
'git': 'https://github.com/fishtown-analytics/dbt-integration-project',
'revision': 'master',
'warn-unpinned': False,
},
],
}
@use_profile("postgres")
def test_postgres_duplicate_model_enabled_across_packages(self):
self.run_dbt(["deps"])
message = "dbt found two resources with the name"
try:
self.run_dbt(["run"])
self.assertTrue(False, "dbt did not throw for duplicate models")
except CompilationException as e:
self.assertTrue(message in str(e), "dbt did not throw the correct error message")
class TestDuplicateModelDisabledAcrossPackages(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
self.run_sql_file("seed.sql")
@property
def schema(self):
return "duplicate_model_025"
@property
def models(self):
return "models-4"
@property
def packages_config(self):
return {
"packages": [
{
'git': 'https://github.com/fishtown-analytics/dbt-integration-project',
'revision': 'master',
'warn-unpinned': False,
},
],
}
@use_profile("postgres")
def test_postgres_duplicate_model_disabled_across_packages(self):
self.run_dbt(["deps"])
try:
self.run_dbt(["run"])
except CompilationException:
self.fail(
"Compilation Exception raised on disabled model")
query = "select 1 from {schema}.table_model" \
.format(schema=self.unique_schema())
result = self.run_sql(query, fetch="one")[0]
assert result == 1
| 29.30625
| 93
| 0.516528
|
561d183b6ec68826ce275c6fef47c16a07d7ed62
| 424
|
py
|
Python
|
leadmanager/accounts/urls.py
|
epoyepoy/lead-manager
|
db9bd76c3549a5f794af63b5583659d3d6e0e510
|
[
"MIT"
] | null | null | null |
leadmanager/accounts/urls.py
|
epoyepoy/lead-manager
|
db9bd76c3549a5f794af63b5583659d3d6e0e510
|
[
"MIT"
] | null | null | null |
leadmanager/accounts/urls.py
|
epoyepoy/lead-manager
|
db9bd76c3549a5f794af63b5583659d3d6e0e510
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from .api import RegisterAPI, LoginAPI, UserAPI
from knox import views as knox_views
urlpatterns = [
path('api/auth', include('knox.urls')),
path('api/auth/register', RegisterAPI.as_view()),
path('api/auth/login', LoginAPI.as_view()),
path('api/auth/user', UserAPI.as_view()),
path('api/auth/logout',
knox_views.LogoutView.as_view(), name='knox_logout')
]
| 32.615385
| 61
| 0.693396
|
e3c8a6dc433739546a19226d5e3070dddb2f26b1
| 19,697
|
py
|
Python
|
components/partition_table/gen_esp32part.py
|
ghsecuritylab/esp_alexa_idf
|
e132e8b960af6d576ce2fca4f6b2994be2f42e56
|
[
"Apache-2.0"
] | 24
|
2019-03-16T05:34:52.000Z
|
2020-06-20T08:08:37.000Z
|
components/partition_table/gen_esp32part.py
|
ghsecuritylab/esp_alexa_idf
|
e132e8b960af6d576ce2fca4f6b2994be2f42e56
|
[
"Apache-2.0"
] | 10
|
2021-03-09T15:34:26.000Z
|
2022-02-26T17:34:35.000Z
|
components/partition_table/gen_esp32part.py
|
ghsecuritylab/esp_alexa_idf
|
e132e8b960af6d576ce2fca4f6b2994be2f42e56
|
[
"Apache-2.0"
] | 16
|
2019-03-25T06:03:30.000Z
|
2020-07-18T23:44:28.000Z
|
#!/usr/bin/env python
#
# ESP32 partition table generation tool
#
# Converts partition tables to/from CSV and binary formats.
#
# See https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/partition-tables.html
# for explanation of partition table structure and uses.
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
from __future__ import unicode_literals
import argparse
import os
import re
import struct
import sys
import hashlib
import binascii
MAX_PARTITION_LENGTH = 0xC00 # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature
MD5_PARTITION_BEGIN = b"\xEB\xEB" + b"\xFF" * 14 # The first 2 bytes are like magic numbers for MD5 sum
PARTITION_TABLE_SIZE = 0x1000 # Size of partition table
__version__ = '1.2'
APP_TYPE = 0x00
DATA_TYPE = 0x01
TYPES = {
"app" : APP_TYPE,
"data" : DATA_TYPE,
}
# Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h
SUBTYPES = {
APP_TYPE : {
"factory" : 0x00,
"test" : 0x20,
},
DATA_TYPE : {
"ota" : 0x00,
"phy" : 0x01,
"nvs" : 0x02,
"coredump" : 0x03,
"nvs_keys" : 0x04,
"esphttpd" : 0x80,
"fat" : 0x81,
"spiffs" : 0x82,
},
}
quiet = False
md5sum = True
secure = False
offset_part_table = 0
def status(msg):
""" Print status message to stderr """
if not quiet:
critical(msg)
def critical(msg):
""" Print critical message to stderr """
sys.stderr.write(msg)
sys.stderr.write('\n')
class PartitionTable(list):
def __init__(self):
super(PartitionTable, self).__init__(self)
@classmethod
def from_csv(cls, csv_contents):
res = PartitionTable()
lines = csv_contents.splitlines()
def expand_vars(f):
f = os.path.expandvars(f)
m = re.match(r'(?<!\\)\$([A-Za-z_][A-Za-z0-9_]*)', f)
if m:
raise InputError("unknown variable '%s'" % m.group(1))
return f
for line_no in range(len(lines)):
line = expand_vars(lines[line_no]).strip()
if line.startswith("#") or len(line) == 0:
continue
try:
res.append(PartitionDefinition.from_csv(line, line_no+1))
except InputError as e:
raise InputError("Error at line %d: %s" % (line_no+1, e))
except Exception:
critical("Unexpected error parsing CSV line %d: %s" % (line_no+1, line))
raise
# fix up missing offsets & negative sizes
last_end = offset_part_table + PARTITION_TABLE_SIZE # first offset after partition table
for e in res:
if e.offset is not None and e.offset < last_end:
if e == res[0]:
raise InputError("CSV Error: First partition offset 0x%x overlaps end of partition table 0x%x"
% (e.offset, last_end))
else:
raise InputError("CSV Error: Partitions overlap. Partition at line %d sets offset 0x%x. Previous partition ends 0x%x"
% (e.line_no, e.offset, last_end))
if e.offset is None:
pad_to = 0x10000 if e.type == APP_TYPE else 4
if last_end % pad_to != 0:
last_end += pad_to - (last_end % pad_to)
e.offset = last_end
if e.size < 0:
e.size = -e.size - e.offset
last_end = e.offset + e.size
return res
def __getitem__(self, item):
""" Allow partition table access via name as well as by
numeric index. """
if isinstance(item, str):
for x in self:
if x.name == item:
return x
raise ValueError("No partition entry named '%s'" % item)
else:
return super(PartitionTable, self).__getitem__(item)
def find_by_type(self, ptype, subtype):
""" Return a partition by type & subtype, returns
None if not found """
# convert ptype & subtypes names (if supplied this way) to integer values
        try:
            ptype = TYPES[ptype]
        except KeyError:
            try:
                ptype = int(ptype, 0)
            except TypeError:
                pass
        try:
            subtype = SUBTYPES[int(ptype)][subtype]
        except KeyError:
            try:
                subtype = int(subtype, 0)
            except TypeError:
                pass
for p in self:
if p.type == ptype and p.subtype == subtype:
return p
return None
def find_by_name(self, name):
for p in self:
if p.name == name:
return p
return None
def verify(self):
# verify each partition individually
for p in self:
p.verify()
# check on duplicate name
names = [ p.name for p in self ]
duplicates = set( n for n in names if names.count(n) > 1 )
# print sorted duplicate partitions by name
if len(duplicates) != 0:
print("A list of partitions that have the same name:")
for p in sorted(self, key=lambda x:x.name):
if len(duplicates.intersection([p.name])) != 0:
print("%s" % (p.to_csv()))
raise InputError("Partition names must be unique")
# check for overlaps
last = None
for p in sorted(self, key=lambda x:x.offset):
if p.offset < offset_part_table + PARTITION_TABLE_SIZE:
raise InputError("Partition offset 0x%x is below 0x%x" % (p.offset, offset_part_table + PARTITION_TABLE_SIZE))
if last is not None and p.offset < last.offset + last.size:
raise InputError("Partition at 0x%x overlaps 0x%x-0x%x" % (p.offset, last.offset, last.offset+last.size-1))
last = p
def flash_size(self):
""" Return the size that partitions will occupy in flash
(ie the offset the last partition ends at)
"""
try:
last = sorted(self, reverse=True)[0]
except IndexError:
return 0 # empty table!
return last.offset + last.size
@classmethod
def from_binary(cls, b):
        md5 = hashlib.md5()
result = cls()
for o in range(0,len(b),32):
data = b[o:o+32]
if len(data) != 32:
raise InputError("Partition table length must be a multiple of 32 bytes")
if data == b'\xFF'*32:
return result # got end marker
if md5sum and data[:2] == MD5_PARTITION_BEGIN[:2]: #check only the magic number part
if data[16:] == md5.digest():
continue # the next iteration will check for the end marker
else:
raise InputError("MD5 checksums don't match! (computed: 0x%s, parsed: 0x%s)" % (md5.hexdigest(), binascii.hexlify(data[16:])))
else:
md5.update(data)
result.append(PartitionDefinition.from_binary(data))
raise InputError("Partition table is missing an end-of-table marker")
def to_binary(self):
result = b"".join(e.to_binary() for e in self)
if md5sum:
result += MD5_PARTITION_BEGIN + hashlib.md5(result).digest()
        if len(result) >= MAX_PARTITION_LENGTH:
raise InputError("Binary partition table length (%d) longer than max" % len(result))
result += b"\xFF" * (MAX_PARTITION_LENGTH - len(result)) # pad the sector, for signing
return result
def to_csv(self, simple_formatting=False):
rows = [ "# Espressif ESP32 Partition Table",
"# Name, Type, SubType, Offset, Size, Flags" ]
rows += [ x.to_csv(simple_formatting) for x in self ]
return "\n".join(rows) + "\n"
class PartitionDefinition(object):
MAGIC_BYTES = b"\xAA\x50"
ALIGNMENT = {
APP_TYPE : 0x10000,
DATA_TYPE : 0x04,
}
# dictionary maps flag name (as used in CSV flags list, property name)
# to bit set in flags words in binary format
FLAGS = {
"encrypted" : 0
}
# add subtypes for the 16 OTA slot values ("ota_XX, etc.")
for ota_slot in range(16):
SUBTYPES[TYPES["app"]]["ota_%d" % ota_slot] = 0x10 + ota_slot
def __init__(self):
self.name = ""
self.type = None
self.subtype = None
self.offset = None
self.size = None
self.encrypted = False
@classmethod
def from_csv(cls, line, line_no):
""" Parse a line from the CSV """
line_w_defaults = line + ",,,," # lazy way to support default fields
fields = [ f.strip() for f in line_w_defaults.split(",") ]
res = PartitionDefinition()
res.line_no = line_no
res.name = fields[0]
res.type = res.parse_type(fields[1])
res.subtype = res.parse_subtype(fields[2])
res.offset = res.parse_address(fields[3])
res.size = res.parse_address(fields[4])
if res.size is None:
raise InputError("Size field can't be empty")
flags = fields[5].split(":")
for flag in flags:
if flag in cls.FLAGS:
setattr(res, flag, True)
elif len(flag) > 0:
raise InputError("CSV flag column contains unknown flag '%s'" % (flag))
return res
def __eq__(self, other):
return self.name == other.name and self.type == other.type \
and self.subtype == other.subtype and self.offset == other.offset \
and self.size == other.size
def __repr__(self):
def maybe_hex(x):
return "0x%x" % x if x is not None else "None"
return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type, self.subtype or 0,
maybe_hex(self.offset), maybe_hex(self.size))
def __str__(self):
return "Part '%s' %d/%d @ 0x%x size 0x%x" % (self.name, self.type, self.subtype, self.offset or -1, self.size or -1)
def __cmp__(self, other):
return self.offset - other.offset
def __lt__(self, other):
return self.offset < other.offset
def __gt__(self, other):
return self.offset > other.offset
def __le__(self, other):
return self.offset <= other.offset
def __ge__(self, other):
return self.offset >= other.offset
def parse_type(self, strval):
if strval == "":
raise InputError("Field 'type' can't be left empty.")
return parse_int(strval, TYPES)
def parse_subtype(self, strval):
if strval == "":
return 0 # default
return parse_int(strval, SUBTYPES.get(self.type, {}))
def parse_address(self, strval):
if strval == "":
return None # PartitionTable will fill in default
return parse_int(strval)
def verify(self):
if self.type is None:
raise ValidationError(self, "Type field is not set")
if self.subtype is None:
raise ValidationError(self, "Subtype field is not set")
if self.offset is None:
raise ValidationError(self, "Offset field is not set")
align = self.ALIGNMENT.get(self.type, 4)
if self.offset % align:
raise ValidationError(self, "Offset 0x%x is not aligned to 0x%x" % (self.offset, align))
if self.size % align and secure:
raise ValidationError(self, "Size 0x%x is not aligned to 0x%x" % (self.size, align))
if self.size is None:
raise ValidationError(self, "Size field is not set")
if self.name in TYPES and TYPES.get(self.name, "") != self.type:
critical("WARNING: Partition has name '%s' which is a partition type, but does not match this partition's type (0x%x). Mistake in partition table?" % (self.name, self.type))
all_subtype_names = []
for names in (t.keys() for t in SUBTYPES.values()):
all_subtype_names += names
if self.name in all_subtype_names and SUBTYPES.get(self.type, {}).get(self.name, "") != self.subtype:
critical("WARNING: Partition has name '%s' which is a partition subtype, but this partition has non-matching type 0x%x and subtype 0x%x. Mistake in partition table?" % (self.name, self.type, self.subtype))
STRUCT_FORMAT = b"<2sBBLL16sL"
@classmethod
def from_binary(cls, b):
if len(b) != 32:
raise InputError("Partition definition length must be exactly 32 bytes. Got %d bytes." % len(b))
res = cls()
(magic, res.type, res.subtype, res.offset,
res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b)
if b"\x00" in res.name: # strip null byte padding from name string
res.name = res.name[:res.name.index(b"\x00")]
res.name = res.name.decode()
if magic != cls.MAGIC_BYTES:
raise InputError("Invalid magic bytes (%r) for partition definition" % magic)
for flag,bit in cls.FLAGS.items():
if flags & (1<<bit):
setattr(res, flag, True)
flags &= ~(1<<bit)
if flags != 0:
critical("WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?" % flags)
return res
def get_flags_list(self):
return [ flag for flag in self.FLAGS.keys() if getattr(self, flag) ]
def to_binary(self):
flags = sum((1 << self.FLAGS[flag]) for flag in self.get_flags_list())
return struct.pack(self.STRUCT_FORMAT,
self.MAGIC_BYTES,
self.type, self.subtype,
self.offset, self.size,
self.name.encode(),
flags)
def to_csv(self, simple_formatting=False):
def addr_format(a, include_sizes):
if not simple_formatting and include_sizes:
for (val, suffix) in [ (0x100000, "M"), (0x400, "K") ]:
if a % val == 0:
return "%d%s" % (a // val, suffix)
return "0x%x" % a
def lookup_keyword(t, keywords):
for k,v in keywords.items():
if simple_formatting == False and t == v:
return k
return "%d" % t
def generate_text_flags():
""" colon-delimited list of flags """
return ":".join(self.get_flags_list())
return ",".join([ self.name,
lookup_keyword(self.type, TYPES),
lookup_keyword(self.subtype, SUBTYPES.get(self.type, {})),
addr_format(self.offset, False),
addr_format(self.size, True),
generate_text_flags()])
def parse_int(v, keywords={}):
"""Generic parser for integer fields - int(x,0) with provision for
k/m/K/M suffixes and 'keyword' value lookup.
"""
try:
for letter, multiplier in [ ("k",1024), ("m",1024*1024) ]:
if v.lower().endswith(letter):
return parse_int(v[:-1], keywords) * multiplier
return int(v, 0)
except ValueError:
if len(keywords) == 0:
raise InputError("Invalid field value %s" % v)
try:
return keywords[v.lower()]
except KeyError:
raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ", ".join(keywords)))
def main():
global quiet
global md5sum
global offset_part_table
global secure
parser = argparse.ArgumentParser(description='ESP32 partition table utility')
parser.add_argument('--flash-size', help='Optional flash size limit, checks partition table fits in flash',
nargs='?', choices=[ '1MB', '2MB', '4MB', '8MB', '16MB' ])
parser.add_argument('--disable-md5sum', help='Disable md5 checksum for the partition table', default=False, action='store_true')
parser.add_argument('--no-verify', help="Don't verify partition table fields", action='store_true')
    parser.add_argument('--verify', '-v', help="Verify partition table fields (deprecated, this behaviour is enabled by default and this flag does nothing).", action='store_true')
parser.add_argument('--quiet', '-q', help="Don't print non-critical status messages to stderr", action='store_true')
parser.add_argument('--offset', '-o', help='Set offset partition table', default='0x8000')
parser.add_argument('--secure', help="Require app partitions to be suitable for secure boot", action='store_true')
parser.add_argument('input', help='Path to CSV or binary file to parse.', type=argparse.FileType('rb'))
parser.add_argument('output', help='Path to output converted binary or CSV file. Will use stdout if omitted.',
nargs='?', default='-')
args = parser.parse_args()
quiet = args.quiet
md5sum = not args.disable_md5sum
secure = args.secure
offset_part_table = int(args.offset, 0)
input = args.input.read()
input_is_binary = input[0:2] == PartitionDefinition.MAGIC_BYTES
if input_is_binary:
status("Parsing binary partition input...")
table = PartitionTable.from_binary(input)
else:
input = input.decode()
status("Parsing CSV input...")
table = PartitionTable.from_csv(input)
if not args.no_verify:
status("Verifying table...")
table.verify()
if args.flash_size:
size_mb = int(args.flash_size.replace("MB", ""))
size = size_mb * 1024 * 1024 # flash memory uses honest megabytes!
table_size = table.flash_size()
if size < table_size:
raise InputError("Partitions defined in '%s' occupy %.1fMB of flash (%d bytes) which does not fit in configured flash size %dMB. Change the flash size in menuconfig under the 'Serial Flasher Config' menu." %
(args.input.name, table_size / 1024.0 / 1024.0, table_size, size_mb))
if input_is_binary:
output = table.to_csv()
with sys.stdout if args.output == '-' else open(args.output, 'w') as f:
f.write(output)
else:
output = table.to_binary()
try:
stdout_binary = sys.stdout.buffer # Python 3
except AttributeError:
stdout_binary = sys.stdout
with stdout_binary if args.output == '-' else open(args.output, 'wb') as f:
f.write(output)
class InputError(RuntimeError):
def __init__(self, e):
super(InputError, self).__init__(e)
class ValidationError(InputError):
def __init__(self, partition, message):
super(ValidationError, self).__init__(
"Partition %s invalid: %s" % (partition.name, message))
if __name__ == '__main__':
try:
main()
except InputError as e:
print(e, file=sys.stderr)
sys.exit(2)
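# A hedged usage sketch based on the argparse definition above; paths and the
# CSV contents are illustrative only.
#   python gen_esp32part.py --flash-size 4MB partitions.csv partitions.bin
#   python gen_esp32part.py partitions.bin partitions.csv   # binary back to CSV
# where partitions.csv might contain, for example:
#   # Name,   Type, SubType, Offset,  Size, Flags
#   nvs,      data, nvs,     ,        0x6000,
#   phy_init, data, phy,     ,        0x1000,
#   factory,  app,  factory, ,        1M,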
| 38.321012
| 219
| 0.584861
|
605ede8a124cbf08f682eab9a5bb1a6b0c8e5d38
| 13,600
|
py
|
Python
|
healthcare/api-client/fhir/fhir_resources_test.py
|
apecr/python-docs-samples
|
26b581bb6ce148e13a9c7f2cd801f138b8aa8412
|
[
"Apache-2.0"
] | 1
|
2020-06-04T16:50:49.000Z
|
2020-06-04T16:50:49.000Z
|
healthcare/api-client/fhir/fhir_resources_test.py
|
apecr/python-docs-samples
|
26b581bb6ce148e13a9c7f2cd801f138b8aa8412
|
[
"Apache-2.0"
] | null | null | null |
healthcare/api-client/fhir/fhir_resources_test.py
|
apecr/python-docs-samples
|
26b581bb6ce148e13a9c7f2cd801f138b8aa8412
|
[
"Apache-2.0"
] | 1
|
2020-05-29T20:33:18.000Z
|
2020-05-29T20:33:18.000Z
|
# Copyright 2018 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import uuid
import backoff
import pytest
from requests.exceptions import HTTPError
# Add datasets for bootstrapping datasets for testing
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "datasets")) # noqa
import datasets # noqa
import fhir_stores # noqa
import fhir_resources # noqa
cloud_region = "us-central1"
base_url = "https://healthcare.googleapis.com/v1beta1"
project_id = os.environ["GCLOUD_PROJECT"]
service_account_json = os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
bundle = os.path.join(os.path.dirname(__file__), "resources/execute_bundle.json")
dataset_id = "test_dataset_{}".format(uuid.uuid4())
fhir_store_id = "test_fhir_store-{}".format(uuid.uuid4())
resource_type = "Patient"
# A giveup callback for backoff.
def fatal_code(e):
return 400 <= e.response.status_code < 500
@pytest.fixture(scope="module")
def test_dataset():
dataset = datasets.create_dataset(
service_account_json, project_id, cloud_region, dataset_id
)
yield dataset
# Clean up
datasets.delete_dataset(service_account_json, project_id, cloud_region, dataset_id)
@pytest.fixture(scope="module")
def test_fhir_store():
fhir_store = fhir_stores.create_fhir_store(
service_account_json, project_id, cloud_region, dataset_id, fhir_store_id
)
yield fhir_store
# Clean up
fhir_stores.delete_fhir_store(
service_account_json, project_id, cloud_region, dataset_id, fhir_store_id
)
# Fixture that creates/deletes a Patient resource for various tests.
@pytest.fixture(scope="module")
def test_patient():
patient_response = fhir_resources.create_patient(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
)
patient_resource_id = patient_response.json()["id"]
yield patient_resource_id
# Clean up
fhir_resources.delete_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
patient_resource_id,
)
def test_create_patient(test_dataset, test_fhir_store, capsys):
# Manually create a new Patient here to test that creating a Patient
# works.
fhir_resources.create_patient(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
)
out, _ = capsys.readouterr()
print(out)
assert "Created Patient" in out
def test_get_patient(test_dataset, test_fhir_store, test_patient, capsys):
fhir_resources.get_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
test_patient
)
out, _ = capsys.readouterr()
print(out)
assert "Got Patient resource" in out
@pytest.mark.skip(reason='flaky test sometimes returns 403 errors, need to investigate')
def test_conditional_patch_resource(test_dataset, test_fhir_store, test_patient, capsys):
# The conditional method tests use an Observation, so we have to create an
# Encounter from test_patient and then create an Observation from the
# Encounter.
encounter_response = fhir_resources.create_encounter(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
test_patient,
)
encounter_resource_id = encounter_response.json()["id"]
observation_response = fhir_resources.create_observation(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
test_patient,
encounter_resource_id,
)
observation_resource_id = observation_response.json()["id"]
fhir_resources.conditional_patch_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
)
    # In accordance with the FHIR spec, conditional patch and conditional update
    # can only be applied to one resource at a time. If the search criteria
    # identify more than one match, the request returns a 412 Precondition Failed
    # error. Every time the tests create an Observation resource, the resource is
    # identical, so each Observation has to be deleted after it's created;
    # otherwise conditional patch/update will detect more than one matching
    # Observation.
fhir_resources.delete_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
'Observation',
observation_resource_id
)
out, _ = capsys.readouterr()
print(out)
assert "Conditionally patched" in out
@pytest.mark.skip(reason='flaky test sometimes returns 412 errors, need to investigate')
def test_conditional_update_resource(test_dataset, test_fhir_store, test_patient, capsys):
# The conditional method tests use an Observation, so we have to create an
# Encounter from test_patient and then create an Observation from the
# Encounter.
encounter_response = fhir_resources.create_encounter(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
test_patient,
)
encounter_resource_id = encounter_response.json()["id"]
observation_response = fhir_resources.create_observation(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
test_patient,
encounter_resource_id,
)
observation_resource_id = observation_response.json()["id"]
fhir_resources.conditional_update_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
test_patient,
encounter_resource_id,
)
    # In accordance with the FHIR spec, conditional patch and conditional update
    # can only be applied to one resource at a time. If the search criteria
    # identify more than one match, the request returns a 412 Precondition Failed
    # error. Every time the tests create an Observation resource, the resource is
    # identical, so each Observation has to be deleted after it's created;
    # otherwise conditional patch/update will detect more than one matching
    # Observation.
fhir_resources.delete_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
'Observation',
observation_resource_id
)
out, _ = capsys.readouterr()
assert "Conditionally updated" in out
def test_conditional_delete_resource(test_dataset, test_fhir_store, test_patient, capsys):
# The conditional method tests use an Observation, so we have to create an
# Encounter from test_patient and then create an Observation from the
# Encounter.
@backoff.on_exception(
backoff.expo, HTTPError, max_time=60, giveup=fatal_code
)
def create_encounter():
encounter_response = fhir_resources.create_encounter(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
test_patient,
)
return encounter_response.json()["id"]
encounter_resource_id = create_encounter()
@backoff.on_exception(
backoff.expo, HTTPError, max_time=60, giveup=fatal_code
)
def create_observation():
fhir_resources.create_observation(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
test_patient,
encounter_resource_id,
)
create_observation()
@backoff.on_exception(
backoff.expo, HTTPError, max_time=60, giveup=fatal_code
)
def conditional_delete_resource():
fhir_resources.conditional_delete_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
)
conditional_delete_resource()
out, _ = capsys.readouterr()
print(out)
assert "Conditionally deleted" in out
def test_update_patient(test_dataset, test_fhir_store, test_patient, capsys):
fhir_resources.update_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
test_patient
)
out, _ = capsys.readouterr()
print(out)
assert "Updated Patient resource" in out
def test_patch_patient(test_dataset, test_fhir_store, test_patient, capsys):
fhir_resources.patch_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
test_patient
)
out, _ = capsys.readouterr()
print(out)
assert "Patched Patient resource" in out
def test_resource_versions(test_dataset, test_fhir_store, test_patient, capsys):
# We have to update the resource so that different versions of it are
# created, then we test to see if we can get/delete those versions.
fhir_resources.update_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
test_patient,
)
history = fhir_resources.list_resource_history(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
test_patient,
)
fhir_resources.get_resource_history(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
test_patient,
history["entry"][-1]["resource"]["meta"]["versionId"],
)
fhir_resources.delete_resource_purge(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
test_patient,
)
out, _ = capsys.readouterr()
print(out)
# list_resource_history test
assert "History for Patient resource" in out
# get_resource_history test
assert "Got history for Patient resource" in out
# delete_resource_purge test
assert "Deleted versions of Patient resource" in out
def test_search_resources_get(test_dataset, test_fhir_store, test_patient, capsys):
fhir_resources.search_resources_get(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
)
out, _ = capsys.readouterr()
assert "Using GET request" in out
def test_search_resources_post(test_dataset, test_fhir_store, test_patient, capsys):
fhir_resources.search_resources_post(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
)
out, _ = capsys.readouterr()
assert "Using POST request" in out
def test_get_patient_everything(test_dataset, test_fhir_store, test_patient, capsys):
fhir_resources.get_patient_everything(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
test_patient,
)
out, _ = capsys.readouterr()
# get_patient_everything test
assert "id" in out
def test_get_metadata(test_dataset, test_fhir_store, capsys):
fhir_resources.get_metadata(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
)
out, _ = capsys.readouterr()
# get_metadata test
assert "fhirVersion" in out
def test_delete_patient(test_dataset, test_fhir_store, test_patient, capsys):
fhir_resources.delete_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
test_patient
)
out, _ = capsys.readouterr()
print(out)
assert "Deleted Patient resource" in out
def test_execute_bundle(test_dataset, test_fhir_store, capsys):
fhir_resources.execute_bundle(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
bundle,
)
out, _ = capsys.readouterr()
# execute_bundle test
assert "Executed bundle" in out
| 25.954198
| 90
| 0.671838
|
bceefc277ee44ab6c4f5543ed2fbaec962996d98
| 7,842
|
py
|
Python
|
contrib/bitrpc/bitrpc.py
|
bitmxittz/Bitmxittz2
|
f4fdd17c09fdee79feea729bb81b3b99862423f7
|
[
"MIT"
] | 1
|
2017-12-22T15:32:01.000Z
|
2017-12-22T15:32:01.000Z
|
contrib/bitrpc/bitrpc.py
|
bitmxittz/Bitmxittz2
|
f4fdd17c09fdee79feea729bb81b3b99862423f7
|
[
"MIT"
] | null | null | null |
contrib/bitrpc/bitrpc.py
|
bitmxittz/Bitmxittz2
|
f4fdd17c09fdee79feea729bb81b3b99862423f7
|
[
"MIT"
] | 1
|
2019-05-30T14:10:31.000Z
|
2019-05-30T14:10:31.000Z
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:14432")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:14432")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitmxittz address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitmxittz address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 24.129231
| 79
| 0.668452
|
d9ad0fb8916c59d43659f4efd575cefba82bbe23
| 3,928
|
py
|
Python
|
lib/spack/spack/cmd/dev_build.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2020-05-24T15:23:12.000Z
|
2020-05-24T15:23:12.000Z
|
lib/spack/spack/cmd/dev_build.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6
|
2022-02-26T11:44:34.000Z
|
2022-03-12T12:14:50.000Z
|
lib/spack/spack/cmd/dev_build.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2020-09-15T02:37:59.000Z
|
2020-09-21T04:34:38.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import os
import llnl.util.tty as tty
import spack.config
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.repo
from spack.stage import DIYStage
description = "developer build: build from code in current working directory"
section = "build"
level = "long"
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ['jobs'])
subparser.add_argument(
'-d', '--source-path', dest='source_path', default=None,
help="path to source directory. defaults to the current directory")
subparser.add_argument(
'-i', '--ignore-dependencies', action='store_true', dest='ignore_deps',
help="don't try to install dependencies of requested packages")
arguments.add_common_arguments(subparser, ['no_checksum'])
subparser.add_argument(
'--keep-prefix', action='store_true',
help="do not remove the install prefix if installation fails")
subparser.add_argument(
'--skip-patch', action='store_true',
help="skip patching for the developer build")
subparser.add_argument(
'-q', '--quiet', action='store_true', dest='quiet',
help="do not display verbose build output while installing")
subparser.add_argument(
'--drop-in', type=str, dest='shell', default=None,
help="drop into a build environment in a new shell, e.g. bash, zsh")
arguments.add_common_arguments(subparser, ['spec'])
stop_group = subparser.add_mutually_exclusive_group()
stop_group.add_argument(
'-b', '--before', type=str, dest='before', default=None,
help="phase to stop before when installing (default None)")
stop_group.add_argument(
'-u', '--until', type=str, dest='until', default=None,
help="phase to stop after when installing (default None)")
cd_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
def dev_build(self, args):
if not args.spec:
tty.die("spack dev-build requires a package spec argument.")
specs = spack.cmd.parse_specs(args.spec)
if len(specs) > 1:
tty.die("spack dev-build only takes one spec.")
spec = specs[0]
if not spack.repo.path.exists(spec.name):
tty.die("No package for '{0}' was found.".format(spec.name),
" Use `spack create` to create a new package")
if not spec.versions.concrete:
tty.die(
"spack dev-build spec must have a single, concrete version. "
"Did you forget a package version number?")
spec.concretize()
package = spack.repo.get(spec)
if package.installed:
tty.error("Already installed in %s" % package.prefix)
tty.msg("Uninstall or try adding a version suffix for this dev build.")
sys.exit(1)
source_path = args.source_path
if source_path is None:
source_path = os.getcwd()
source_path = os.path.abspath(source_path)
# Forces the build to run out of the current directory.
package.stage = DIYStage(source_path)
# disable checksumming if requested
if args.no_checksum:
spack.config.set('config:checksum', False, scope='command_line')
package.do_install(
make_jobs=args.jobs,
keep_prefix=args.keep_prefix,
install_deps=not args.ignore_deps,
verbose=not args.quiet,
keep_stage=True, # don't remove source dir for dev build.
dirty=args.dirty,
stop_before=args.before,
stop_at=args.until)
# drop into the build environment of the package?
if args.shell is not None:
spack.build_environment.setup_package(package, dirty=False)
os.execvp(args.shell, [args.shell])
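# A minimal usage sketch of the command defined above, assuming a source
# checkout; the package name, version, path and phase are placeholders:
#
#     cd ~/src/mypackage
#     spack dev-build mypackage@1.2.3
#
# The options wired up in setup_parser() map onto variants such as:
#
#     spack dev-build --until configure mypackage@1.2.3   # stop after a phase
#     spack dev-build --drop-in bash mypackage@1.2.3      # shell into build env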
| 36.036697
| 79
| 0.677444
|
27d6c596957bd137565ac19d5744d635afb3b8a6
| 1,841
|
py
|
Python
|
biodata/api/mutations.py
|
znatty22/biodataservice
|
a3eeb137d2e727a0fc58437b185f2637bc4665ed
|
[
"Apache-2.0"
] | null | null | null |
biodata/api/mutations.py
|
znatty22/biodataservice
|
a3eeb137d2e727a0fc58437b185f2637bc4665ed
|
[
"Apache-2.0"
] | null | null | null |
biodata/api/mutations.py
|
znatty22/biodataservice
|
a3eeb137d2e727a0fc58437b185f2637bc4665ed
|
[
"Apache-2.0"
] | null | null | null |
import graphene
from biodata.api import models as m
from biodata.api import objects as o
class CreateStudyMutation(graphene.Mutation):
class Arguments:
short_name = graphene.String(required=True)
name = graphene.String(required=False)
study = graphene.Field(o.StudyType)
def mutate(self, info, short_name, name=None):
study = m.Study(
short_name=short_name, name=name
)
study.save()
return CreateStudyMutation(study=study)
class CreateParticipantMutation(graphene.Mutation):
class Arguments:
study = graphene.String(required=True)
gender = graphene.String(required=True)
race = graphene.String(required=False)
ethnicity = graphene.String(required=False)
participant = graphene.Field(o.ParticipantType)
def mutate(self, info, study, gender, race=None, ethnicity=None):
participant = m.Participant(
gender=gender, race=race, ethnicity=ethnicity, study_id=study
)
participant.save()
return CreateParticipantMutation(participant=participant)
class CreateBiospecimenMutation(graphene.Mutation):
class Arguments:
participant = graphene.String(required=True)
analyte_type = graphene.String(required=False)
biospecimen = graphene.Field(o.BiospecimenType)
def mutate(self, info, participant, analyte_type=None):
biospecimen = m.Biospecimen(
analyte_type=analyte_type, participant_id=participant
)
biospecimen.save()
return CreateBiospecimenMutation(biospecimen=biospecimen)
class Mutation(graphene.ObjectType):
create_participant_mutation = CreateParticipantMutation.Field()
create_study_mutation = CreateStudyMutation.Field()
create_biospecimen_mutation = CreateBiospecimenMutation.Field()
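# A minimal usage sketch, assuming this Mutation class is wired into the
# project's graphene schema alongside its Query type (not shown here).
# graphene exposes arguments in camelCase by default; the selection on `study`
# depends on what o.StudyType actually exposes, so `id` is only illustrative.
#
#     mutation {
#       createStudyMutation(shortName: "SD-001", name: "Example study") {
#         study { id }
#       }
#     }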
| 31.741379
| 73
| 0.712113
|
24622a9ce68a8b80af701be571d0cfdc8b85652f
| 1,412
|
py
|
Python
|
auth0/v3/management/users_by_email.py
|
bumplzz69/auth0-python
|
763f331f66eab465e2ef6654a74b48d1f4b47d9a
|
[
"MIT"
] | null | null | null |
auth0/v3/management/users_by_email.py
|
bumplzz69/auth0-python
|
763f331f66eab465e2ef6654a74b48d1f4b47d9a
|
[
"MIT"
] | null | null | null |
auth0/v3/management/users_by_email.py
|
bumplzz69/auth0-python
|
763f331f66eab465e2ef6654a74b48d1f4b47d9a
|
[
"MIT"
] | null | null | null |
from .rest import RestClient
class UsersByEmail(object):
"""Auth0 users by email endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
"""
def __init__(self, domain, token, telemetry=True):
self.domain = domain
self.client = RestClient(jwt=token, telemetry=telemetry)
def _url(self):
url = 'https://%s/api/v2/users-by-email' % self.domain
return url
def search_users_by_email(self, email, fields=None, include_fields=True):
"""List or search users.
Args:
email: Email to search
fields (list of str, optional): A list of fields to include or
exclude from the result (depending on include_fields). Empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be include in the result, False otherwise.
See: https://auth0.com/docs/api/management/v2#!/Users_By_Email/get_users_by_email
"""
params = {
'email': email,
'fields': fields and ','.join(fields) or None,
'include_fields': str(include_fields).lower()
}
return self.client.get(self._url(), params=params)
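# A minimal usage sketch with placeholder domain, token and email. In the
# library this endpoint is normally reached through the management client
# wrapper, but it can also be instantiated directly:
if __name__ == '__main__':
    users_by_email = UsersByEmail('example.auth0.com', 'MGMT_API_V2_TOKEN')
    print(users_by_email.search_users_by_email(
        'jane.doe@example.com', fields=['user_id', 'email']))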
| 30.042553
| 89
| 0.606941
|
f61755ac3f24324fba205e2759ca0c1bf8bd6dbc
| 3,059
|
py
|
Python
|
rule11_interruption.py
|
e96031413/proofread
|
f4fc798d3b57c6f7a55f4e18de68f2272eb9fc44
|
[
"MIT"
] | null | null | null |
rule11_interruption.py
|
e96031413/proofread
|
f4fc798d3b57c6f7a55f4e18de68f2272eb9fc44
|
[
"MIT"
] | null | null | null |
rule11_interruption.py
|
e96031413/proofread
|
f4fc798d3b57c6f7a55f4e18de68f2272eb9fc44
|
[
"MIT"
] | null | null | null |
import os
from color_print import *
from p_arser import nlp
from p_arser import sentencizer
principle = color_magenta('''Writing Principle 11:
Avoid interruptions between subject and verb
and between verb and object.
''')
example = color_cyan('''
Bad: We conclude, based on very simplified models of solar variability,
     that solar variability is insignificant.
Good: We conclude that solar variability is insignificant
     based on very simplified models of solar variability.
Good: Based on very simplified models of solar variability,
     we conclude that solar variability is insignificant.
''')
def get_dependence(sentence, dep=['ROOT'], parser=None):
    '''dependency analysis to find the subject, verb, or object token'''
parsed_ex = parser(sentence)
for idx, token in enumerate(parsed_ex):
if token.dep_ in dep:
return idx, token, parsed_ex
return None, None, parsed_ex
def report_subject_interruption(sentence, max_len=10):
    '''report an interruption between subject and verb for one sentence'''
idx_s, subj, parsed_ex = get_dependence(sentence, ['nsubj', 'csubj'], nlp)
idx_v, verb, parsed_ex = get_dependence(sentence, ['ROOT'], nlp)
idx_o, obj, parsed_ex = get_dependence(sentence, ['obj'], nlp)
if (idx_s is None or idx_v is None): return None
interruption_distance = abs(idx_v - idx_s)
if interruption_distance > max_len:
n_coma = 0
        interrupt_start = None
interrupt_end = None
suggestion = []
for ii, token in enumerate(parsed_ex):
if token.text == ',':
n_coma += 1
                if interrupt_start is None:
interrupt_start = ii
else:
interrupt_end = ii
if interrupt_start is None or interrupt_end is None or \
interrupt_start < idx_s or interrupt_end > idx_v:
return ""
for ii, token in enumerate(parsed_ex):
if ii < interrupt_start:
suggestion.append(token.text)
elif ii < interrupt_end:
suggestion.append(color_red(token.text))
else:
suggestion.append(token.text)
return ' '.join(suggestion)
def report_interruptions(file_path, max_len=25):
    '''report subject-verb interruptions for all sentences in the paper'''
sentences = sentencizer(file_path)
suggestions = []
for sentence in sentences:
sug = report_subject_interruption(sentence.text, max_len=max_len)
if sug: suggestions.append(sug)
if len(suggestions):
print("Report interruptions for ", color_cyan(file_path.split('/')[-1]))
for sug in suggestions:
print(sug)
print(color_blue("******************************************************"))
if __name__ == "__main__":
    exmp = "We conclude, based on very simplified models of solar variability, that solar variability is insignificant."
    suggestion = report_subject_interruption(exmp, max_len=5)
    if suggestion:
        print(suggestion)  # sentence with the interruption highlighted in red
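    # A minimal sketch of checking a whole document the same way; the path is
    # a placeholder for a plain-text file accepted by sentencizer().
    # report_interruptions('paper.txt', max_len=25)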
| 35.16092
| 122
| 0.62831
|
7703321a0cbd9a9c4496dcab5366c448bac6f2eb
| 9,352
|
py
|
Python
|
data_generation/lumencor.py
|
AndrewGYork/remote_refocus
|
5be485e13df4b9caf71941e8e937acaf6b959348
|
[
"CC-BY-4.0"
] | 16
|
2018-01-11T22:31:05.000Z
|
2019-07-06T12:06:37.000Z
|
data_generation/lumencor.py
|
calico/remote_refocus
|
7c62907eed8f27afbfe3ef07d62c7a5ea9d3c7ac
|
[
"CC-BY-4.0"
] | null | null | null |
data_generation/lumencor.py
|
calico/remote_refocus
|
7c62907eed8f27afbfe3ef07d62c7a5ea9d3c7ac
|
[
"CC-BY-4.0"
] | 2
|
2018-01-11T20:03:51.000Z
|
2019-02-09T20:36:28.000Z
|
import serial
class SpectraX:
"""Controls Lumencor SpectraX through a serial port."""
def __init__(self, which_port, verbose=True):
"""Constructor for SpectraX class.
Arguments:
which_port -- Specify which serial port the SpectraX is connected to
e.g. 'COM6' (on Windows).
Keyword arguments:
verbose -- True | False. Specify if print statements will be
executed. This is useful for debug purposes.
"""
self.verbose = verbose
self.port = serial.Serial(port=which_port, baudrate=9600, timeout=.25)
## Set up initial states
self.led_intensities = {'red':0, 'green':0, 'cyan':0, 'uv':0, 'blue':0,
'teal':0}
self.led_states = {'red':False, 'green':False, 'cyan':False, 'uv':False,
'blue':False, 'teal':False, 'yellow_filter':False}
if self.verbose: print("SpectraX initializing...")
## Write initial 'must be issued' commands to the port
self._send(b'\x57\x02\xff\x50')
self._send(b'\x57\x03\xAB\x50')
        ## Set initial led intensities to 0 and ensure SpectraX is responding
self.set_intensity(blocking=False, **self.led_intensities)
self._force_response()
if self.verbose: print(" SpectraX is ready!")
def _send(self, cmd, expected_response=0):
"""Send bytes to the SpectraX serial port.
Arguments:
cmd -- bytes to write to the serial port
Keyword Arguments:
expected_response -- number of bytes expected for a response
defaults to 0 (no response)
Returns:
None if expected_reponse == 0
bytes if expected_response > 0
"""
assert type(cmd) is bytes
assert expected_response >= 0
assert int(expected_response) == expected_response
if self.verbose:
print(' Sending command to SpectraX:',
' '.join('%02x'%i for i in cmd))
self.port.write(cmd)
if expected_response > 0:
response = self.port.read(expected_response)
if self.verbose:
print(' Reading response from SpectraX:',
' '.join('%02x'%i for i in response))
return response
else:
return None
def get_temperature(self):
"""Read the temperature of the SpectraX unit.
Arguments:
None
Returns:
The current temperature of the SpectraX unit in degrees C
"""
if self.verbose: print("Checking SpectraX temperature...")
response = self._send(b'\x53\x91\x02\x50', expected_response=2)
temperature = 0.125 * (int.from_bytes(response, byteorder='big') >> 5)
if self.verbose: print("SpectraX is %s degrees" % temperature)
return temperature
def _force_response(self):
"""Force a response from the SpectraX unit.
The serial port protocol for interacting with the SpectraX unit
does not provide a response upon receipt of command. This
function uses the 'check temperature' serial command and will
block until a response is received.
If no response is received, this will result in an AssertionError.
The logic of calling this function is that if the unit is able to
        respond to this command, it likely has executed all previous commands.
Arguments:
None
Returns:
None
"""
if self.verbose: print("Checking SpectraX for responsiveness...")
response = self._send(b'\x53\x91\x02\x50', expected_response=2)
assert len(response) == 2
        if self.verbose: print(" ... ok to proceed")
    def _intensity_setter(self, led, intensity, blocking=True):
"""Set intensity of specific led.
Arguments:
led -- 'blue' | 'teal' | 'uv' | 'cyan' | 'green' | 'red'. Specify
which led is to be set. Providing a value not listed above
will result in a KeyError.
intensity -- int between 0 and 255. Specify the intensity of this
led. This value has not been tested for linearity.
Providing a value outside of allowable range will
result in AssertionError.
Keyword Arguments:
blocking -- True | False. Set if this command will call
_force_response. If true, this command should block
until a response is received.
"""
assert 0 <= intensity <= 255
color = led.lower()
if self.verbose:
print("Setting SpectraX %s intensity to %i / 255" % (color,
intensity))
self._send(({
'blue': b'\x53\x1a\x03\x01',
'teal': b'\x53\x1a\x03\x02',
'uv': b'\x53\x18\x03\x01',
'cyan': b'\x53\x18\x03\x02',
'green':b'\x53\x18\x03\x04',
'red': b'\x53\x18\x03\x08'}[color]+
(((4095-intensity) << 12)+80).to_bytes(3,byteorder='big')))
if blocking:
self._force_response()
def set_intensity(self, red=None, green=None, cyan=None, uv=None, blue=None,
teal=None, blocking=True):
"""Set intensity of leds in SpectraX.
This will sequencially set the intensity for each led where the value
is not None. There is a way in the serial port protocol to set the same
intensity to multiple leds with a single command but that is ignored
here.
Keyword Arguments:
red -- int [0..255].
green -- int [0..255].
cyan -- int [0..255].
uv -- int [0..255].
blue -- int [0..255].
teal -- int [0..255].
blocking -- True | False. Check for the unit's ability to respond
after setting all intensities.
"""
intensities = locals()
intensities.pop('self')
intensities.pop('blocking')
        for color, intensity in intensities.items():
if intensity is not None:
self._intensity_setter(color, intensity, blocking=blocking)
def _state_cmd_generator(self):
"""Formats bytes to write to serial port to specifiy led states.
This function was written to clarify the syntax of set_led_state
function.
Returns:
bytes -- in a format to be written to serial port for setting led
enabled/disabled states.
"""
states_byte = (127
-self.led_states['red']*1
-self.led_states['green']*2
-self.led_states['cyan']*4
-self.led_states['uv']*8
-self.led_states['yellow_filter']*16
-self.led_states['blue']*32
-self.led_states['teal']*64)
return b'\x4f'+states_byte.to_bytes(1,byteorder='big')+b'\x50'
def set_led_state(self, red=None, green=None, cyan=None, uv=None,
blue=None, teal=None, yellow_filter=None, blocking=True):
"""Enables/disables specific leds of the SpectraX.
Enabling 'yellow_filter' will shift the spectra of the light from the
green led.
Keyword Arguments:
red -- True | False. True for "ON". False for "OFF".
green -- True | False. True for "ON". False for "OFF".
cyan -- True | False. True for "ON". False for "OFF".
uv -- True | False. True for "ON". False for "OFF".
blue -- True | False. True for "ON". False for "OFF".
teal -- True | False. True for "ON". False for "OFF".
yellow_filter -- True | False. True for "IN". False for "OUT".
"""
states_to_set = locals()
states_to_set.pop('self')
states_to_set.pop('blocking')
## Default to previous known states if no new state is provided
for led in states_to_set:
if states_to_set[led] is not None:
self.led_states[led] = states_to_set[led]
if self.verbose:
print('Setting SpectraX LED states:')
for led in self.led_states:
print(' %13s: %s' % (led, self.led_states[led]))
self._send(self._state_cmd_generator())
if blocking:
self._force_response()
def close(self):
self.set_led_state(red=False, green=False, cyan=False, uv=False,
blue=False, teal=False)
self.set_intensity(red=0, green=0, cyan=0, uv=0, blue=0, teal=0)
self.port.close()
if __name__ == '__main__':
import time
spectrax = SpectraX('COM8', verbose=False)
print("Done initializing...")
spectrax.set_intensity(red=255,
blocking=True)
spectrax.set_led_state(red=True, blocking=False)
time.sleep(1)
spectrax.set_led_state(red=False, blocking=True)
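    # A minimal extension of the demo above: exercise the green channel plus
    # the yellow filter documented in set_led_state(), then shut down cleanly.
    # The intensity value and the one-second delay are arbitrary illustrations.
    spectrax.set_intensity(green=128, blocking=True)
    spectrax.set_led_state(green=True, yellow_filter=True, blocking=False)
    time.sleep(1)
    spectrax.close()  # disables all LEDs, zeroes intensities, closes the port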
| 41.93722
| 80
| 0.553785
|
b9e1f10935fd4f70163f71e81bd257bc2d6d1bba
| 1,788
|
py
|
Python
|
sdk/ml/azure-ai-ml/tests/test_configs/batch_setup/light_gbm_examples/python-guide/plot_example.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/ml/azure-ai-ml/tests/test_configs/batch_setup/light_gbm_examples/python-guide/plot_example.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/ml/azure-ai-ml/tests/test_configs/batch_setup/light_gbm_examples/python-guide/plot_example.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import lightgbm as lgb
import pandas as pd
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError("You need to install matplotlib for plot_example.py.")
print("Loading data...")
# load or create your dataset
df_train = pd.read_csv("../regression/regression.train", header=None, sep="\t")
df_test = pd.read_csv("../regression/regression.test", header=None, sep="\t")
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {"num_leaves": 5, "metric": ("l1", "l2"), "verbose": 0}
evals_result = {} # to record eval results for plotting
print("Starting training...")
# train
gbm = lgb.train(
params,
lgb_train,
num_boost_round=100,
valid_sets=[lgb_train, lgb_test],
feature_name=["f" + str(i + 1) for i in range(X_train.shape[-1])],
categorical_feature=[21],
evals_result=evals_result,
verbose_eval=10,
)
print("Plotting metrics recorded during training...")
ax = lgb.plot_metric(evals_result, metric="l1")
plt.show()
print("Plotting feature importances...")
ax = lgb.plot_importance(gbm, max_num_features=10)
plt.show()
print("Plotting split value histogram...")
ax = lgb.plot_split_value_histogram(gbm, feature="f26", bins="auto")
plt.show()
print("Plotting 54th tree...") # one tree use categorical feature to split
ax = lgb.plot_tree(gbm, tree_index=53, figsize=(15, 15), show_info=["split_gain"])
plt.show()
print("Plotting 54th tree with graphviz...")
graph = lgb.create_tree_digraph(gbm, tree_index=53, name="Tree54")
graph.render(view=True)
| 29.311475
| 82
| 0.720358
|
3629bb281f0a1c1ede5b90f7f598ce32b6059574
| 53,084
|
py
|
Python
|
research/object_detection/inputs_test.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 10
|
2020-06-30T06:43:48.000Z
|
2022-03-22T11:01:20.000Z
|
research/object_detection/inputs_test.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 10
|
2019-12-28T21:31:19.000Z
|
2020-04-12T20:01:58.000Z
|
research/object_detection/inputs_test.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 9
|
2020-03-30T02:11:52.000Z
|
2020-04-05T02:15:08.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.tflearn.inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from object_detection import inputs
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import test_case
FLAGS = tf.flags.FLAGS
def _get_configs_for_model(model_name):
"""Returns configurations for model."""
fname = os.path.join(tf.resource_loader.get_data_files_path(),
'samples/configs/' + model_name + '.config')
label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
'data/pet_label_map.pbtxt')
data_path = os.path.join(tf.resource_loader.get_data_files_path(),
'test_data/pets_examples.record')
configs = config_util.get_configs_from_pipeline_file(fname)
override_dict = {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path
}
return config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
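# For reference (as used throughout the tests below), the returned dict is
# keyed by pipeline section, e.g.:
#
#   configs = _get_configs_for_model('ssd_inception_v2_pets')
#   configs['model'], configs['train_config'], configs['eval_config']
#   configs['train_input_config'], configs['eval_input_configs'][0]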
def _make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.data.Iterator`.
"""
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
class InputsTest(test_case.TestCase, parameterized.TestCase):
def test_faster_rcnn_resnet50_train_input(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([1, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([1],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
  def test_faster_rcnn_resnet50_train_input_with_additional_channels(self):
    """Tests the training input function for FasterRcnnResnet50 with
    additional channels.
    """
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
configs['train_input_config'].num_additional_channels = 2
configs['train_config'].retain_original_images = True
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([1, None, None, 5],
features[fields.InputDataFields.image].shape.as_list())
self.assertAllEqual(
[1, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([1],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
@parameterized.parameters(
{'eval_batch_size': 1},
{'eval_batch_size': 8}
)
def test_faster_rcnn_resnet50_eval_input(self, eval_batch_size=1):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_ssd_inceptionV2_train_input(self):
"""Tests the training input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
batch_size = configs['train_config'].batch_size
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[batch_size],
labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.num_groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[batch_size, 100],
labels[
fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
@parameterized.parameters(
{'eval_batch_size': 1},
{'eval_batch_size': 8}
)
def test_ssd_inceptionV2_eval_input(self, eval_batch_size=1):
"""Tests the eval input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[
fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_ssd_inceptionV2_eval_input_with_additional_channels(
self, eval_batch_size=1):
"""Tests the eval input function for SSDInceptionV2 with additional channels.
Args:
eval_batch_size: Batch size for eval set.
"""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
configs['eval_input_configs'][0].num_additional_channels = 1
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_config.retain_original_image_additional_channels = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 300, 300, 4],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size, 300, 300, 1], features[
fields.InputDataFields.image_additional_channels].shape.as_list())
self.assertEqual(
tf.uint8,
features[fields.InputDataFields.image_additional_channels].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(tf.bool,
labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_predict_input(self):
"""Tests the predict input function."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'],
predict_input_config=configs['eval_input_configs'][0])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
self.assertEqual([1, 300, 300, 3], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_predict_input_with_additional_channels(self):
"""Tests the predict input function with additional channels."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['eval_input_configs'][0].num_additional_channels = 2
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'],
predict_input_config=configs['eval_input_configs'][0])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
# RGB + 2 additional channels = 5 channels.
self.assertEqual([1, 300, 300, 5], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_error_with_bad_train_config(self):
"""Tests that a TypeError is raised with improper train config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['eval_config'], # Expecting `TrainConfig`.
train_input_config=configs['train_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_input_config(self):
"""Tests that a TypeError is raised with improper train input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_model_config(self):
"""Tests that a TypeError is raised with improper train model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['train_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_eval_config(self):
"""Tests that a TypeError is raised with improper eval config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['train_config'], # Expecting `EvalConfig`.
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_input_config(self):
"""Tests that a TypeError is raised with improper eval input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_model_config(self):
"""Tests that a TypeError is raised with improper eval model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['eval_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
eval_input_fn()
def test_output_equal_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
test_string = b'hello world'
feed_dict = {string_placeholder: test_string}
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
self.assertEqual(test_string, out_string)
def test_output_is_integer_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
empty_string = ''
feed_dict = {string_placeholder: empty_string}
tf.set_random_seed(0)
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
# Test whether out_string is a string which represents an integer.
int(out_string) # throws an error if out_string is not castable to int.
self.assertEqual(out_string, b'2798129067578209328')
  def test_force_no_resize(self):
    """Tests the functionality of the force_no_resize option."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['eval_config'].force_no_resize = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['model']
)
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['model']
)
features_train, _ = _make_initializable_iterator(
train_input_fn()).get_next()
features_eval, _ = _make_initializable_iterator(
eval_input_fn()).get_next()
images_train, images_eval = features_train['image'], features_eval['image']
self.assertEqual([1, None, None, 3], images_eval.shape.as_list())
self.assertEqual([24, 300, 300, 3], images_train.shape.as_list())
class DataAugmentationFnTest(test_case.TestCase):
def test_apply_image_and_box_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],
[[10, 10, 20, 20]]
)
def test_apply_image_and_box_augmentation_with_scores(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1.0], np.float32)),
fields.InputDataFields.groundtruth_weights:
tf.constant(np.array([0.8], np.float32)),
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],
[[10, 10, 20, 20]]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_classes],
[1.0]
)
self.assertAllClose(
augmented_tensor_dict_out[
fields.InputDataFields.groundtruth_weights],
[0.8]
)
def test_include_masks_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
})
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.zeros([2, 10, 10], np.uint8))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3])
self.assertAllEqual(augmented_tensor_dict_out[
fields.InputDataFields.groundtruth_instance_masks].shape, [2, 20, 20])
def test_include_keypoints_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],
[[10, 10, 20, 20]]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_keypoints],
[[[10, 20], [10, 10]]]
)
def _fake_model_preprocessor_fn(image):
return (image, tf.expand_dims(tf.shape(image)[1:], axis=0))
def _fake_image_resizer_fn(image, mask):
return (image, mask, tf.shape(image))
def _fake_resize50_preprocess_fn(image):
image = image[0]
image, shape = preprocessor.resize_to_range(
image, min_dimension=50, max_dimension=50, pad_to_max_dimension=True)
return tf.expand_dims(image, 0), tf.expand_dims(shape, axis=0)
class DataTransformationFnTest(test_case.TestCase):
def test_combine_additional_channels_if_present(self):
image = np.random.rand(4, 4, 3).astype(np.float32)
additional_channels = np.random.rand(4, 4, 2).astype(np.float32)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(image),
fields.InputDataFields.image_additional_channels:
tf.constant(additional_channels),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 1], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=1)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(transformed_inputs[fields.InputDataFields.image].dtype,
tf.float32)
self.assertAllEqual(transformed_inputs[fields.InputDataFields.image].shape,
[4, 4, 5])
self.assertAllClose(transformed_inputs[fields.InputDataFields.image],
np.concatenate((image, additional_channels), axis=2))
def test_use_multiclass_scores_when_present(self):
image = np.random.rand(4, 4, 3).astype(np.float32)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(image),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.multiclass_scores:
tf.constant(np.array([0.2, 0.3, 0.5, 0.1, 0.6, 0.3], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3, use_multiclass_scores=True)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
np.array([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]], np.float32),
transformed_inputs[fields.InputDataFields.groundtruth_classes])
def test_use_multiclass_scores_when_not_present(self):
image = np.random.rand(4, 4, 3).astype(np.float32)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(image),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.multiclass_scores:
tf.placeholder(tf.float32),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3, use_multiclass_scores=True)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict),
feed_dict={
tensor_dict[fields.InputDataFields.multiclass_scores]:
np.array([], dtype=np.float32)
})
self.assertAllClose(
np.array([[0, 1, 0], [0, 0, 1]], np.float32),
transformed_inputs[fields.InputDataFields.groundtruth_classes])
def test_returns_correct_class_label_encodings(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[0, 0, 1], [1, 0, 0]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_confidences],
[[0, 0, 1], [1, 0, 0]])
def test_returns_correct_labels_with_unrecognized_class(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(
np.array([[0, 0, 1, 1], [.2, .2, 4, 4], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.groundtruth_area:
tf.constant(np.array([.5, .4, .3])),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, -1, 1], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(
np.array([[[.1, .1]], [[.2, .2]], [[.5, .5]]],
np.float32)),
fields.InputDataFields.groundtruth_keypoint_visibilities:
tf.constant([True, False, True]),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(3, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_is_crowd:
tf.constant([False, True, False]),
fields.InputDataFields.groundtruth_difficult:
tf.constant(np.array([0, 0, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[0, 0, 1], [1, 0, 0]])
self.assertAllEqual(
transformed_inputs[fields.InputDataFields.num_groundtruth_boxes], 2)
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_area], [.5, .3])
self.assertAllEqual(
transformed_inputs[fields.InputDataFields.groundtruth_confidences],
[[0, 0, 1], [1, 0, 0]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_boxes],
[[0, 0, 1, 1], [.5, .5, 1, 1]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
[[[.1, .1]], [[.5, .5]]])
self.assertAllEqual(
transformed_inputs[
fields.InputDataFields.groundtruth_keypoint_visibilities],
[True, True])
self.assertAllEqual(
transformed_inputs[
fields.InputDataFields.groundtruth_instance_masks].shape, [2, 4, 4])
self.assertAllEqual(
transformed_inputs[fields.InputDataFields.groundtruth_is_crowd],
[False, False])
self.assertAllEqual(
transformed_inputs[fields.InputDataFields.groundtruth_difficult],
[0, 1])
def test_returns_correct_merged_boxes(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
merge_multiple_boxes=True)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_boxes],
[[.5, .5, 1., 1.]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[1, 0, 1]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_confidences],
[[1, 0, 1]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.num_groundtruth_boxes],
1)
def test_returns_correct_groundtruth_confidences_when_input_present(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32)),
fields.InputDataFields.groundtruth_confidences:
tf.constant(np.array([1.0, -1.0], np.float32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[0, 0, 1], [1, 0, 0]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_confidences],
[[0, 0, 1], [-1, 0, 0]])
def test_returns_resized_masks(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(2, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32)),
fields.InputDataFields.original_image_spatial_shape:
tf.constant(np.array([4, 4], np.int32))
}
def fake_image_resizer_fn(image, masks=None):
resized_image = tf.image.resize_images(image, [8, 8])
results = [resized_image]
if masks is not None:
resized_masks = tf.transpose(
tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),
[2, 0, 1])
results.append(resized_masks)
results.append(tf.shape(resized_image))
return results
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=fake_image_resizer_fn,
num_classes=num_classes,
retain_original_image=True)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(transformed_inputs[
fields.InputDataFields.original_image].dtype, tf.uint8)
self.assertAllEqual(transformed_inputs[
fields.InputDataFields.original_image_spatial_shape], [4, 4])
self.assertAllEqual(transformed_inputs[
fields.InputDataFields.original_image].shape, [8, 8, 3])
self.assertAllEqual(transformed_inputs[
fields.InputDataFields.groundtruth_instance_masks].shape, [2, 8, 8])
def test_applies_model_preprocess_fn_to_image_tensor(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def fake_model_preprocessor_fn(image):
return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(transformed_inputs[fields.InputDataFields.image],
np_image / 255.)
self.assertAllClose(transformed_inputs[fields.InputDataFields.
true_image_shape],
[4, 4, 3])
def test_applies_data_augmentation_fn_to_tensor_dict(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def add_one_data_augmentation_fn(tensor_dict):
return {key: value + 1 for key, value in tensor_dict.items()}
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_one_data_augmentation_fn)
with self.test_session() as sess:
augmented_tensor_dict = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
np_image + 1)
self.assertAllEqual(
augmented_tensor_dict[fields.InputDataFields.groundtruth_classes],
[[0, 0, 0, 1], [0, 1, 0, 0]])
def test_applies_data_augmentation_fn_before_model_preprocess_fn(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def mul_two_model_preprocessor_fn(image):
return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0))
def add_five_to_image_data_augmentation_fn(tensor_dict):
tensor_dict[fields.InputDataFields.image] += 5
return tensor_dict
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=mul_two_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_five_to_image_data_augmentation_fn)
with self.test_session() as sess:
augmented_tensor_dict = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
(np_image + 5) * 2)
def test_resize_with_padding(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[0.1, 0.2], [0.3, 0.4]]),
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_boxes],
[[.5, .25, 1., .5], [.0, .0, .5, .25]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
[[[.1, .1], [.3, .2]]])
class PadInputDataToStaticShapesFnTest(test_case.TestCase):
def test_pad_images_boxes_and_classes(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 3]),
fields.InputDataFields.groundtruth_boxes:
tf.placeholder(tf.float32, [None, 4]),
fields.InputDataFields.groundtruth_classes:
tf.placeholder(tf.int32, [None, 3]),
fields.InputDataFields.true_image_shape:
tf.placeholder(tf.int32, [3]),
fields.InputDataFields.original_image_spatial_shape:
tf.placeholder(tf.int32, [2])
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.true_image_shape]
.shape.as_list(), [3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.original_image_spatial_shape]
.shape.as_list(), [2])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]
.shape.as_list(), [3, 4])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_classes]
.shape.as_list(), [3, 3])
def test_clip_boxes_and_classes(self):
input_tensor_dict = {
fields.InputDataFields.groundtruth_boxes:
tf.placeholder(tf.float32, [None, 4]),
fields.InputDataFields.groundtruth_classes:
tf.placeholder(tf.int32, [None, 3]),
fields.InputDataFields.num_groundtruth_boxes:
tf.placeholder(tf.int32, [])
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]
.shape.as_list(), [3, 4])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_classes]
.shape.as_list(), [3, 3])
with self.test_session() as sess:
out_tensor_dict = sess.run(
padded_tensor_dict,
feed_dict={
input_tensor_dict[fields.InputDataFields.groundtruth_boxes]:
np.random.rand(5, 4),
input_tensor_dict[fields.InputDataFields.groundtruth_classes]:
np.random.rand(2, 3),
input_tensor_dict[fields.InputDataFields.num_groundtruth_boxes]:
5,
})
self.assertAllEqual(
out_tensor_dict[fields.InputDataFields.groundtruth_boxes].shape, [3, 4])
self.assertAllEqual(
out_tensor_dict[fields.InputDataFields.groundtruth_classes].shape,
[3, 3])
self.assertEqual(
out_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
3)
def test_do_not_pad_dynamic_images(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 3]),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[None, None])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[None, None, 3])
def test_images_and_additional_channels(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 5]),
fields.InputDataFields.image_additional_channels:
tf.placeholder(tf.float32, [None, None, 2]),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
# pad_input_data_to_static_shape assumes that image is already concatenated
# with additional channels.
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 5])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image_additional_channels]
.shape.as_list(), [5, 6, 2])
def test_images_and_additional_channels_errors(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 3]),
fields.InputDataFields.image_additional_channels:
tf.placeholder(tf.float32, [None, None, 2]),
fields.InputDataFields.original_image:
tf.placeholder(tf.float32, [None, None, 3]),
}
with self.assertRaises(ValueError):
_ = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
def test_gray_images(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 1]),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 1])
def test_gray_images_and_additional_channels(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 3]),
fields.InputDataFields.image_additional_channels:
tf.placeholder(tf.float32, [None, None, 2]),
}
# pad_input_data_to_static_shape assumes that image is already concatenated
# with additional channels.
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image_additional_channels]
.shape.as_list(), [5, 6, 2])
def test_keypoints(self):
input_tensor_dict = {
fields.InputDataFields.groundtruth_keypoints:
tf.placeholder(tf.float32, [None, 16, 4]),
fields.InputDataFields.groundtruth_keypoint_visibilities:
tf.placeholder(tf.bool, [None, 16]),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_keypoints]
.shape.as_list(), [3, 16, 4])
self.assertAllEqual(
padded_tensor_dict[
fields.InputDataFields.groundtruth_keypoint_visibilities]
.shape.as_list(), [3, 16])
if __name__ == '__main__':
tf.test.main()
| 41.536776
| 81
| 0.69083
|
e9461939ec5e38edd12bf74c238f436cc873fc0f
| 5,426
|
py
|
Python
|
build/PureCloudPlatformClientV2/models/date_range_with_optional_end.py
|
cjohnson-ctl/platform-client-sdk-python
|
38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100
|
[
"MIT"
] | 10
|
2019-02-22T00:27:08.000Z
|
2021-09-12T23:23:44.000Z
|
libs/PureCloudPlatformClientV2/models/date_range_with_optional_end.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | 5
|
2018-06-07T08:32:00.000Z
|
2021-07-28T17:37:26.000Z
|
libs/PureCloudPlatformClientV2/models/date_range_with_optional_end.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | 6
|
2020-04-09T17:43:07.000Z
|
2022-02-17T08:48:05.000Z
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class DateRangeWithOptionalEnd(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
DateRangeWithOptionalEnd - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'start_business_unit_date': 'date',
'end_business_unit_date': 'date'
}
self.attribute_map = {
'start_business_unit_date': 'startBusinessUnitDate',
'end_business_unit_date': 'endBusinessUnitDate'
}
self._start_business_unit_date = None
self._end_business_unit_date = None
@property
def start_business_unit_date(self):
"""
Gets the start_business_unit_date of this DateRangeWithOptionalEnd.
The start date for work plan rotation or an agent, interpreted in the business unit's time zone. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:return: The start_business_unit_date of this DateRangeWithOptionalEnd.
:rtype: date
"""
return self._start_business_unit_date
@start_business_unit_date.setter
def start_business_unit_date(self, start_business_unit_date):
"""
Sets the start_business_unit_date of this DateRangeWithOptionalEnd.
The start date for work plan rotation or an agent, interpreted in the business unit's time zone. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:param start_business_unit_date: The start_business_unit_date of this DateRangeWithOptionalEnd.
:type: date
"""
self._start_business_unit_date = start_business_unit_date
@property
def end_business_unit_date(self):
"""
Gets the end_business_unit_date of this DateRangeWithOptionalEnd.
The end date for work plan rotation or an agent, interpreted in the business unit's time zone. Null denotes open ended date range. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:return: The end_business_unit_date of this DateRangeWithOptionalEnd.
:rtype: date
"""
return self._end_business_unit_date
@end_business_unit_date.setter
def end_business_unit_date(self, end_business_unit_date):
"""
Sets the end_business_unit_date of this DateRangeWithOptionalEnd.
The end date for work plan rotation or an agent, interpreted in the business unit's time zone. Null denotes open ended date range. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:param end_business_unit_date: The end_business_unit_date of this DateRangeWithOptionalEnd.
:type: date
"""
self._end_business_unit_date = end_business_unit_date
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
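# --- Editor's usage sketch (not part of the generated SDK) ---
# A minimal illustration of how this model is typically populated and
# serialized. The import path is assumed from the repository layout and the
# concrete dates are placeholders; only the attribute names come from the
# swagger_types mapping above.
#
#     from datetime import date
#     from PureCloudPlatformClientV2.models.date_range_with_optional_end import DateRangeWithOptionalEnd
#
#     date_range = DateRangeWithOptionalEnd()
#     date_range.start_business_unit_date = date(2021, 1, 4)
#     date_range.end_business_unit_date = None  # open-ended range
#     print(date_range.to_json())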
| 34.56051
| 207
| 0.63638
|
9f3d583430c06ace8d37ad0aefc7a07f17a8ddee
| 292
|
py
|
Python
|
suzieq/sqobjects/ifCounters.py
|
ewlumpkin/suzieq
|
9d55a46a631f01535d5b8ab1c0b870f840bbc526
|
[
"Apache-2.0"
] | null | null | null |
suzieq/sqobjects/ifCounters.py
|
ewlumpkin/suzieq
|
9d55a46a631f01535d5b8ab1c0b870f840bbc526
|
[
"Apache-2.0"
] | null | null | null |
suzieq/sqobjects/ifCounters.py
|
ewlumpkin/suzieq
|
9d55a46a631f01535d5b8ab1c0b870f840bbc526
|
[
"Apache-2.0"
] | null | null | null |
from suzieq.sqobjects.basicobj import SqObject
class IfCountersObj(SqObject):
def __init__(self, **kwargs):
super().__init__(table='ifCounters', **kwargs)
self._valid_get_args = ['namespace', 'hostname', 'columns', 'ifname',
'query_str']
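# --- Editor's usage sketch ---
# The _valid_get_args list above implies a get() entry point inherited from
# SqObject that accepts those keyword arguments; that method and the example
# namespace/hostname values are assumptions, not verified here.
#
#     from suzieq.sqobjects.ifCounters import IfCountersObj
#
#     if_counters = IfCountersObj()
#     df = if_counters.get(namespace=['datacenter1'], hostname=['leaf01'],
#                          columns=['default'])
#     print(df.head())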
| 32.444444
| 77
| 0.616438
|
df80b7003f5ee3d8e801a8d69947af261738663c
| 96
|
py
|
Python
|
.tox/py27/lib/python2.7/re.py
|
RGKim/locustksy
|
018322331d37483acf61e210302e4e1da21d719f
|
[
"MIT"
] | 8
|
2019-01-14T14:49:09.000Z
|
2020-07-24T18:32:06.000Z
|
ENV/lib/python2.7/re.py
|
timxor/onchainjobs
|
61f45b1bf2f88fd9c7de90ac43932dbe01cd43b7
|
[
"MIT"
] | 7
|
2018-11-26T01:07:13.000Z
|
2018-11-26T08:01:59.000Z
|
ENV/lib/python2.7/re.py
|
timxor/onchainjobs
|
61f45b1bf2f88fd9c7de90ac43932dbe01cd43b7
|
[
"MIT"
] | 1
|
2019-09-01T05:47:58.000Z
|
2019-09-01T05:47:58.000Z
|
/usr/local/Cellar/python@2/2.7.15_1/Frameworks/Python.framework/Versions/2.7/lib/python2.7/re.py
| 96
| 96
| 0.802083
|
b8afbd2ced86a49470dd704a579a10d78c158c03
| 19,359
|
py
|
Python
|
tensorflow/models/embedding/word2vec.py
|
285219011/hello-world
|
dfb71ea206eb9f61e5d97c9727caa1a6449e39cb
|
[
"Apache-2.0"
] | 6
|
2017-04-25T01:30:41.000Z
|
2019-12-11T15:08:46.000Z
|
tensorflow/models/embedding/word2vec.py
|
PaulTR/tensorflow
|
84bcff1e814ee5697b5980535583737f8e81d82f
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/models/embedding/word2vec.py
|
PaulTR/tensorflow
|
84bcff1e814ee5697b5980535583737f8e81d82f
|
[
"Apache-2.0"
] | 8
|
2017-04-17T23:39:12.000Z
|
2019-05-11T14:06:31.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "File consisting of analogies of four tokens."
"embedding 2 - embedding 1 + embedding 3 should be close "
"to embedding 4."
"E.g. https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
self._read_analogies()
def _read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [emb_dim].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size, 1]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled, 1]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
    # We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
true_logits, tf.ones_like(true_logits))
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
sampled_logits, tf.zeros_like(sampled_logits))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
return nce_loss_tensor
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vectors on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.scalar_summary("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
f.write("%s %d\n" % (tf.compat.as_text(opts.vocab_words[i]),
opts.vocab_counts[i]))
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
      time.sleep(opts.statistics_interval)  # Reports our progress once in a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
total = self._analogy_questions.shape[0]
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
return c
return "unknown"
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
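# --- Editor's usage note ---
# The flags defined above form the command-line interface; main() requires
# --train_data, --eval_data and --save_path. A typical invocation (the file
# names and output directory below are placeholders) looks like:
#
#     python word2vec.py \
#         --train_data=text8 \
#         --eval_data=questions-words.txt \
#         --save_path=/tmp/word2vec_logs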
| 36.734345
| 80
| 0.649155
|
4903483dd9efe44dadf1e149b59c19fa0581388f
| 6,916
|
py
|
Python
|
onnxruntime/python/tools/featurizer_ops/data_frame_tool.py
|
michaelgsharp/onnxruntime
|
da653ccdac344372052025d95cc84e93237f6f84
|
[
"MIT"
] | null | null | null |
onnxruntime/python/tools/featurizer_ops/data_frame_tool.py
|
michaelgsharp/onnxruntime
|
da653ccdac344372052025d95cc84e93237f6f84
|
[
"MIT"
] | null | null | null |
onnxruntime/python/tools/featurizer_ops/data_frame_tool.py
|
michaelgsharp/onnxruntime
|
da653ccdac344372052025d95cc84e93237f6f84
|
[
"MIT"
] | 1
|
2020-10-01T09:26:19.000Z
|
2020-10-01T09:26:19.000Z
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
import numpy as np
import onnxruntime as onnxrt
ort_float_set = set([np.float32, np.float64])
pd_float_set = set(['float64'])
ort_int_set = set([np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64])
pd_int_set = set(['int64'])
types_dict = {
'tensor(float16)': np.float16,
'tensor(float)' : np.float32,
'tensor(double)' : np.float64,
'tensor(int8)' : np.int8,
'tensor(uint8)' : np.uint8,
'tensor(int16)' : np.int16,
'tensor(uint16)' : np.uint16,
'tensor(int32)' : np.int32,
'tensor(uint32)' : np.uint32,
'tensor(int64)' : np.int64,
'tensor(uint64)' : np.uint64,
'tensor(bool)' : np.bool,
'tensor(string)' : np.object
}
class DataFrameTool():
"""
This is a utility class used to run a model with pandas.DataFrame input
"""
def __init__(self, model_path, sess_options=None):
"""
:param model_path: path to the model to be loaded
:param sess_options: see onnxruntime.SessionsOptions
"""
self._model_path = model_path
self._sess_options = sess_options
self._sess = onnxrt.InferenceSession(self._model_path, self._sess_options)
def _reshape_input(self, input_array, expected_shape):
"""
:param - input_array numpy array. This one is obtained from DataFrame and expected to have
: a rank if 1.
:expected_shape - shape fetched from the model which may include dynamic elements.
: expected_shape may at most have one -1, None or zero which will be computed from
: the size of the input_array. We replace None and zeros to -1 and let np.ndarray.reshape deal with it.
"""
# expected_shape rank is one, we will let onnxruntime to deal with it
if len(expected_shape) == 1:
return input_array
inferred_shape = [dim if dim else -1 for dim in expected_shape]
return input_array.reshape(inferred_shape)
def _validate_type(self, input_meta, col_type):
"""
: input_meta - meta info obtained from the model for the given input
: col_type - dtype of the column
: throws if conditions are not met
float16 and bool will always require exact match
We attempt to convert any type to a string if it is required.
With strings we always want to put this into a flat array, cast to np.object and then reshape as object
Any other type to qualify for casting must match either integer or floating point types
"""
expected_type = types_dict[input_meta.type]
if input_meta.type == 'tensor(string)':
return
elif expected_type == col_type:
return
elif expected_type in ort_float_set and str(col_type) in pd_float_set:
return
elif expected_type in ort_int_set and str(col_type) in pd_int_set:
return
raise TypeError("Input {} requires type {} unable to cast column type {} ".format(
input_meta.name, expected_type, col_type))
def _process_input_list(self, df, input_metas, require):
"""
Return a dictionary of input_name : a typed and shaped np.array of values for a given input_meta
The function does the heavy lifting for _get_input_feeds()
:param df: See :class:`pandas.DataFrame`.
:param input_metas: a list of name/type pairs
:require is a boolean. If True this helper throws on a missing input.
"""
feeds = {}
        # Process the listed inputs. Raise an error for a missing one only when require is True.
for input_meta in input_metas:
# We fully expect all the types are in the above dictionary
assert input_meta.type in types_dict, "Update types_dict for the new type"
if input_meta.name in df.columns:
self._validate_type(input_meta, df[input_meta.name].dtype)
# With strings we must cast first to np.object then then reshape
# so we do it for everything
input_array = np.array(df[input_meta.name]).astype(types_dict[input_meta.type])
feeds[input_meta.name] = self._reshape_input(input_array, input_meta.shape)
elif require:
raise RuntimeError("This model requires input {} of type {} but it is not found in the DataFrame".format(
input_meta.name, types_dict[input_meta.type]))
return feeds
def _get_input_feeds(self, df, sess):
"""
Return a dictionary of input_name : a typed and shaped np.array of values
This function accepts Pandas DataFrame as the first argument and onnxruntime
session with a loaded model. The function interrogates the model for the inputs
and matches the model input names to the DataFrame instance column names.
It requires exact matches for bool and float16 types. It attempts to convert to
string any input type if string is required.
It attempts to convert floating types to each other and does the same for all of the
integer types without requiring an exact match.
:param df: See :class:`pandas.DataFrame`. The function only considers the first row (0) of each column
and feeds the data to the appropriate model inputs.
:param sess: See :class:`onnxruntime.InferenceSession`.
::
For example: pd.DataFrame([[0], [4],[20]],index=[0], columns=['A', 'B', 'C'])
"""
if df.empty:
raise RuntimeError('input DataFrame is empty')
        # Process mandatory inputs. Raise an error if anything is not present.
        feeds = self._process_input_list(df, sess.get_inputs(), True)
        # Process optional overridable initializers. If present, the initializer value
        # is overridden by the input. If not, the initializer value embedded in the model takes effect.
initializers = self._process_input_list(df, sess.get_overridable_initializers(), False)
feeds.update(initializers)
return feeds
    def execute(self, df, output_names, run_options=None):
        """
        Compute the predictions and return a list of output values, restricted
        to output_names when that list is not empty.
:param df: See :class:`pandas.DataFrame`.
:param output_names: name of the outputs that we are interested in
:param run_options: See :class:`onnxruntime.RunOptions`.
::
sess.run([output_name], {input_name: x})
"""
        input_feed = self._get_input_feeds(df, self._sess)
return self._sess.run(output_names, input_feed, run_options)
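# --- Editor's usage sketch (assumptions: "model.onnx" and the column names
# "A"/"B" are placeholders; the real DataFrame columns must match the input
# names reported by the loaded model). ---
if __name__ == "__main__":
    import pandas as pd  # pandas is an implicit dependency of this tool
    example_df = pd.DataFrame({"A": [0.5], "B": [1.5]})
    tool = DataFrameTool("model.onnx")  # hypothetical model path
    # output_names=None follows the onnxruntime convention of returning all outputs.
    outputs = tool.execute(example_df, output_names=None)
    print(outputs)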
| 41.915152
| 121
| 0.640977
|
dd55389311667ceb97d8f7ce7a857b30070d2523
| 11,910
|
py
|
Python
|
symphony/bdk/gen/pod_model/avatar.py
|
SymphonyOSF/symphony-api-client-python
|
70137a893f4385381a3158ef80e1be156e0fc4bd
|
[
"Apache-2.0"
] | null | null | null |
symphony/bdk/gen/pod_model/avatar.py
|
SymphonyOSF/symphony-api-client-python
|
70137a893f4385381a3158ef80e1be156e0fc4bd
|
[
"Apache-2.0"
] | null | null | null |
symphony/bdk/gen/pod_model/avatar.py
|
SymphonyOSF/symphony-api-client-python
|
70137a893f4385381a3158ef80e1be156e0fc4bd
|
[
"Apache-2.0"
] | null | null | null |
"""
Pod API
This document refers to Symphony API calls that do not need encryption or decryption of content. - sessionToken can be obtained by calling the authenticationAPI on the symphony back end and the key manager respectively. Refer to the methods described in authenticatorAPI.yaml. - Actions are defined to be atomic, ie will succeed in their entirety or fail and have changed nothing. - If it returns a 40X status then it will have made no change to the system even if ome subset of the request would have succeeded. - If this contract cannot be met for any reason then this is an error and the response code will be 50X. # noqa: E501
The version of the OpenAPI document: 20.14.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from typing import List, Union
from symphony.bdk.gen.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from symphony.bdk.gen.exceptions import ApiAttributeError
class Avatar(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a pod_model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a pod_model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'size': (str, none_type), # noqa: E501
'url': (str, none_type), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'size': 'size', # noqa: E501
'url': 'url', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Avatar - a pod_model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the pod_model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
size (str): The Avatar Size. [optional] # noqa: E501
url (str): Url of the image. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Avatar - a pod_model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the pod_model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
size (str): The Avatar Size. [optional] # noqa: E501
url (str): Url of the image. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.size: str = None
self.url: str = None
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 45.458015
| 637
| 0.578338
|
4e20437718718ca5e18a0b0e3ebdbc65cd30681e
| 1,136
|
py
|
Python
|
Patchport.py
|
zj3t/FirmAE
|
9b411a220966dc477df07f76c5354bdd213732ac
|
[
"MIT"
] | null | null | null |
Patchport.py
|
zj3t/FirmAE
|
9b411a220966dc477df07f76c5354bdd213732ac
|
[
"MIT"
] | null | null | null |
Patchport.py
|
zj3t/FirmAE
|
9b411a220966dc477df07f76c5354bdd213732ac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
import subprocess
def command(cmd):
try:
result = subprocess.check_output(cmd, shell=True)
result = result.decode().split('\n')[:-1]
except:
result = ''
return result
def Patchport(INDEX):
os.chdir('./scripts')
for i in range(1,10):
find ="listen=:"+str(i)+"00"
match = "listen=:"+str(INDEX)+"00"
cmd = 'find ./makeNetwork.py -name "*" -exec perl -pi -e "s/'+str(find)+'/'+str(match)+'/g" {} \;'
command(cmd+' 2> /dev/null')
print("Patch "+str(i))
os.chdir('../')
if __name__ == "__main__":
    argc = len(sys.argv)
    if argc != 2:
        print('Argument Error')
        sys.exit(-1)
    INDEX = sys.argv[1]
    if int(INDEX) <= 0 or int(INDEX) > 9:
        sys.exit(-1)
buf = []
while True:
if int(INDEX) >= 10:
print("INDEX 0~9")
sys.exit(-1)
buf = command('netstat -nlp | grep "'+str(INDEX)+'001"')
if len(buf) != 0:
INDEX = input("Already Use, bind error...'\n INDEX : ")
else:
Patchport(INDEX)
break
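# --- Editor's usage note ---
# Run with a single digit (1-9); the script rewrites every "listen=:N00" port
# in scripts/makeNetwork.py to use the chosen index, e.g.:
#
#     python3 Patchport.py 3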
| 22.27451
| 106
| 0.505282
|
a228a29e6e89415385e57a4ac3d987c0bc283403
| 3,017
|
py
|
Python
|
Original_Data/providence_tide_gage_hourly.py
|
ccb60/Providence_SLR
|
240de3a42761a5b309a86f424155720e5a6caa44
|
[
"MIT"
] | null | null | null |
Original_Data/providence_tide_gage_hourly.py
|
ccb60/Providence_SLR
|
240de3a42761a5b309a86f424155720e5a6caa44
|
[
"MIT"
] | null | null | null |
Original_Data/providence_tide_gage_hourly.py
|
ccb60/Providence_SLR
|
240de3a42761a5b309a86f424155720e5a6caa44
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Quick script to download Providence Tide Station data to CSV files
A script is convenient because hourly data are only available on a
yearly basis.
@author: Curtis
"""
import requests
from datetime import date
MONTHLENGTHS = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
BASE = r'https://tidesandcurrents.noaa.gov/api/datagetter'
PARAMETERS = {
    'station':'8454000', # Providence, Rhode Island
'product':'hourly_height',
    # other products for this station include:
# Air Temperature air_temperature
# Water Temperature water_temperature
# Barometric Pressure air_pressure
# Predicted tides predictions
# Observed Water Levels water_level
# Hourly heights hourly_height (predates six minute data)
# datums may also be useful to convert between units on the fly
'application':'CascoBayEstuaryPartnership', # This is just to be polite
'begin_date':'20150101', # express dates in yyyymmdd or mm/dd/yyyy format
'end_date':'20150105',
'datum':'MLLW', # many alternatives. Most likely would be MSL, MLLW or NAVD
'units':'metric',
'time_zone':'lst', # This gives consistent time, without DST, Alternatives: gmt or lst_dst
'format':'csv', # also available are json and xml
'interval':'h' # only need to specify 'h', hourly, or 'hilo' 6 min is default
}
def setupfile(thefilename):
with open(thefilename, 'w') as outfile:
outfile.write('DateTime, Date, Time, Water Level, Sigma\n')
def assembleurl(base, parms):
theurl = base + '?'
    for k, v in parms.items():
theurl += k + '=' + v + '&'
return theurl[:-1]
def adddata(thefilename, theresponse):
with open(thefilename, 'a') as outfile:
for a in theresponse.split('\n'):
if a[:4] == 'Date': #Eliminate the header row for each pass
continue
if a[:6] == 'Error': # Eliminate rows that say there's a problem
continue
if a.find('1,1,1') == -1: # Eliminate rows that contain no real data because of errors
try:
lst = a.split(',')
thedate, thetime = lst[0].split()
except ValueError: # If you can't parse the line, skip it!
continue
outfile.write(lst[0] + ',' + thedate + ',' +
thetime + ',' + lst[1] + ',' + lst[2] + '\n')
if __name__ == '__main__':
thefile = 'providence_tides_hourly.csv'
setupfile(thefile) # this will erase any file with the same name.
for year in range(1938, 2021):
print ('Year:', year)
begindate = str(year) + '0101'
enddate = str(year) + '1231'
PARAMETERS['begin_date'] = begindate
PARAMETERS['end_date'] = enddate
#url = assembleurl(BASE, PARAMETERS)
response = requests.get(BASE, PARAMETERS)
adddata(thefile, response.text)
| 40.226667
| 99
| 0.598276
|
640fb8f59b586f7e2c012fccd1c30ed06e9f7280
| 481
|
py
|
Python
|
Train_Model/util/tokenizer_helpers.py
|
collinli2022/2021_June_TeenHacks
|
1804645c8ece1d73ea4f00ee9d7be2bfb5037e71
|
[
"MIT"
] | 2
|
2020-01-29T20:35:47.000Z
|
2021-04-29T02:57:01.000Z
|
Train_Model/util/tokenizer_helpers.py
|
collinli2022/2021_June_TeenHacks
|
1804645c8ece1d73ea4f00ee9d7be2bfb5037e71
|
[
"MIT"
] | 11
|
2018-12-01T00:40:25.000Z
|
2022-02-10T00:05:58.000Z
|
Train_Model/util/tokenizer_helpers.py
|
collinli2022/2021_June_TeenHacks
|
1804645c8ece1d73ea4f00ee9d7be2bfb5037e71
|
[
"MIT"
] | null | null | null |
# Keras Tokenizer Serialization
# Written by Gautam Mittal
import pickle, json
def save_tokenizer(filepath, tokenizer):
with open(filepath, 'wb') as handle:
pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_tokenizer(filepath):
with open(filepath, 'rb') as handle:
return pickle.load(handle)
def save_word_index(filepath, tokenizer):
with open(filepath, 'w') as handle:
handle.write(json.dumps(tokenizer.word_index))
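# --- Editor's usage sketch (assumes a Keras installation providing
# tensorflow.keras.preprocessing.text.Tokenizer; file names are placeholders). ---
if __name__ == "__main__":
    from tensorflow.keras.preprocessing.text import Tokenizer
    tokenizer = Tokenizer(num_words=1000)
    tokenizer.fit_on_texts(["the quick brown fox", "jumps over the lazy dog"])
    save_tokenizer("tokenizer.pickle", tokenizer)
    save_word_index("word_index.json", tokenizer)
    restored = load_tokenizer("tokenizer.pickle")
    print(restored.texts_to_sequences(["the lazy fox"]))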
| 28.294118
| 72
| 0.72973
|
16209d66f50cc03dd249efd7cacb5ebda93ffb94
| 53
|
py
|
Python
|
qtable/__init__.py
|
ihgazni2/qtable
|
269bb1052d7c7aeeae4d0b1024746fae38870c40
|
[
"MIT"
] | null | null | null |
qtable/__init__.py
|
ihgazni2/qtable
|
269bb1052d7c7aeeae4d0b1024746fae38870c40
|
[
"MIT"
] | null | null | null |
qtable/__init__.py
|
ihgazni2/qtable
|
269bb1052d7c7aeeae4d0b1024746fae38870c40
|
[
"MIT"
] | null | null | null |
__all__ = ["qtable","engine","ndarr","locs","utils"]
| 26.5
| 52
| 0.622642
|
5c418a8d46fcbfa9c3eaa13cf8de1f604df55b72
| 4,360
|
py
|
Python
|
pyspedas/mms/fpi/mms_load_fpi_calc_pad.py
|
ergsc-devel/pyspedas
|
43d985cbcd23c54205453b06e08f8e51d29ab435
|
[
"MIT"
] | 75
|
2019-02-22T12:59:33.000Z
|
2022-02-26T15:33:20.000Z
|
pyspedas/mms/fpi/mms_load_fpi_calc_pad.py
|
ergsc-devel/pyspedas
|
43d985cbcd23c54205453b06e08f8e51d29ab435
|
[
"MIT"
] | 40
|
2019-07-02T07:46:34.000Z
|
2022-02-23T21:48:50.000Z
|
pyspedas/mms/fpi/mms_load_fpi_calc_pad.py
|
ergsc-devel/pyspedas
|
43d985cbcd23c54205453b06e08f8e51d29ab435
|
[
"MIT"
] | 43
|
2019-02-22T13:03:41.000Z
|
2022-01-24T19:26:59.000Z
|
from pyspedas import tnames
from pytplot import get_data, store_data, options
def mms_load_fpi_calc_pad(probe='1', level='sitl', datatype='', data_rate='', suffix='', autoscale=True):
"""
Calculates the omni-directional pitch angle distribution (summed and averaged)
from the individual tplot variables
Parameters
----------
probe: str
probe, valid values for MMS probes are ['1','2','3','4'].
level: str
indicates level of data processing. the default if no level is specified is 'sitl'
datatype: str
Valid datatypes for FPI are:
Quicklook: ['des', 'dis']
SITL: '' (none; loads both electron and ion data from single CDF)
L1b/L2: ['des-dist', 'dis-dist', 'dis-moms', 'des-moms']
data_rate: str
instrument data rates for FPI include 'brst' and 'fast'. The
default is 'fast'.
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
autoscale: bool
If set, use the default zrange; otherwise, use the min and max of the data for the zrange
Returns:
List of tplot variables created.
"""
out_vars = []
if isinstance(datatype, str):
if datatype == '*' or datatype == '':
if level.lower() == 'ql':
datatype = ['des', 'dis']
else:
datatype = ['des-dist', 'dis-dist']
if isinstance(datatype, str):
datatype = [datatype]
for dtype in datatype:
species = dtype[1]
if level.lower() == 'sitl':
spec_str_format = 'PitchAngDist'
obs_str_format = '_fpi_' + species
else:
spec_str_format = 'pitchAngDist'
obs_str_format = '_d' + species + 's_'
obsstr = 'mms' + str(probe) + obs_str_format
if level.lower() == 'l2':
spec_str_format = 'pitchangdist'
pad_vars = [obsstr+spec_str_format+'_'+erange+'en_'+data_rate+suffix for erange in ['low', 'mid', 'high']]
else:
pad_vars = [obsstr+spec_str_format+'_'+erange+'En'+suffix for erange in ['low', 'mid', 'high']]
pad_avg_name = obsstr+'PitchAngDist_avg'+suffix
low_en = get_data(pad_vars[0])
mid_en = get_data(pad_vars[1])
high_en = get_data(pad_vars[2])
if low_en is None or mid_en is None or high_en is None:
v3_low_pad = tnames(pad_vars[0].lower()+'_'+data_rate)
v3_mid_pad = tnames(pad_vars[1].lower()+'_'+data_rate)
v3_high_pad = tnames(pad_vars[2].lower()+'_'+data_rate)
if v3_low_pad == [] or v3_mid_pad == [] or v3_high_pad == []:
continue
low_en = get_data(v3_low_pad[0])
mid_en = get_data(v3_mid_pad[0])
high_en = get_data(v3_high_pad[0])
pad_avg_name = pad_avg_name.lower()
e_pad_sum = low_en.y+mid_en.y+high_en.y
e_pad_avg = e_pad_sum/3.0
if level == 'l2':
pad_avg_name = pad_avg_name.lower()
if species == 'e':
species_str = 'electron'
elif species == 'i':
species_str = 'ion'
if level == 'ql':
store_data(obsstr+'PitchAngDist_sum'+suffix, data={'x': low_en.times, 'y': e_pad_sum, 'v': low_en.v})
options(obsstr+'PitchAngDist_sum'+suffix, 'ytitle', 'MMS'+str(probe)+' \\ '+species_str+' \\ PAD \\ SUM')
options(obsstr+'PitchAngDist_sum'+suffix, 'yrange', [0, 180])
options(obsstr+'PitchAngDist_sum'+suffix, 'zlog', True)
options(obsstr+'PitchAngDist_sum'+suffix, 'spec', True)
options(obsstr+'PitchAngDist_sum'+suffix, 'Colormap', 'jet')
out_vars.append(obsstr+'PitchAngDist_sum'+suffix)
store_data(pad_avg_name, data={'x': low_en.times, 'y': e_pad_avg, 'v': low_en.v})
options(pad_avg_name, 'ztitle', 'eV/(cm!U2!N s sr eV)')
options(pad_avg_name, 'ytitle', 'MMS'+str(probe)+' \\ '+species_str+' \\ PAD \\ AVG')
options(pad_avg_name, 'yrange', [0, 180])
options(pad_avg_name, 'zlog', True)
options(pad_avg_name, 'spec', True)
options(pad_avg_name, 'Colormap', 'jet')
out_vars.append(pad_avg_name)
return out_vars
| 37.913043
| 118
| 0.575917
|
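A hedged usage sketch for mms_load_fpi_calc_pad, assuming pyspedas has already loaded the L2 FPI distribution variables that provide the low/mid/high-energy PAD products; the probe, time range, and data rate below are illustrative only:
import pyspedas
from pytplot import tplot
# Load L2 fast-survey electron distributions for MMS1 (illustrative time range).
pyspedas.mms.fpi(trange=['2015-10-16', '2015-10-17'], probe='1',
                 data_rate='fast', datatype='des-dist', level='l2')
# Build the summed/averaged omni-directional PAD variables defined above.
pad_vars = mms_load_fpi_calc_pad(probe='1', level='l2',
                                 datatype='des-dist', data_rate='fast')
tplot(pad_vars)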
3101ac2198f7e6ab7e411d1318016ffc72804460
| 7,870
|
py
|
Python
|
django_elasticsearch_dsl/documents.py
|
simion/django-elasticsearch-dsl
|
ecb6ac22bce353eb34851b185488ee6636034574
|
[
"Apache-2.0"
] | null | null | null |
django_elasticsearch_dsl/documents.py
|
simion/django-elasticsearch-dsl
|
ecb6ac22bce353eb34851b185488ee6636034574
|
[
"Apache-2.0"
] | null | null | null |
django_elasticsearch_dsl/documents.py
|
simion/django-elasticsearch-dsl
|
ecb6ac22bce353eb34851b185488ee6636034574
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from collections import deque
from functools import partial
from django import VERSION as DJANGO_VERSION
from django.db import models
from elasticsearch.helpers import bulk, parallel_bulk
from elasticsearch_dsl import Document as DSLDocument
from six import iteritems
from .exceptions import ModelFieldNotMappedError
from .fields import (
BooleanField,
DateField,
DEDField,
DoubleField,
FileField,
IntegerField,
KeywordField,
LongField,
ShortField,
TextField,
)
from .search import Search
from .signals import post_index
model_field_class_to_field_class = {
models.AutoField: IntegerField,
models.BigAutoField: LongField,
models.BigIntegerField: LongField,
models.BooleanField: BooleanField,
models.CharField: TextField,
models.DateField: DateField,
models.DateTimeField: DateField,
models.DecimalField: DoubleField,
models.EmailField: TextField,
models.FileField: FileField,
models.FilePathField: KeywordField,
models.FloatField: DoubleField,
models.ImageField: FileField,
models.IntegerField: IntegerField,
models.NullBooleanField: BooleanField,
models.PositiveIntegerField: IntegerField,
models.PositiveSmallIntegerField: ShortField,
models.SlugField: KeywordField,
models.SmallIntegerField: ShortField,
models.TextField: TextField,
models.TimeField: LongField,
models.URLField: TextField,
models.UUIDField: KeywordField,
}
class DocType(DSLDocument):
_prepared_fields = []
def __init__(self, related_instance_to_ignore=None, **kwargs):
super(DocType, self).__init__(**kwargs)
self._related_instance_to_ignore = related_instance_to_ignore
self._prepared_fields = self.init_prepare()
def __eq__(self, other):
return id(self) == id(other)
def __hash__(self):
return id(self)
@classmethod
def search(cls, using=None, index=None):
return Search(
using=cls._get_using(using),
index=cls._default_index(index),
doc_type=[cls],
model=cls.django.model
)
def get_queryset(self):
"""
Return the queryset that should be indexed by this doc type.
"""
return self.django.model._default_manager.all()
def get_indexing_queryset(self):
"""
Build queryset (iterator) for use by indexing.
"""
qs = self.get_queryset()
kwargs = {}
if DJANGO_VERSION >= (2,) and self.django.queryset_pagination:
kwargs = {'chunk_size': self.django.queryset_pagination}
return qs.iterator(**kwargs)
def init_prepare(self):
"""
Initialise the data model preparers once here. Extracts the preparers
from the model and generate a list of callables to avoid doing that
work on every object instance over.
"""
index_fields = getattr(self, '_fields', {})
fields = []
for name, field in iteritems(index_fields):
if not isinstance(field, DEDField):
continue
if not field._path:
field._path = [name]
prep_func = getattr(self, 'prepare_%s_with_related' % name, None)
if prep_func:
fn = partial(prep_func, related_to_ignore=self._related_instance_to_ignore)
else:
prep_func = getattr(self, 'prepare_%s' % name, None)
if prep_func:
fn = prep_func
else:
fn = partial(field.get_value_from_instance, field_value_to_ignore=self._related_instance_to_ignore)
fields.append((name, field, fn))
return fields
def prepare(self, instance):
"""
Take a model instance, and turn it into a dict that can be serialized
based on the fields defined on this DocType subclass
"""
data = {
name: prep_func(instance)
for name, field, prep_func in self._prepared_fields
}
return data
@classmethod
def to_field(cls, field_name, model_field):
"""
Returns the elasticsearch field instance appropriate for the model
field class. This is a good place to hook into if you have more complex
model field to ES field logic
"""
try:
return model_field_class_to_field_class[
model_field.__class__](attr=field_name)
except KeyError:
raise ModelFieldNotMappedError(
"Cannot convert model field {} "
"to an Elasticsearch field!".format(field_name)
)
def bulk(self, actions, **kwargs):
response = bulk(client=self._get_connection(using=kwargs.pop('using', None)), actions=actions, **kwargs)
# send post index signal
post_index.send(
sender=self.__class__,
instance=self,
actions=actions,
response=response
)
return response
def parallel_bulk(self, actions, **kwargs):
if self.django.queryset_pagination and 'chunk_size' not in kwargs:
kwargs['chunk_size'] = self.django.queryset_pagination
bulk_actions = parallel_bulk(client=self._get_connection(using=kwargs.pop('using', None)), actions=actions, **kwargs)
# As the `parallel_bulk` is lazy, we need to get it into `deque` to run it instantly
# See https://discuss.elastic.co/t/helpers-parallel-bulk-in-python-not-working/39498/2
deque(bulk_actions, maxlen=0)
# Fake return value to emulate bulk() since we don't have a result yet,
# the result is currently not used upstream anyway.
return (1, [])
@classmethod
def generate_id(cls, object_instance):
"""
The default behavior is to use the Django object's pk (id) as the
elasticseach index id (_id). If needed, this method can be overloaded
to change this default behavior.
"""
return object_instance.pk
def _prepare_action(self, object_instance, action):
return {
'_op_type': action,
'_index': self._index._name,
'_id': self.generate_id(object_instance),
'_source': (
self.prepare(object_instance) if action != 'delete' else None
),
}
def _get_actions(self, object_list, action):
for object_instance in object_list:
if action == 'delete' or self.should_index_object(object_instance):
yield self._prepare_action(object_instance, action)
def _bulk(self, *args, **kwargs):
"""Helper for switching between normal and parallel bulk operation"""
parallel = kwargs.pop('parallel', False)
if parallel:
return self.parallel_bulk(*args, **kwargs)
else:
return self.bulk(*args, **kwargs)
def should_index_object(self, obj):
"""
        Overriding this method and returning a boolean value
should determine whether the object should be indexed.
"""
return True
def update(self, thing, refresh=None, action='index', parallel=False, **kwargs):
"""
Update each document in ES for a model, iterable of models or queryset
"""
if refresh is not None:
kwargs['refresh'] = refresh
elif self.django.auto_refresh:
kwargs['refresh'] = self.django.auto_refresh
if isinstance(thing, models.Model):
object_list = [thing]
else:
object_list = thing
return self._bulk(
self._get_actions(object_list, action),
parallel=parallel,
**kwargs
)
# Alias of DocType. Need to remove DocType in 7.x
Document = DocType
| 33.632479
| 125
| 0.635578
|
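A hedged sketch of how this DocType machinery is typically driven through the package's registry decorator; the Car model, app path, and index name are placeholders and are not taken from this file:
from django_elasticsearch_dsl import Document, fields
from django_elasticsearch_dsl.registries import registry
from myapp.models import Car  # placeholder Django model
@registry.register_document
class CarDocument(Document):
    color = fields.TextField()        # explicit override; remaining fields go through to_field()
    class Index:
        name = 'cars'                 # placeholder index name
    class Django:
        model = Car
        fields = ['name', 'year']     # mapped via model_field_class_to_field_class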
a27de1fcaca15fce684563a6e104e457ecdcc96c
| 97
|
py
|
Python
|
aegis/utils/ssh/ssh_handler.py
|
Yijie-Wu/Aegis
|
f8082b66d55be135a5e2bec7ac15f860f99f7df7
|
[
"MIT"
] | null | null | null |
aegis/utils/ssh/ssh_handler.py
|
Yijie-Wu/Aegis
|
f8082b66d55be135a5e2bec7ac15f860f99f7df7
|
[
"MIT"
] | null | null | null |
aegis/utils/ssh/ssh_handler.py
|
Yijie-Wu/Aegis
|
f8082b66d55be135a5e2bec7ac15f860f99f7df7
|
[
"MIT"
] | null | null | null |
# -*- encoding:utf-8 -*-
"""
Author: Yijie.Wu
Email: 1694517106@qq.com
Date: 2020/5/16 15:17
"""
| 13.857143
| 24
| 0.618557
|
afa2a3bef7b8a6564cad85a7dd7d078b930baa1b
| 2,876
|
py
|
Python
|
mayan/apps/metadata/urls.py
|
mbehrle/mayan-edms
|
9ebf27d2ea1666eaa36ad6ddc0fb9c6accf5cced
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/metadata/urls.py
|
mbehrle/mayan-edms
|
9ebf27d2ea1666eaa36ad6ddc0fb9c6accf5cced
|
[
"Apache-2.0"
] | 1
|
2022-03-12T01:03:39.000Z
|
2022-03-12T01:03:39.000Z
|
mayan/apps/metadata/urls.py
|
mbehrle/mayan-edms
|
9ebf27d2ea1666eaa36ad6ddc0fb9c6accf5cced
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from .api_views import (
APIDocumentMetadataListView, APIDocumentMetadataView,
APIDocumentTypeMetadataTypeOptionalListView,
APIDocumentTypeMetadataTypeRequiredListView,
APIDocumentTypeMetadataTypeRequiredView, APIMetadataTypeListView,
APIMetadataTypeView
)
urlpatterns = patterns('metadata.views',
url(r'^(?P<document_id>\d+)/edit/$', 'metadata_edit', (), 'metadata_edit'),
url(r'^(?P<document_id>\d+)/view/$', 'metadata_view', (), 'metadata_view'),
url(r'^multiple/edit/$', 'metadata_multiple_edit', (), 'metadata_multiple_edit'),
url(r'^(?P<document_id>\d+)/add/$', 'metadata_add', (), 'metadata_add'),
url(r'^multiple/add/$', 'metadata_multiple_add', (), 'metadata_multiple_add'),
url(r'^(?P<document_id>\d+)/remove/$', 'metadata_remove', (), 'metadata_remove'),
url(r'^multiple/remove/$', 'metadata_multiple_remove', (), 'metadata_multiple_remove'),
url(r'^setup/type/list/$', 'setup_metadata_type_list', (), 'setup_metadata_type_list'),
url(r'^setup/type/create/$', 'setup_metadata_type_create', (), 'setup_metadata_type_create'),
url(r'^setup/type/(?P<metadatatype_id>\d+)/edit/$', 'setup_metadata_type_edit', (), 'setup_metadata_type_edit'),
url(r'^setup/type/(?P<metadatatype_id>\d+)/delete/$', 'setup_metadata_type_delete', (), 'setup_metadata_type_delete'),
url(r'^setup/document/type/(?P<document_type_id>\d+)/metadata/edit/$', 'setup_document_type_metadata', (), 'setup_document_type_metadata'),
url(r'^setup/document/type/(?P<document_type_id>\d+)/metadata/edit/required/$', 'setup_document_type_metadata_required', (), 'setup_document_type_metadata_required'),
url(r'^tools/missing_required_metadata/$', 'documents_missing_required_metadata', (), 'documents_missing_required_metadata'),
)
api_urls = patterns('',
url(r'^metadatatypes/$', APIMetadataTypeListView.as_view(), name='metadatatype-list'),
url(r'^metadatatypes/(?P<pk>[0-9]+)/$', APIMetadataTypeView.as_view(), name='metadatatype-detail'),
url(r'^document/metadata/(?P<pk>[0-9]+)/$', APIDocumentMetadataView.as_view(), name='documentmetadata-detail'),
url(r'^document/(?P<document_pk>[0-9]+)/metadata/$', APIDocumentMetadataListView.as_view(), name='documentmetadata-list'),
url(r'^document_type/(?P<document_type_pk>[0-9]+)/metadatatypes/optional/$', APIDocumentTypeMetadataTypeOptionalListView.as_view(), name='documenttypemetadatatype-list'),
url(r'^document_type/(?P<document_type_pk>[0-9]+)/metadatatypes/required/$', APIDocumentTypeMetadataTypeRequiredListView.as_view(), name='documenttypemetadatatype-list'),
url(r'^document_type/(?P<document_type_pk>[0-9]+)/metadatatypes/(?P<metadata_type_pk>[0-9]+)/$', APIDocumentTypeMetadataTypeRequiredView.as_view(), name='documenttypemetadatatype-detail'),
)
| 68.47619
| 192
| 0.735396
|
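A minimal sketch of wiring these patterns into a Django 1.x-era project urlconf; the URL prefix is a placeholder, and the api_urls patterns are assumed to be mounted separately by the project's REST API layer, which is not shown here:
from django.conf.urls import include, patterns, url
urlpatterns = patterns(
    '',
    url(r'^metadata/', include('metadata.urls')),  # placeholder mount point
)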
b68168114934cd7ae924564b1f2e32801a406a63
| 8,679
|
py
|
Python
|
byfon/cell.py
|
LyricLy/byfon
|
83e771c9210b242282cdac96f06e3bdc5d4f39c4
|
[
"MIT"
] | 5
|
2020-04-08T10:04:52.000Z
|
2021-08-10T10:01:20.000Z
|
byfon/cell.py
|
LyricLy/byfon
|
83e771c9210b242282cdac96f06e3bdc5d4f39c4
|
[
"MIT"
] | null | null | null |
byfon/cell.py
|
LyricLy/byfon
|
83e771c9210b242282cdac96f06e3bdc5d4f39c4
|
[
"MIT"
] | 1
|
2020-04-09T14:22:03.000Z
|
2020-04-09T14:22:03.000Z
|
from contextlib import contextmanager
import sys
from .errors import FreedCellError
from .logical import And, Or
from .while_ import while_expr
def need_alloc(func):
def new_meth(self, *args):
for arg in args:
if func.__name__ != "mov" and getattr(arg, "freed", False):
raise FreedCellError(f"cell at {arg.ptr} was already freed")
if self.freed:
raise FreedCellError(f"cell at {self.ptr} was already freed")
return func(self, *args)
return new_meth
def resolve_diff(diff):
"""Find the shortest BF code to change a cell by `diff`, making use of overflow."""
diff %= 256
if diff < 128:
return "+" * diff
else:
return "-" * (256 - diff)
def unpack(ts, default=None):
for t in ts:
try:
x, y = t
except TypeError:
x = t
y = default
yield x, y
class Cell:
def __init__(self, tp, ptr, *, is_zero=True):
self.tp = tp
self.ptr = ptr
self.freed = False
self._mov_on_lt = False
self._must_be_zero = is_zero
@need_alloc
def free(self):
# the ultimate space optimization
if self._must_be_zero:
self.tp._temps.append(self)
self.freed = True
def _exe_on(self, code, reset_zero=True):
self.tp.seek(self.ptr)
self.tp.exe(code)
if code and reset_zero:
self._must_be_zero = False
def _ensure_zero(self):
if not self._must_be_zero:
self._exe_on("[-]")
self._known_zero()
def _known_zero(self):
if not self._must_be_zero:
try:
self.tp._restores[-1].append(self)
except IndexError:
pass
self._must_be_zero = True
@need_alloc
def copy(self):
"""Create a copy of the cell and return the new cell."""
c1 = self.tp.alloc()
c2 = self.tp.alloc()
self.mov(c1, c2)
self |= c2
return c1
@need_alloc
def mov(self, *args):
"""Move the cell's value to one or more other cells. Destructive (replaces destinations, frees source)"""
for b, _ in unpack(args):
b.freed = False
b._ensure_zero()
self.tp.seek(self.ptr)
with self.tp.loop():
self.tp.exe("-")
for b, m in unpack(args, 1):
b._exe_on("+" * m)
self.tp.seek(self.ptr)
self._known_zero()
self.free()
def write(self):
"""Write the cell's current value to stdout. Non-destructive."""
self._exe_on(".", False)
@need_alloc
def read(self):
"""Read a single byte from stdin into the cell. Returns self for chaining with Transpiler.alloc(). Destructive (replaces current value)."""
self._exe_on(",")
return self
@need_alloc
def __iter__(self):
self.tp.seek(self.ptr)
if self._must_be_zero:
print(f"WARNING: redundant if detected, cell at {self.ptr} is guaranteed to be 0", file=sys.stderr)
with self.tp.loop():
yield
self |= 0
self._known_zero()
self.free()
@contextmanager
@need_alloc
def _while(self):
self.tp.seek(self.ptr)
if self._must_be_zero:
print(f"WARNING: redundant while detected, cell at {self.ptr} is guaranteed to be 0", file=sys.stderr)
with self.tp.loop():
yield
if self._must_be_zero:
print(f"WARNING: while loop is equivalent to if, cell at {self.ptr} is guaranteed to be 0 after first loop", file=sys.stderr)
self._known_zero()
@need_alloc
def not_(self):
"""Negate the cell with boolean logic. Sets cell to 0 if it is nonzero, and 1 otherwise. Destructive (might replace current value, might destroy and make a new one)."""
if self._must_be_zero:
self += 1
self._must_be_zero = False
return self
else:
res = self.tp.alloc()
res += 1
for if_ in self:
res -= 1
return res
@need_alloc
def and_(self, other):
"""Perform logical AND on two cells. Destructive (frees both cells)."""
res = self.tp.alloc()
for if_ in self:
for if_ in other:
res += 1
return res
@need_alloc
def or_(self, other, *, normal=False):
"""Perform logical OR on two cells. Destructive (replaces first cell, frees second one).
If normal is True, forces result to be one of 0 or 1. Allocates an additional cell.
"""
result = self + other
if normal:
new_result = self.tp.alloc()
for if_ in result:
new_result += 1
result = new_result
return result
def __hash__(self):
return self.ptr
def __repr__(self):
after = " = 0" if self._must_be_zero else ""
return f"Cell(*{self.ptr}{after})"
def __invert__(self):
return self.copy()
def __neg__(self):
self._mov_on_lt = True
return self
def __ior__(self, other):
if isinstance(other, Cell):
other.mov(self)
else:
self.freed = False
self._ensure_zero()
self._exe_on(resolve_diff(other))
return self
@need_alloc
def __and__(self, other):
"""Create a dummy object for use in if_ that acts like the logical AND of two cells.
Does not actually perform AND, use Cell.and_ for this.
"""
return And(self, other)
@need_alloc
def __or__(self, other):
"""Create a dummy object for use in if_ that acts like the logical OR of two cells.
Does not actually perform OR, use Cell.or_ for this.
"""
return Or(self, other)
@need_alloc
def __iadd__(self, other):
"""Add a cell or integer to the cell. Frees the cell being added."""
if isinstance(other, Cell):
self.tp.seek(other.ptr)
with self.tp.loop():
self.tp.exe("-")
self._exe_on("+")
self.tp.seek(other.ptr)
other._known_zero()
other.free()
else:
self._exe_on(resolve_diff(other))
return self
@need_alloc
def __isub__(self, other):
"""Subtract a cell or integer from the cell. Frees the cell being subtracted."""
if isinstance(other, Cell):
self.tp.seek(other.ptr)
with self.tp.loop():
self.tp.exe("-")
self._exe_on("-")
self.tp.seek(other.ptr)
other._known_zero()
other.free()
else:
self._exe_on(resolve_diff(-other))
return self
@need_alloc
def __add__(self, other):
"""Sum two cells or a cell and an integer. Destructive, non-commutative (mutates left argument, frees right). For non-destructive, commutative behaviour, copy both sides."""
self += other
return self
__radd__ = __add__
@need_alloc
def __sub__(self, other):
"""Subtract two cells or a cell and an integer. Destructive. (mutates left argument, frees right). For non-destructive behaviour, copy both sides."""
self -= other
return self
@need_alloc
def __rsub__(self, other):
"""Subtract the cell from an integer. Destructive (frees)."""
res = self.tp.alloc()
res += other
res -= self
return res
@need_alloc
def __mul__(self, other):
"""Multiply a cell and an integer. Destroys the cell."""
new = self.tp.alloc()
self.mov((new, other))
return new
__rmul__ = __mul__
@need_alloc
def __eq__(self, other):
"""Check if the value pointed to by two cells is identical. Destroys both cells (frees them).
Does not compare the pointers themselves; for that, compare the ptr attributes of the cells.
"""
return (self != other).not_()
@need_alloc
def __ne__(self, other):
"""Check if the value pointed to by two cells differs. Destroys the first cell (frees it), returns the second un-normalized.
Does not compare the pointers themselves; for that, compare the ptr attributes of the cells.
"""
if isinstance(other, Cell):
with self._while():
self -= 1
other -= 1
self.free()
return other
else:
self -= other
return self
| 30.667845
| 181
| 0.564696
|
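resolve_diff() above picks whichever of '+' or '-' reaches a target delta in fewer brainfuck instructions by wrapping modulo 256. A stand-alone illustration follows; it simply repeats that helper so it can run outside the package and is not an addition to byfon itself:
def resolve_diff(diff):
    # Mirror of byfon.cell.resolve_diff: shortest run of +/- modulo 256.
    diff %= 256
    if diff < 128:
        return "+" * diff
    return "-" * (256 - diff)
assert resolve_diff(3) == "+++"
assert resolve_diff(255) == "-"        # wrapping down beats 255 '+' instructions
assert resolve_diff(-2) == "--"        # negative deltas wrap the same way
assert len(resolve_diff(200)) == 56    # 256 - 200 '-' instructions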
bccba898db172a9e612927812d096d49ee8480b0
| 1,564
|
py
|
Python
|
tests/test_vigenere.py
|
GuptaAyush19/Vigenere-Cipher
|
9e3d5d1cca07010ff7e7bc5c37cfee357d12d84b
|
[
"MIT"
] | 3
|
2020-12-29T11:42:09.000Z
|
2021-10-30T10:12:31.000Z
|
tests/test_vigenere.py
|
GuptaAyush19/Vigenere-Cipher
|
9e3d5d1cca07010ff7e7bc5c37cfee357d12d84b
|
[
"MIT"
] | null | null | null |
tests/test_vigenere.py
|
GuptaAyush19/Vigenere-Cipher
|
9e3d5d1cca07010ff7e7bc5c37cfee357d12d84b
|
[
"MIT"
] | 1
|
2021-08-18T23:16:58.000Z
|
2021-08-18T23:16:58.000Z
|
import unittest
import vigenere
import random
class TestVigenere(unittest.TestCase):
def setUp(self):
self.key_size = 100
self.valid_char = [chr(i) for i in range(43, 123)]
self.not_int = ["string", 1j, 3.14]
@staticmethod
def random_text(size=20):
text_list = [chr(random.randint(0x21, 0x7e)) for i in range(size)]
return "".join(text_list)
def test_valid_key(self):
self.assertTrue(vigenere.valid_key(vigenere.random_key()))
with self.assertRaises(TypeError):
vigenere.valid_key(420)
with self.assertRaises(Warning):
vigenere.valid_key('foo')
def test_random_key(self):
random_key = vigenere.random_key(key_size=self.key_size)
for char in random_key:
self.assertIn(char, self.valid_char,
"Character in the key does not match key set")
self.assertEqual(len(random_key), self.key_size,
"Length of key is not equal to the arguement passed")
def test_encrypt_decrypt(self):
for _ in range(1000):
random_key = vigenere.random_key(key_size=100)
plain_text = self.random_text(size=1000)
cipher = vigenere.encrypt(plain_text, random_key)
self.assertEqual(vigenere.decrypt(cipher, random_key), plain_text,
"Decrypted cipher not equal to the plain-text")
if __name__ == "__main__":
unittest.main()
| 35.545455
| 78
| 0.597187
|
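The round-trip test above only relies on encrypt/decrypt being inverses over printable ASCII. Below is a self-contained toy Vigenère over the same 0x21-0x7e range, for illustration only; it is an independent sketch, not the project's vigenere module:
LOW, HIGH = 0x21, 0x7e                 # printable range used by random_text()
SPAN = HIGH - LOW + 1
def _shift(text, key, sign):
    out = []
    for i, ch in enumerate(text):
        k = ord(key[i % len(key)]) - LOW
        out.append(chr((ord(ch) - LOW + sign * k) % SPAN + LOW))
    return "".join(out)
def toy_encrypt(plain, key):
    return _shift(plain, key, +1)
def toy_decrypt(cipher, key):
    return _shift(cipher, key, -1)
assert toy_decrypt(toy_encrypt("Hello,World!", "K3y"), "K3y") == "Hello,World!"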
a3c4e6be59c3105b93b89656d5e2a21dd6e0b2e4
| 1,201
|
py
|
Python
|
tests/test_viewsets.py
|
bensiu/django-rest-framework
|
28e8895b8abcc65457862c828b1118a9d722c8ed
|
[
"BSD-2-Clause"
] | 1
|
2020-03-18T22:31:29.000Z
|
2020-03-18T22:31:29.000Z
|
tests/test_viewsets.py
|
bensiu/django-rest-framework
|
28e8895b8abcc65457862c828b1118a9d722c8ed
|
[
"BSD-2-Clause"
] | 1
|
2017-11-22T07:48:52.000Z
|
2017-11-22T07:48:52.000Z
|
tests/test_viewsets.py
|
bensiu/django-rest-framework
|
28e8895b8abcc65457862c828b1118a9d722c8ed
|
[
"BSD-2-Clause"
] | 1
|
2020-05-13T00:31:27.000Z
|
2020-05-13T00:31:27.000Z
|
from django.test import TestCase
from rest_framework import status
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory
from rest_framework.viewsets import GenericViewSet
factory = APIRequestFactory()
class BasicViewSet(GenericViewSet):
def list(self, request, *args, **kwargs):
return Response({'ACTION': 'LIST'})
class InitializeViewSetsTestCase(TestCase):
def test_initialize_view_set_with_actions(self):
request = factory.get('/', '', content_type='application/json')
my_view = BasicViewSet.as_view(actions={
'get': 'list',
})
response = my_view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {'ACTION': 'LIST'}
def test_initialize_view_set_with_empty_actions(self):
try:
BasicViewSet.as_view()
except TypeError as e:
assert str(e) == ("The `actions` argument must be provided "
"when calling `.as_view()` on a ViewSet. "
"For example `.as_view({'get': 'list'})`")
else:
self.fail("actions must not be empty.")
| 33.361111
| 72
| 0.642798
|
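A short sketch of the same explicit actions mapping used in a urlconf; the route path and name are placeholders:
from django.urls import path                        # Django 2.0+ style routing
basic_list = BasicViewSet.as_view(actions={'get': 'list'})
urlpatterns = [
    path('basic/', basic_list, name='basic-list'),  # placeholder route
]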
af9e6072e13fc920414a76db1dbea52cd9e59eca
| 68,048
|
py
|
Python
|
ionoscloud/api/network_interfaces_api.py
|
ionos-cloud/ionos-cloud-sdk-python
|
3c5804697c262898e6f6a438dc40e1b45a4bb5c9
|
[
"Apache-2.0"
] | null | null | null |
ionoscloud/api/network_interfaces_api.py
|
ionos-cloud/ionos-cloud-sdk-python
|
3c5804697c262898e6f6a438dc40e1b45a4bb5c9
|
[
"Apache-2.0"
] | null | null | null |
ionoscloud/api/network_interfaces_api.py
|
ionos-cloud/ionos-cloud-sdk-python
|
3c5804697c262898e6f6a438dc40e1b45a4bb5c9
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import re # noqa: F401
import six
from ionoscloud.api_client import ApiClient
from ionoscloud.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class NetworkInterfacesApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def datacenters_servers_nics_delete(self, datacenter_id, server_id, nic_id, **kwargs): # noqa: E501
"""Delete NICs # noqa: E501
Remove the specified NIC. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_delete(datacenter_id, server_id, nic_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param nic_id: The unique ID of the NIC. (required)
:type nic_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_servers_nics_delete_with_http_info(datacenter_id, server_id, nic_id, **kwargs) # noqa: E501
def datacenters_servers_nics_delete_with_http_info(self, datacenter_id, server_id, nic_id, **kwargs): # noqa: E501
"""Delete NICs # noqa: E501
Remove the specified NIC. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_delete_with_http_info(datacenter_id, server_id, nic_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param nic_id: The unique ID of the NIC. (required)
:type nic_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'server_id',
'nic_id',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_servers_nics_delete" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_servers_nics_delete`") # noqa: E501
# verify the required parameter 'server_id' is set
if self.api_client.client_side_validation and ('server_id' not in local_var_params or # noqa: E501
local_var_params['server_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `server_id` when calling `datacenters_servers_nics_delete`") # noqa: E501
# verify the required parameter 'nic_id' is set
if self.api_client.client_side_validation and ('nic_id' not in local_var_params or # noqa: E501
local_var_params['nic_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nic_id` when calling `datacenters_servers_nics_delete`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_delete`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_delete`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'server_id' in local_var_params:
path_params['serverId'] = local_var_params['server_id'] # noqa: E501
if 'nic_id' in local_var_params:
path_params['nicId'] = local_var_params['nic_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = None
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
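    # Hedged usage sketch (illustrative only, not part of the generated SDK);
    # credentials and UUIDs below are placeholders:
    #     import ionoscloud
    #     configuration = ionoscloud.Configuration(username='user', password='password')
    #     with ionoscloud.ApiClient(configuration) as api_client:
    #         nic_api = ionoscloud.NetworkInterfacesApi(api_client)
    #         nic_api.datacenters_servers_nics_delete('dc-uuid', 'server-uuid', 'nic-uuid')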
def datacenters_servers_nics_find_by_id(self, datacenter_id, server_id, nic_id, **kwargs): # noqa: E501
"""Retrieve NICs # noqa: E501
Retrieve the properties of the specified NIC. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_find_by_id(datacenter_id, server_id, nic_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param nic_id: The unique ID of the NIC. (required)
:type nic_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Nic
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_servers_nics_find_by_id_with_http_info(datacenter_id, server_id, nic_id, **kwargs) # noqa: E501
def datacenters_servers_nics_find_by_id_with_http_info(self, datacenter_id, server_id, nic_id, **kwargs): # noqa: E501
"""Retrieve NICs # noqa: E501
Retrieve the properties of the specified NIC. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_find_by_id_with_http_info(datacenter_id, server_id, nic_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param nic_id: The unique ID of the NIC. (required)
:type nic_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Nic, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'server_id',
'nic_id',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_servers_nics_find_by_id" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_servers_nics_find_by_id`") # noqa: E501
# verify the required parameter 'server_id' is set
if self.api_client.client_side_validation and ('server_id' not in local_var_params or # noqa: E501
local_var_params['server_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `server_id` when calling `datacenters_servers_nics_find_by_id`") # noqa: E501
# verify the required parameter 'nic_id' is set
if self.api_client.client_side_validation and ('nic_id' not in local_var_params or # noqa: E501
local_var_params['nic_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nic_id` when calling `datacenters_servers_nics_find_by_id`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_find_by_id`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_find_by_id`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'server_id' in local_var_params:
path_params['serverId'] = local_var_params['server_id'] # noqa: E501
if 'nic_id' in local_var_params:
path_params['nicId'] = local_var_params['nic_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'Nic'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_servers_nics_get(self, datacenter_id, server_id, **kwargs): # noqa: E501
"""List NICs # noqa: E501
List all NICs, attached to the specified server. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_get(datacenter_id, server_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param offset: The first element (from the complete list of the elements) to include in the response (used together with <b><i>limit</i></b> for pagination).
:type offset: int
:param limit: The maximum number of elements to return (use together with offset for pagination).
:type limit: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Nics
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_servers_nics_get_with_http_info(datacenter_id, server_id, **kwargs) # noqa: E501
def datacenters_servers_nics_get_with_http_info(self, datacenter_id, server_id, **kwargs): # noqa: E501
"""List NICs # noqa: E501
List all NICs, attached to the specified server. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_get_with_http_info(datacenter_id, server_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param offset: The first element (from the complete list of the elements) to include in the response (used together with <b><i>limit</i></b> for pagination).
:type offset: int
:param limit: The maximum number of elements to return (use together with offset for pagination).
:type limit: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Nics, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'server_id',
'pretty',
'depth',
'x_contract_number',
'offset',
'limit'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_servers_nics_get" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_servers_nics_get`") # noqa: E501
# verify the required parameter 'server_id' is set
if self.api_client.client_side_validation and ('server_id' not in local_var_params or # noqa: E501
local_var_params['server_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `server_id` when calling `datacenters_servers_nics_get`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_get`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_get`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'offset' in local_var_params and local_var_params['offset'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `offset` when calling `datacenters_servers_nics_get`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 10000: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `datacenters_servers_nics_get`, must be a value less than or equal to `10000`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `datacenters_servers_nics_get`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'server_id' in local_var_params:
path_params['serverId'] = local_var_params['server_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'Nics'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/servers/{serverId}/nics', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_servers_nics_patch(self, datacenter_id, server_id, nic_id, nic, **kwargs): # noqa: E501
"""Partially modify NICs # noqa: E501
Update the properties of the specified NIC. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_patch(datacenter_id, server_id, nic_id, nic, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param nic_id: The unique ID of the NIC. (required)
:type nic_id: str
:param nic: The properties of the NIC to be updated. (required)
:type nic: NicProperties
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Nic
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_servers_nics_patch_with_http_info(datacenter_id, server_id, nic_id, nic, **kwargs) # noqa: E501
def datacenters_servers_nics_patch_with_http_info(self, datacenter_id, server_id, nic_id, nic, **kwargs): # noqa: E501
"""Partially modify NICs # noqa: E501
Update the properties of the specified NIC. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_patch_with_http_info(datacenter_id, server_id, nic_id, nic, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param nic_id: The unique ID of the NIC. (required)
:type nic_id: str
:param nic: The properties of the NIC to be updated. (required)
:type nic: NicProperties
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Nic, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'server_id',
'nic_id',
'nic',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_servers_nics_patch" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_servers_nics_patch`") # noqa: E501
# verify the required parameter 'server_id' is set
if self.api_client.client_side_validation and ('server_id' not in local_var_params or # noqa: E501
local_var_params['server_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `server_id` when calling `datacenters_servers_nics_patch`") # noqa: E501
# verify the required parameter 'nic_id' is set
if self.api_client.client_side_validation and ('nic_id' not in local_var_params or # noqa: E501
local_var_params['nic_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nic_id` when calling `datacenters_servers_nics_patch`") # noqa: E501
# verify the required parameter 'nic' is set
if self.api_client.client_side_validation and ('nic' not in local_var_params or # noqa: E501
local_var_params['nic'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nic` when calling `datacenters_servers_nics_patch`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_patch`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_patch`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'server_id' in local_var_params:
path_params['serverId'] = local_var_params['server_id'] # noqa: E501
if 'nic_id' in local_var_params:
path_params['nicId'] = local_var_params['nic_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'nic' in local_var_params:
body_params = local_var_params['nic']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'Nic'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
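    # A minimal usage sketch: besides ``async_req``, these ``*_with_http_info`` helpers
    # also accept the optional ``response_type`` and ``query_params`` keyword arguments
    # handled in the body above. ``api`` and ``nic_properties`` are placeholder names.
    #
    #   response, status, headers = api.datacenters_servers_nics_patch_with_http_info(
    #       datacenter_id, server_id, nic_id, nic_properties,
    #       depth=1,
    #       query_params={'extra': 'value'},  # appended to the generated query string
    #   )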
def datacenters_servers_nics_post(self, datacenter_id, server_id, nic, **kwargs): # noqa: E501
"""Create NICs # noqa: E501
Add a NIC to the specified server. The combined total of NICs and attached volumes cannot exceed 24 per server. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_post(datacenter_id, server_id, nic, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param nic: The NIC to create. (required)
:type nic: Nic
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Nic
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_servers_nics_post_with_http_info(datacenter_id, server_id, nic, **kwargs) # noqa: E501
def datacenters_servers_nics_post_with_http_info(self, datacenter_id, server_id, nic, **kwargs): # noqa: E501
"""Create NICs # noqa: E501
Add a NIC to the specified server. The combined total of NICs and attached volumes cannot exceed 24 per server. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_post_with_http_info(datacenter_id, server_id, nic, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param nic: The NIC to create. (required)
:type nic: Nic
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: response data without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Nic, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'server_id',
'nic',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_servers_nics_post" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_servers_nics_post`") # noqa: E501
# verify the required parameter 'server_id' is set
if self.api_client.client_side_validation and ('server_id' not in local_var_params or # noqa: E501
local_var_params['server_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `server_id` when calling `datacenters_servers_nics_post`") # noqa: E501
# verify the required parameter 'nic' is set
if self.api_client.client_side_validation and ('nic' not in local_var_params or # noqa: E501
local_var_params['nic'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nic` when calling `datacenters_servers_nics_post`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_post`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_post`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'server_id' in local_var_params:
path_params['serverId'] = local_var_params['server_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'nic' in local_var_params:
body_params = local_var_params['nic']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'Nic'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/servers/{serverId}/nics', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_servers_nics_put(self, datacenter_id, server_id, nic_id, nic, **kwargs): # noqa: E501
"""Modify NICs # noqa: E501
Modify the properties of the specified NIC. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_put(datacenter_id, server_id, nic_id, nic, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param nic_id: The unique ID of the NIC. (required)
:type nic_id: str
:param nic: The modified NIC (required)
:type nic: NicPut
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Nic
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_servers_nics_put_with_http_info(datacenter_id, server_id, nic_id, nic, **kwargs) # noqa: E501
def datacenters_servers_nics_put_with_http_info(self, datacenter_id, server_id, nic_id, nic, **kwargs): # noqa: E501
"""Modify NICs # noqa: E501
Modify the properties of the specified NIC. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_nics_put_with_http_info(datacenter_id, server_id, nic_id, nic, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param server_id: The unique ID of the server. (required)
:type server_id: str
:param nic_id: The unique ID of the NIC. (required)
:type nic_id: str
:param nic: The modified NIC (required)
:type nic: NicPut
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: response data without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Nic, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'server_id',
'nic_id',
'nic',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_servers_nics_put" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_servers_nics_put`") # noqa: E501
# verify the required parameter 'server_id' is set
if self.api_client.client_side_validation and ('server_id' not in local_var_params or # noqa: E501
local_var_params['server_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `server_id` when calling `datacenters_servers_nics_put`") # noqa: E501
# verify the required parameter 'nic_id' is set
if self.api_client.client_side_validation and ('nic_id' not in local_var_params or # noqa: E501
local_var_params['nic_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nic_id` when calling `datacenters_servers_nics_put`") # noqa: E501
# verify the required parameter 'nic' is set
if self.api_client.client_side_validation and ('nic' not in local_var_params or # noqa: E501
local_var_params['nic'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nic` when calling `datacenters_servers_nics_put`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_put`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_servers_nics_put`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'server_id' in local_var_params:
path_params['serverId'] = local_var_params['server_id'] # noqa: E501
if 'nic_id' in local_var_params:
path_params['nicId'] = local_var_params['nic_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'nic' in local_var_params:
body_params = local_var_params['nic']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'Nic'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
| 58.360206
| 451
| 0.638887
|
c016a78b365ff3d83a68401feed386ac0f8160f8
| 2,354
|
py
|
Python
|
examples/imagenet_logits.py
|
sis0truk/pretrained-models.pytorch
|
4aea6d47996279b4b281355ca3d9738d0dff7469
|
[
"BSD-3-Clause"
] | 91
|
2018-03-21T19:45:00.000Z
|
2021-12-13T06:08:00.000Z
|
examples/imagenet_logits.py
|
wubin1836/pretrained-models.pytorch
|
cb5127f43c554c0bb52c5ded3c071d9de9a514a4
|
[
"BSD-3-Clause"
] | 6
|
2019-08-03T08:49:21.000Z
|
2022-03-11T23:43:56.000Z
|
examples/imagenet_logits.py
|
wubin1836/pretrained-models.pytorch
|
cb5127f43c554c0bb52c5ded3c071d9de9a514a4
|
[
"BSD-3-Clause"
] | 13
|
2018-03-23T12:31:52.000Z
|
2020-07-20T13:16:44.000Z
|
import argparse
from PIL import Image
import torch
import torchvision.transforms as transforms
import sys
sys.path.append('.')
import pretrainedmodels
import pretrainedmodels.utils as utils
model_names = sorted(name for name in pretrainedmodels.__dict__
if not name.startswith("__")
and name.islower()
and callable(pretrainedmodels.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', '-a', metavar='ARCH', default='nasnetalarge',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: nasnetalarge)',
nargs='+')
parser.add_argument('--path_img', type=str, default='data/cat.jpg')
def main():
global args
args = parser.parse_args()
for arch in args.arch:
# Load Model
model = pretrainedmodels.__dict__[arch](num_classes=1000,
pretrained='imagenet')
model.eval()
path_img = args.path_img
# Load and Transform one input image
load_img = utils.LoadImage()
tf_img = utils.TransformImage(model)
input_data = load_img(args.path_img) # 3x400x225
input_data = tf_img(input_data) # 3x299x299
input_data = input_data.unsqueeze(0) # 1x3x299x299
input = torch.autograd.Variable(input_data)
# Load Imagenet Synsets
with open('data/imagenet_synsets.txt', 'r') as f:
synsets = f.readlines()
# len(synsets)==1001
        # synsets[0] == background
synsets = [x.strip() for x in synsets]
splits = [line.split(' ') for line in synsets]
key_to_classname = {spl[0]:' '.join(spl[1:]) for spl in splits}
with open('data/imagenet_classes.txt', 'r') as f:
class_id_to_key = f.readlines()
class_id_to_key = [x.strip() for x in class_id_to_key]
# Make predictions
output = model(input) # size(1, 1000)
max, argmax = output.data.squeeze().max(0)
class_id = argmax[0]
class_key = class_id_to_key[class_id]
classname = key_to_classname[class_key]
print("'{}': '{}' is a '{}'".format(arch, path_img, classname))
if __name__ == '__main__':
main()
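# Example invocation (a sketch; the architecture and image path below are the argparse
# defaults above, and -a accepts several architectures because of nargs='+'):
#
#   python examples/imagenet_logits.py -a nasnetalarge --path_img data/cat.jpg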
| 32.694444
| 75
| 0.608751
|
7a48fcbaa9c3efd46f5f396a4fddde42f201fbbd
| 14,698
|
py
|
Python
|
utils/voc_evaluation.py
|
TheSeriousProgrammer/Keras_QuickNet_SSD
|
1c5accfa8d6ae16dab035ba9097fe8670680bdc2
|
[
"MIT"
] | 12
|
2020-09-28T19:56:06.000Z
|
2022-03-16T06:27:01.000Z
|
utils/voc_evaluation.py
|
500swapnil/TensorflowKeras-Efficientnet-SSD
|
30c3c7ac8a2c05cc60fb4635a3f954c45e46108a
|
[
"MIT"
] | 4
|
2021-05-20T21:47:44.000Z
|
2021-10-10T12:33:12.000Z
|
utils/voc_evaluation.py
|
500swapnil/TensorflowKeras-Efficientnet-SSD
|
30c3c7ac8a2c05cc60fb4635a3f954c45e46108a
|
[
"MIT"
] | 1
|
2021-05-20T21:41:11.000Z
|
2021-05-20T21:41:11.000Z
|
# Evaluation code from https://github.com/chainer/chainercv
from collections import defaultdict
import itertools
import numpy as np
import six
def bbox_iou(bbox_a, bbox_b):
"""Calculate the Intersection of Unions (IoUs) between bounding boxes.
IoU is calculated as a ratio of area of the intersection
and area of the union.
This function accepts both :obj:`numpy.ndarray` and :obj:`cupy.ndarray` as
inputs. Please note that both :obj:`bbox_a` and :obj:`bbox_b` need to be
same type.
The output is same type as the type of the inputs.
Args:
bbox_a (array): An array whose shape is :math:`(N, 4)`.
:math:`N` is the number of bounding boxes.
The dtype should be :obj:`numpy.float32`.
bbox_b (array): An array similar to :obj:`bbox_a`,
whose shape is :math:`(K, 4)`.
The dtype should be :obj:`numpy.float32`.
Returns:
array:
An array whose shape is :math:`(N, K)`. \
An element at index :math:`(n, k)` contains IoUs between \
:math:`n` th bounding box in :obj:`bbox_a` and :math:`k` th bounding \
box in :obj:`bbox_b`.
"""
if bbox_a.shape[1] != 4 or bbox_b.shape[1] != 4:
raise IndexError
# top left
tl = np.maximum(bbox_a[:, None, :2], bbox_b[:, :2])
# bottom right
br = np.minimum(bbox_a[:, None, 2:], bbox_b[:, 2:])
area_i = np.prod(br - tl, axis=2) * (tl < br).all(axis=2)
area_a = np.prod(bbox_a[:, 2:] - bbox_a[:, :2], axis=1)
area_b = np.prod(bbox_b[:, 2:] - bbox_b[:, :2], axis=1)
return area_i / (area_a[:, None] + area_b - area_i)
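# Worked example: with bbox_a = np.array([[0., 0., 2., 2.]]) and
# bbox_b = np.array([[1., 1., 3., 3.]]), the intersection is the unit square
# [1, 1, 2, 2] with area 1 and each box has area 4, so bbox_iou returns
# [[1 / (4 + 4 - 1)]] ~= [[0.1429]].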
def eval_detection_voc(
pred_bboxes,
pred_labels,
pred_scores,
gt_bboxes,
gt_labels,
gt_difficults=None,
iou_thresh=0.5,
use_07_metric=False):
"""Calculate average precisions based on evaluation code of PASCAL VOC.
This function evaluates predicted bounding boxes obtained from a dataset
which has :math:`N` images by using average precision for each class.
The code is based on the evaluation code used in PASCAL VOC Challenge.
Args:
pred_bboxes (iterable of numpy.ndarray): An iterable of :math:`N`
sets of bounding boxes.
Its index corresponds to an index for the base dataset.
Each element of :obj:`pred_bboxes` is a set of coordinates
of bounding boxes. This is an array whose shape is :math:`(R, 4)`,
where :math:`R` corresponds
to the number of bounding boxes, which may vary among boxes.
The second axis corresponds to
:math:`y_{min}, x_{min}, y_{max}, x_{max}` of a bounding box.
pred_labels (iterable of numpy.ndarray): An iterable of labels.
Similar to :obj:`pred_bboxes`, its index corresponds to an
index for the base dataset. Its length is :math:`N`.
pred_scores (iterable of numpy.ndarray): An iterable of confidence
scores for predicted bounding boxes. Similar to :obj:`pred_bboxes`,
its index corresponds to an index for the base dataset.
Its length is :math:`N`.
gt_bboxes (iterable of numpy.ndarray): An iterable of ground truth
bounding boxes
whose length is :math:`N`. An element of :obj:`gt_bboxes` is a
bounding box whose shape is :math:`(R, 4)`. Note that the number of
bounding boxes in each image does not need to be same as the number
of corresponding predicted boxes.
gt_labels (iterable of numpy.ndarray): An iterable of ground truth
labels which are organized similarly to :obj:`gt_bboxes`.
gt_difficults (iterable of numpy.ndarray): An iterable of boolean
arrays which is organized similarly to :obj:`gt_bboxes`.
This tells whether the
corresponding ground truth bounding box is difficult or not.
By default, this is :obj:`None`. In that case, this function
considers all bounding boxes to be not difficult.
iou_thresh (float): A prediction is correct if its Intersection over
Union with the ground truth is above this value.
use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
for calculating average precision. The default value is
:obj:`False`.
Returns:
dict:
The keys, value-types and the description of the values are listed
below.
* **ap** (*numpy.ndarray*): An array of average precisions. \
The :math:`l`-th value corresponds to the average precision \
for class :math:`l`. If class :math:`l` does not exist in \
either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
value is set to :obj:`numpy.nan`.
* **map** (*float*): The average of Average Precisions over classes.
"""
prec, rec = calc_detection_voc_prec_rec(pred_bboxes,
pred_labels,
pred_scores,
gt_bboxes,
gt_labels,
gt_difficults,
iou_thresh=iou_thresh)
ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
return {'ap': ap, 'map': np.nanmean(ap)}
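# Minimal usage sketch (the variable names are placeholders for per-image predictions
# and ground truth collected from a detector and a dataset):
#
#   result = eval_detection_voc(pred_bboxes, pred_labels, pred_scores,
#                               gt_bboxes, gt_labels, use_07_metric=True)
#   print(result['map'], result['ap'])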
def calc_detection_voc_prec_rec(
pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels,
gt_difficults=None,
iou_thresh=0.5):
"""Calculate precision and recall based on evaluation code of PASCAL VOC.
This function calculates precision and recall of
predicted bounding boxes obtained from a dataset which has :math:`N`
images.
The code is based on the evaluation code used in PASCAL VOC Challenge.
Args:
pred_bboxes (iterable of numpy.ndarray): An iterable of :math:`N`
sets of bounding boxes.
Its index corresponds to an index for the base dataset.
Each element of :obj:`pred_bboxes` is a set of coordinates
of bounding boxes. This is an array whose shape is :math:`(R, 4)`,
where :math:`R` corresponds
to the number of bounding boxes, which may vary among boxes.
The second axis corresponds to
:math:`y_{min}, x_{min}, y_{max}, x_{max}` of a bounding box.
pred_labels (iterable of numpy.ndarray): An iterable of labels.
Similar to :obj:`pred_bboxes`, its index corresponds to an
index for the base dataset. Its length is :math:`N`.
pred_scores (iterable of numpy.ndarray): An iterable of confidence
scores for predicted bounding boxes. Similar to :obj:`pred_bboxes`,
its index corresponds to an index for the base dataset.
Its length is :math:`N`.
gt_bboxes (iterable of numpy.ndarray): An iterable of ground truth
bounding boxes
whose length is :math:`N`. An element of :obj:`gt_bboxes` is a
bounding box whose shape is :math:`(R, 4)`. Note that the number of
bounding boxes in each image does not need to be same as the number
of corresponding predicted boxes.
gt_labels (iterable of numpy.ndarray): An iterable of ground truth
labels which are organized similarly to :obj:`gt_bboxes`.
gt_difficults (iterable of numpy.ndarray): An iterable of boolean
arrays which is organized similarly to :obj:`gt_bboxes`.
This tells whether the
corresponding ground truth bounding box is difficult or not.
By default, this is :obj:`None`. In that case, this function
considers all bounding boxes to be not difficult.
iou_thresh (float): A prediction is correct if its Intersection over
            Union with the ground truth is above this value.
Returns:
tuple of two lists:
This function returns two lists: :obj:`prec` and :obj:`rec`.
* :obj:`prec`: A list of arrays. :obj:`prec[l]` is precision \
for class :math:`l`. If class :math:`l` does not exist in \
either :obj:`pred_labels` or :obj:`gt_labels`, :obj:`prec[l]` is \
set to :obj:`None`.
* :obj:`rec`: A list of arrays. :obj:`rec[l]` is recall \
for class :math:`l`. If class :math:`l` that is not marked as \
difficult does not exist in \
:obj:`gt_labels`, :obj:`rec[l]` is \
set to :obj:`None`.
"""
pred_bboxes = iter(pred_bboxes)
pred_labels = iter(pred_labels)
pred_scores = iter(pred_scores)
gt_bboxes = iter(gt_bboxes)
gt_labels = iter(gt_labels)
if gt_difficults is None:
gt_difficults = itertools.repeat(None)
else:
gt_difficults = iter(gt_difficults)
n_pos = defaultdict(int)
score = defaultdict(list)
match = defaultdict(list)
for pred_bbox, pred_label, pred_score, gt_bbox, gt_label, gt_difficult in \
six.moves.zip(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults):
if gt_difficult is None:
gt_difficult = np.zeros(gt_bbox.shape[0], dtype=bool)
for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
pred_mask_l = pred_label == l
pred_bbox_l = pred_bbox[pred_mask_l]
pred_score_l = pred_score[pred_mask_l]
# sort by score
order = pred_score_l.argsort()[::-1]
pred_bbox_l = pred_bbox_l[order]
pred_score_l = pred_score_l[order]
gt_mask_l = gt_label == l
gt_bbox_l = gt_bbox[gt_mask_l]
gt_difficult_l = gt_difficult[gt_mask_l]
n_pos[l] += np.logical_not(gt_difficult_l).sum()
score[l].extend(pred_score_l)
if len(pred_bbox_l) == 0:
continue
if len(gt_bbox_l) == 0:
match[l].extend((0,) * pred_bbox_l.shape[0])
continue
# VOC evaluation follows integer typed bounding boxes.
pred_bbox_l = pred_bbox_l.copy()
pred_bbox_l[:, 2:] += 1
gt_bbox_l = gt_bbox_l.copy()
gt_bbox_l[:, 2:] += 1
iou = bbox_iou(pred_bbox_l, gt_bbox_l)
gt_index = iou.argmax(axis=1)
# set -1 if there is no matching ground truth
gt_index[iou.max(axis=1) < iou_thresh] = -1
del iou
selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
for gt_idx in gt_index:
if gt_idx >= 0:
if gt_difficult_l[gt_idx]:
match[l].append(-1)
else:
if not selec[gt_idx]:
match[l].append(1)
else:
match[l].append(0)
selec[gt_idx] = True
else:
match[l].append(0)
for iter_ in (
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults):
if next(iter_, None) is not None:
            raise ValueError('Lengths of the input iterables must be the same.')
n_fg_class = max(n_pos.keys()) + 1
prec = [None] * n_fg_class
rec = [None] * n_fg_class
for l in n_pos.keys():
score_l = np.array(score[l])
match_l = np.array(match[l], dtype=np.int8)
order = score_l.argsort()[::-1]
match_l = match_l[order]
tp = np.cumsum(match_l == 1)
fp = np.cumsum(match_l == 0)
# If an element of fp + tp is 0,
# the corresponding element of prec[l] is nan.
prec[l] = tp / (fp + tp)
# If n_pos[l] is 0, rec[l] is None.
if n_pos[l] > 0:
rec[l] = tp / n_pos[l]
return prec, rec
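# Worked example of the bookkeeping above: for a class with n_pos = 2 ground-truth
# boxes and score-sorted matches match_l = [1, 0, 1], tp = [1, 1, 2] and
# fp = [0, 1, 1], giving prec = [1.0, 0.5, 0.667] and rec = [0.5, 0.5, 1.0].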
def calc_detection_voc_ap(prec, rec, use_07_metric=False):
"""Calculate average precisions based on evaluation code of PASCAL VOC.
This function calculates average precisions
from given precisions and recalls.
The code is based on the evaluation code used in PASCAL VOC Challenge.
Args:
prec (list of numpy.array): A list of arrays.
:obj:`prec[l]` indicates precision for class :math:`l`.
If :obj:`prec[l]` is :obj:`None`, this function returns
:obj:`numpy.nan` for class :math:`l`.
rec (list of numpy.array): A list of arrays.
:obj:`rec[l]` indicates recall for class :math:`l`.
If :obj:`rec[l]` is :obj:`None`, this function returns
:obj:`numpy.nan` for class :math:`l`.
use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
for calculating average precision. The default value is
:obj:`False`.
Returns:
~numpy.ndarray:
This function returns an array of average precisions.
The :math:`l`-th value corresponds to the average precision
for class :math:`l`. If :obj:`prec[l]` or :obj:`rec[l]` is
:obj:`None`, the corresponding value is set to :obj:`numpy.nan`.
"""
n_fg_class = len(prec)
ap = np.empty(n_fg_class)
for l in six.moves.range(n_fg_class):
if prec[l] is None or rec[l] is None:
ap[l] = np.nan
continue
if use_07_metric:
# 11 point metric
ap[l] = 0
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec[l] >= t) == 0:
p = 0
else:
p = np.max(np.nan_to_num(prec[l])[rec[l] >= t])
ap[l] += p / 11
else:
# correct AP calculation
# first append sentinel values at the end
mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0]))
mrec = np.concatenate(([0], rec[l], [1]))
mpre = np.maximum.accumulate(mpre[::-1])[::-1]
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
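# Worked example of the 11-point metric: for a single class with
# prec = [np.array([1.0, 0.5])] and rec = [np.array([0.5, 1.0])],
# calc_detection_voc_ap(prec, rec, use_07_metric=True) picks p = 1.0 at the six
# thresholds t <= 0.5 and p = 0.5 at the remaining five, so the AP is
# (6 * 1.0 + 5 * 0.5) / 11 ~= 0.773.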
| 44.539394
| 80
| 0.572255
|
98a52a0f274ab10cecd65f0fbaed260cd1ac9484
| 4,052
|
py
|
Python
|
sdk/python/pulumi_aws/eks/cluster.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/eks/cluster.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/eks/cluster.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class Cluster(pulumi.CustomResource):
"""
Manages an EKS Cluster.
"""
def __init__(__self__, __name__, __opts__=None, name=None, role_arn=None, version=None, vpc_config=None):
"""Create a Cluster resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, basestring):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if name and not isinstance(name, basestring):
raise TypeError('Expected property name to be a basestring')
__self__.name = name
"""
Name of the cluster.
"""
__props__['name'] = name
if not role_arn:
raise TypeError('Missing required property role_arn')
elif not isinstance(role_arn, basestring):
raise TypeError('Expected property role_arn to be a basestring')
__self__.role_arn = role_arn
"""
The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf.
"""
__props__['roleArn'] = role_arn
if version and not isinstance(version, basestring):
raise TypeError('Expected property version to be a basestring')
__self__.version = version
"""
Desired Kubernetes master version. If you do not specify a value, the latest available version is used.
"""
__props__['version'] = version
if not vpc_config:
raise TypeError('Missing required property vpc_config')
elif not isinstance(vpc_config, dict):
raise TypeError('Expected property vpc_config to be a dict')
__self__.vpc_config = vpc_config
"""
Nested argument for the VPC associated with your cluster. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) and [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in the Amazon EKS User Guide. Configuration detailed below.
"""
__props__['vpcConfig'] = vpc_config
__self__.arn = pulumi.runtime.UNKNOWN
"""
The Amazon Resource Name (ARN) of the cluster.
"""
__self__.certificate_authority = pulumi.runtime.UNKNOWN
"""
Nested attribute containing `certificate-authority-data` for your cluster.
"""
__self__.created_at = pulumi.runtime.UNKNOWN
__self__.endpoint = pulumi.runtime.UNKNOWN
"""
The endpoint for your Kubernetes API server.
"""
super(Cluster, __self__).__init__(
'aws:eks/cluster:Cluster',
__name__,
__props__,
__opts__)
def set_outputs(self, outs):
if 'arn' in outs:
self.arn = outs['arn']
if 'certificateAuthority' in outs:
self.certificate_authority = outs['certificateAuthority']
if 'createdAt' in outs:
self.created_at = outs['createdAt']
if 'endpoint' in outs:
self.endpoint = outs['endpoint']
if 'name' in outs:
self.name = outs['name']
if 'roleArn' in outs:
self.role_arn = outs['roleArn']
if 'version' in outs:
self.version = outs['version']
if 'vpcConfig' in outs:
self.vpc_config = outs['vpcConfig']
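# Illustrative usage sketch; the ARN is a placeholder and the vpc_config keys are an
# assumption about the expected dict shape, not values taken from this file:
#
#   cluster = Cluster('example',
#                     role_arn='arn:aws:iam::123456789012:role/eks-cluster-role',
#                     vpc_config={'subnetIds': ['subnet-...'], 'securityGroupIds': ['sg-...']})
#
# Only role_arn and vpc_config are required by the constructor; name and version are optional.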
| 42.208333
| 448
| 0.638697
|
b751773e0d8a34d42ceb97cf1668b9edc46325ab
| 7,485
|
py
|
Python
|
dask_jobqueue/htcondor.py
|
lpsinger/dask-jobqueue
|
bff96a329439c23414bd0cb11c0ee8e67145d96b
|
[
"BSD-3-Clause"
] | 1
|
2019-07-25T08:39:44.000Z
|
2019-07-25T08:39:44.000Z
|
dask_jobqueue/htcondor.py
|
lpsinger/dask-jobqueue
|
bff96a329439c23414bd0cb11c0ee8e67145d96b
|
[
"BSD-3-Clause"
] | 1
|
2019-04-19T16:36:32.000Z
|
2019-04-19T16:36:32.000Z
|
dask_jobqueue/htcondor.py
|
zonca/dask-jobqueue
|
55b3c329ec54d708dd041f87723b28c332a6406e
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import logging
import re
import shlex
import dask
from distributed.utils import parse_bytes
from .core import JobQueueCluster, docstrings
logger = logging.getLogger(__name__)
class HTCondorCluster(JobQueueCluster):
__doc__ = docstrings.with_indents(
""" Launch Dask on an HTCondor cluster with a shared file system
Parameters
----------
disk : str
Total amount of disk per job
job_extra : dict
Extra submit file attributes for the job
%(JobQueueCluster.parameters)s
Examples
--------
>>> from dask_jobqueue.htcondor import HTCondorCluster
>>> cluster = HTCondorCluster(cores=24, memory="4GB", disk="4GB")
>>> cluster.scale(10)
>>> from dask.distributed import Client
>>> client = Client(cluster)
    This also works with adaptive clusters. This automatically launches and kills workers based on load.
HTCondor can take longer to start jobs than other batch systems - tune Adaptive parameters accordingly.
>>> cluster.adapt(minimum=5, startup_cost='60s')
""",
4,
)
_script_template = """
%(shebang)s
%(job_header)s
Environment = "%(quoted_environment)s"
Arguments = "%(quoted_arguments)s"
Executable = %(executable)s
""".lstrip()
submit_command = "condor_submit -queue 1 -file"
cancel_command = "condor_rm"
job_id_regexp = r"(?P<job_id>\d+\.\d+)"
# condor sets argv[0] of the executable to "condor_exec.exe", which confuses
# Python (can't find its libs), so we have to go through the shell.
executable = "/bin/sh"
def __init__(self, disk=None, job_extra=None, config_name="htcondor", **kwargs):
if disk is None:
disk = dask.config.get("jobqueue.%s.disk" % config_name)
if disk is None:
raise ValueError(
"You must specify how much disk to use per job like ``disk='1 GB'``"
)
self.worker_disk = parse_bytes(disk)
if job_extra is None:
self.job_extra = dask.config.get("jobqueue.%s.job-extra" % config_name, {})
else:
self.job_extra = job_extra
# Instantiate args and parameters from parent abstract class
super(HTCondorCluster, self).__init__(config_name=config_name, **kwargs)
env_extra = kwargs.get("env_extra", None)
if env_extra is None:
env_extra = dask.config.get(
"jobqueue.%s.env-extra" % config_name, default=[]
)
self.env_dict = self.env_lines_to_dict(env_extra)
self.env_dict["JOB_ID"] = "$F(MY.JobId)"
self.job_header_dict = {
"MY.DaskWorkerName": '"htcondor--$F(MY.JobId)--"',
"RequestCpus": "MY.DaskWorkerCores",
"RequestMemory": "floor(MY.DaskWorkerMemory / 1048576)",
"RequestDisk": "floor(MY.DaskWorkerDisk / 1024)",
"MY.JobId": '"$(ClusterId).$(ProcId)"',
"MY.DaskWorkerCores": self.worker_cores,
"MY.DaskWorkerMemory": self.worker_memory,
"MY.DaskWorkerDisk": self.worker_disk,
}
if self.log_directory:
self.job_header_dict.update(
{
"LogDirectory": self.log_directory,
# $F(...) strips quotes
"Output": "$(LogDirectory)/worker-$F(MY.JobId).out",
"Error": "$(LogDirectory)/worker-$F(MY.JobId).err",
"Log": "$(LogDirectory)/worker-$(ClusterId).log",
# We kill all the workers to stop them so we need to stream their
# output+error if we ever want to see anything
"Stream_Output": True,
"Stream_Error": True,
}
)
if self.job_extra:
self.job_header_dict.update(self.job_extra)
def env_lines_to_dict(self, env_lines):
""" Convert an array of export statements (what we get from env-extra
in the config) into a dict """
env_dict = {}
for env_line in env_lines:
split_env_line = shlex.split(env_line)
if split_env_line[0] == "export":
split_env_line = split_env_line[1:]
for item in split_env_line:
if "=" in item:
k, v = item.split("=", 1)
env_dict[k] = v
return env_dict
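    # For example (a sketch of the parsing above):
    #   self.env_lines_to_dict(['export FOO=bar', 'BAZ="qux quux"'])
    # returns {'FOO': 'bar', 'BAZ': 'qux quux'} once shlex strips the quotes.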
def job_script(self):
""" Construct a job submission script """
quoted_arguments = quote_arguments(["-c", self._command_template])
quoted_environment = quote_environment(self.env_dict)
job_header_lines = "\n".join(
"%s = %s" % (k, v) for k, v in self.job_header_dict.items()
)
return self._script_template % {
"shebang": self.shebang,
"job_header": job_header_lines,
"quoted_environment": quoted_environment,
"quoted_arguments": quoted_arguments,
"executable": self.executable,
}
def _job_id_from_submit_output(self, out):
cluster_id_regexp = r"submitted to cluster (\d+)"
match = re.search(cluster_id_regexp, out)
if match is None:
msg = (
"Could not parse cluster id from submission command output.\n"
"Cluster id regexp is {!r}\n"
"Submission command output is:\n{}".format(cluster_id_regexp, out)
)
raise ValueError(msg)
return "%s.0" % match.group(1)
def _double_up_quotes(instr):
return instr.replace("'", "''").replace('"', '""')
def quote_arguments(args):
"""Quote a string or list of strings using the Condor submit file "new" argument quoting rules.
Returns
-------
str
The arguments in a quoted form.
Warnings
--------
You will need to surround the result in double-quotes before using it in
the Arguments attribute.
Examples
--------
>>> quote_arguments(["3", "simple", "arguments"])
'3 simple arguments'
>>> quote_arguments(["one", "two with spaces", "three"])
'one \'two with spaces\' three'
>>> quote_arguments(["one", "\"two\"", "spacy 'quoted' argument"])
'one ""two"" \'spacey \'\'quoted\'\' argument\''
"""
if isinstance(args, str):
args_list = [args]
else:
args_list = args
quoted_args = []
for a in args_list:
qa = _double_up_quotes(a)
if " " in qa or "'" in qa:
qa = "'" + qa + "'"
quoted_args.append(qa)
return " ".join(quoted_args)
def quote_environment(env):
"""Quote a dict of strings using the Condor submit file "new" environment quoting rules.
Returns
-------
str
The environment in quoted form.
Warnings
--------
You will need to surround the result in double-quotes before using it in
the Environment attribute.
Examples
--------
>>> from collections import OrderedDict
>>> quote_environment(OrderedDict([("one", 1), ("two", '"2"'), ("three", "spacey 'quoted' value")]))
'one=1 two=""2"" three=\'spacey \'\'quoted\'\' value\''
"""
if not isinstance(env, dict):
raise TypeError("env must be a dict")
entries = []
for k, v in env.items():
qv = _double_up_quotes(str(v))
if " " in qv or "'" in qv:
qv = "'" + qv + "'"
entries.append("%s=%s" % (k, qv))
return " ".join(entries)
| 33.266667
| 107
| 0.585438
|
b02b9d753349dfef40ab9e8159dca35f0b86ca53
| 911
|
py
|
Python
|
app.py
|
LoganCButler/BostonHousesDataScience
|
3cfaa102bfbbc47fe566eb1288d4c49d5672eaf1
|
[
"MIT"
] | null | null | null |
app.py
|
LoganCButler/BostonHousesDataScience
|
3cfaa102bfbbc47fe566eb1288d4c49d5672eaf1
|
[
"MIT"
] | null | null | null |
app.py
|
LoganCButler/BostonHousesDataScience
|
3cfaa102bfbbc47fe566eb1288d4c49d5672eaf1
|
[
"MIT"
] | null | null | null |
### TODO ###
import numpy as np
from flask import Flask, request, jsonify
import pickle
from pandas import DataFrame
import pandas as pd
import math
app = Flask(__name__)
# Load the model
model = pickle.load(open('model.pkl','rb'))
@app.route('/predict',methods=['POST'])
def predict():
# Get the data from the POST request.
data = request.get_json(force=True)
asDf = pd.read_json(data['payload'])
## add Hyper columns to test data
asDfHyper = asDf.copy()
asDfHyper['hyper_A'] = (asDf[12]-asDf[5])
asDfHyper['hyper_B'] = asDf[0].apply(abs).apply(math.sqrt)
asDfHyper['hyper_C'] = asDf[12] < 0
prediction = model.predict(asDfHyper)
# Take the first value of prediction
output = prediction[0][0]
return jsonify({'prediction (10k): ': output })
if __name__ == '__main__':
app.run(port=8081, debug=True)
################
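# Example client call (a sketch; the 'payload' key and port 8081 match the handler
# above, and the frame is assumed to carry the 13 Boston-housing feature columns
# labelled 0..12):
#
#   import requests
#   import pandas as pd
#   df = pd.DataFrame([feature_values], columns=range(13))  # feature_values: 13 numbers
#   r = requests.post('http://localhost:8081/predict', json={'payload': df.to_json()})
#   print(r.json())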
| 23.973684
| 62
| 0.632272
|
9209f156263e9dd9fa17ad17704bae763713880f
| 8,821
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations_async/_available_private_endpoint_types_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations_async/_available_private_endpoint_types_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations_async/_available_private_endpoint_types_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailablePrivateEndpointTypesOperations:
"""AvailablePrivateEndpointTypesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["models.AvailablePrivateEndpointTypesResult"]:
"""Returns all of the resource types that can be linked to a Private Endpoint in this subscription
in this region.
:param location: The location of the domain name.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailablePrivateEndpointTypesResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.AvailablePrivateEndpointTypesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailablePrivateEndpointTypesResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'} # type: ignore
def list_by_resource_group(
self,
location: str,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.AvailablePrivateEndpointTypesResult"]:
"""Returns all of the resource types that can be linked to a Private Endpoint in this subscription
in this region.
:param location: The location of the domain name.
:type location: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailablePrivateEndpointTypesResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.AvailablePrivateEndpointTypesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailablePrivateEndpointTypesResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AvailablePrivateEndpointTypesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'} # type: ignore
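    # Illustrative async usage (a sketch; the client attribute name follows the usual
    # azure-mgmt-network layout and is an assumption, not taken from this file):
    #
    #   async for endpoint_type in network_client.available_private_endpoint_types.list("westus"):
    #       print(endpoint_type)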
| 48.202186
| 210
| 0.660469
|
adcb42483eaae3ab2307edadf4ecf1a284030744
| 7,180
|
py
|
Python
|
sdk/python/pulumi_azure_native/recoveryservices/v20180710/replication_storage_classification_mapping.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/recoveryservices/v20180710/replication_storage_classification_mapping.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/recoveryservices/v20180710/replication_storage_classification_mapping.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ReplicationStorageClassificationMapping']
class ReplicationStorageClassificationMapping(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
fabric_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['StorageMappingInputPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
storage_classification_mapping_name: Optional[pulumi.Input[str]] = None,
storage_classification_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Storage mapping object.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] fabric_name: Fabric name.
:param pulumi.Input[pulumi.InputType['StorageMappingInputPropertiesArgs']] properties: Storage mapping input properties.
:param pulumi.Input[str] resource_group_name: The name of the resource group where the recovery services vault is present.
:param pulumi.Input[str] resource_name_: The name of the recovery services vault.
:param pulumi.Input[str] storage_classification_mapping_name: Storage classification mapping name.
:param pulumi.Input[str] storage_classification_name: Storage classification name.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if fabric_name is None and not opts.urn:
raise TypeError("Missing required property 'fabric_name'")
__props__['fabric_name'] = fabric_name
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__['resource_name'] = resource_name_
__props__['storage_classification_mapping_name'] = storage_classification_mapping_name
if storage_classification_name is None and not opts.urn:
raise TypeError("Missing required property 'storage_classification_name'")
__props__['storage_classification_name'] = storage_classification_name
__props__['location'] = None
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:recoveryservices/v20180710:ReplicationStorageClassificationMapping"), pulumi.Alias(type_="azure-native:recoveryservices:ReplicationStorageClassificationMapping"), pulumi.Alias(type_="azure-nextgen:recoveryservices:ReplicationStorageClassificationMapping"), pulumi.Alias(type_="azure-native:recoveryservices/latest:ReplicationStorageClassificationMapping"), pulumi.Alias(type_="azure-nextgen:recoveryservices/latest:ReplicationStorageClassificationMapping"), pulumi.Alias(type_="azure-native:recoveryservices/v20160810:ReplicationStorageClassificationMapping"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20160810:ReplicationStorageClassificationMapping"), pulumi.Alias(type_="azure-native:recoveryservices/v20180110:ReplicationStorageClassificationMapping"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20180110:ReplicationStorageClassificationMapping")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ReplicationStorageClassificationMapping, __self__).__init__(
'azure-native:recoveryservices/v20180710:ReplicationStorageClassificationMapping',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ReplicationStorageClassificationMapping':
"""
Get an existing ReplicationStorageClassificationMapping resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["location"] = None
__props__["name"] = None
__props__["properties"] = None
__props__["type"] = None
return ReplicationStorageClassificationMapping(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource Location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.StorageClassificationMappingPropertiesResponse']:
"""
Properties of the storage mapping object.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource Type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
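A hedged sketch of declaring this resource in a Pulumi program follows; the resource names are placeholders and the exact StorageMappingInputPropertiesArgs field name is an assumption drawn from the Azure API shape, not from this file.

import pulumi
from pulumi_azure_native.recoveryservices.v20180710 import (
    ReplicationStorageClassificationMapping,
    StorageMappingInputPropertiesArgs,  # assumed to be re-exported from ._inputs
)

mapping = ReplicationStorageClassificationMapping(
    "example-mapping",
    fabric_name="example-fabric",
    resource_group_name="example-rg",
    resource_name_="example-vault",
    storage_classification_name="example-source-classification",
    storage_classification_mapping_name="example-mapping",
    properties=StorageMappingInputPropertiesArgs(
        target_storage_classification_id="<target-storage-classification-arm-id>",
    ),
)

pulumi.export("mapping_name", mapping.name)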
| 50.20979
| 959
| 0.689276
|
286b8f9ed9e590590c8b33de167185ddedd3bd07
| 3,963
|
py
|
Python
|
poem_generator/word_network.py
|
Aaronsom/poem-generation
|
10cccad88d073f82f6556374fdfb23a5b5e3769a
|
[
"MIT"
] | null | null | null |
poem_generator/word_network.py
|
Aaronsom/poem-generation
|
10cccad88d073f82f6556374fdfb23a5b5e3769a
|
[
"MIT"
] | null | null | null |
poem_generator/word_network.py
|
Aaronsom/poem-generation
|
10cccad88d073f82f6556374fdfb23a5b5e3769a
|
[
"MIT"
] | null | null | null |
from keras.models import Sequential
from keras.layers import Dense, Embedding, Flatten, Dropout, CuDNNLSTM, CuDNNGRU, Conv1D, GlobalMaxPool1D, Bidirectional
from keras.callbacks import ModelCheckpoint, CSVLogger
import keras.optimizers as optimizer
from keras.backend import set_floatx, set_epsilon
from poem_generator.dataGenerator import TupleDataGenerator
import poem_generator.data_prepocessing as dp
import poem_generator.embedding as embedding_loader
from poem_generator.global_constants import TRAINING_DATA, EMBEDDING_DIMENSION, EMBEDDING_BINARY, MODELS_DICT
from poem_generator.transformer import transformer
def mlp(n, embedding, vocab_len):
model = Sequential([
Embedding(input_dim=vocab_len, output_dim=EMBEDDING_DIMENSION, input_length=n, weights=[embedding]),
Flatten(),
Dropout(0.2),
Dense(n*600, activation="relu"),
Dropout(0.2),
Dense(n*300, activation="relu"),
Dropout(0.2),
Dense(n*100, activation="relu"),
Dropout(0.2),
Dense(vocab_len,activation="softmax"),
])
return model
def lstm_rnn(n, embedding, vocab_len):
model = Sequential([
Embedding(input_dim=vocab_len, output_dim=EMBEDDING_DIMENSION, input_length=n, weights=[embedding]),
CuDNNLSTM(4096, return_sequences=False),
Dropout(0.2),
Dense(1024, activation="relu"),
Dropout(0.2),
Dense(vocab_len, activation="softmax")
])
return model
def gru_rnn(n, embedding, vocab_len):
model = Sequential([
Embedding(input_dim=vocab_len, output_dim=EMBEDDING_DIMENSION, input_length=n, weights=[embedding]),
CuDNNGRU(512, return_sequences=False),
Dropout(0.2),
Dense(512, activation="relu"),
Dropout(0.2),
Dense(vocab_len, activation="softmax")
])
return model
def cnn(n, embedding, vocab_len):
model = Sequential([
Embedding(input_dim=vocab_len, output_dim=EMBEDDING_DIMENSION, input_length=n, weights=[embedding]),
Conv1D(50, 5, activation="relu"),
GlobalMaxPool1D(),
Dropout(0.2),
Dense(1000, activation="relu"),
Dropout(0.2),
Dense(vocab_len, activation="softmax")
])
return model
def bidirectional_lstm(n, embedding, vocab_len):
model = Sequential([
Embedding(input_dim=vocab_len, output_dim=EMBEDDING_DIMENSION, input_length=n, weights=[embedding]),
Bidirectional(CuDNNLSTM(1024, return_sequences=False)),
Dropout(0.2),
Dense(512, activation="relu"),
Dropout(0.2),
Dense(vocab_len, activation="softmax")
])
return model
if __name__ == "__main__":
set_floatx("float16")
set_epsilon(1e-04)
ns = [5]
epochs = 20
batch_size = 512
max_limit = 25000
validation_split = 0.9
poems = dp.tokenize_poems(TRAINING_DATA)
words = sorted(list(set([token for poem in poems for token in poem])))
#Save embedding for generator
embedding, dictionary = embedding_loader.get_embedding(words, binary=EMBEDDING_BINARY, limit=max_limit, save=True)
#model = load_model(MODELS_DICT+"/5model.hdf5", custom_objects={"PositionalEncoding": PositionalEncoding, "Attention": Attention})
model = transformer(100, embedding, len(dictionary), True)
model.compile(optimizer=optimizer.Adam(decay=1e-5),
loss="categorical_crossentropy", metrics=["accuracy"])
generator = TupleDataGenerator(poems[:int(validation_split*len(poems))], ns, dictionary, 0.1, batch_size, single=True)
validation_generator = TupleDataGenerator(poems[int(validation_split*len(poems)):], ns, dictionary, 0, batch_size, single=True)
callbacks = [ModelCheckpoint(MODELS_DICT+"/model.hdf5", save_best_only=True),
CSVLogger(MODELS_DICT+"/log.csv", append=True, separator=';')]
model.fit_generator(
generator, epochs=epochs, callbacks=callbacks, validation_data=validation_generator, workers=4)
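As a quick sanity check of the model builders above, here is a stripped-down sketch in the spirit of mlp(); the vocabulary size, embedding width and random embedding matrix are assumptions, not project values.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Embedding, Flatten, Dropout

EMBEDDING_DIMENSION = 300   # assumed embedding width
vocab_len = 10000           # assumed vocabulary size
n = 5                       # context window, matching ns = [5] above
embedding = np.random.rand(vocab_len, EMBEDDING_DIMENSION).astype("float32")

model = Sequential([
    Embedding(input_dim=vocab_len, output_dim=EMBEDDING_DIMENSION,
              input_length=n, weights=[embedding]),
    Flatten(),
    Dropout(0.2),
    Dense(n * 100, activation="relu"),
    Dense(vocab_len, activation="softmax"),
])
model.compile(optimizer="adam", loss="categorical_crossentropy")
model.summary()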
| 39.63
| 134
| 0.700479
|
213ec011e8a81bdd502b93b83c111cddc34df6fe
| 421
|
py
|
Python
|
leetcode/2140.py
|
ShengyuanWang/ShengyuanWang.github.io
|
b43f867a1b140b78f8031725cff212b43ecd001b
|
[
"MIT"
] | 1
|
2022-01-20T21:12:00.000Z
|
2022-01-20T21:12:00.000Z
|
leetcode/2140.py
|
ShengyuanWang/ShengyuanWang.github.io
|
b43f867a1b140b78f8031725cff212b43ecd001b
|
[
"MIT"
] | null | null | null |
leetcode/2140.py
|
ShengyuanWang/ShengyuanWang.github.io
|
b43f867a1b140b78f8031725cff212b43ecd001b
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def mostPoints(self, questions: List[List[int]]) -> int:
n = len(questions)
dp = [0] * n
for i in range(n-1, -1, -1):
end = i + questions[i][1] + 1
dp[i] = questions[i][0]
if end < n:
dp[i] = questions[i][0] + dp[end]
if i < n-1:
dp[i] = max(dp[i], dp[i+1])
return dp[0]
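The recurrence is dp[i] = max(dp[i+1], points[i] + dp[i + brainpower[i] + 1]): skip the question or solve it and jump past the blocked ones. A quick check with the problem's sample input (added here for illustration, not part of the original file):

if __name__ == "__main__":
    questions = [[3, 2], [4, 3], [4, 4], [2, 5]]
    # Solving question 0 (3 pts) blocks questions 1-2; question 3 then adds 2 pts.
    print(Solution().mostPoints(questions))  # expected 5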
| 28.066667
| 60
| 0.396675
|
13597aaa99b116cebede6673234f74056d5edee8
| 343
|
py
|
Python
|
Python/mp.py
|
kaehsu/template-bash
|
f8a8a4babb8537622a4e4246701761a9832d6aeb
|
[
"MIT"
] | null | null | null |
Python/mp.py
|
kaehsu/template-bash
|
f8a8a4babb8537622a4e4246701761a9832d6aeb
|
[
"MIT"
] | null | null | null |
Python/mp.py
|
kaehsu/template-bash
|
f8a8a4babb8537622a4e4246701761a9832d6aeb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import multiprocessing
import os
def whoami(what):
print("Process {} says: {}".format(os.getpid(), what))
if __name__ == '__main__':
whoami("I'm the main program")
for n in range(64):
p = multiprocessing.Process(
target=whoami, args=("I'm function %s" % n,))
p.start()
    quit()
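A minimal variant (my sketch, not part of the original template) that keeps references to the workers and joins them before the parent exits:

import multiprocessing
import os

def whoami(what):
    print("Process {} says: {}".format(os.getpid(), what))

if __name__ == '__main__':
    workers = [multiprocessing.Process(target=whoami, args=("I'm function %s" % n,))
               for n in range(8)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()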
| 19.055556
| 58
| 0.603499
|
895e9d276904e7fa11b694d30b5ae016a1bb3e99
| 414
|
py
|
Python
|
models/dpt_model/dpt_arguments.py
|
DominikSpiljak/depth-prediction
|
6a35579b121b34ed5b423b0f042e1d9b78f0120f
|
[
"Apache-2.0"
] | null | null | null |
models/dpt_model/dpt_arguments.py
|
DominikSpiljak/depth-prediction
|
6a35579b121b34ed5b423b0f042e1d9b78f0120f
|
[
"Apache-2.0"
] | 1
|
2022-03-30T20:45:28.000Z
|
2022-03-30T20:45:28.000Z
|
models/dpt_model/dpt_arguments.py
|
DominikSpiljak/depth-prediction
|
6a35579b121b34ed5b423b0f042e1d9b78f0120f
|
[
"Apache-2.0"
] | null | null | null |
def add_model_args(parser):
dpt = parser.add_argument_group("DPT")
dpt.add_argument(
"--no-backbone-pretrain",
action="store_false",
dest="backbone_pretrained",
)
dpt.add_argument(
"--pretrained-weights",
help="Path to pretrained weights for DPT hybrid model",
default="models/dpt_model/weights/dpt_hybrid-midas-501f0c75.pt",
)
return parser
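A hedged wiring sketch; the import path and the surrounding training script are assumptions:

import argparse
from models.dpt_model.dpt_arguments import add_model_args  # assumed import path

parser = argparse.ArgumentParser(description="Depth prediction training")
parser = add_model_args(parser)
args = parser.parse_args([])     # empty argv: exercise the defaults defined above

print(args.backbone_pretrained)  # True unless --no-backbone-pretrain is passed
print(args.pretrained_weights)   # default DPT hybrid checkpoint path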
| 29.571429
| 72
| 0.647343
|
f5645c771f767bc63f193c1d2dc45757a3552ae6
| 1,208
|
py
|
Python
|
locallibrary/locallibrary/urls.py
|
waltermaina/local-library-website
|
ac0f6d9c6cd79f891d196c707327717282fcc9cb
|
[
"MIT"
] | null | null | null |
locallibrary/locallibrary/urls.py
|
waltermaina/local-library-website
|
ac0f6d9c6cd79f891d196c707327717282fcc9cb
|
[
"MIT"
] | null | null | null |
locallibrary/locallibrary/urls.py
|
waltermaina/local-library-website
|
ac0f6d9c6cd79f891d196c707327717282fcc9cb
|
[
"MIT"
] | null | null | null |
"""locallibrary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic import RedirectView
# Use static() to add url mapping to serve static files during development (only)
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('catalog/', include('catalog.urls')),
path('', RedirectView.as_view(url='catalog/', permanent=True)),
path('accounts/', include('django.contrib.auth.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
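For context, a hypothetical sketch of the catalog/urls.py module pulled in by include('catalog.urls') above; the view name is a placeholder, not taken from this project.

from django.urls import path
from . import views

app_name = 'catalog'
urlpatterns = [
    path('', views.index, name='index'),  # placeholder view
]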
| 41.655172
| 81
| 0.729305
|
3b2ef0192c0ba1975403c6244c371471321656f9
| 34,920
|
py
|
Python
|
emotion_model/classifiers.py
|
Bonsior-yyc/weibo
|
75e7e1df54b22d7985934b188c31dd413222c856
|
[
"MIT"
] | null | null | null |
emotion_model/classifiers.py
|
Bonsior-yyc/weibo
|
75e7e1df54b22d7985934b188c31dd413222c856
|
[
"MIT"
] | null | null | null |
emotion_model/classifiers.py
|
Bonsior-yyc/weibo
|
75e7e1df54b22d7985934b188c31dd413222c856
|
[
"MIT"
] | null | null | null |
import re
from collections import defaultdict
import jieba
import numpy as np
from jieba import posseg
# ################################################
# classifier based on sentiment f_dict
# ################################################
class DictClassifier:
def __init__(self):
self.__root_filepath = "f_dict/"
        jieba.load_userdict("f_dict/user.dict") # load the custom word-segmentation dictionary
        # load the sentiment dictionaries
self.__phrase_dict = self.__get_phrase_dict()
self.__positive_dict = self.__get_dict(self.__root_filepath + "positive_dict.txt")
self.__negative_dict = self.__get_dict(self.__root_filepath + "negative_dict.txt")
self.__conjunction_dict = self.__get_dict(self.__root_filepath + "conjunction_dict.txt")
self.__punctuation_dict = self.__get_dict(self.__root_filepath + "punctuation_dict.txt")
self.__adverb_dict = self.__get_dict(self.__root_filepath + "adverb_dict.txt")
self.__denial_dict = self.__get_dict(self.__root_filepath + "denial_dict.txt")
def classify(self, sentence):
return self.analyse_sentence(sentence)
def analysis_file(self, filepath_in, filepath_out, encoding="utf-8", print_show=False, start=0, end=-1):
open(filepath_out, "w")
results = []
with open(filepath_in, "r", encoding=encoding) as f:
line_number = 0
for line in f:
                # control the start position (line number) of the corpus to analyse
line_number += 1
if line_number < start:
continue
results.append(self.analyse_sentence(line.strip(), filepath_out, print_show))
                # control the end position (line number) of the corpus to analyse
if 0 < end <= line_number:
break
return results
def analyse_sentence(self, sentence, runout_filepath=None, print_show=False):
        # overall data structure for the sentiment analysis
sentence = str(sentence)
comment_analysis = {"score": 0}
        # split the comment into clauses
the_clauses = self.__divide_sentence_into_clauses(sentence + "%")
        # run sentiment analysis on each clause
for i in range(len(the_clauses)):
            # data structure for the clause-level sentiment analysis
sub_clause = self.__analyse_clause(the_clauses[i].replace("。", "."), runout_filepath, print_show)
            # add the clause-level result to the overall data structure
comment_analysis["su-clause" + str(i)] = sub_clause
comment_analysis['score'] += sub_clause['score']
if runout_filepath is not None:
            # write the whole sentence to the run-out file so comment_analysis["score"] can be reviewed
self.__write_runout_file(runout_filepath, "\n" + sentence + '\n')
            # write each clause's analysis result to the run-out file for review
self.__output_analysis(comment_analysis, runout_filepath)
            # write the comment's overall analysis result to the run-out file for review
self.__write_runout_file(runout_filepath, str(comment_analysis) + "\n\n\n\n")
if print_show:
print("\n" + sentence)
self.__output_analysis(comment_analysis)
print(comment_analysis, end="\n\n\n")
return comment_analysis["score"] / len(sentence) if abs(comment_analysis["score"]) < 100 else 0
# if comment_analysis["score"] > 0:
# return 1
# else:
# return 0
def __analyse_clause(self, the_clause, runout_filepath, print_show):
sub_clause = {"score": 0, "positive": [], "negative": [], "conjunction": [], "punctuation": [], "pattern": []}
seg_result = posseg.lcut(the_clause)
        # write the clause and its word-segmentation result to the run-out file for review
if runout_filepath is not None:
self.__write_runout_file(runout_filepath, the_clause + '\n')
self.__write_runout_file(runout_filepath, str(seg_result) + '\n')
if print_show:
print(the_clause)
print(seg_result)
        # detect the pattern "如果……就好了" (if only ... it would be fine)
judgement = self.__is_clause_pattern2(the_clause)
if judgement != "":
sub_clause["pattern"].append(judgement)
sub_clause["score"] -= judgement["value"]
return sub_clause
        # detect the pattern "是…不是…"
judgement = self.__is_clause_pattern1(the_clause)
if judgement != "":
sub_clause["pattern"].append(judgement)
sub_clause["score"] -= judgement["value"]
        # detect phrase patterns from the phrase dictionary
judgement = self.__is_clause_pattern3(the_clause, seg_result)
if judgement != "":
sub_clause["score"] += judgement["score"]
if judgement["score"] >= 0:
sub_clause["positive"].append(judgement)
elif judgement["score"] < 0:
sub_clause["negative"].append(judgement)
match_result = judgement["key"].split(":")[-1]
i = 0
while i < len(seg_result):
if seg_result[i].word in match_result:
if i + 1 == len(seg_result) or seg_result[i + 1].word in match_result:
del (seg_result[i])
continue
i += 1
        # analyse the segmented words one by one
for i in range(len(seg_result)):
mark, result = self.__analyse_word(seg_result[i].word, seg_result, i)
if mark == 0:
continue
elif mark == 1:
sub_clause["conjunction"].append(result)
elif mark == 2:
sub_clause["punctuation"].append(result)
elif mark == 3:
sub_clause["positive"].append(result)
sub_clause["score"] += result["score"]
elif mark == 4:
sub_clause["negative"].append(result)
sub_clause["score"] -= result["score"]
        # fold the sentiment value of each conjunction into the clause score
for a_conjunction in sub_clause["conjunction"]:
sub_clause["score"] *= a_conjunction["value"]
        # fold the sentiment value of each punctuation mark into the clause score
for a_punctuation in sub_clause["punctuation"]:
sub_clause["score"] *= a_punctuation["value"]
return sub_clause
@staticmethod
def __is_clause_pattern2(the_clause):
# re_pattern = re.compile(r".*(如果|要是|希望).+就[\u4e00-\u9fa5]+(好|完美)了")
re_pattern = re.compile(r".*(如果|要是|希望).+就[\u4e00-\u9fa5]*(好|完美)了")
match = re_pattern.match(the_clause)
if match is not None:
pattern = {"key": "如果…就好了", "value": 1.0}
return pattern
return ""
def __is_clause_pattern3(self, the_clause, seg_result):
for a_phrase in self.__phrase_dict:
keys = a_phrase.keys()
to_compile = a_phrase["key"].replace("……", "[\u4e00-\u9fa5]*")
if "start" in keys:
to_compile = to_compile.replace("*", "{" + a_phrase["start"] + "," + a_phrase["end"] + "}")
if "head" in keys:
to_compile = a_phrase["head"] + to_compile
match = re.compile(to_compile).search(the_clause)
if match is not None:
can_continue = True
pos = [flag for word, flag in posseg.cut(match.group())]
if "between_tag" in keys:
if a_phrase["between_tag"] not in pos and len(pos) > 2:
can_continue = False
if can_continue:
for i in range(len(seg_result)):
if seg_result[i].word in match.group():
try:
if seg_result[i + 1].word in match.group():
return self.__emotional_word_analysis(
a_phrase["key"] + ":" + match.group(), a_phrase["value"],
[x for x, y in seg_result], i)
except IndexError:
return self.__emotional_word_analysis(
a_phrase["key"] + ":" + match.group(), a_phrase["value"],
[x for x, y in seg_result], i)
return ""
def __analyse_word(self, the_word, seg_result=None, index=-1):
        # check whether the word is a conjunction
judgement = self.__is_word_conjunction(the_word)
if judgement != "":
return 1, judgement
        # check whether the word is a punctuation mark
judgement = self.__is_word_punctuation(the_word)
if judgement != "":
return 2, judgement
        # check whether the word is a positive sentiment word
judgement = self.__is_word_positive(the_word, seg_result, index)
if judgement != "":
return 3, judgement
        # check whether the word is a negative sentiment word
judgement = self.__is_word_negative(the_word, seg_result, index)
if judgement != "":
return 4, judgement
return 0, ""
@staticmethod
def __is_clause_pattern1(the_clause):
re_pattern = re.compile(r".*(要|选)的.+(送|给).*")
match = re_pattern.match(the_clause)
if match is not None:
pattern = {"key": "要的是…给的是…", "value": 1}
return pattern
return ""
def __is_word_conjunction(self, the_word):
if the_word in self.__conjunction_dict:
conjunction = {"key": the_word, "value": self.__conjunction_dict[the_word]}
return conjunction
return ""
def __is_word_punctuation(self, the_word):
if the_word in self.__punctuation_dict:
punctuation = {"key": the_word, "value": self.__punctuation_dict[the_word]}
return punctuation
return ""
def __is_word_positive(self, the_word, seg_result, index):
        # check whether the token is in the positive sentiment dictionary
        if the_word in self.__positive_dict:
            # if so, build a dict data structure centred on the sentiment word
return self.__emotional_word_analysis(the_word, self.__positive_dict[the_word],
[x for x, y in seg_result], index)
        # not in the sentiment dictionary, so return an empty string
return ""
def __is_word_negative(self, the_word, seg_result, index):
        # check whether the token is in the negative sentiment dictionary
        if the_word in self.__negative_dict:
            # if so, build a dict data structure centred on the sentiment word
return self.__emotional_word_analysis(the_word, self.__negative_dict[the_word],
[x for x, y in seg_result], index)
        # not in the sentiment dictionary, so return an empty string
return ""
def __emotional_word_analysis(self, core_word, value, segments, index):
        # build a dict data structure centred on the sentiment word
orientation = {"key": core_word, "adverb": [], "denial": [], "value": value}
orientation_score = orientation["value"] # my_sentiment_dict[segment]
        # look back over a window of up to three preceding words for negation words and adverbs
view_window = index - 1
        if view_window > -1: # index not out of range
            # check whether the previous word is itself a sentiment word
if segments[view_window] in self.__negative_dict or \
segments[view_window] in self.__positive_dict:
orientation['score'] = orientation_score
return orientation
            # check whether it is an adverb
if segments[view_window] in self.__adverb_dict:
                # build the adverb dict structure
adverb = {"key": segments[view_window], "position": 1,
"value": self.__adverb_dict[segments[view_window]]}
orientation["adverb"].append(adverb)
orientation_score *= self.__adverb_dict[segments[view_window]]
            # check whether it is a negation word
elif segments[view_window] in self.__denial_dict:
                # build the negation-word dict structure
denial = {"key": segments[view_window], "position": 1,
"value": self.__denial_dict[segments[view_window]]}
orientation["denial"].append(denial)
orientation_score *= -1
view_window = index - 2
if view_window > -1:
            # check whether the previous word is itself a sentiment word
if segments[view_window] in self.__negative_dict or \
segments[view_window] in self.__positive_dict:
orientation['score'] = orientation_score
return orientation
if segments[view_window] in self.__adverb_dict:
adverb = {"key": segments[view_window], "position": 2,
"value": self.__adverb_dict[segments[view_window]]}
orientation_score *= self.__adverb_dict[segments[view_window]]
orientation["adverb"].insert(0, adverb)
elif segments[view_window] in self.__denial_dict:
denial = {"key": segments[view_window], "position": 2,
"value": self.__denial_dict[segments[view_window]]}
orientation["denial"].insert(0, denial)
orientation_score *= -1
                # detect the "不是很好" ("not very good") structure, as opposed to "很不好" ("very bad")
                if len(orientation["adverb"]) > 0:
                    # if so, apply a damping factor of 0.3
orientation_score *= 0.3
view_window = index - 3
if view_window > -1:
            # check whether the previous word is itself a sentiment word
if segments[view_window] in self.__negative_dict or segments[view_window] in self.__positive_dict:
orientation['score'] = orientation_score
return orientation
if segments[view_window] in self.__adverb_dict:
adverb = {"key": segments[view_window], "position": 3,
"value": self.__adverb_dict[segments[view_window]]}
orientation_score *= self.__adverb_dict[segments[view_window]]
orientation["adverb"].insert(0, adverb)
elif segments[view_window] in self.__denial_dict:
denial = {"key": segments[view_window], "position": 3,
"value": self.__denial_dict[segments[view_window]]}
orientation["denial"].insert(0, denial)
orientation_score *= -1
                # detect the "不是很好" ("not very good") structure, as opposed to "很不好" ("very bad")
if len(orientation["adverb"]) > 0 and len(orientation["denial"]) == 0:
orientation_score *= 0.3
        # record the sentiment score
        orientation['score'] = orientation_score
        # return the resulting data structure
return orientation
    # write out the comment_analysis data structure (to file or stdout)
def __output_analysis(self, comment_analysis, runout_filepath=None):
output = "Score:" + str(comment_analysis["score"]) + "\n"
for i in range(len(comment_analysis) - 1):
output += "Sub-clause" + str(i) + ": "
clause = comment_analysis["su-clause" + str(i)]
if len(clause["conjunction"]) > 0:
output += "conjunction:"
for punctuation in clause["conjunction"]:
output += punctuation["key"] + " "
if len(clause["positive"]) > 0:
output += "positive:"
for positive in clause["positive"]:
if len(positive["denial"]) > 0:
for denial in positive["denial"]:
output += denial["key"] + str(denial["position"]) + "-"
if len(positive["adverb"]) > 0:
for adverb in positive["adverb"]:
output += adverb["key"] + str(adverb["position"]) + "-"
output += positive["key"] + " "
if len(clause["negative"]) > 0:
output += "negative:"
for negative in clause["negative"]:
if len(negative["denial"]) > 0:
for denial in negative["denial"]:
output += denial["key"] + str(denial["position"]) + "-"
if len(negative["adverb"]) > 0:
for adverb in negative["adverb"]:
output += adverb["key"] + str(adverb["position"]) + "-"
output += negative["key"] + " "
if len(clause["punctuation"]) > 0:
output += "punctuation:"
for punctuation in clause["punctuation"]:
output += punctuation["key"] + " "
if len(clause["pattern"]) > 0:
output += "pattern:"
for pattern in clause["pattern"]:
output += pattern["key"] + " "
# if clause["pattern"] is not None:
# output += "pattern:" + clause["pattern"]["key"] + " "
output += "\n"
if runout_filepath is not None:
self.__write_runout_file(runout_filepath, output)
else:
print(output)
def __divide_sentence_into_clauses(self, the_sentence):
the_clauses = self.__split_sentence(the_sentence)
        # recognise the "是……不是……" pattern
pattern = re.compile(r"([,、。%!;??,!~~.… ]*)([\u4e00-\u9fa5]*?(要|选)"
r"的.+(送|给)[\u4e00-\u9fa5]+?[,。!%;、??,!~~.… ]+)")
match = re.search(pattern, the_sentence.strip())
if match is not None and len(self.__split_sentence(match.group(2))) <= 2:
to_delete = []
for i in range(len(the_clauses)):
if the_clauses[i] in match.group(2):
to_delete.append(i)
if len(to_delete) > 0:
for i in range(len(to_delete)):
the_clauses.remove(the_clauses[to_delete[0]])
the_clauses.insert(to_delete[0], match.group(2))
        # recognise the hypothetical pattern "要是/如果……就好了"
pattern = re.compile(r"([,%。、!;??,!~~.… ]*)([\u4e00-\u9fa5]*?(如果|要是|"
r"希望).+就[\u4e00-\u9fa5]+(好|完美)了[,。;!%、??,!~~.… ]+)")
match = re.search(pattern, the_sentence.strip())
if match is not None and len(self.__split_sentence(match.group(2))) <= 3:
to_delete = []
for i in range(len(the_clauses)):
if the_clauses[i] in match.group(2):
to_delete.append(i)
if len(to_delete) > 0:
for i in range(len(to_delete)):
the_clauses.remove(the_clauses[to_delete[0]])
the_clauses.insert(to_delete[0], match.group(2))
the_clauses[-1] = the_clauses[-1][:-1]
return the_clauses
@staticmethod
def __split_sentence(sentence):
pattern = re.compile("[,。%、!!??,;~~.… ]+")
split_clauses = pattern.split(sentence.strip())
punctuations = pattern.findall(sentence.strip())
try:
split_clauses.remove("")
except ValueError:
pass
punctuations.append("")
clauses = [''.join(x) for x in zip(split_clauses, punctuations)]
return clauses
def __get_phrase_dict(self):
sentiment_dict = []
pattern = re.compile(r"\s+")
with open(self.__root_filepath + "phrase_dict.txt", "r", encoding="utf-8") as f:
for line in f:
a_phrase = {}
result = pattern.split(line.strip())
if len(result) >= 2:
a_phrase["key"] = result[0]
a_phrase["value"] = float(result[1])
for i, a_split in enumerate(result):
if i < 2:
continue
else:
a, b = a_split.split(":")
a_phrase[a] = b
sentiment_dict.append(a_phrase)
return sentiment_dict
    # build a sentiment dictionary from a file
@staticmethod
def __get_dict(path, encoding="utf-8"):
sentiment_dict = {}
pattern = re.compile(r"\s+")
with open(path, encoding=encoding) as f:
for line in f:
result = pattern.split(line.strip())
if len(result) == 2:
sentiment_dict[result[0]] = float(result[1])
return sentiment_dict
@staticmethod
def __write_runout_file(path, info, encoding="utf-8"):
with open(path, "a", encoding=encoding) as f:
f.write("%s" % info)
# ################################################
# classifier based on K-Nearest Neighbours
# ################################################
class KNNClassifier:
def __init__(self, train_data, train_data_labels, k=3, best_words=None, stopwords=None):
self.__train_data_labels = []
self.__total_words = []
self.__k = k
self.__stopwords = stopwords
self.__train_data_vectors = None
self.__total_words_length = 0
self.train_num = 0
if train_data is not None:
self.__train(train_data, train_data_labels, best_words)
def set_k(self, k):
self.__k = k
def __doc2vector(self, doc):
the_vector = [0] * self.__total_words_length
for i in range(self.__total_words_length):
the_vector[i] = doc.count(self.__total_words[i])
length = sum(the_vector)
if length == 0:
return [0 for _ in the_vector]
return [i / length for i in the_vector]
# return the_vector
def __get_total_words(self, train_data, best_words):
if best_words is not None:
total_words = best_words[:]
else:
total_words = set()
for doc in train_data:
total_words |= set(doc)
if self.__stopwords:
with open(self.__stopwords, encoding="utf-8") as sw_f:
for line in sw_f:
if line.strip() in total_words:
total_words.remove(line.strip())
return list(total_words)
@staticmethod
def __normalize(vectors):
min_values = vectors.min(axis=0)
max_values = vectors.max(axis=0)
ranges = max_values - min_values
m = vectors.shape[0]
norm_vectors = vectors - np.tile(min_values, (m, 1))
norm_vectors = norm_vectors / np.tile(ranges, (m, 1))
return norm_vectors
def __train(self, train_data, train_data_labels, best_words=None):
print("KNNClassifier is training ...... ")
self.__train_data_labels = train_data_labels[:]
self.__total_words = self.__get_total_words(train_data, best_words)
self.__total_words_length = len(self.__total_words)
vectors = []
for doc in train_data:
vectors.append(self.__doc2vector(doc))
self.train_num += 1
self.__train_data_vectors = np.array(vectors)
# self.__train_data_vectors = self.__normalize(np.array(vectors))
print("KNNClassifier trains over!")
def __get_sorted_distances(self, input_data):
size = self.__train_data_vectors.shape
vector = self.__doc2vector(input_data)
input_data_vector = np.array(vector)
diff_mat = np.tile(input_data_vector, (size[0], 1)) - self.__train_data_vectors
sq_diff_mat = diff_mat ** 2
sq_distances = sq_diff_mat.sum(axis=1)
distances = sq_distances ** 0.5
sorted_distances = distances.argsort()
return sorted_distances
def classify(self, input_data):
if isinstance(self.__k, int):
return self.single_k_classify(input_data)
elif isinstance(self.__k, list):
return self.multiple_k_classify(input_data)
else:
print("Wrong k.")
def multiple_k_classify(self, input_data):
# get the distance sorted list
sorted_distances = self.__get_sorted_distances(input_data)
# some variable
i = 0
# class_count[0] records the number of label "0"
# class_count[1] records the number of label "1"
class_count = [0, 0]
# final_record[0] records the number of label "0"
# final_record[1] records the number of label "1"
final_record = [0, 0]
# assert type(k) == list
assert type(self.__k) == list
for k in sorted(self.__k):
while i < k:
label = self.__train_data_labels[sorted_distances[i]]
class_count[label] += 1
i += 1
if class_count[0] > class_count[1]:
final_record[0] += 1
else:
final_record[1] += 1
if final_record[0] > final_record[1]:
return 0
else:
return 1
def single_k_classify(self, input_data):
# get the distance sorted list
sorted_distances = self.__get_sorted_distances(input_data)
# some variable
i = 0
# class_count[0] records the number of label "0"
# class_count[1] records the number of label "1"
class_count = [0, 0]
while i < self.__k:
label = self.__train_data_labels[sorted_distances[i]]
class_count[label] += 1
i += 1
if class_count[0] > class_count[1]:
return 0
else:
return 1
# ################################################
# classifier based on Naive bayes
# ################################################
class BayesClassifier:
def __init__(self, train_data, train_data_labels, best_words):
self._pos_word_p = {}
self._neg_word_p = {}
self._pos_p = 0.
self._neg_p = 1.
self._train(train_data, train_data_labels, best_words)
def _train(self, train_data, train_data_labels, best_words=None):
"""
        this method is different from the method self.train():
        we use the training data, do some feature selection, then train,
        and compute some important values
:param train_data:
:param train_data_labels:
:param best_words:
"""
print("BayesClassifier is training ...... ")
# get the frequency information of each word
total_pos_data, total_neg_data = {}, {}
total_pos_length, total_neg_length = 0, 0
total_word = set()
for i, doc in enumerate(train_data):
if train_data_labels[i] == 1:
for word in doc:
if best_words is None or word in best_words:
total_pos_data[word] = total_pos_data.get(word, 0) + 1
total_pos_length += 1
total_word.add(word)
else:
for word in doc:
if best_words is None or word in best_words:
total_neg_data[word] = total_neg_data.get(word, 0) + 1
total_neg_length += 1
total_word.add(word)
self._pos_p = total_pos_length / (total_pos_length + total_neg_length)
self._neg_p = total_neg_length / (total_pos_length + total_neg_length)
# get each word's probability
for word in total_word:
self._pos_word_p[word] = np.log(total_pos_data.get(word, 1e-100) / total_pos_length)
self._neg_word_p[word] = np.log(total_neg_data.get(word, 1e-100) / total_neg_length)
print("BayesClassifier trains over!")
def classify(self, input_data):
"""
        according to the input data, calculate the probability of each class
:param input_data:
"""
pos_score = 0.
for word in input_data:
pos_score += self._pos_word_p.get(word, 0.)
pos_score += np.log(self._pos_p)
neg_score = 0.
for word in input_data:
neg_score += self._neg_word_p.get(word, 0.)
neg_score += np.log(self._neg_p)
if pos_score > neg_score:
return 1
else:
return 0
# ################################################
# classifier based on Maximum Entropy
# ################################################
class MaxEntClassifier:
def __init__(self, max_iter=500):
self.feats = defaultdict(int)
self.labels = {0, 1}
self.weight = []
self.max_iter = max_iter
def prob_weight(self, features, label):
weight = 0.0
for feature in features:
if (label, feature) in self.feats:
weight += self.weight[self.feats[(label, feature)]]
return np.exp(weight)
def calculate_probability(self, features):
weights = [(self.prob_weight(features, label), label) for label in self.labels]
try:
z = sum([weight for weight, label in weights])
prob = [(weight / z, label) for weight, label in weights]
except ZeroDivisionError:
return "collapse"
return prob
def convergence(self, last_weight):
for w1, w2 in zip(last_weight, self.weight):
if abs(w1 - w2) >= 0.001:
return False
return True
def train(self, train_data, train_data_labels, best_words=None):
print("MaxEntClassifier is training ...... ")
# init the parameters
train_data_length = len(train_data_labels)
if best_words is None:
for i in range(train_data_length):
for word in set(train_data[i]):
self.feats[(train_data_labels[i], word)] += 1
else:
for i in range(train_data_length):
for word in set(train_data[i]):
if word in best_words:
self.feats[(train_data_labels[i], word)] += 1
the_max = max([len(record) - 1 for record in train_data]) # the_max param for GIS training algorithm
self.weight = [0.0] * len(self.feats) # init weight for each feature
ep_empirical = [0.0] * len(self.feats) # init the feature expectation on empirical distribution
for i, f in enumerate(self.feats):
ep_empirical[i] = self.feats[f] / train_data_length # feature expectation on empirical distribution
self.feats[f] = i # each feature function correspond to id
for i in range(self.max_iter):
ep_model = [0.0] * len(self.feats) # feature expectation on model distribution
for doc in train_data:
prob = self.calculate_probability(doc) # calculate p(y|x)
if prob == "collapse":
print("The program collapse. The iter number: %d." % (i + 1))
return
for feature in doc:
for weight, label in prob:
if (label, feature) in self.feats: # only focus on features from training data.
idx = self.feats[(label, feature)] # get feature id
ep_model[idx] += weight * (1.0 / train_data_length) # sum(1/N * f(y,x)*p(y|x)), p(x) = 1/N
last_weight = self.weight[:]
for j, win in enumerate(self.weight):
delta = 1.0 / the_max * np.log(ep_empirical[j] / ep_model[j])
self.weight[j] += delta # update weight
# test if the algorithm is convergence
if self.convergence(last_weight):
print("The program convergence. The iter number: %d." % (i + 1))
break
print("MaxEntClassifier trains over!")
def test(self, train_data, train_labels, best_words, test_data):
classify_results = []
# init the parameters
train_data_length = len(train_labels)
if best_words is None:
for i in range(train_data_length):
for word in set(train_data[i]):
self.feats[(train_labels[i], word)] += 1
else:
for i in range(train_data_length):
for word in set(train_data[i]):
if word in best_words:
self.feats[(train_labels[i], word)] += 1
the_max = max([len(record) - 1 for record in train_data]) # the_max param for GIS training algorithm
self.weight = [0.0] * len(self.feats) # init weight for each feature
ep_empirical = [0.0] * len(self.feats) # init the feature expectation on empirical distribution
for i, f in enumerate(self.feats):
ep_empirical[i] = self.feats[f] / train_data_length # feature expectation on empirical distribution
self.feats[f] = i # each feature function correspond to id
for i in range(self.max_iter):
print("MaxEntClassifier is training ...... ")
ep_model = [0.0] * len(self.feats) # feature expectation on model distribution
for doc in train_data:
prob = self.calculate_probability(doc) # calculate p(y|x)
if prob == "collapse":
print("The program collapse. The iter number: %d." % (i + 1))
return
for feature in doc:
for weight, label in prob:
if (label, feature) in self.feats: # only focus on features from training data.
idx = self.feats[(label, feature)] # get feature id
ep_model[idx] += weight * (1.0 / train_data_length) # sum(1/N * f(y,x)*p(y|x)), p(x) = 1/N
last_weight = self.weight[:]
for j, win in enumerate(self.weight):
delta = 1.0 / the_max * np.log(ep_empirical[j] / ep_model[j])
self.weight[j] += delta # update weight
print("MaxEntClassifier is testing ...")
classify_labels = []
for data in test_data:
classify_labels.append(self.classify(data))
classify_results.append(classify_labels)
# test if the algorithm is convergence
if self.convergence(last_weight):
print("The program convergence. The iter number: %d." % (i + 1))
break
print("MaxEntClassifier trains over!")
return classify_results
def classify(self, the_input_features):
prob = self.calculate_probability(the_input_features)
prob.sort(reverse=True)
if prob[0][0] > prob[1][0]:
return prob[0][1]
else:
return prob[1][1]
# ################################################
# classifier based on Support Vector Machine
# ################################################
from sklearn.svm import SVC
class SVMClassifier:
def __init__(self, train_data, train_labels, best_words, C):
train_data = np.array(train_data)
train_labels = np.array(train_labels)
self.best_words = best_words
self.clf = SVC(C=C)
self.__train(train_data, train_labels)
def words2vector(self, all_data):
vectors = []
best_words_index = {}
for i, word in enumerate(self.best_words):
best_words_index[word] = i
for data in all_data:
vector = [0 for x in range(len(self.best_words))]
for word in data:
i = best_words_index.get(word)
if i is not None:
vector[i] = vector[i] + 1
vectors.append(vector)
vectors = np.array(vectors)
return vectors
def __train(self, train_data, train_labels):
print("SVMClassifier is training ...... ")
train_vectors = self.words2vector(train_data)
self.clf.fit(train_vectors, np.array(train_labels))
print("SVMClassifier trains over!")
def classify(self, data):
vector = self.words2vector([data])
prediction = self.clf.predict(vector)
return prediction[0]
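A toy usage sketch of the KNN classifier above (my example, not part of the module); unlike DictClassifier it needs no external dictionary files, only pre-tokenised documents and 0/1 labels.

if __name__ == "__main__":
    train_docs = [["很", "好", "喜欢"], ["不错", "满意"], ["差", "讨厌"], ["失望", "难用"]]
    train_labels = [1, 1, 0, 0]

    knn = KNNClassifier(train_docs, train_labels, k=1)
    print(knn.classify(["很", "喜欢"]))  # expected 1: nearest neighbour is a positive doc
    print(knn.classify(["差", "讨厌"]))  # expected 0: nearest neighbour is a negative doc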
| 40.045872
| 119
| 0.548597
|
c4dbde89f7f91fa8096df1a20692af060fde099c
| 7,212
|
py
|
Python
|
ven2/lib/python2.7/site-packages/zope/browserresource/tests/test_icondirective.py
|
manliu1225/Facebook_crawler
|
0f75a1c4382dd4effc3178d84b99b0cad97337cd
|
[
"Apache-2.0"
] | 1
|
2019-11-30T07:47:08.000Z
|
2019-11-30T07:47:08.000Z
|
ven2/lib/python2.7/site-packages/zope/browserresource/tests/test_icondirective.py
|
manliu1225/Facebook_crawler
|
0f75a1c4382dd4effc3178d84b99b0cad97337cd
|
[
"Apache-2.0"
] | 10
|
2016-03-24T07:52:07.000Z
|
2020-03-02T09:52:06.000Z
|
ven2/lib/python2.7/site-packages/zope/browserresource/tests/test_icondirective.py
|
manliu1225/Facebook_crawler
|
0f75a1c4382dd4effc3178d84b99b0cad97337cd
|
[
"Apache-2.0"
] | 2
|
2015-04-03T08:18:34.000Z
|
2019-12-09T09:36:43.000Z
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test Icon-Directive
"""
import os
from io import StringIO
from unittest import TestCase
from zope import component
from zope.configuration.exceptions import ConfigurationError
from zope.configuration.xmlconfig import xmlconfig, XMLConfig
from zope.interface import implementer
from zope.publisher.browser import TestRequest
from zope.security.checker import ProxyFactory
from zope.security.interfaces import Forbidden
from zope.security.proxy import removeSecurityProxy
from zope.traversing.browser.absoluteurl import AbsoluteURL
from zope.traversing.browser.interfaces import IAbsoluteURL
import zope.location.interfaces
import zope.browserresource
from zope.component.testfiles.views import IC
from zope.browserresource.tests import support
from zope.testing import cleanup
template = u"""<configure
xmlns='http://namespaces.zope.org/zope'
xmlns:browser='http://namespaces.zope.org/browser'
i18n_domain='zope'
>
%s
</configure>"""
request = TestRequest()
@implementer(IC)
class Ob(object):
pass
ob = Ob()
request._vh_root = support.site
def defineCheckers():
# define the appropriate checker for a FileResource for these tests
from zope.security.protectclass import protectName
from zope.browserresource.file import FileResource
protectName(FileResource, '__call__', 'zope.Public')
class Test(support.SiteHandler, cleanup.CleanUp, TestCase):
def setUp(self):
super(Test, self).setUp()
XMLConfig('meta.zcml', zope.browserresource)()
defineCheckers()
component.provideAdapter(AbsoluteURL, (None, None), IAbsoluteURL)
def test(self):
self.assertEqual(
component.queryMultiAdapter((ob, request), name='zmi_icon'),
None)
import zope.browserresource.tests as p
path = os.path.dirname(p.__file__)
path = os.path.join(path, 'testfiles', 'test.gif')
# Configure the icon and make sure we can render the resulting view:
xmlconfig(StringIO(template % (
u'''
<browser:icon name="zmi_icon"
for="zope.component.testfiles.views.IC"
file="%s" />
''' % path
)))
view = component.getMultiAdapter((ob, request), name='zmi_icon')
rname = 'zope-component-testfiles-views-IC-zmi_icon.gif'
self.assertEqual(
view(),
'<img src="http://127.0.0.1/@@/%s" alt="C" '
'width="16" height="16" border="0" />'
% rname)
self.assertEqual(view.url(), 'http://127.0.0.1/@@/' + rname)
# Make sure that the title attribute works
xmlconfig(StringIO(template % (
u'''
<browser:icon name="zmi_icon_w_title"
for="zope.component.testfiles.views.IC"
file="%s" title="click this!" />
''' % path
)))
view = component.getMultiAdapter(
(ob, request), name='zmi_icon_w_title')
rname = 'zope-component-testfiles-views-IC-zmi_icon_w_title.gif'
self.assertEqual(
view(),
'<img src="http://127.0.0.1/@@/%s" alt="click this!" '
'width="16" height="16" border="0" />'
% rname)
# Make sure that the width and height attributes work
xmlconfig(StringIO(template % (
u'''
<browser:icon name="zmi_icon_w_width_and_height"
for="zope.component.testfiles.views.IC"
file="%s"
width="20" height="12" />
''' % path
)))
view = component.getMultiAdapter((ob, request),
name='zmi_icon_w_width_and_height')
rname = ('zope-component-testfiles-views-IC-'
'zmi_icon_w_width_and_height.gif')
self.assertEqual(
view(),
'<img src="http://127.0.0.1/@@/%s" alt="C" '
'width="20" height="12" border="0" />'
% rname)
# Make sure that the image was installed as a resource:
resource = ProxyFactory(component.getAdapter(request, name=rname))
self.assertRaises(Forbidden, getattr, resource, '_testData')
resource = removeSecurityProxy(resource)
with open(path, 'rb') as f:
self.assertEqual(resource._testData(), f.read())
def testResource(self):
self.assertEqual(
component.queryMultiAdapter((ob, request), name='zmi_icon'), None)
import zope.browserresource.tests as p
path = os.path.dirname(p.__file__)
path = os.path.join(path, 'testfiles', 'test.gif')
xmlconfig(StringIO(template % (
u'''
<browser:resource name="zmi_icon_res"
file="%s" />
<browser:icon name="zmi_icon"
for="zope.component.testfiles.views.IC"
resource="zmi_icon_res" />
''' % path
)))
view = component.getMultiAdapter((ob, request), name='zmi_icon')
rname = "zmi_icon_res"
self.assertEqual(
view(),
'<img src="http://127.0.0.1/@@/%s" alt="C" width="16" '
'height="16" border="0" />'
% rname)
resource = ProxyFactory(component.getAdapter(request, name=rname))
self.assertRaises(Forbidden, getattr, resource, '_testData')
resource = removeSecurityProxy(resource)
with open(path, 'rb') as f:
self.assertEqual(resource._testData(), f.read())
def testResourceErrors(self):
self.assertEqual(
component.queryMultiAdapter((ob, request), name='zmi_icon'), None)
import zope.browserresource.tests as p
path = os.path.dirname(p.__file__)
path = os.path.join(path, 'testfiles', 'test.gif')
config = StringIO(template % (
u'''
<browser:resource name="zmi_icon_res"
file="%s" />
<browser:icon name="zmi_icon"
for="zope.component.testfiles.views.IC"
file="%s"
resource="zmi_icon_res" />
''' % (path, path)
))
self.assertRaises(ConfigurationError, xmlconfig, config)
config = StringIO(template % (
u"""
<browser:icon name="zmi_icon"
for="zope.component.testfiles.views.IC"
/>
"""
))
self.assertRaises(ConfigurationError, xmlconfig, config)
| 34.84058
| 78
| 0.584997
|
8a919ca3de4e6c25e69332bf8eaf1b984bf9d0a4
| 5,065
|
py
|
Python
|
DAE.py
|
butroy/movie-autoencoder
|
d18a54ecf7145f6cbda2d2233d4ff53056812b11
|
[
"Apache-2.0"
] | null | null | null |
DAE.py
|
butroy/movie-autoencoder
|
d18a54ecf7145f6cbda2d2233d4ff53056812b11
|
[
"Apache-2.0"
] | null | null | null |
DAE.py
|
butroy/movie-autoencoder
|
d18a54ecf7145f6cbda2d2233d4ff53056812b11
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import model_helper
class DAE:
def __init__(self, FLAGS):
        ''' Implementation of the deep autoencoder class.'''
self.FLAGS=FLAGS
self.weight_initializer=model_helper._get_weight_initializer()
self.bias_initializer=model_helper._get_bias_initializer()
self.init_parameters()
def init_parameters(self):
        '''Initialize the network weights and biases.'''
with tf.name_scope('weights'):
self.W_1=tf.get_variable(name='weight_1', shape=(self.FLAGS.num_v,self.FLAGS.num_h),
initializer=self.weight_initializer)
self.W_2=tf.get_variable(name='weight_2', shape=(self.FLAGS.num_h,self.FLAGS.num_h),
initializer=self.weight_initializer)
self.W_3=tf.get_variable(name='weight_3', shape=(self.FLAGS.num_h,self.FLAGS.num_h),
initializer=self.weight_initializer)
self.W_4=tf.get_variable(name='weight_4', shape=(self.FLAGS.num_h,self.FLAGS.num_v),
initializer=self.weight_initializer)
with tf.name_scope('biases'):
self.b1=tf.get_variable(name='bias_1', shape=(self.FLAGS.num_h),
initializer=self.bias_initializer)
self.b2=tf.get_variable(name='bias_2', shape=(self.FLAGS.num_h),
initializer=self.bias_initializer)
self.b3=tf.get_variable(name='bias_3', shape=(self.FLAGS.num_h),
initializer=self.bias_initializer)
def _inference(self, x):
        '''Run one forward pass and predict the network outputs.
        @param x: input ratings
        @return: network predictions
        '''
with tf.name_scope('inference'):
a1=tf.nn.elu(tf.nn.bias_add(tf.matmul(x, self.W_1),self.b1))
a2=tf.nn.elu(tf.nn.bias_add(tf.matmul(a1, self.W_2),self.b2))
a3=tf.nn.elu(tf.nn.bias_add(tf.matmul(a2, self.W_3),self.b3))
a4=tf.matmul(a3, self.W_4)
return a4
def _compute_loss(self, predictions, labels,num_labels):
''' Computing the Mean Squared Error loss between the input and output of the network.
@param predictions: predictions of the stacked autoencoder
@param labels: input values of the stacked autoencoder which serve as labels at the same time
@param num_labels: number of labels !=0 in the data set to compute the mean
@return mean squared error loss tf-operation
'''
with tf.name_scope('loss'):
loss_op=tf.div(tf.reduce_sum(tf.square(tf.subtract(predictions,labels))),num_labels)
return loss_op
def _optimizer(self, x):
        '''Optimize the network parameters through stochastic gradient descent.
        @param x: input values for the stacked autoencoder.
        @return: tensorflow training operation
        @return: root mean squared error
        '''
outputs=self._inference(x)
mask=tf.where(tf.equal(x,0.0), tf.zeros_like(x), x) # indices of 0 values in the training set
num_train_labels=tf.cast(tf.count_nonzero(mask),dtype=tf.float32) # number of non zero values in the training set
bool_mask=tf.cast(mask,dtype=tf.bool) # boolean mask
outputs=tf.where(bool_mask, outputs, tf.zeros_like(outputs)) # set the output values to zero if corresponding input values are zero
MSE_loss=self._compute_loss(outputs,x,num_train_labels)
if self.FLAGS.l2_reg==True:
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
MSE_loss = MSE_loss + self.FLAGS.lambda_ * l2_loss
train_op=tf.train.AdamOptimizer(self.FLAGS.learning_rate).minimize(MSE_loss)
RMSE_loss=tf.sqrt(MSE_loss)
return train_op, RMSE_loss
def _validation_loss(self, x_train, x_test):
''' Computing the loss during the validation time.
@param x_train: training data samples
@param x_test: test data samples
@return networks predictions
@return root mean squared error loss between the predicted and actual ratings
'''
outputs=self._inference(x_train) # use training sample to make prediction
        mask=tf.where(tf.equal(x_test,0.0), tf.zeros_like(x_test), x_test) # identify the zero values in the test set
num_test_labels=tf.cast(tf.count_nonzero(mask),dtype=tf.float32) # count the number of non zero values
bool_mask=tf.cast(mask,dtype=tf.bool)
outputs=tf.where(bool_mask, outputs, tf.zeros_like(outputs))
MSE_loss=self._compute_loss(outputs, x_test, num_test_labels)
RMSE_loss=tf.sqrt(MSE_loss)
return outputs, RMSE_loss
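A hedged TensorFlow 1.x wiring sketch; the FLAGS values, the "from DAE import DAE" path and the presence of the project's model_helper module are assumptions.

from types import SimpleNamespace
import tensorflow as tf
from DAE import DAE  # assumed module path

# Assumed hyperparameters; the real values come from the project's tf.app.flags.
FLAGS = SimpleNamespace(num_v=3952, num_h=128, learning_rate=1e-3,
                        l2_reg=False, lambda_=0.0)

x = tf.placeholder(tf.float32, shape=[None, FLAGS.num_v])
dae = DAE(FLAGS)
train_op, rmse = dae._optimizer(x)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Feed batches of user rating vectors here, e.g.:
    # sess.run([train_op, rmse], feed_dict={x: batch})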
| 44.043478
| 139
| 0.616387
|
b4a064baf22660a80b25ada5585870602b870d62
| 2,677
|
py
|
Python
|
tests/test_metadata.py
|
solanolabs/sphinx
|
d22cfb08a4a3013c92a65b357d814caf676dc238
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_metadata.py
|
solanolabs/sphinx
|
d22cfb08a4a3013c92a65b357d814caf676dc238
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_metadata.py
|
solanolabs/sphinx
|
d22cfb08a4a3013c92a65b357d814caf676dc238
|
[
"BSD-2-Clause"
] | 9
|
2015-08-26T19:59:06.000Z
|
2022-03-07T17:10:06.000Z
|
# -*- coding: utf-8 -*-
"""
test_metadata
~~~~~~~~~~~~~
Test our handling of metadata in files with bibliographic metadata.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# adapted from an example of bibliographic metadata at
# http://docutils.sourceforge.net/docs/user/rst/demo.txt
from util import *
from nose.tools import assert_equals
app = env = None
warnings = []
def setup_module():
# Is there a better way of generating this doctree than manually iterating?
global app, env
app = TestApp(srcdir='(temp)')
env = app.env
msg, num, it = env.update(app.config, app.srcdir, app.doctreedir, app)
for docname in it:
pass
def teardown_module():
app.cleanup()
def test_docinfo():
"""
Inspect the 'docinfo' metadata stored in the first node of the document.
    Note this doesn't give us access to data stored in subsequent blocks
that might be considered document metadata, such as 'abstract' or
'dedication' blocks, or the 'meta' role. Doing otherwise is probably more
messing with the internals of sphinx than this rare use case merits.
"""
exampledocinfo = env.metadata['metadata']
expecteddocinfo = {
'author': u'David Goodger',
'authors': [u'Me', u'Myself', u'I'],
'address': u'123 Example Street\nExample, EX Canada\nA1B 2C3',
'field name': u'This is a generic bibliographic field.',
'field name 2': (u'Generic bibliographic fields may contain multiple '
u'body elements.\n\nLike this.'),
'status': u'This is a "work in progress"',
'version': u'1',
'copyright': (u'This document has been placed in the public domain. '
u'You\nmay do with it as you wish. You may copy, modify,'
u'\nredistribute, reattribute, sell, buy, rent, lease,\n'
u'destroy, or improve it, quote it at length, excerpt,\n'
u'incorporate, collate, fold, staple, or mutilate it, or '
u'do\nanything else to it that your or anyone else\'s '
u'heart\ndesires.'),
'contact': u'goodger@python.org',
'date': u'2006-05-21',
'organization': u'humankind',
'revision': u'4564',
}
# I like this way of comparing dicts - easier to see the error.
for key in exampledocinfo:
yield assert_equals, exampledocinfo.get(key), expecteddocinfo.get(key)
# but then we still have to check for missing keys
yield assert_equals, set(expecteddocinfo.keys()), set(exampledocinfo.keys())
| 38.242857
| 80
| 0.63093
|
09412056cf1fd2a8310ead40da4fafcacd1c7d93
| 436
|
py
|
Python
|
Django_Stuff/users_security/basic_app/forms.py
|
muhammad-mamdouh/Django_Projects
|
1f31e12aefb36b33474256db40a2c551882f445e
|
[
"MIT"
] | null | null | null |
Django_Stuff/users_security/basic_app/forms.py
|
muhammad-mamdouh/Django_Projects
|
1f31e12aefb36b33474256db40a2c551882f445e
|
[
"MIT"
] | 40
|
2020-06-05T22:10:58.000Z
|
2022-03-11T23:56:09.000Z
|
Django_Stuff/users_security/basic_app/forms.py
|
muhammad-mamdouh/Django_Projects
|
1f31e12aefb36b33474256db40a2c551882f445e
|
[
"MIT"
] | 1
|
2021-03-31T10:30:03.000Z
|
2021-03-31T10:30:03.000Z
|
from django import forms
from django.contrib.auth.models import User
from .models import UserProfileInfo
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'email', 'password')
class UserProfileInfoForm(forms.ModelForm):
class Meta:
model = UserProfileInfo
fields = ('portfolio_site', 'profile_pic')
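A hedged sketch of the registration view these two forms are usually paired with; it assumes UserProfileInfo has a one-to-one "user" field and that a basic_app/registration.html template exists.

from django.shortcuts import render
from .forms import UserForm, UserProfileInfoForm

def register(request):
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileInfoForm(data=request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            user.set_password(user.password)  # hash the raw password field
            user.save()
            profile = profile_form.save(commit=False)
            profile.user = user  # assumes a OneToOneField named 'user'
            if 'profile_pic' in request.FILES:
                profile.profile_pic = request.FILES['profile_pic']
            profile.save()
            registered = True
    else:
        user_form = UserForm()
        profile_form = UserProfileInfoForm()
    return render(request, 'basic_app/registration.html',
                  {'user_form': user_form, 'profile_form': profile_form,
                   'registered': registered})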
| 22.947368
| 60
| 0.699541
|
15419a9df808b20258440dbd062895af01f629c0
| 1,821
|
py
|
Python
|
back-end/migrations/versions/bca0417f931d_comments_table_upgrade.py
|
Asterisme/flask-vuejs-madblog
|
5c215f5cb9eefb61c6c230b1cc26a2797b8ec280
|
[
"MIT"
] | null | null | null |
back-end/migrations/versions/bca0417f931d_comments_table_upgrade.py
|
Asterisme/flask-vuejs-madblog
|
5c215f5cb9eefb61c6c230b1cc26a2797b8ec280
|
[
"MIT"
] | null | null | null |
back-end/migrations/versions/bca0417f931d_comments_table_upgrade.py
|
Asterisme/flask-vuejs-madblog
|
5c215f5cb9eefb61c6c230b1cc26a2797b8ec280
|
[
"MIT"
] | null | null | null |
"""comments table upgrade
Revision ID: bca0417f931d
Revises: 5adcf0f4bbfa
Create Date: 2020-04-10 17:48:35.215063
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bca0417f931d'
down_revision = '5adcf0f4bbfa'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('mark_read', sa.Boolean(), nullable=True),
sa.Column('disabled', sa.Boolean(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['parent_id'], ['comments.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_comments_timestamp'), 'comments', ['timestamp'], unique=False)
op.create_table('comments_likes',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('comment_id', sa.Integer(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['comment_id'], ['comments.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('comments_likes')
op.drop_index(op.f('ix_comments_timestamp'), table_name='comments')
op.drop_table('comments')
# ### end Alembic commands ###
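A sketch of applying this revision programmatically; normally one runs "flask db upgrade" or "alembic upgrade head" from the shell, and the ini path below is an assumption.

from alembic import command
from alembic.config import Config

cfg = Config("migrations/alembic.ini")    # assumed location under Flask-Migrate
command.upgrade(cfg, "bca0417f931d")      # apply this revision
# command.downgrade(cfg, "5adcf0f4bbfa")  # or roll back to the previous revision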
| 35.019231
| 91
| 0.675453
|
41e8f3b80673dd316b4f6211fcbd46743b4461e9
| 446
|
py
|
Python
|
test/servers_test/test_portal.py
|
AnonymousAuthor2013/KnowAlpha
|
7332231bbb34d7dcdab796aa20fc561dbca1fb76
|
[
"MIT"
] | 2
|
2019-06-25T02:46:37.000Z
|
2019-12-02T11:26:16.000Z
|
test/servers_test/test_portal.py
|
AnonymousAuthor2013/KnowAlpha
|
7332231bbb34d7dcdab796aa20fc561dbca1fb76
|
[
"MIT"
] | 1
|
2019-07-18T03:45:28.000Z
|
2019-07-18T03:45:28.000Z
|
test/servers_test/test_portal.py
|
AnonymousAuthor2013/KnowAlpha
|
7332231bbb34d7dcdab796aa20fc561dbca1fb76
|
[
"MIT"
] | 4
|
2019-06-23T13:49:07.000Z
|
2019-06-25T12:21:59.000Z
|
from programmingalpha.MainPortal.Requester import RequesterServices
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
config_file="portalService.json"
print("staring server")
server=RequesterServices(config_file)
server.start()
print("server started")
| 26.235294
| 79
| 0.773543
|
f23f7871462c4d113917fef2b4536d3abedd6259
| 103
|
py
|
Python
|
venv/lib/python2.7/_weakrefset.py
|
IdeasBlockLT/emem
|
a3f6e1950e9a074fbb696728778b22d6f523c3df
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/_weakrefset.py
|
IdeasBlockLT/emem
|
a3f6e1950e9a074fbb696728778b22d6f523c3df
|
[
"MIT"
] | 9
|
2019-12-04T23:15:54.000Z
|
2022-02-10T11:05:43.000Z
|
venv/lib/python2.7/_weakrefset.py
|
edbolivar/perfectpair
|
c165cff40353c602fe0dc418375b90e9b25de674
|
[
"MIT"
] | null | null | null |
/usr/local/Cellar/python@2/2.7.16/Frameworks/Python.framework/Versions/2.7/lib/python2.7/_weakrefset.py
| 103
| 103
| 0.815534
|