Dataset schema (one row per source file):

| column | type | observed range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
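For orientation, here is a minimal sketch of how records with this schema are typically iterated. The dataset path is a placeholder (this dump does not name its source), and the use of the Hugging Face `datasets` library with streaming is an assumption:

```python
from itertools import islice

from datasets import load_dataset  # Hugging Face datasets library (assumed)

# "path/to/this-dataset" is a placeholder repo id, not taken from this dump.
ds = load_dataset("path/to/this-dataset", split="train", streaming=True)
for record in islice(ds, 2):
    print(record["max_stars_repo_name"], record["max_stars_repo_path"], record["size"])
    print(record["content"][:80])  # first 80 characters of the stored source file
```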
---

hexsha: f5abdde5a3958abc9b10a8228aa1cc05ff1642eb | size: 549 | ext: py | lang: Python
max_stars: path=cdk/app.py, repo=aws-samples/aws-alb-target-group-load-shedding, head=f7a27214259ff069b76c8ff6246327d6319fd1da, licenses=["MIT-0"], count=1, events 2021-10-24T01:45:48.000Z to 2021-10-24T01:45:48.000Z
max_issues: path=cdk/app.py, repo=aws-samples/aws-alb-target-group-load-shedding, head=f7a27214259ff069b76c8ff6246327d6319fd1da, licenses=["MIT-0"], count=null
max_forks: path=cdk/app.py, repo=aws-samples/aws-alb-target-group-load-shedding, head=f7a27214259ff069b76c8ff6246327d6319fd1da, licenses=["MIT-0"], count=1, events 2021-10-24T01:45:42.000Z to 2021-10-24T01:45:42.000Z
content:
#!/usr/bin/env python3
import os
from aws_cdk import core as cdk
# For consistency with TypeScript code, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import core
from cdk.alb_monitor_stack import ALBMonitorStack
app = core.App()
alb_monitor_stack = ALBMonitorStack(app, "ALBMonitorStack")
app.synth()
avg_line_length: 28.894737 | max_line_length: 79 | alphanum_fraction: 0.765027

---

hexsha: f9eb6b388b1d44d54ea490cd96561f5ff8d8cecb | size: 1,179 | ext: py | lang: Python
max_stars: path=test/test_all.py, repo=G-AshwinKumar/experiment-notebook, head=aae1c5fb9ef8f84dce5d75989ed8975797282f37, licenses=["MIT"], count=null
max_issues: path=test/test_all.py, repo=G-AshwinKumar/experiment-notebook, head=aae1c5fb9ef8f84dce5d75989ed8975797282f37, licenses=["MIT"], count=null
max_forks: path=test/test_all.py, repo=G-AshwinKumar/experiment-notebook, head=aae1c5fb9ef8f84dce5d75989ed8975797282f37, licenses=["MIT"], count=null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Rull all test modules in the current working dir
"""
__author__ = "Miguel Hernández Cabronero <miguel.hernandez@uab.cat>"
__date__ = "19/09/2019"
import os
import unittest
import sys
import argparse
import datetime
# So that all tests can use the intended module structure transparently
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
os.chdir(os.path.dirname(os.path.dirname(__file__)))
import enb.ray_cluster
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="Be verbose? Repeat for more", action="count", default=0)
options = parser.parse_known_args()[0]
enb.ray_cluster.init_ray()
if __name__ == '__main__':
    suite = unittest.TestLoader().discover(os.path.dirname(__file__))
    if options.verbose:
        print(f"Running {suite.countTestCases()} tests @ {datetime.datetime.now()}")
        print(f"{'[Params]':-^30s}")
        for param, value in options.__dict__.items():
            print(f"{param}: {value}")
        print(f"{'':-^30s}")
        print()
    unittest.TextTestRunner(verbosity=3 if options.verbose else 1).run(suite)
avg_line_length: 30.230769 | max_line_length: 101 | alphanum_fraction: 0.697201

---

hexsha: d95ecbdc4dd77315b35ba446df9a5200f5cb6848 | size: 5,289 | ext: py | lang: Python
max_stars: path={{cookiecutter.project_slug}}/docs/conf.py, repo=apiology/cookiecutter-pypackage, head=898d930d20347349b27ddc4bf5b2bff69c1ea711, licenses=["BSD-3-Clause"], count=1, events 2020-12-18T21:04:29.000Z to 2020-12-18T21:04:29.000Z
max_issues: path={{cookiecutter.project_slug}}/docs/conf.py, repo=apiology/cookiecutter-pypackage, head=898d930d20347349b27ddc4bf5b2bff69c1ea711, licenses=["BSD-3-Clause"], count=1, events 2021-11-23T18:52:17.000Z to 2021-11-23T18:52:17.000Z
max_forks: path={{cookiecutter.project_slug}}/docs/conf.py, repo=apiology/cookiecutter-pypackage, head=898d930d20347349b27ddc4bf5b2bff69c1ea711, licenses=["BSD-3-Clause"], count=null
content:
#!/usr/bin/env python
#
# {{ cookiecutter.project_slug }} documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from typing import Dict # noqa: E402
import {{ cookiecutter.package_name }} # noqa: E402
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = '{{ cookiecutter.project_name }}'
copyright = "{% now 'local', '%Y' %}, {{ cookiecutter.full_name.replace('\"', '\\\"') }}"
author = "{{ cookiecutter.full_name.replace('\"', '\\\"') }}"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = {{ cookiecutter.package_name }}.__version__
# The full version, including alpha/beta/rc tags.
release = {{ cookiecutter.package_name }}.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{{ cookiecutter.project_slug }}doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements: Dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, '{{ cookiecutter.project_slug }}.tex',
'{{ cookiecutter.project_name }} Documentation',
"{{ cookiecutter.full_name.replace('\"', '\\\"') }}", 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, '{{ cookiecutter.project_slug }}',
'{{ cookiecutter.project_name }} Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, '{{ cookiecutter.project_slug }}',
'{{ cookiecutter.project_name }} Documentation',
author,
'{{ cookiecutter.project_slug }}',
'One line description of project.',
'Miscellaneous'),
]
avg_line_length: 32.648148 | max_line_length: 89 | alphanum_fraction: 0.678956

---

hexsha: 1d3e9954daa2ed0cbaaaea5e8f260d01518f1ccb | size: 1,750 | ext: py | lang: Python
max_stars: path=moses/urls.py, repo=vassilyvv/django-moses, head=9745efdf770c87ae8ce341dec526a794b191595a, licenses=["BSD-3-Clause"], count=3, events 2021-05-22T18:17:49.000Z to 2021-11-12T07:01:04.000Z
max_issues: path=moses/urls.py, repo=vassilyvv/django-moses, head=9745efdf770c87ae8ce341dec526a794b191595a, licenses=["BSD-3-Clause"], count=null
max_forks: path=moses/urls.py, repo=vassilyvv/django-moses, head=9745efdf770c87ae8ce341dec526a794b191595a, licenses=["BSD-3-Clause"], count=1, events 2021-12-18T13:55:54.000Z to 2021-12-18T13:55:54.000Z
content:
from django.urls import path, include
from . import views as accounts_views
from rest_framework_simplejwt.views import (
TokenRefreshView
)
from .views import TokenObtainPairView, VerifyOTPView, ConfirmPhoneNumber, ConfirmEmail, \
RequestPhoneNumberConfirmPin, RequestEmailConfirmPin, GetUserRoles
app_name = 'moses'
urlpatterns = [
    path('token/obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('token/verify_otp/', VerifyOTPView.as_view(), name='token_verify_pair'),
    path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('mfa/', accounts_views.MFAView.as_view(), name='mfa'),
    path('confirm_phone_number/', ConfirmPhoneNumber.as_view(), name='confirm_phone_number'),
    path('confirm_email/', ConfirmEmail.as_view(), name='confirm_email'),
    path('request_phone_number_pin/', RequestPhoneNumberConfirmPin.as_view(),
         name='request_phone_number_confirm_pin'),
    path('request_email_pin/', RequestEmailConfirmPin.as_view(), name='request_email_confirm_pin'),
    path('password/', accounts_views.SetPasswordView.as_view(), name='set_password'),
    path('password/reset/', accounts_views.ResetPassword.as_view(), name='reset-password'),
    path('is_email_available/', accounts_views.CheckEmailAvailability.as_view(),
         name='check_email_availability'),
    path('is_mfa_enabled_for_phone_number/', accounts_views.CheckIsMFAEnabled.as_view(),
         name='check_is_mfa_enabled'),
    path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('roles/', GetUserRoles.as_view(), name='user_roles'),
    path('', include('djoser.urls')),
]
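Since every route above is named and the module sets app_name = 'moses', the URLs can be resolved by name; a small sketch, assuming the project-level URLconf includes this module under a hypothetical 'api/auth/' prefix:

```python
# Hypothetical project urls.py entry: path('api/auth/', include('moses.urls'))
from django.urls import reverse

obtain_url = reverse('moses:token_obtain_pair')  # resolves within the 'moses' namespace
refresh_url = reverse('moses:token_refresh')
```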
avg_line_length: 43.75 | max_line_length: 99 | alphanum_fraction: 0.747429

---

hexsha: 3557822fec3bc0e2a9c4a55282f8fde4177daa1f | size: 1,649 | ext: py | lang: Python
max_stars: path=record.py, repo=baobrian/uis-rnn, head=74bcbf78aa29ab50ada2170429772027b1b524c1, licenses=["Apache-2.0"], count=null
max_issues: path=record.py, repo=baobrian/uis-rnn, head=74bcbf78aa29ab50ada2170429772027b1b524c1, licenses=["Apache-2.0"], count=null
max_forks: path=record.py, repo=baobrian/uis-rnn, head=74bcbf78aa29ab50ada2170429772027b1b524c1, licenses=["Apache-2.0"], count=null
content:
# -*- coding: utf-8 -*-
from pyaudio import PyAudio, paInt16
import numpy as np
from datetime import datetime
import wave

# Save the frames in `data` to a WAV file named `filename`
def save_wave_file(filename, data):
    wf = wave.open(filename, 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(2)
    wf.setframerate(SAMPLING_RATE)
    wf.writeframes(b"".join(data))  # frames are bytes, so join with a bytes separator
    wf.close()

NUM_SAMPLES = 20       # size of the block buffered internally by pyAudio
SAMPLING_RATE = 8000   # sampling rate
LEVEL = 1500           # amplitude threshold above which audio is kept
COUNT_NUM = 20         # record if more than COUNT_NUM samples exceed LEVEL within NUM_SAMPLES samples
SAVE_LENGTH = 8        # minimum recording length: SAVE_LENGTH * NUM_SAMPLES samples

# Open the audio input stream
pa = PyAudio()
stream = pa.open(format=paInt16, channels=1, rate=SAMPLING_RATE, input=True,
                 frames_per_buffer=NUM_SAMPLES)
save_count = 0
save_buffer = []
while True:
    # Read NUM_SAMPLES samples
    string_audio_data = stream.read(NUM_SAMPLES)
    # Convert the raw bytes into an array (np.frombuffer replaces the deprecated np.fromstring)
    audio_data = np.frombuffer(string_audio_data, dtype=np.short)
    # Count the samples greater than LEVEL
    large_sample_count = np.sum(audio_data > LEVEL)
    print(np.max(audio_data))
    # If the count exceeds COUNT_NUM, keep at least SAVE_LENGTH blocks
    if large_sample_count > COUNT_NUM:
        save_count = SAVE_LENGTH
    else:
        save_count -= 1
    if save_count < 0:
        save_count = 0
    if save_count > 0:
        # Append the block that should be kept to save_buffer
        save_buffer.append(string_audio_data)
    else:
        # Write save_buffer to a WAV file named after the time it was saved
        if len(save_buffer) > 0:
            filename = datetime.now().strftime("%Y-%m-%d_%H_%M_%S") + ".wav"
            save_wave_file(filename, save_buffer)
            save_buffer = []
            print(filename, "saved")
avg_line_length: 25.765625 | max_line_length: 76 | alphanum_fraction: 0.664039

---

hexsha: 168c40c552bb5638a7c4061ca6837db7ad2290fb | size: 1,515 | ext: py | lang: Python
max_stars: path=converting_cusip_to_isin.py, repo=foreverycast/parse-script, head=66beacffd1751f9b7df54ad633f62625f0ac18ac, licenses=["MIT"], count=null
max_issues: path=converting_cusip_to_isin.py, repo=foreverycast/parse-script, head=66beacffd1751f9b7df54ad633f62625f0ac18ac, licenses=["MIT"], count=null
max_forks: path=converting_cusip_to_isin.py, repo=foreverycast/parse-script, head=66beacffd1751f9b7df54ad633f62625f0ac18ac, licenses=["MIT"], count=null
content:
alphabet = [
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
    'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
]


def is_number(s):
    """ Check if the number is a float """
    try:
        float(s)
        return True
    except ValueError:
        return False
    except TypeError:
        return False


def convert_cusip_to_isin(cusip):
    """ Convert cusip to isin """
    print(cusip)
    cusip = 'US' + str(cusip).upper()
    only_digits_cusip = ""
    for i in cusip:
        # print(i)
        if is_number(i):
            only_digits_cusip = str(only_digits_cusip) + str(i)
        else:
            only_digits_cusip = str(only_digits_cusip) + str(10 + alphabet.index(i))
    odd = []
    even = []
    for i, char in enumerate(only_digits_cusip):
        if i % 2 == 0:
            odd.append(char)
        else:
            even.append(char)
    new_length_list = []
    length_list = []
    string_int = ""
    if len(odd) > len(even):
        length_list = odd
        for i in even:
            string_int += str(i)
    else:
        length_list = even
        for i in odd:
            string_int += str(i)
    for i in length_list:
        new_length_list.append(int(i) * 2)
    for i in new_length_list:
        string_int += str(i)
    dig_sum = 0
    for i in string_int:
        dig_sum += int(i)
    check_sum = (10 - (dig_sum % 10)) % 10
    isin = str(cusip) + str(check_sum)
    return isin

# print(convert_cusip_to_isin('037833100'))
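A quick worked check of the converter above, using Apple's published identifiers so the result can be verified independently (the CUSIP matches the commented-out test call):

```python
# Apple Inc.: CUSIP 037833100 -> ISIN US0378331005 (check digit 5).
print(convert_cusip_to_isin('037833100'))  # expected output: US0378331005
```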
avg_line_length: 22.279412 | max_line_length: 84 | alphanum_fraction: 0.515512

---

hexsha: 44344c6e72bf0c88eb53c5cb564446c1419e32de | size: 2,149 | ext: py | lang: Python
max_stars: path=misc/hlc_target.py, repo=cagdemir/equity-index-predictors, head=2546e72328de848222cb6a1c744ababab2058477, licenses=["MIT"], count=null
max_issues: path=misc/hlc_target.py, repo=cagdemir/equity-index-predictors, head=2546e72328de848222cb6a1c744ababab2058477, licenses=["MIT"], count=null
max_forks: path=misc/hlc_target.py, repo=cagdemir/equity-index-predictors, head=2546e72328de848222cb6a1c744ababab2058477, licenses=["MIT"], count=1, events 2021-07-21T12:24:51.000Z to 2021-07-21T12:24:51.000Z
content:
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 27 12:45:02 2019
@author: Administrator
"""
import pdblp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import date
plt.style.use('seaborn')
#con = pdblp.BCon(debug=True, port=8194, timeout=5000)
con = pdblp.BCon(debug=False, port=8194, timeout=5000)
con.start()
target_tickers = [ 'XUTUM Index', 'MEXBOL Index',
'IBOV Index', 'IMOEX Index' , 'JALSH Index']
start = '20040101'
today = date.today().strftime('%Y%m%d')
firstday = '19991230'
ohlc_tickers = ['HIGH','LOW', 'CLOSE']
prices_high = con.bdh(target_tickers, 'PX_HIGH', firstday, today)
prices_high.columns = [i[0] for i in prices_high.columns]
prices_high_int = prices_high.interpolate(method='linear')[target_tickers]
prices_high_w = prices_high_int.groupby(pd.Grouper(freq='W')).max()
prices_low = con.bdh(target_tickers, 'PX_LOW', firstday, today)
prices_low.columns = [i[0] for i in prices_low.columns]
prices_low_int = prices_low.interpolate(method='linear')[target_tickers]
prices_low_w = prices_low_int.groupby(pd.Grouper(freq='W')).min()
prices_close = con.bdh(target_tickers, 'PX_LAST', firstday, today)
prices_close.columns = [i[0] for i in prices_close.columns]
prices_close_int = prices_close.interpolate(method='linear')[target_tickers]
prices_close_w = prices_close_int.groupby(pd.Grouper(freq='W')).last()
returns_high = prices_high_w / prices_close_w.shift(1) - 1
returns_low = prices_low_w / prices_close_w.shift(1) - 1
returns_close = prices_close_w / prices_close_w.shift(1) - 1
returns_fromClose_hlc = pd.concat([returns_high, returns_low, returns_close],axis=1)
returns_fromClose_hlc.columns = [('_').join(i) for i in zip(returns_fromClose_hlc.columns,np.repeat(ohlc_tickers,len(target_tickers)))]
returns_fromClose_hlc = returns_fromClose_hlc[returns_fromClose_hlc.index>=start]
target_hlc = returns_fromClose_hlc.copy()
var_no = 'hltarget'
target_hlc.columns = [var_no+'_'+i for i in target_hlc.columns]
target_hlc.to_excel('C:/Users/sb0538/Desktop/15022020/excels/hlctarget.xlsx')
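The weekly high/low/close construction above relies on pd.Grouper(freq='W'); here is a small self-contained sketch of that resampling step on synthetic data (the 'FAKE Index' ticker and the random prices are made up, and no Bloomberg connection is needed):

```python
import numpy as np
import pandas as pd

# Synthetic daily closes for one made-up ticker, only to illustrate the weekly grouping.
idx = pd.date_range('2021-01-04', periods=15, freq='B')
daily = pd.DataFrame({'FAKE Index': 100 + np.random.randn(15).cumsum()}, index=idx)

weekly_high = daily.groupby(pd.Grouper(freq='W')).max()    # weekly high
weekly_low = daily.groupby(pd.Grouper(freq='W')).min()     # weekly low
weekly_close = daily.groupby(pd.Grouper(freq='W')).last()  # last close of the week

# Returns relative to the prior week's close, mirroring the script above.
returns_high = weekly_high / weekly_close.shift(1) - 1
```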
avg_line_length: 33.061538 | max_line_length: 137 | alphanum_fraction: 0.731968

---

hexsha: bcd9a69e1f6b43cc410e1ca12e5bda151aa79117 | size: 6,778 | ext: py | lang: Python
max_stars: path=agents/data_agent.py, repo=samirma/yaStockRnn, head=89145b0a33de1161dc963f68c45e44c298c9dcd4, licenses=["MIT"], count=null
max_issues: path=agents/data_agent.py, repo=samirma/yaStockRnn, head=89145b0a33de1161dc963f68c45e44c298c9dcd4, licenses=["MIT"], count=null
max_forks: path=agents/data_agent.py, repo=samirma/yaStockRnn, head=89145b0a33de1161dc963f68c45e44c298c9dcd4, licenses=["MIT"], count=null
content:
import pandas as pd
from pandas._libs.tslibs import timestamps
from ta.trend import *
from ta.momentum import *
from ta.volume import *
from ta.volatility import *
from agents.tec_an import TecAn
import datetime as dt
TIMESTAMP_KEY = "timestamp"
MICROTIMESTAMP_KEY = "microtimestamp"
ASKS_KEY = "asks"
BIDS_KEY = "bids"
PRICE_KEY = "price"
AMOUNT_KEY = "amount"
CLOSE = 'close'
DATE = 'Date'
class DataAgent():
def __init__(self,
minutes,
tec,
on_new_data = lambda x: print("{}".format(x)),
on_state = lambda timestamp, price, bid, ask: price,
on_closed_price = lambda price: price,
verbose = False,
save_history = False,
):
self.tec :TecAn = tec
self.final_x = []
self.list = []
self.minutes = minutes
self.raw_limit = 10000
self.last_price = -1
self.last_amount = -1
self.last_timestamp = -1
self.last_processed_index = -1
self.on_new_data = on_new_data
self.on_state = on_state
self.on_closed_price = on_closed_price
self.verbose = verbose
self.history = []
self.save_history = save_history
if (self.verbose):
print("DataAgent (resample: {self.resample} tec: {self.tec})")
def on_new_data(self, x):
self.on_new_data(x)
def on_new_raw_data(self, raw):
price = raw[PRICE_KEY]
amount = raw[AMOUNT_KEY]
timestamp = int(raw[TIMESTAMP_KEY])
bids = raw[BIDS_KEY]
asks = raw[ASKS_KEY]
self.process_data(
price = price,
amount = amount,
timestamp = timestamp,
bids = bids,
asks = asks
)
def resample(self):
return f'{self.minutes}Min'
def process_data(self, price, amount, timestamp, bids, asks):
self.on_state(timestamp, price, bids, asks)
if (self.last_price == price):
#self.log(f"Same price {index_log} {price}")
return
else:
self.last_price = price
current_index = self.process_data_input(price, amount, timestamp)
index_log = f"last_index: {self.last_processed_index} current_index: {current_index} price: {price}"
if (self.last_processed_index == current_index):
#self.log(f"Returning {index_log}")
return self.last_processed_index
#else:
# self.log(f"New block {index_log} {price}")
#self.log(f"{index_log} - last_price: {self.last_price} = price: {price}")
if (current_index == None):
raise SystemExit(f"{price}, {amount}, {timestamp}")
self.validate_data(current_index)
self.list = self.list[-1:]
self.update_index(current_index)
self.on_new_price(
timestamp=current_index.timestamp(),
price=price,
amount=amount
)
def validate_data(self, current_index):
if (self.last_processed_index != -1):
timeframe = self.minutes * 60
#print(current_index)
#print(self.last_index)
current_timestamp = int(current_index.timestamp())
last_timestamp = int(self.last_processed_index.timestamp())
if (current_timestamp < last_timestamp):
raise SystemExit(f"last_index: {self.last_processed_index}({last_timestamp}) current_index: {current_index}({current_timestamp})")
self.check_consistency(
current_index = current_index,
last_index = self.last_processed_index,
timeframe = timeframe,
tag = "AGENT"
)
if (self.tec.last_index != self.last_processed_index):
raise SystemExit(f"Invalid indexes Tec.last_index: {self.tec.last_index} last_index: {self.last_processed_index}")
def update_index(self, current_index):
self.last_processed_index = current_index
def check_consistency(self,
current_index,
last_index,
timeframe,
tag):
current_timestamp = int(current_index.timestamp())
last_timestamp = int(last_index.timestamp())
diff = (current_timestamp - last_timestamp)
if (diff != timeframe):
error_msg = f"{tag} Diff {diff} Timeframe: {timeframe} last_index: {last_index}({last_timestamp}) current_index: {current_index}({current_timestamp})"
print(error_msg)
#raise SystemExit(error_msg)
def process_data_input(self, price, amount, timestamp):
#print(f"{self.last_timestamp} -> {self.last_price} {amount}")
self.last_amount = amount
self.last_timestamp = timestamp
timestamp_pd = pd.to_datetime(timestamp, unit='s')
self.list.append([timestamp_pd, price])
df = pd.DataFrame(self.list, columns = [DATE, CLOSE])
df = df.set_index(pd.DatetimeIndex(df[DATE]))
time = df[CLOSE].resample(self.resample())
ohlc = time.ohlc()
self.ohlc = ohlc
current_index = ohlc.index[-1]
return current_index
def on_action(self, action):
if (self.save_history):
self.history.append(action)
def on_new_price(self, timestamp, price, amount):
self.on_closed_price(price)
x = self.tec.add_tacs_realtime(
list = [],
price = price,
amount = amount,
timestamp = timestamp
)
is_up = self.on_new_data(x)
action = AgentHistory(
timestamp = timestamp,
price = price,
x = x,
is_up = is_up
)
self.on_action(action)
def report(self):
for data in self.history:
val_end = int(data.timestamp)
print(f"{pd.to_datetime(val_end, unit='s')}({val_end}) - {data.price} - {data.is_up}")
def log(self, message):
if (self.verbose):
print(f'{dt.datetime.now()} DataAgent: {message}')
class AgentHistory():
def __init__(self,
timestamp,
price,
x,
is_up
):
self.timestamp = timestamp
self.price = price
self.x = x
self.is_up = is_up
def __str__(self) -> str:
return f"AgentHistory (timestamp={self.timestamp} price={self.price} is_up={self.is_up})"
avg_line_length: 32.27619 | max_line_length: 162 | alphanum_fraction: 0.55931

---

hexsha: 4514ac99e486a0098e743c4e9083d81dbcbc9f30 | size: 6,784 | ext: py | lang: Python
max_stars: path=nginx/datadog_checks/nginx/metrics.py, repo=tcpatterson/integrations-core, head=3692601de09f8db60f42612b0d623509415bbb53, licenses=["BSD-3-Clause"], count=null
max_issues: path=nginx/datadog_checks/nginx/metrics.py, repo=tcpatterson/integrations-core, head=3692601de09f8db60f42612b0d623509415bbb53, licenses=["BSD-3-Clause"], count=null
max_forks: path=nginx/datadog_checks/nginx/metrics.py, repo=tcpatterson/integrations-core, head=3692601de09f8db60f42612b0d623509415bbb53, licenses=["BSD-3-Clause"], count=null
content:
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# Map metrics from vhost_traffic_status to metrics from NGINX Plus
VTS_METRIC_MAP = {
'nginx.loadMsec': 'nginx.load_timestamp',
'nginx.nowMsec': 'nginx.timestamp',
'nginx.connections.accepted': 'nginx.connections.accepted',
'nginx.connections.active': 'nginx.connections.active',
'nginx.connections.reading': 'nginx.net.reading',
'nginx.connections.writing': 'nginx.net.writing',
'nginx.connections.waiting': 'nginx.net.waiting',
'nginx.connections.requests': 'nginx.requests.total',
'nginx.server_zone.requestCounter': 'nginx.server_zone.requests',
'nginx.server_zone.responses.1xx': 'nginx.server_zone.responses.1xx',
'nginx.server_zone.responses.2xx': 'nginx.server_zone.responses.2xx',
'nginx.server_zone.responses.3xx': 'nginx.server_zone.responses.3xx',
'nginx.server_zone.responses.4xx': 'nginx.server_zone.responses.4xx',
'nginx.server_zone.responses.5xx': 'nginx.server_zone.responses.5xx',
'nginx.server_zone.inBytes': 'nginx.server_zone.received',
'nginx.server_zone.outBytes': 'nginx.server_zone.sent',
'nginx.upstream.requestCounter': 'nginx.upstream.peers.requests',
'nginx.upstream.inBytes': 'nginx.upstream.peers.received',
'nginx.upstream.outBytes': 'nginx.upstream.peers.sent',
'nginx.upstream.responses.1xx': 'nginx.upstream.peers.responses.1xx',
'nginx.upstream.responses.2xx': 'nginx.upstream.peers.responses.2xx',
'nginx.upstream.responses.3xx': 'nginx.upstream.peers.responses.3xx',
'nginx.upstream.responses.4xx': 'nginx.upstream.peers.responses.4xx',
'nginx.upstream.responses.5xx': 'nginx.upstream.peers.responses.5xx',
'nginx.upstream.weight': 'nginx.upstream.peers.weight',
'nginx.upstream.backup': 'nginx.upstream.peers.backup',
'nginx.upstream.down': 'nginx.upstream.peers.health_checks.last_passed',
}
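To make the mapping concrete, a minimal sketch of how a collector might rename vhost_traffic_status metrics through this table (the sample metric names and values are illustrative only, not part of the check):

```python
# Hypothetical flattened VTS metrics, renamed via VTS_METRIC_MAP.
vts_sample = {'nginx.connections.active': 3, 'nginx.server_zone.inBytes': 1024}
renamed = {VTS_METRIC_MAP.get(name, name): value for name, value in vts_sample.items()}
# -> {'nginx.connections.active': 3, 'nginx.server_zone.received': 1024}
```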
# NGINX Plus metrics that are sent as both a count and gauge for backwards compatibility
# The count metrics will have _count appended to their names
METRICS_SEND_AS_COUNT = [
'nginx.upstream.peers.responses.1xx',
'nginx.upstream.peers.responses.2xx',
'nginx.upstream.peers.responses.3xx',
'nginx.upstream.peers.responses.4xx',
'nginx.upstream.peers.responses.5xx',
'nginx.upstream.peers.received',
'nginx.upstream.peers.sent',
'nginx.server_zone.responses.1xx',
'nginx.server_zone.responses.2xx',
'nginx.server_zone.responses.3xx',
'nginx.server_zone.responses.4xx',
'nginx.server_zone.responses.5xx',
'nginx.server_zone.received',
'nginx.server_zone.sent',
'nginx.cache.bypass.bytes',
'nginx.cache.bypass.bytes_written',
'nginx.cache.bypass.responses',
'nginx.cache.bypass.responses_written',
'nginx.cache.expired.bytes',
'nginx.cache.expired.bytes_written',
'nginx.cache.expired.responses',
'nginx.cache.expired.responses_written',
'nginx.cache.hit.bytes',
'nginx.cache.hit.responses',
'nginx.cache.miss.bytes',
'nginx.cache.miss.bytes_written',
'nginx.cache.miss.responses',
'nginx.cache.miss.responses_written',
'nginx.cache.revalidated.bytes',
'nginx.cache.revalidated.responses',
'nginx.cache.stale.bytes',
'nginx.cache.stale.responses',
'nginx.cache.updating.bytes',
'nginx.cache.updating.responses',
'nginx.connections.accepted',
'nginx.connections.dropped',
'nginx.generation',
'nginx.processes.respawned',
'nginx.requests.total',
'nginx.server_zone.discarded',
'nginx.server_zone.requests',
'nginx.server_zone.responses.total',
'nginx.slab.slots.fails',
'nginx.slab.slots.reqs',
'nginx.ssl.handshakes',
'nginx.ssl.handshakes_failed',
'nginx.ssl.session_reuses',
'nginx.stream.server_zone.connections',
'nginx.stream.server_zone.discarded',
'nginx.stream.server_zone.received',
'nginx.stream.server_zone.sent',
'nginx.stream.server_zone.sessions.2xx',
'nginx.stream.server_zone.sessions.4xx',
'nginx.stream.server_zone.sessions.5xx',
'nginx.stream.server_zone.sessions.total',
'nginx.stream.upstream.peers.connections',
'nginx.stream.upstream.peers.fails',
'nginx.stream.upstream.peers.downtime',
'nginx.stream.upstream.peers.health_checks.checks',
'nginx.stream.upstream.peers.health_checks.fails',
'nginx.stream.upstream.peers.health_checks.unhealthy',
'nginx.stream.upstream.peers.received',
'nginx.stream.upstream.peers.sent',
'nginx.stream.upstream.peers.unavail',
'nginx.stream.zone_sync.zone.records_total',
'nginx.upstream.peers.downtime',
'nginx.upstream.peers.fails',
'nginx.upstream.peers.health_checks.checks',
'nginx.upstream.peers.health_checks.fails',
'nginx.upstream.peers.health_checks.unhealthy',
'nginx.upstream.peers.requests',
'nginx.upstream.peers.responses.total',
'nginx.upstream.peers.unavail',
]
# NGINX Plus metrics that are sent as both a histogram and gauge for backwards compatibility
# The histogram metrics will have _histogram appended to their names
METRICS_SEND_AS_HISTOGRAM = {'nginx.upstream.peers.response_time', 'nginx.stream.upstream.peers.response_time'}
# NGINX Plus metrics that are sent as only a count.
# These metrics will not have _count appended to their names
COUNT_METRICS = [
'nginx.location_zone.responses.total',
'nginx.location_zone.discarded',
'nginx.location_zone.received',
'nginx.location_zone.requests',
'nginx.location_zone.responses.1xx',
'nginx.location_zone.responses.2xx',
'nginx.location_zone.responses.3xx',
'nginx.location_zone.responses.4xx',
'nginx.location_zone.responses.5xx',
'nginx.location_zone.responses.code',
'nginx.location_zone.responses.total',
'nginx.location_zone.sent',
'nginx.resolver.requests.addr',
'nginx.resolver.requests.name',
'nginx.resolver.requests.srv',
'nginx.resolver.responses.formerr',
'nginx.resolver.responses.noerror',
'nginx.resolver.responses.notimp',
'nginx.resolver.responses.nxdomain',
'nginx.resolver.responses.refused',
'nginx.resolver.responses.servfail',
'nginx.resolver.responses.timedout',
'nginx.resolver.responses.unknown',
'nginx.limit_req.delayed_dry_run',
'nginx.limit_req.delayed',
'nginx.limit_req.passed',
'nginx.limit_req.rejected_dry_run',
'nginx.limit_req.rejected',
'nginx.limit_conn.passed',
'nginx.limit_conn.rejected',
'nginx.limit_conn.rejected_dry_run',
'nginx.stream.limit_conn.passed',
'nginx.stream.limit_conn.rejected',
'nginx.stream.limit_conn.rejected_dry_run',
'nginx.server_zone.responses.code',
'nginx.upstream.peers.responses.code',
]
avg_line_length: 42.666667 | max_line_length: 111 | alphanum_fraction: 0.730248

---

hexsha: d2eea21c253f0e3be11e662fa073f791c18e92fe | size: 7,850 | ext: py | lang: Python
max_stars: path=uci_data.py, repo=WayneDW/Bayesian-Sparse-Deep-Learning, head=e95446911bbceaa9a1b859af186912e39a0240e0, licenses=["MIT"], count=20, events 2019-09-09T18:20:33.000Z to 2022-03-16T01:51:43.000Z
max_issues: path=uci_data.py, repo=JiexiYan/Bayesian-Sparse-Deep-Learning, head=94036065c4a249e8c87ebef84e686f885528c23f, licenses=["MIT"], count=1, events 2021-05-01T08:23:57.000Z to 2021-05-01T13:59:51.000Z
max_forks: path=uci_data.py, repo=JiexiYan/Bayesian-Sparse-Deep-Learning, head=94036065c4a249e8c87ebef84e686f885528c23f, licenses=["MIT"], count=6, events 2020-05-18T02:40:44.000Z to 2022-01-01T13:58:39.000Z
content:
"""
IO module for UCI datasets for regression
"""
from sklearn.model_selection import train_test_split
import autograd.numpy as np
import pandas as pd
import os
def load_dataset(name, split_seed=0, test_fraction=.1):
# load full dataset
load_funs = { "wine" : _load_wine,
"boston" : _load_boston,
"concrete" : _load_concrete,
"power-plant" : _load_powerplant,
"yacht" : _load_yacht,
"energy-efficiency" : _load_energy_efficiency }
X, y = load_funs[name]()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_fraction, random_state=split_seed)
# We create the train and test sets with 90% and 10% of the data
'''
rs = np.random.RandomState(split_seed)
permutation = rs.permutation(X.shape[0])
size_train = int(np.round(X.shape[ 0 ] * (1 - test_fraction)))
index_train = permutation[ 0 : size_train ]
index_test = permutation[ size_train : ]
X_train = X[index_train, : ]
y_train = y[index_train]
X_test = X[index_test, : ]
y_test = y[index_test]
'''
# Normalize features based on training set
means = np.mean(X_train, axis=0)
stds = np.std(X_train, axis=0)
X_train = (X_train - means) / stds
X_test = (X_test - means) / stds
'''
# Normalize labels
means = np.mean(y_train)
stds = np.std(y_train)
y_train = (y_train - means) / stds
y_test = (y_test - means) / stds
'''
return X_train, y_train, X_test, y_test
#####################################
# individual data files #
#####################################
vb_dir = os.path.dirname(__file__)
data_dir = os.path.join(vb_dir, "data/uci")
def _load_boston():
"""
Attribute Information:
1. CRIM: per capita crime rate by town
2. ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
3. INDUS: proportion of non-retail business acres per town
4. CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
5. NOX: nitric oxides concentration (parts per 10 million)
6. RM: average number of rooms per dwelling
7. AGE: proportion of owner-occupied units built prior to 1940
8. DIS: weighted distances to five Boston employment centres
9. RAD: index of accessibility to radial highways
10. TAX: full-value property-tax rate per $10,000
11. PTRATIO: pupil-teacher ratio by town
12. B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
13. LSTAT: % lower status of the population
14. MEDV: Median value of owner-occupied homes in $1000's
"""
data = np.loadtxt(os.path.join(data_dir,
'boston-housing/boston_housing.txt'))
X = data[:, :-1]
y = data[:, -1]
return X, y
def _load_powerplant():
"""
attribute information:
features consist of hourly average ambient variables
- temperature (t) in the range 1.81 c and 37.11 c,
- ambient pressure (ap) in the range 992.89-1033.30 millibar,
- relative humidity (rh) in the range 25.56% to 100.16%
- exhaust vacuum (v) in the range 25.36-81.56 cm hg
- net hourly electrical energy output (ep) 420.26-495.76 mw
the averages are taken from various sensors located around the
plant that record the ambient variables every second.
the variables are given without normalization.
"""
data_file = os.path.join(data_dir, 'power-plant/Folds5x2_pp.xlsx')
data = pd.read_excel(data_file)
x = data.values[:, :-1]
y = data.values[:, -1]
return x, y
def _load_concrete():
"""
Summary Statistics:
Number of instances (observations): 1030
Number of Attributes: 9
Attribute breakdown: 8 quantitative input variables, and 1 quantitative output variable
Missing Attribute Values: None
Name -- Data Type -- Measurement -- Description
Cement (component 1) -- quantitative -- kg in a m3 mixture -- Input Variable
Blast Furnace Slag (component 2) -- quantitative -- kg in a m3 mixture -- Input Variable
Fly Ash (component 3) -- quantitative -- kg in a m3 mixture -- Input Variable
Water (component 4) -- quantitative -- kg in a m3 mixture -- Input Variable
Superplasticizer (component 5) -- quantitative -- kg in a m3 mixture -- Input Variable
Coarse Aggregate (component 6) -- quantitative -- kg in a m3 mixture -- Input Variable
Fine Aggregate (component 7) -- quantitative -- kg in a m3 mixture -- Input Variable
Age -- quantitative -- Day (1~365) -- Input Variable
Concrete compressive strength -- quantitative -- MPa -- Output Variable
---------------------------------
"""
data_file = os.path.join(data_dir, 'concrete/Concrete_Data.xls')
data = pd.read_excel(data_file)
X = data.values[:, :-1]
y = data.values[:, -1]
return X, y
def _load_yacht():
"""
Attribute Information:
Variations concern hull geometry coefficients and the Froude number:
1. Longitudinal position of the center of buoyancy, adimensional.
2. Prismatic coefficient, adimensional.
3. Length-displacement ratio, adimensional.
4. Beam-draught ratio, adimensional.
5. Length-beam ratio, adimensional.
6. Froude number, adimensional.
The measured variable is the residuary resistance per unit weight of displacement:
7. Residuary resistance per unit weight of displacement, adimensional.
"""
data_file = os.path.join(data_dir, 'yacht/yacht_hydrodynamics.data')
data = pd.read_csv(data_file, delim_whitespace=True)
X = data.values[:, :-1]
y = data.values[:, -1]
return X, y
def _load_energy_efficiency():
"""
Data Set Information:
We perform energy analysis using 12 different building shapes simulated in
Ecotect. The buildings differ with respect to the glazing area, the
glazing area distribution, and the orientation, amongst other parameters.
We simulate various settings as functions of the afore-mentioned
characteristics to obtain 768 building shapes. The dataset comprises
768 samples and 8 features, aiming to predict two real valued responses.
It can also be used as a multi-class classification problem if the
response is rounded to the nearest integer.
Attribute Information:
The dataset contains eight attributes (or features, denoted by X1...X8) and two responses (or outcomes, denoted by y1 and y2). The aim is to use the eight features to predict each of the two responses.
Specifically:
X1 Relative Compactness
X2 Surface Area
X3 Wall Area
X4 Roof Area
X5 Overall Height
X6 Orientation
X7 Glazing Area
X8 Glazing Area Distribution
y1 Heating Load
y2 Cooling Load
"""
data_file = os.path.join(data_dir, 'energy-efficiency/ENB2012_data.xlsx')
data = pd.read_excel(data_file)
X = data.values[:, :-2]
y_heating = data.values[:, -2]
y_cooling = data.values[:, -1]
return X, y_cooling
def _load_wine():
"""
Attribute Information:
For more information, read [Cortez et al., 2009].
Input variables (based on physicochemical tests):
1 - fixed acidity
2 - volatile acidity
3 - citric acid
4 - residual sugar
5 - chlorides
6 - free sulfur dioxide
7 - total sulfur dioxide
8 - density
9 - pH
10 - sulphates
11 - alcohol
Output variable (based on sensory data):
12 - quality (score between 0 and 10)
"""
data_file = os.path.join(data_dir, 'wine-quality/winequality-red.csv')
data = pd.read_csv(data_file, sep=';')
X = data.values[:, :-1]
y = data.values[:, -1]
return X, y
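A minimal usage sketch for the loader above; it assumes the expected data/uci files are present on disk and uses the "boston" entry from load_funs:

```python
# Features come back normalized with training-set statistics; labels are left unscaled.
X_train, y_train, X_test, y_test = load_dataset("boston", split_seed=0, test_fraction=0.1)
print(X_train.shape, X_test.shape)
```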
avg_line_length: 36.175115 | max_line_length: 206 | alphanum_fraction: 0.646369

---

hexsha: 18b86d6e6edacf6fd940c185f1b2713de324b6cb | size: 16,338 | ext: py | lang: Python
max_stars: path=optimization/NCI-ALMANAC/ImageStacking_opt.py, repo=omidbazgirTTU/IntegratedREFINED, head=a25e8be9bc3aea98110974d0d703613092831f66, licenses=["MIT"], count=3, events 2021-08-05T22:34:23.000Z to 2021-09-09T04:32:31.000Z
max_issues: path=optimization/NCI-ALMANAC/ImageStacking_opt.py, repo=Mostafa-MR/IntegratedREFINED, head=a25e8be9bc3aea98110974d0d703613092831f66, licenses=["MIT"], count=null
max_forks: path=optimization/NCI-ALMANAC/ImageStacking_opt.py, repo=Mostafa-MR/IntegratedREFINED, head=a25e8be9bc3aea98110974d0d703613092831f66, licenses=["MIT"], count=2, events 2021-09-09T04:32:34.000Z to 2021-12-29T14:04:42.000Z
content:
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 18:41:25 2020
@author: obazgir
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 10:42:32 2020
@author: obazgir
"""
import numpy as np
import pandas as pd
import os
import sklearn
import glob
import Toolbox
from Toolbox import NRMSE, Random_Image_Gen, two_d_norm, two_d_eq, Assign_features_to_pixels, MDS_Im_Gen, Bias_Calc, REFINED_Im_Gen
from scipy.stats import pearsonr
from sklearn.metrics import mean_absolute_error
from scipy.stats import sem, t
from scipy import mean
from sklearn.model_selection import train_test_split
def TypeConverter(NSC1):
yy = []
for num in NSC1:
yy.append(int(num))
yy = np.array(yy[0:])
return yy
def SetFinder(NSC1_Train,Drug1):
X_append = []
for nsc1 in NSC1_Train:
X_Train_D1 = Drug1[Drug1["NSC"] == nsc1]
X_append.append(X_Train_D1)
NonExisting = np.nonzero([xx.shape != (1, 674) for xx in X_append])[0]
X_Train_D1_PD = pd.concat(X_append)
return X_Train_D1_PD,NonExisting
NCI_ALM_PD = pd.read_csv("ComboDrugGrowth_Nov2017.csv")
Cells = NCI_ALM_PD["CELLNAME"].unique().tolist()
i = 0
Cell_Inf = NCI_ALM_PD[NCI_ALM_PD["CELLNAME"] == Cells[i]]
UN_NSC1 = Cell_Inf["NSC1"].unique(); UN_NSC1 = UN_NSC1[~np.isnan(UN_NSC1)]; UN_NSC1.dtype = int; UN_NSC1 = UN_NSC1[UN_NSC1 !=0]
UN_NSC2 = Cell_Inf["NSC2"].unique(); UN_NSC2 = UN_NSC2[~np.isnan(UN_NSC2)]; UN_NSC2 = np.array(UN_NSC2,np.int32); UN_NSC2 = UN_NSC2[UN_NSC2 !=0]
append_pd = []
for nsc1 in UN_NSC1:
for nsc2 in UN_NSC2:
Temp = Cell_Inf[Cell_Inf["NSC1"] == nsc1]
Temp2 =Temp[Temp["NSC2"] == nsc2]
PERCENTGROWTH = np.mean(Temp2["PERCENTGROWTH"])
if np.isnan(PERCENTGROWTH):
dumb = 0
else:
PERCENTGROWTHNOTZ = np.mean(Temp2["PERCENTGROWTHNOTZ"])
EXPECTEDGROWTH = np.mean(Temp2["EXPECTEDGROWTH"])
PANEL = str(Temp2["PANEL"].unique().tolist()).strip("[]")
Data = np.array([nsc1,nsc2,PERCENTGROWTH,PERCENTGROWTHNOTZ,EXPECTEDGROWTH,PANEL]).reshape(1,-1)
Target_temp = pd.DataFrame(data = Data, columns = ["NSC1","NSC2","PERCENTGROWTH","PERCENTGROWTHNOTZ","EXPECTEDGROWTH","PANEL"])
append_pd.append(Target_temp)
Target_PD = pd.concat(append_pd)
Target_PD = Target_PD.reset_index()
Target_PD = Target_PD.drop(['index'],axis = 1)
Target_PD = Target_PD.drop(["PERCENTGROWTHNOTZ"], axis = 1)
#%%
idx = Target_PD.isnull()
Feat_DF = pd.read_csv("normalized_padel_feats_NCI60_672.csv") # Load the drug descriptors of the drugs applied on the selected cell line
Drug1 = Feat_DF[Feat_DF.NSC.isin(Target_PD["NSC1"])]
Drug2 = Feat_DF[Feat_DF.NSC.isin(Target_PD["NSC2"])]
y = Target_PD["PERCENTGROWTH"].values.tolist()
yy = []
for num in y:
yy.append(float(num))
yyy = np.array(yy[0:])
Y = (yyy - yyy.min())/(yyy.max() - yyy.min())
# split training, validation and test sets based on each sample NSC ID
seed = 7
Train_Ind, Rest_Ind, Y_Train, Y_Rest = train_test_split(Target_PD.index.values, Target_PD.index.values, test_size= 0.2, random_state=seed)
Validation_Ind, Test_Ind, Y_Validation, Y_Test = train_test_split(Rest_Ind, Y_Rest, test_size= 0.5, random_state=seed)
# Sort the NSCs
Train_Ind = np.sort(Train_Ind).reshape(-1,1)
Validation_Ind = np.sort(Validation_Ind).reshape(-1,1)
Test_Ind = np.sort(Test_Ind).reshape(-1,1)
# Specifying the traget (observation) values
Y_Train = Y[Train_Ind]; Y_Validation = Y[Validation_Ind]; Y_Test = Y[Test_Ind]
# NSC Train
NSC1 = Target_PD["NSC1"].values.tolist()
NSC1 = TypeConverter(NSC1)
NSC1_Train = NSC1[Train_Ind.tolist()]; NSC1_Train = NSC1_Train.reshape(-1)
NSC2 = Target_PD["NSC2"].values.tolist()
NSC2 = TypeConverter(NSC2)
NSC2_Train = NSC2[Train_Ind.tolist()]; NSC2_Train = NSC2_Train.reshape(-1)
# NSC Validation
NSC1 = Target_PD["NSC1"].values.tolist()
NSC1 = TypeConverter(NSC1)
NSC1_Val = NSC1[Validation_Ind.tolist()]; NSC1_Val = NSC1_Val.reshape(-1)
NSC2 = Target_PD["NSC2"].values.tolist()
NSC2 = TypeConverter(NSC2)
NSC2_Val = NSC2[Validation_Ind.tolist()]; NSC2_Val = NSC2_Val.reshape(-1)
# NSC Test
NSC1 = Target_PD["NSC1"].values.tolist()
NSC1 = TypeConverter(NSC1)
NSC1_Test = NSC1[Test_Ind.tolist()]; NSC1_Test = NSC1_Test.reshape(-1)
NSC2 = Target_PD["NSC2"].values.tolist()
NSC2 = TypeConverter(NSC2)
NSC2_Test = NSC2[Test_Ind.tolist()]; NSC2_Test = NSC2_Test.reshape(-1)
X_Train_D1_PD, NonExTrain1 = SetFinder(NSC1_Train,Drug1); X_Train_D1 = X_Train_D1_PD.values[:,2:]
X_Train_D2_PD, NonExTrain2 = SetFinder(NSC2_Train,Drug2); X_Train_D2 = X_Train_D2_PD.values[:,2:]
NonExTrn = np.union1d(NonExTrain1,NonExTrain2)
Y_Train = np.delete(Y_Train,NonExTrn,axis = 0)
NSC1_Train = np.delete(NSC1_Train, NonExTrn,axis = 0)
NSC2_Train = np.delete(NSC2_Train, NonExTrn,axis = 0)
X_Train_D1_PD, NonExTrain1 = SetFinder(NSC1_Train,Drug1); X_Train_D1 = X_Train_D1_PD.values[:,2:]
X_Train_D2_PD, NonExTrain2 = SetFinder(NSC2_Train,Drug2); X_Train_D2 = X_Train_D2_PD.values[:,2:]
X_Val_D1_PD, NonExVal1 = SetFinder(NSC1_Val,Drug1); X_Val_D1 = X_Val_D1_PD.values[:,2:]
X_Val_D2_PD, NonExVal2 = SetFinder(NSC2_Val,Drug2); X_Val_D2 = X_Val_D2_PD.values[:,2:]
NonExVal = np.union1d(NonExVal1,NonExVal2)
Y_Validation = np.delete(Y_Validation, NonExVal,axis = 0)
NSC1_Val = np.delete(NSC1_Val, NonExVal,axis = 0)
NSC2_Val = np.delete(NSC2_Val, NonExVal,axis = 0)
X_Val_D1_PD, NonExVal1 = SetFinder(NSC1_Val,Drug1); X_Val_D1 = X_Val_D1_PD.values[:,2:]
X_Val_D2_PD, NonExVal2 = SetFinder(NSC2_Val,Drug2); X_Val_D2 = X_Val_D2_PD.values[:,2:]
X_Test_D1_PD, NonExTst1 = SetFinder(NSC2_Test,Drug1); X_Test_D1 = X_Test_D1_PD.values[:,2:]
X_Test_D2_PD, NonExTst2 = SetFinder(NSC2_Test,Drug2); X_Test_D2 = X_Test_D2_PD.values[:,2:]
NonExTst = np.union1d(NonExTst1,NonExTst2)
Y_Test = np.delete(Y_Test,NonExTst, axis = 0)
NSC1_Test = np.delete(NSC1_Test, NonExTst,axis = 0)
NSC2_Test = np.delete(NSC2_Test, NonExTst,axis = 0)
X_Test_D1_PD, NonExTst1 = SetFinder(NSC1_Test,Drug1); X_Test_D1 = X_Test_D1_PD.values[:,2:]
X_Test_D2_PD, NonExTst2 = SetFinder(NSC2_Test,Drug2); X_Test_D2 = X_Test_D2_PD.values[:,2:]
NonExTst = np.union1d(NonExTst1,NonExTst2)
Y_Test = np.delete(Y_Test,NonExTst, axis = 0)
NSC1_Test = np.delete(NSC1_Test, NonExTst,axis = 0)
NSC2_Test = np.delete(NSC2_Test, NonExTst,axis = 0)
X_Test_D1_PD, NonExTst1 = SetFinder(NSC1_Test,Drug1); X_Test_D1 = X_Test_D1_PD.values[:,2:]
X_Test_D2_PD, NonExTst2 = SetFinder(NSC2_Test,Drug2); X_Test_D2 = X_Test_D2_PD.values[:,2:]
#%% REFINED coordinates
# LE
import math
import pickle
import math
with open('REFINED_Coordinates_LE.pickle','rb') as file:
gene_names_LE,coords_LE,map_in_int_LE = pickle.load(file)
# LLE
with open('REFINED_Coordinates_LLE.pickle','rb') as file:
gene_names_LLE,coords_LLE,map_in_int_LLE = pickle.load(file)
# ISOMAP
with open('REFINED_Coordinates_Isomap.pickle','rb') as file:
gene_names_ISO,coords_ISO,map_in_int_ISO = pickle.load(file)
# MDS
with open('REFINED_Coordinates_MDS.pickle','rb') as file:
gene_names_MDS,coords_MDS,map_in_int_MDS = pickle.load(file)
#%% importing tensorflow
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import EarlyStopping
from tqdm import tqdm
Results_Data = np.zeros((5,4))
nn = 26
cnt = 0 # Image size = sqrt(#features (drug descriptors))
X_Train_D1_REFINED = np.zeros((X_Train_D1.shape[0],nn**2,4))
X_Val_D1_REFINED = np.zeros((X_Val_D1.shape[0],nn**2,4))
X_Test_D1_REFINED = np.zeros((X_Test_D1.shape[0],nn**2,4))
X_Train_D1_REFINED[:,:,0] = REFINED_Im_Gen(X_Train_D1,nn, map_in_int_ISO, gene_names_ISO,coords_ISO)
X_Val_D1_REFINED[:,:,0] = REFINED_Im_Gen(X_Val_D1,nn, map_in_int_ISO, gene_names_ISO,coords_ISO)
X_Test_D1_REFINED[:,:,0] = REFINED_Im_Gen(X_Test_D1,nn, map_in_int_ISO, gene_names_ISO,coords_ISO)
X_Train_D1_REFINED[:,:,1] = REFINED_Im_Gen(X_Train_D1,nn, map_in_int_MDS, gene_names_MDS,coords_MDS)
X_Val_D1_REFINED[:,:,1] = REFINED_Im_Gen(X_Val_D1,nn, map_in_int_MDS, gene_names_MDS,coords_MDS)
X_Test_D1_REFINED[:,:,1] = REFINED_Im_Gen(X_Test_D1,nn, map_in_int_MDS, gene_names_MDS,coords_MDS)
X_Train_D1_REFINED[:,:,2] = REFINED_Im_Gen(X_Train_D1,nn, map_in_int_LE, gene_names_LE,coords_LE)
X_Val_D1_REFINED[:,:,2] = REFINED_Im_Gen(X_Val_D1,nn, map_in_int_LE, gene_names_LE,coords_LE)
X_Test_D1_REFINED[:,:,2] = REFINED_Im_Gen(X_Test_D1,nn, map_in_int_LE, gene_names_LE,coords_LE)
X_Train_D1_REFINED[:,:,3] = REFINED_Im_Gen(X_Train_D1,nn, map_in_int_LLE, gene_names_LLE,coords_LLE)
X_Val_D1_REFINED[:,:,3] = REFINED_Im_Gen(X_Val_D1,nn, map_in_int_LLE, gene_names_LLE,coords_LLE)
X_Test_D1_REFINED[:,:,3] = REFINED_Im_Gen(X_Test_D1,nn, map_in_int_LLE, gene_names_LLE,coords_LLE)
X_Train_D2_REFINED = np.zeros((X_Train_D2.shape[0],nn**2,4))
X_Val_D2_REFINED = np.zeros((X_Val_D2.shape[0],nn**2,4))
X_Test_D2_REFINED = np.zeros((X_Test_D2.shape[0],nn**2,4))
X_Train_D2_REFINED[:,:,0] = REFINED_Im_Gen(X_Train_D2,nn, map_in_int_ISO, gene_names_ISO,coords_ISO)
X_Val_D2_REFINED[:,:,0] = REFINED_Im_Gen(X_Val_D2,nn, map_in_int_ISO, gene_names_ISO,coords_ISO)
X_Test_D2_REFINED[:,:,0] = REFINED_Im_Gen(X_Test_D2,nn, map_in_int_ISO, gene_names_ISO,coords_ISO)
X_Train_D2_REFINED[:,:,1] = REFINED_Im_Gen(X_Train_D2,nn, map_in_int_MDS, gene_names_MDS,coords_MDS)
X_Val_D2_REFINED[:,:,1] = REFINED_Im_Gen(X_Val_D2,nn, map_in_int_MDS, gene_names_MDS,coords_MDS)
X_Test_D2_REFINED[:,:,1] = REFINED_Im_Gen(X_Test_D2,nn, map_in_int_MDS, gene_names_MDS,coords_MDS)
X_Train_D2_REFINED[:,:,2] = REFINED_Im_Gen(X_Train_D2,nn, map_in_int_LE, gene_names_LE,coords_LE)
X_Val_D2_REFINED[:,:,2] = REFINED_Im_Gen(X_Val_D2,nn, map_in_int_LE, gene_names_LE,coords_LE)
X_Test_D2_REFINED[:,:,2] = REFINED_Im_Gen(X_Test_D2,nn, map_in_int_LE, gene_names_LE,coords_LE)
X_Train_D2_REFINED[:,:,3] = REFINED_Im_Gen(X_Train_D2,nn, map_in_int_LLE, gene_names_LLE,coords_LLE)
X_Val_D2_REFINED[:,:,3] = REFINED_Im_Gen(X_Val_D2,nn, map_in_int_LLE, gene_names_LLE,coords_LLE)
X_Test_D2_REFINED[:,:,3] = REFINED_Im_Gen(X_Test_D2,nn, map_in_int_LLE, gene_names_LLE,coords_LLE)
sz = X_Train_D1_REFINED.shape
Width = int(math.sqrt(sz[1]))
Height = int(math.sqrt(sz[1]))
CNN_Train_D1 = X_Train_D1_REFINED.reshape(-1,Width,Height,4,1)
CNN_Val_D1 = X_Val_D1_REFINED.reshape(-1,Width,Height,4,1)
CNN_Test_D1 = X_Test_D1_REFINED.reshape(-1,Width,Height,4,1)
CNN_Train_D2 = X_Train_D2_REFINED.reshape(-1,Width,Height,4,1)
CNN_Val_D2 = X_Val_D2_REFINED.reshape(-1,Width,Height,4,1)
CNN_Test_D2 = X_Test_D2_REFINED.reshape(-1,Width,Height,4,1)
def CNN_model(Width,Height,params):
# ARM 1
input1 = layers.Input(shape = (Width, Height,4,1))
x1 = layers.Conv3D(int(params['Kernels1']), kernel_size = (int(params['kernel_size1']),int(params['kernel_size1']),4),padding='valid',strides=(int(params['strides1']),int(params['strides1']),1),dilation_rate=1)(input1)
x1 = layers.BatchNormalization()(x1)
x1 = layers.Activation('relu')(x1)
x1 = layers.Conv3D(int(params['Kernels2']), kernel_size = (int(params['kernel_size2']),int(params['kernel_size2']),1),padding='valid',strides=(int(params['strides2']),int(params['strides2']),1),dilation_rate=1)(x1)
x1 = layers.BatchNormalization()(x1)
x1 = layers.Activation('relu')(x1)
x1 = layers.Conv2D(int(params['Kernels3']), kernel_size = (1,1),padding='valid',strides=1,dilation_rate=1)(x1)
Out1 = layers.Flatten()(x1)
input2 = layers.Input(shape = (Width, Height,4,1))
y1 = layers.Conv3D(int(params['Kernels1']), kernel_size = (int(params['kernel_size1']),int(params['kernel_size1']),4),padding='valid',strides=(int(params['strides1']),int(params['strides1']),1),dilation_rate=1)(input2)
y1 = layers.BatchNormalization()(y1)
y1 = layers.Activation('relu')(y1)
y1 = layers.Conv3D(int(params['Kernels2']), kernel_size = (int(params['kernel_size2']),int(params['kernel_size2']),1),padding='valid',strides=(int(params['strides2']),int(params['strides2']),1),dilation_rate=1)(y1)
y1 = layers.BatchNormalization()(y1)
y1 = layers.Activation('relu')(y1)
y1 = layers.Conv2D(int(params['Kernels3']), kernel_size = (1,1),padding='valid',strides=1,dilation_rate=1)(y1)
Out2 = layers.Flatten()(y1)
x = layers.concatenate([Out1, Out2])
x = layers.Dense(units = int(params['units1']))(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(1- 0.7)(x)
# x = layers.Dense(units = int(params['units2']))(x)
# x = layers.BatchNormalization()(x)
# x = layers.Activation('relu')(x)
# x = layers.Dropout(1- 0.7)(x)
Out = layers.Dense(1)(x)
model = tf.keras.Model(inputs = [input1, input2], outputs = [Out])
initial_learning_rate = params['lr']
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=int(params['decay_step']),
decay_rate=params['decay_rate'],
staircase=True)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
loss='mse',
metrics=['mse'])
return model
#%% Evaluate model function
def evaluate_model(Model, CNN_Train_D1,CNN_Train_D2, Y_Train, CNN_Val_D1,CNN_Val_D2, Y_Validation,CNN_Test_D1, CNN_Test_D2,Y_Test ):
ES = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=60)
History = Model.fit([CNN_Train_D1,CNN_Train_D2], Y_Train, batch_size= 128, epochs = 250, verbose=0, validation_data=([CNN_Val_D1,CNN_Val_D2], Y_Validation), callbacks = [ES])
y_pred = Model.predict([CNN_Test_D1, CNN_Test_D2])
CNN_NRMSE, CNN_R2 = NRMSE(Y_Test, y_pred)
print('NRMSE > %.3f' % (CNN_NRMSE))
return CNN_NRMSE, History
#%% Hyper parameter tuning
# Defining the hyper parameter space
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from hyperopt.pyll import scope
from hyperopt.pyll.stochastic import sample
## Hyper parameter grid
param_hyperopt = {
'lr': hp.loguniform('lr', np.log(0.00001), np.log(0.001)),
'decay_step' : hp.uniform('decay_step', 5000, 500000),
'decay_rate': hp.uniform('decay_rate', 0.4, 0.95),
'Kernels1': hp.uniform('Kernels1', 32, 128),
'Kernels2': hp.uniform('Kernels2', 32, 256),
'Kernels3': hp.uniform('Kernels3', 1, 128),
'kernel_size1': hp.quniform('kernel_size1', 3, 7, 5),
'kernel_size2': hp.quniform('kernel_size2', 3, 7, 5),
#'kernel_size3': hp.quniform('kernel_size3', 3, 7, 5),
'strides1' : hp.quniform('strides1', 1,2,2),
'strides2' : hp.quniform('strides2', 1,2,2),
#'strides3' : hp.quniform('strides3', 1,2,2),
#'units1': hp.quniform('units1', 300, 500, 10),
'units1': hp.quniform('units1', 80, 500, 30),
'units2': hp.quniform('units2', 10, 100, 30),
}
param_space = param_hyperopt
#%% RUN
#y_train = y_train.astype(int)
#y_valid = y_valid.astype(int)
#y_test = y_test.astype(int)
import time
start = time.time()
num_eval = 200
def objective_function(params):
Width = 26
Height = 26
clf = CNN_model(Width,Height,params)
NRMSE_try, history = evaluate_model(clf, CNN_Train_D1,CNN_Train_D2, Y_Train, CNN_Val_D1,CNN_Val_D2, Y_Validation,CNN_Test_D1, CNN_Test_D2,Y_Test )
return {'loss': NRMSE_try, 'status': STATUS_OK}
trials = Trials()
best_param = fmin(objective_function,
param_space,
algo=tpe.suggest,
max_evals=num_eval,
trials=trials,
rstate= np.random.RandomState(1))
loss = [x['result']['loss'] for x in trials.trials]
best_param_values = [x for x in best_param.values()]
# Retrieve Hyperopt scores
hyperopt_scores = [trial['result']['loss'] for trial in trials]
hyperopt_scores = np.maximum.accumulate(hyperopt_scores)
print("Hyper_opt scores:")
print(hyperopt_scores)
#%% Saving the parameters
import pickle
pickle.dump(trials,open("Trials.p","wb"))
trails = pickle.load(open("Trials.p","rb"))
print("Best parameters: ", best_param)
avg_line_length: 42.994737 | max_line_length: 223 | alphanum_fraction: 0.706818

---

hexsha: a1c62de4e0fcc9cc69bc6b21fbea91bb8513da0a | size: 5,979 | ext: py | lang: Python
max_stars: path=bvspca/animals/management/commands/sync_petpoint_data.py, repo=nfletton/bvspca, head=b0e54151b1e737720657a7cc3976e36ce7b6c5e4, licenses=["MIT"], count=10, events 2019-02-25T07:06:09.000Z to 2022-03-23T08:12:06.000Z
max_issues: path=bvspca/animals/management/commands/sync_petpoint_data.py, repo=nfletton/bvspca, head=b0e54151b1e737720657a7cc3976e36ce7b6c5e4, licenses=["MIT"], count=18, events 2021-03-08T18:38:04.000Z to 2021-08-20T14:16:37.000Z
max_forks: path=bvspca/animals/management/commands/sync_petpoint_data.py, repo=nfletton/bvspca, head=b0e54151b1e737720657a7cc3976e36ce7b6c5e4, licenses=["MIT"], count=3, events 2019-01-29T05:14:22.000Z to 2021-02-18T11:58:34.000Z
content:
import datetime
import logging
import requests
from django.conf import settings
from django.core.management.base import BaseCommand
from bvspca.animals.models import Animal, AnimalCountSettings, AnimalsPage
from bvspca.animals.petpoint import fetch_petpoint_adoptable_animal_ids, \
fetch_petpoint_adopted_dates_since, \
fetch_petpoint_animal
from bvspca.social.interface import add_to_social_queue
logger = logging.getLogger('bvspca.animals.petpoint')
PETPOINT_AUTH_KEY = getattr(settings, 'PETPOINT_AUTH_KEY', "")
PETPOINT_BASE_URL = 'https://ws.petango.com/webservices/wsadoption.asmx/{}'
class Command(BaseCommand):
help = 'Synchronize data from PetPoint with local Animal objects'
def handle(self, *args, **options):
with requests.Session() as session:
session.params = {'authKey': PETPOINT_AUTH_KEY}
# create and update animals based on currently adoptable animals
adoptable_animal_ids = fetch_petpoint_adoptable_animal_ids(session, PETPOINT_BASE_URL)
if adoptable_animal_ids is not None:
for animal_id in adoptable_animal_ids:
petpoint_animal = fetch_petpoint_animal(session, PETPOINT_BASE_URL, animal_id)
if petpoint_animal is not None:
try:
local_animal = Animal.objects.get(petpoint_id=animal_id)
if local_animal.adoption_date:
# adjust adopted count since animal previously adopted
self.increment_animal_count(local_animal.species, 'adopted', -1)
if local_animal.updateAdoptableAnimal(petpoint_animal):
logger.info(
'{} {} updated ({})'.format(
local_animal.species,
local_animal.petpoint_id,
local_animal.title,
)
)
except Animal.DoesNotExist:
new_animal = Animal.create(petpoint_animal)
self.increment_animal_count(new_animal.species, 'rescued')
animal_parent = AnimalsPage.objects.get(species=new_animal.species)
animal_parent.add_child(instance=new_animal)
add_to_social_queue(new_animal)
logger.info(
'{} {} created ({})'.format(
new_animal.species,
new_animal.petpoint_id,
new_animal.title,
)
)
# check for adoptions since yesterday and set adoption dates
adoptions = fetch_petpoint_adopted_dates_since(session, PETPOINT_BASE_URL, datetime.date.today() - datetime.timedelta(1))
if adoptions:
for adoption in adoptions:
try:
local_animal = Animal.objects.get(petpoint_id=adoption[0])
if local_animal.adoption_date != adoption[1]:
local_animal.adoption_date = adoption[1]
local_animal.live = True
local_animal.save()
self.increment_animal_count(local_animal.species, 'adopted')
add_to_social_queue(local_animal)
logger.info(
'{} {} adopted on {} ({})'.format(
local_animal.species,
local_animal.petpoint_id,
local_animal.adoption_date,
local_animal.title,
)
)
except Animal.DoesNotExist:
logger.error(
'Animal {} did not exist when attempting to set adoption date {}'.format(
adoption[0],
adoption[1],
)
)
# unpublish animals no longer adoptable yet have not been adopted
if adoptable_animal_ids is not None:
unavailable_animals = Animal.objects.filter(live=True, adoption_date__isnull=True).exclude(
petpoint_id__in=adoptable_animal_ids,
)
for animal in unavailable_animals:
animal.live = False
animal.save()
logger.warning(
                        'Unpublished animal {} ({}) because it is neither adoptable nor adopted'.format(
animal.petpoint_id,
animal.title,
)
)
@staticmethod
def increment_animal_count(species, event_type, increment=1):
"""
Increment (or decrement) the counts of animals rescued and adopted.
:param species: 'cat' or 'dog'
:param event_type: 'rescued' or 'adopted'
        :param increment: amount to add to the count (negative to decrement); defaults to 1
:return:
"""
try:
animal_counts = AnimalCountSettings.objects.get(site_id=2)
count_field_name = '{}s_{}'.format(species.lower(), event_type.lower())
current_count = getattr(animal_counts, count_field_name, 0)
setattr(animal_counts, count_field_name, current_count + increment)
animal_counts.save()
logger.info(
'{} {} count incremented by {}'.format(species, event_type, increment)
)
except AnimalCountSettings.DoesNotExist:
logger.error('Animal count settings do not exist')
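# Illustrative sketch (not part of the management command): the helper above
# builds its target field name by lower-casing and joining species and event
# type, so a 'Cat' adoption updates a `cats_adopted` column on
# AnimalCountSettings. The concrete column names are an assumption derived
# from that format string rather than from the model definition.
def _example_count_field(species, event_type):
    # Mirrors the expression used inside increment_animal_count.
    return '{}s_{}'.format(species.lower(), event_type.lower())

assert _example_count_field('Cat', 'adopted') == 'cats_adopted'
assert _example_count_field('Dog', 'rescued') == 'dogs_rescued'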
| 48.609756
| 133
| 0.526509
|
698a0724b9a354b5f71d74c1556258ae576c0453
| 1,004
|
py
|
Python
|
src/web-app/app.py
|
aravindvaddi/password-cracking
|
1f8a799924360b2130300dd819d40051b483c47b
|
[
"MIT"
] | null | null | null |
src/web-app/app.py
|
aravindvaddi/password-cracking
|
1f8a799924360b2130300dd819d40051b483c47b
|
[
"MIT"
] | null | null | null |
src/web-app/app.py
|
aravindvaddi/password-cracking
|
1f8a799924360b2130300dd819d40051b483c47b
|
[
"MIT"
] | null | null | null |
# import the Flask class from the flask module
from flask import Flask, render_template, redirect, url_for, request
from flask import jsonify
from hashlib import sha256
salt = '000000000000000000000000000078d2'.encode('utf-8')
hashed_password = '18821d89de11ab18488fdc0a01f1ddf4d290e198b0f80cd4974fc031dc2615a3'
def get_hash(salt, password):
hasher = sha256()
hasher.update(salt)
hasher.update(password.encode('utf-8'))
return hasher.hexdigest()
# create the application object
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
if request.method == 'POST':
password = get_hash(salt, request.form['password'])
if request.form['username'] != 'admin' or password != hashed_password:
authorised = 'Denied'
else:
authorised = 'Granted'
return jsonify(authorised)
else:
return render_template('index.html')
# start the server with the 'run()' method
if __name__ == '__main__':
app.run()
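# A minimal sketch of how the salted SHA-256 check above behaves: hash a
# candidate password with the stored salt and compare it to the stored
# digest. The candidate string below is an illustrative guess only.
def _example_check(candidate):
    """Return True only when the candidate reproduces the stored digest."""
    return get_hash(salt, candidate) == hashed_password

# _example_check('not-the-password')  -> False for any wrong guess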
| 29.529412
| 84
| 0.691235
|
f926e698093a857212ac0ce1f3c2544eb9bc375d
| 806
|
py
|
Python
|
reactionroute_web/reaction/manage.py
|
sxhexe/reaction-route-search
|
f7694c84ca1def4a133ade3e1e2e09705cd28312
|
[
"MIT"
] | 1
|
2017-09-16T07:36:29.000Z
|
2017-09-16T07:36:29.000Z
|
reactionroute_web/reaction/manage.py
|
sxhexe/reaction-route-search
|
f7694c84ca1def4a133ade3e1e2e09705cd28312
|
[
"MIT"
] | null | null | null |
reactionroute_web/reaction/manage.py
|
sxhexe/reaction-route-search
|
f7694c84ca1def4a133ade3e1e2e09705cd28312
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "reaction.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.043478
| 77
| 0.64268
|
03cab83f27efd99976a9b2972d8bc447bf4e2604
| 7,267
|
py
|
Python
|
python/ray/rllib/test/test_supported_spaces.py
|
jamescasbon/ray
|
fb0801ce8c43f163a5724be5a78e23774aed645e
|
[
"Apache-2.0"
] | null | null | null |
python/ray/rllib/test/test_supported_spaces.py
|
jamescasbon/ray
|
fb0801ce8c43f163a5724be5a78e23774aed645e
|
[
"Apache-2.0"
] | null | null | null |
python/ray/rllib/test/test_supported_spaces.py
|
jamescasbon/ray
|
fb0801ce8c43f163a5724be5a78e23774aed645e
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import traceback
import gym
from gym.spaces import Box, Discrete, Tuple, Dict
from gym.envs.registration import EnvSpec
import numpy as np
import sys
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.test.test_multi_agent_env import MultiCartpole, MultiMountainCar
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.tune.registry import register_env
ACTION_SPACES_TO_TEST = {
"discrete": Discrete(5),
"vector": Box(-1.0, 1.0, (5, ), dtype=np.float32),
"tuple": Tuple(
[Discrete(2),
Discrete(3),
Box(-1.0, 1.0, (5, ), dtype=np.float32)]),
}
OBSERVATION_SPACES_TO_TEST = {
"discrete": Discrete(5),
"vector": Box(-1.0, 1.0, (5, ), dtype=np.float32),
"image": Box(-1.0, 1.0, (84, 84, 1), dtype=np.float32),
"atari": Box(-1.0, 1.0, (210, 160, 3), dtype=np.float32),
"tuple": Tuple([Discrete(10),
Box(-1.0, 1.0, (5, ), dtype=np.float32)]),
"dict": Dict({
"task": Discrete(10),
"position": Box(-1.0, 1.0, (5, ), dtype=np.float32),
}),
}
def make_stub_env(action_space, obs_space, check_action_bounds):
class StubEnv(gym.Env):
def __init__(self):
self.action_space = action_space
self.observation_space = obs_space
self.spec = EnvSpec("StubEnv-v0")
def reset(self):
sample = self.observation_space.sample()
return sample
def step(self, action):
if check_action_bounds and not self.action_space.contains(action):
raise ValueError("Illegal action for {}: {}".format(
self.action_space, action))
if (isinstance(self.action_space, Tuple)
and len(action) != len(self.action_space.spaces)):
raise ValueError("Illegal action for {}: {}".format(
self.action_space, action))
return self.observation_space.sample(), 1, True, {}
return StubEnv
def check_support(alg, config, stats, check_bounds=False):
for a_name, action_space in ACTION_SPACES_TO_TEST.items():
for o_name, obs_space in OBSERVATION_SPACES_TO_TEST.items():
print("=== Testing", alg, action_space, obs_space, "===")
stub_env = make_stub_env(action_space, obs_space, check_bounds)
register_env("stub_env", lambda c: stub_env())
stat = "ok"
a = None
try:
a = get_agent_class(alg)(config=config, env="stub_env")
a.train()
except UnsupportedSpaceException:
stat = "unsupported"
except Exception as e:
stat = "ERROR"
print(e)
print(traceback.format_exc())
finally:
if a:
try:
a.stop()
except Exception as e:
print("Ignoring error stopping agent", e)
pass
print(stat)
print()
stats[alg, a_name, o_name] = stat
def check_support_multiagent(alg, config):
register_env("multi_mountaincar", lambda _: MultiMountainCar(2))
register_env("multi_cartpole", lambda _: MultiCartpole(2))
if "DDPG" in alg:
a = get_agent_class(alg)(config=config, env="multi_mountaincar")
else:
a = get_agent_class(alg)(config=config, env="multi_cartpole")
try:
a.train()
finally:
a.stop()
class ModelSupportedSpaces(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=4)
def tearDown(self):
ray.shutdown()
def testAll(self):
stats = {}
check_support("IMPALA", {"num_gpus": 0}, stats)
check_support(
"DDPG", {
"noise_scale": 100.0,
"timesteps_per_iteration": 1
},
stats,
check_bounds=True)
check_support("DQN", {"timesteps_per_iteration": 1}, stats)
check_support(
"A3C", {
"num_workers": 1,
"optimizer": {
"grads_per_step": 1
}
},
stats,
check_bounds=True)
check_support(
"PPO", {
"num_workers": 1,
"num_sgd_iter": 1,
"train_batch_size": 10,
"sample_batch_size": 10,
"sgd_minibatch_size": 1,
},
stats,
check_bounds=True)
check_support(
"ES", {
"num_workers": 1,
"noise_size": 10000000,
"episodes_per_batch": 1,
"train_batch_size": 1
}, stats)
check_support(
"ARS", {
"num_workers": 1,
"noise_size": 10000000,
"num_rollouts": 1,
"rollouts_used": 1
}, stats)
check_support(
"PG", {
"num_workers": 1,
"optimizer": {}
},
stats,
check_bounds=True)
num_unexpected_errors = 0
for (alg, a_name, o_name), stat in sorted(stats.items()):
if stat not in ["ok", "unsupported"]:
num_unexpected_errors += 1
print(alg, "action_space", a_name, "obs_space", o_name, "result",
stat)
self.assertEqual(num_unexpected_errors, 0)
def testMultiAgent(self):
check_support_multiagent(
"APEX", {
"num_workers": 2,
"timesteps_per_iteration": 1000,
"num_gpus": 0,
"min_iter_time_s": 1,
"learning_starts": 1000,
"target_network_update_freq": 100,
})
check_support_multiagent(
"APEX_DDPG", {
"num_workers": 2,
"timesteps_per_iteration": 1000,
"num_gpus": 0,
"min_iter_time_s": 1,
"learning_starts": 1000,
"target_network_update_freq": 100,
})
check_support_multiagent("IMPALA", {"num_gpus": 0})
check_support_multiagent("DQN", {"timesteps_per_iteration": 1})
check_support_multiagent("A3C", {
"num_workers": 1,
"optimizer": {
"grads_per_step": 1
}
})
check_support_multiagent(
"PPO", {
"num_workers": 1,
"num_sgd_iter": 1,
"train_batch_size": 10,
"sample_batch_size": 10,
"sgd_minibatch_size": 1,
})
check_support_multiagent("PG", {"num_workers": 1, "optimizer": {}})
check_support_multiagent("DDPG", {"timesteps_per_iteration": 1})
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "--smoke":
ACTION_SPACES_TO_TEST = {
"discrete": Discrete(5),
}
OBSERVATION_SPACES_TO_TEST = {
"vector": Box(0.0, 1.0, (5, ), dtype=np.float32),
"atari": Box(0.0, 1.0, (210, 160, 3), dtype=np.float32),
}
unittest.main(verbosity=2)
| 33.031818
| 79
| 0.522224
|
8f91c3b428698b9012fbf57062aaf11c6cd7d4b7
| 221
|
py
|
Python
|
A/ShortSubstrings.py
|
shukkkur/hello-world
|
c6388c2bda387e850fdb928a8d40f769c5684fec
|
[
"CC0-1.0"
] | 11
|
2021-05-26T11:47:50.000Z
|
2022-03-08T14:31:02.000Z
|
A/ShortSubstrings.py
|
shukkkur/hello-world
|
c6388c2bda387e850fdb928a8d40f769c5684fec
|
[
"CC0-1.0"
] | null | null | null |
A/ShortSubstrings.py
|
shukkkur/hello-world
|
c6388c2bda387e850fdb928a8d40f769c5684fec
|
[
"CC0-1.0"
] | 4
|
2021-08-06T05:27:12.000Z
|
2022-03-22T14:05:41.000Z
|
__author__ = 'shukkkur'
'''
https://codeforces.com/problemset/problem/1367/A
A. Short Substrings
'''
n = int(input())
for _ in range(n):
word = input()
print(word[::2] + word[-1])
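# A small worked example of the reconstruction above: keep every character at
# an even index and append the final character. The sample string is
# illustrative; for b = "abbaac" the recovered string is "abac".
sample = "abbaac"
print(sample[::2] + sample[-1])  # prints "abac"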
| 13
| 49
| 0.538462
|
cf9138c12d7c0eef214610fab13b72c3a04405fe
| 420
|
py
|
Python
|
backend/app/app/db/base_class.py
|
imadmoussa1/address_lookup
|
407317df7df13fa41eeaefe6e114ddc270a3438b
|
[
"MIT"
] | null | null | null |
backend/app/app/db/base_class.py
|
imadmoussa1/address_lookup
|
407317df7df13fa41eeaefe6e114ddc270a3438b
|
[
"MIT"
] | null | null | null |
backend/app/app/db/base_class.py
|
imadmoussa1/address_lookup
|
407317df7df13fa41eeaefe6e114ddc270a3438b
|
[
"MIT"
] | null | null | null |
from sqlalchemy import inspect
from typing import Any
from sqlalchemy.ext.declarative import as_declarative, declared_attr
@as_declarative()
class Base:
id: Any
__name__: str
# Generate __tablename__ automatically
@declared_attr
def __tablename__(cls) -> str:
return cls.__name__.lower()
def as_dict(self) -> dict:
return {c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs}
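# Illustrative sketch (not part of this module) of how the base class is used:
# a mapped class inherits Base, gets its table name from the lower-cased class
# name, and can be serialised with as_dict(). The Item model and its columns
# are assumptions for demonstration only.
from sqlalchemy import Column, Integer, String

class Item(Base):
    id = Column(Integer, primary_key=True)
    name = Column(String)

# Item.__tablename__ == 'item'
# Item(id=1, name='demo').as_dict() -> {'id': 1, 'name': 'demo'}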
| 22.105263
| 83
| 0.75
|
d25331c53b00f592d5bd013348fb6b1fe64e2e9a
| 4,053
|
py
|
Python
|
backend/settings/dev.py
|
khchine5/django-butter-cms
|
b6660c266e7c499d111961ed8e6613b1d2a6c838
|
[
"MIT"
] | null | null | null |
backend/settings/dev.py
|
khchine5/django-butter-cms
|
b6660c266e7c499d111961ed8e6613b1d2a6c838
|
[
"MIT"
] | null | null | null |
backend/settings/dev.py
|
khchine5/django-butter-cms
|
b6660c266e7c499d111961ed8e6613b1d2a6c838
|
[
"MIT"
] | null | null | null |
"""
Django settings for myblog project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import environ
env = environ.Env(DEBUG=(bool, False))
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
env.read_env(os.path.join(BASE_DIR.parent, ".env"))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-zke!oot$1k6!fkx6o-r39+yakq&bl6$%)fr@d1s&o7x34cof7d"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"whitenoise.runserver_nostatic", # < Per Whitenoise, to disable built in
"django.contrib.staticfiles",
"backend",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
]
ROOT_URLCONF = "backend.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": ["dist"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "backend.wsgi.application"
# Place static in the same location as webpack build files
STATIC_ROOT = os.path.join(BASE_DIR.parent, "dist", "static")
STATICFILES_DIRS = []
##########
# STATIC #
##########
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Insert Whitenoise Middleware at top but below Security Middleware
# MIDDLEWARE.insert(1, 'whitenoise.middleware.WhiteNoiseMiddleware',)
# http://whitenoise.evans.io/en/stable/django.html#make-sure-staticfiles-is-configured-correctly
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
BUTTER_CMS_AUTH_TOKEN = env("BUTTER_CMS_AUTH_TOKEN")
| 27.201342
| 96
| 0.713546
|
034be75971d583e7e5f9e832726485f795564a24
| 1,542
|
py
|
Python
|
intefraces/i_neural_net.py
|
Postmodernist/BoardAI
|
c2ffee8d3183dad388c6d25d73fb2defa5835365
|
[
"Unlicense"
] | null | null | null |
intefraces/i_neural_net.py
|
Postmodernist/BoardAI
|
c2ffee8d3183dad388c6d25d73fb2defa5835365
|
[
"Unlicense"
] | null | null | null |
intefraces/i_neural_net.py
|
Postmodernist/BoardAI
|
c2ffee8d3183dad388c6d25d73fb2defa5835365
|
[
"Unlicense"
] | null | null | null |
class INeuralNet:
"""
This class specifies the base NeuralNet class. To define your own neural
network, subclass this class and implement the functions below. The neural
network does not consider the current player, and instead only deals with
the canonical form of the board.
"""
@staticmethod
def create():
"""
:return a new INeuralNet object
"""
pass
def train(self, examples):
"""
This function trains the neural network with examples obtained from
self-play.
:param examples: a list of training examples, where each example is of
form (board, pi, v). pi is the MCTS informed policy vector for the
given board, and v is its value. The board is in canonical form
"""
pass
def predict(self, canonical_board, valid_actions):
"""
:param canonical_board: current board in its canonical form
:param valid_actions: a list of valid actions
:returns
pi: a policy vector for the current board, a numpy array of length
game.ACTION_SIZE
v: a float in [-1,1] that gives the value of the current board
"""
pass
def save(self, folder, file_name):
"""
Saves the current neural network (with its parameters) in folder/file_name
"""
pass
def load(self, folder, file_name):
"""
Loads parameters of the neural network from folder/file_name
"""
pass
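# An illustrative skeleton (not part of the interface module) showing how a
# concrete network might satisfy INeuralNet. The uniform policy and the
# ACTION_SIZE constant are placeholders, not the project's real model.
import numpy as np

class UniformNeuralNet(INeuralNet):
    ACTION_SIZE = 7  # assumed action count, for the sketch only

    @staticmethod
    def create():
        return UniformNeuralNet()

    def train(self, examples):
        pass  # a real implementation would fit on the (board, pi, v) triples

    def predict(self, canonical_board, valid_actions):
        pi = np.zeros(self.ACTION_SIZE)
        pi[valid_actions] = 1.0 / max(len(valid_actions), 1)
        return pi, 0.0  # uniform policy over valid actions, neutral value

    def save(self, folder, file_name):
        pass

    def load(self, folder, file_name):
        pass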
| 32.125
| 82
| 0.61284
|
cdcb605f60bbefa12713bea6a0f48b67c60dc0ec
| 4,748
|
py
|
Python
|
python2_consul/file.py
|
ctma/consul_kv
|
8aa5bbb05a72dff8d29023e6d8df7956af5e2b2d
|
[
"MIT"
] | null | null | null |
python2_consul/file.py
|
ctma/consul_kv
|
8aa5bbb05a72dff8d29023e6d8df7956af5e2b2d
|
[
"MIT"
] | 5
|
2017-10-19T18:24:52.000Z
|
2021-06-01T21:20:13.000Z
|
python2_consul/file.py
|
ctma/consul_kv
|
8aa5bbb05a72dff8d29023e6d8df7956af5e2b2d
|
[
"MIT"
] | null | null | null |
import os
import fnmatch
import logging
import yaml
class File:
'''
Read and parse a yaml file
'''
def _read_file_content(self, file):
'''Open the file and return the file's content
Args:
file (str): The path to the file
Returns:
str object of the file's content
None object if the file is invalid
'''
logging.info("Attempting to read content from file: {}".format(file))
file_handler = None
try:
file_handler = open(file)
except FileNotFoundError:
logging.error("File {} does not exist")
exit(1)
content = file_handler.read()
file_handler.close()
return content
def _is_yaml_file(self, file_content):
'''Validate if the file contains valid yaml
Args:
file_content (str): File content
Returns:
bool: True if valid, False otherwise
'''
yaml_file = None
try:
yaml_file = yaml.load(file_content)
except yaml.scanner.ScannerError:
logging.error("File {} contains invalid yaml".format(file_content))
return True if yaml_file else False
def _glob_yaml_file(self, user_input):
'''If user's input is a directory, scan for all the yaml file
Args:
user_input (str): Path to a directory or file
Returns:
List of str object of file path
None if nothing is found
'''
# Credit:
# https://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python
files = []
for root, dirnames, filenames in os.walk(user_input):
for filename in fnmatch.filter(filenames, '*.yaml'):
files.append(os.path.join(root, filename))
# End Credit
return files
def is_directory(self, user_input):
'''Check to see if the user's input is a directory
Args:
user_input (str): Path to a directory
Returns:
bool: True if it is, False otherwise
'''
return os.path.isdir(user_input)
def is_file(self, user_input):
'''Check to see if the user's input is a file
Args:
user_input (str): Path to a file
Returns:
bool: True if it is, False otherwise
'''
return os.path.isfile(user_input)
def parse_yaml(self, file_content):
'''Parse the yaml file and return a json
Args:
file_content (str): File's content
Returns:
json object if valid
None if invalid
'''
return yaml.load(file_content) if self._is_yaml_file(file_content) else None
def process_file(self, file):
'''Process the file content and return a list
Args:
file (str): Path to a file
Returns:
List of json objects
None if invalid
'''
yaml_data = []
logging.debug("Processing content from file: {}".format(file))
file_content = self._read_file_content(file)
if file_content:
logging.debug("Parsing yaml content from file {}".format(file))
data = self.parse_yaml(file_content)
if data:
logging.debug("YAML in file {} is: \n{}".format(file, data))
yaml_data.append(data)
else:
logging.error("File {} does not contain valid yaml".format(file))
return yaml_data
def process_directory(self, directory):
'''Process each file and load it into a list of dict
Args:
directory (str): Path to a directory
Returns:
List of json objects
None if invalid
'''
yaml_data = []
files = self._glob_yaml_file(directory)
if files:
for file in files:
yaml_data += self.process_file(file)
else:
logging.info("Directory {} contains no file with extension .yaml".format(directory))
return yaml_data
def process_args_file(self, args_file):
'''Process the file or directory and extract the kv
Args:
args_file (str): Path to a file or directory
Returns:
payload: List of yaml definition
'''
payload = []
if self.is_directory(args_file):
data_set = self.process_directory(args_file)
for data in data_set:
logging.debug("Extracting kv: {}".format(data))
payload.append(data)
elif self.is_file(args_file):
payload = self.process_file(args_file)
return payload
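# A brief usage sketch (illustrative only): point process_args_file at either
# a single YAML file or a directory tree and receive a list of parsed
# documents. The paths below are assumptions for demonstration.
if __name__ == '__main__':
    reader = File()
    documents = reader.process_args_file('config/')         # walks *.yaml files
    documents += reader.process_args_file('extra/kv.yaml')  # single file
    print("Loaded {} yaml document(s)".format(len(documents)))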
| 31.653333
| 100
| 0.570767
|
6146787422450f2ad5db9a8d9752a97c5a898548
| 341
|
py
|
Python
|
Backend/onduty/zones/migrations/0002_auto_20200716_0906.py
|
marsh69/Beats-Allocation-SIH
|
597121dcd1d4259d9a1df394ef09891757df6895
|
[
"MIT"
] | 1
|
2020-08-12T03:19:22.000Z
|
2020-08-12T03:19:22.000Z
|
Backend/onduty/zones/migrations/0002_auto_20200716_0906.py
|
marsh69/Beats-Allocation-SIH
|
597121dcd1d4259d9a1df394ef09891757df6895
|
[
"MIT"
] | null | null | null |
Backend/onduty/zones/migrations/0002_auto_20200716_0906.py
|
marsh69/Beats-Allocation-SIH
|
597121dcd1d4259d9a1df394ef09891757df6895
|
[
"MIT"
] | 2
|
2020-08-11T08:04:15.000Z
|
2020-10-02T08:20:41.000Z
|
# Generated by Django 3.0.2 on 2020-07-16 09:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zones', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='zones',
options={'verbose_name_plural': 'zones'},
),
]
| 18.944444
| 53
| 0.589443
|
429a1ff59aa2ad1be5b12cdaa4f06f9ce0d3406f
| 2,627
|
py
|
Python
|
cpp/tests/math/py/test_Diagonalization.py
|
ProkopHapala/SimpleSimulationEngine
|
240f9b7e85b3a6eda7a27dc15fe3f7b8c08774c5
|
[
"MIT"
] | 26
|
2016-12-04T04:45:12.000Z
|
2022-03-24T09:39:28.000Z
|
cpp/tests/math/py/test_Diagonalization.py
|
Aki78/FlightAI
|
9c5480f2392c9c89b9fee4902db0c4cde5323a6c
|
[
"MIT"
] | null | null | null |
cpp/tests/math/py/test_Diagonalization.py
|
Aki78/FlightAI
|
9c5480f2392c9c89b9fee4902db0c4cde5323a6c
|
[
"MIT"
] | 2
|
2019-02-09T12:31:06.000Z
|
2019-04-28T02:24:50.000Z
|
#!/usr/bin/python
import numpy as np
import libTest_Lingebra as LA
import JacobiEigen as JE
import matplotlib.pyplot as plt
import time
'''
test tol=1e-9
Lenovo-ideapad-Y700-15ISK i7-6700HQ CPU @ 2.60GHz
N time np.eig(A) [s] time LA.eig_Jacobi(A,tol=1e-9) Nrot
200 0.259829 0.902322
200 0.256582 0.900702
200 0.260135 0.875203 78648 => 11128 ns/rotation => 55ns/index
100 0.035707 0.1576
100 0.05374 0.252058
100 0.061157 0.180691 18983 => 9518 ns/rotation => 95ns/index
'''
n = 200;
#np.random.seed(15464)
sqrtA = np.random.rand( n,n ) - 0.5
A = np.dot( sqrtA, np.transpose(sqrtA) )
tol = 1e-9
V = np.zeros((n,n))
es = np.zeros(n)
mjs = np.zeros(n,dtype=np.int32)
ijmax = np.zeros(2,dtype=np.int32)
A_=A.copy(); V_=V.copy();
#print A
t1=time.clock()
esA, vsA = np.linalg.eig( A )
t2=time.clock()
print "eigenvalues np.eig: ",np.sort(esA)
print "time: ",t2-t1
t1=time.clock()
esA,vsA = LA.eig_Jacobi( A, tol=tol, nMaxIter=100000 )
t2=time.clock()
print "eigenvalues cpp.jacobi: ",np.sort(esA)
print "time: ",t2-t1
'''
esA,vsA = JE.jacobi ( A_, tol = tol )
print "eigenvalues py.jacobi: ",np.sort(esA)
'''
'''
vmax=LA.lib.eig_Jacobi_init(n,A,V,mjs,ijmax)
mjs_,mvs_ = JE.initMaxs(A_)
print "mjs ", mjs , vmax
print "mjs_ ", mjs_, mvs_.max()
for itr in range(100):
print "============ itr ", itr, (ijmax[0],ijmax[1])
#print "--- py ---"
vmax_,imax_,jmax_ = JE.maxElem(A_)
JE.rotate (A_,V_,imax_,jmax_)
JE.updateMaxs(A_, imax_,jmax_,mjs_,mvs_)
vmax_,imax_,jmax_ = JE.maxElem(A_)
#print "imax,jmax",(imax_,jmax_)
#print "--- cpp ---"
#ijmax[0]=imax_; ijmax[1]=jmax_ # to check just rotation - works fine
vmax=LA.lib.eig_Jacobi_step(n,A,V,mjs,ijmax,vmax)
#print "mjs ", mjs
#print "mjs_ ", mjs_
print "cpp ", (ijmax[0],ijmax[1]), mjs , vmax
print "py ", (imax_,jmax_) , mjs_, vmax_
if not np.array_equal(mjs, mjs_):
print "ERROR : mjs not equal "
break;
if (vmax<tol):
print "converged by %i Jacobi rotations" %itr
break;
#print ijmax,vmax,vmax
'''
plt.figure(figsize=(15,5))
plt.subplot(1,3,1); plt.imshow( np.log10(np.abs(A )+tol), interpolation="nearest", cmap='gray');
plt.subplot(1,3,2); plt.imshow( np.log10(np.abs(A_ )+tol), interpolation="nearest", cmap='gray');
plt.subplot(1,3,3); plt.imshow( np.log10(np.abs(A-A_)+tol), interpolation="nearest", cmap='gray');
plt.show()
| 29.852273
| 98
| 0.589265
|
075a75aa3f1ea204b4e09ca448e53eb0feb437bd
| 3,478
|
py
|
Python
|
deprecated/download_apks_from_device.py
|
markraemer/mH-PriSe
|
1555c4c7e6457c3bb5bce3b6dae9c514ed7fc3d3
|
[
"MIT"
] | 1
|
2016-12-02T06:50:44.000Z
|
2016-12-02T06:50:44.000Z
|
deprecated/download_apks_from_device.py
|
markraemer/mH-PriSe
|
1555c4c7e6457c3bb5bce3b6dae9c514ed7fc3d3
|
[
"MIT"
] | null | null | null |
deprecated/download_apks_from_device.py
|
markraemer/mH-PriSe
|
1555c4c7e6457c3bb5bce3b6dae9c514ed7fc3d3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# KK, October 2014
# Update: Dec, download selected apps
# The script loads all android apk files from a connected smart phone
# to the current directory and creates a file with the package name
# and title of the app
# Output will be written in file and directory with device id
# Prerequisites: adb + aapt + access to apks over debugging bridge
# TBD Check if files are already existing prior to download
import os
import commands
import re
# Settings
GSA = 1 # Get only selected apks, must give apk names in file
app_list = "/home/labits/svn/kk/medical/google-apps.txt"
GAA = 0 # Get all apks from device
NGP = 1 # set = 1 for no download of apps with google or system in path
NGA = 1 # set = 1 for no download of google apps
# Get device number
command = "adb devices"
status, output = commands.getstatusoutput(command)
if status: # TBD add offline
print "Something wrong. Is a device attached?"
exit()
else:
devid = output[26:42] # get device ID
devid.strip("\n")
print "Device " + devid + " is attached. Staring processing."
f_packages = "packages-" + devid
f_path = "paths-" + devid
f_out = "out-" + devid
os.system("mkdir " + devid)
os.system("rm " + f_packages)
os.system("rm " + f_path)
counter = 1
# OPTION 1: GET all APKS from device
if GAA == 1:
os.system("touch " + f_packages)
os.system("adb shell pm list packages > " + f_packages)
f = open(f_packages, 'r')
print "Extracting package path for "
for line in f:
line = line.strip()
line = line.replace("package:", "")
if NGA and (line.find("com.google.")>-1 or line.find("com.android.")>-1):
print "Excluding ", line, "(NGA flag set)"
else:
print "Inlcuding " + line
command = "adb shell pm path " + line + " >> " + f_path
os.system(command)
counter += 1
    f.close()
# OPTION 2: GET only selected apps
if GSA == 1:
f = open(app_list, 'r')
print "Extracting package path for "
counter = 1
for line in f:
if len(line)<4: continue
line = line.strip()
# line = line.replace("https://play.google.com/store/apps/details?id=", "")
if NGA and (line.find("com.google.")>-1 or line.find("com.android.")>-1):
print "Excluding ", line, "(NGA flag set)"
else:
print "Inlcuding " + line
command = "adb shell pm path " + line + " >> " + f_path
os.system(command)
counter += 1
    f.close()
print "Will try do download ",counter, " APK files."
print "Downloading apk for "
f = open(f_path, 'r')
g = open(f_out, 'w')
for line in f:
line = line.strip()
line = line.replace("package:", "")
print "Treating: " + line
s = line.split("/")
print "Path information: " + str(s)
if NGP and ("android" in s or "system" in s): # no download for android or system apks
print "No download for android or system apks"
else:
print "Pulling " + line
command = "adb pull " + line + " " + devid
os.system(command)
command = "aapt dump badging " + s[-1] + " | grep application-label:"
out = commands.getstatusoutput(command)
# print "Result: "+ line + "\t" + s[-1] + "\t" + out[1]
g.write(line + "\t" + s[-1] + "\t" + out[1] + "\n")
g.close()
f.close()
| 31.333333
| 90
| 0.585106
|
614e0bfbe47d9662b6bbac7845e85053e390205c
| 3,112
|
py
|
Python
|
mayan/apps/document_indexing/tasks.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 1
|
2021-06-17T18:24:25.000Z
|
2021-06-17T18:24:25.000Z
|
mayan/apps/document_indexing/tasks.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 7
|
2020-06-06T00:01:04.000Z
|
2022-01-13T01:47:17.000Z
|
mayan/apps/document_indexing/tasks.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 1
|
2020-07-29T21:03:27.000Z
|
2020-07-29T21:03:27.000Z
|
from __future__ import unicode_literals
import logging
from django.apps import apps
from django.db import OperationalError
from mayan.apps.lock_manager.exceptions import LockError
from mayan.celery import app
from .literals import RETRY_DELAY
logger = logging.getLogger(__name__)
@app.task(bind=True, default_retry_delay=RETRY_DELAY, max_retries=None, ignore_result=True)
def task_delete_empty(self):
IndexInstanceNode = apps.get_model(
app_label='document_indexing', model_name='IndexInstanceNode'
)
try:
IndexInstanceNode.objects.delete_empty()
except LockError as exception:
raise self.retry(exc=exception)
@app.task(bind=True, default_retry_delay=RETRY_DELAY, max_retries=None, ignore_result=True)
def task_index_document(self, document_id):
Document = apps.get_model(
app_label='documents', model_name='Document'
)
Index = apps.get_model(
app_label='document_indexing', model_name='Index'
)
try:
document = Document.objects.get(pk=document_id)
except Document.DoesNotExist:
        # Document was deleted before we could execute; abort the update
pass
else:
try:
Index.objects.index_document(document=document)
except OperationalError as exception:
logger.warning(
'Operational error while trying to index document: '
'%s; %s', document, exception
)
raise self.retry(exc=exception)
except LockError as exception:
logger.warning(
'Unable to acquire lock for document %s; %s ',
document, exception
)
raise self.retry(exc=exception)
@app.task(bind=True, default_retry_delay=RETRY_DELAY, ignore_result=True)
def task_rebuild_index(self, index_id):
Index = apps.get_model(
app_label='document_indexing', model_name='Index'
)
try:
index = Index.objects.get(pk=index_id)
index.rebuild()
except LockError as exception:
# This index is being rebuilt by another task, retry later
raise self.retry(exc=exception)
@app.task(bind=True, default_retry_delay=RETRY_DELAY, max_retries=None, ignore_result=True)
def task_remove_document(self, document_id):
Document = apps.get_model(
app_label='documents', model_name='Document'
)
IndexInstanceNode = apps.get_model(
app_label='document_indexing', model_name='IndexInstanceNode'
)
try:
document = Document.objects.get(pk=document_id)
except Document.DoesNotExist:
# Document was deleted before we could execute
# Since it was automatically removed from the document M2M
# we just now delete the empty instance nodes
try:
IndexInstanceNode.objects.delete_empty()
except LockError as exception:
raise self.retry(exc=exception)
else:
try:
IndexInstanceNode.objects.remove_document(document=document)
except LockError as exception:
raise self.retry(exc=exception)
| 31.755102
| 91
| 0.679949
|
83f712e401e12d1367feb6ce27650cfd3d36574f
| 1,032
|
py
|
Python
|
core_get/package/reference/package_reference.py
|
core-get/core-get
|
8fb960e4e51d0d46b5e3b2f4832eb4a39e0e60f7
|
[
"MIT"
] | null | null | null |
core_get/package/reference/package_reference.py
|
core-get/core-get
|
8fb960e4e51d0d46b5e3b2f4832eb4a39e0e60f7
|
[
"MIT"
] | null | null | null |
core_get/package/reference/package_reference.py
|
core-get/core-get
|
8fb960e4e51d0d46b5e3b2f4832eb4a39e0e60f7
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from dataclasses import dataclass
from functools import total_ordering
from pathlib import PurePath
from core_get.package.manifest import Manifest
@dataclass
@total_ordering
class PackageReference:
manifest: Manifest
def __hash__(self):
return hash(self._cmp_key())
def __lt__(self, other):
if not isinstance(other, PackageReference):
return NotImplemented
return self._cmp_key() < other._cmp_key()
def __eq__(self, other):
if not isinstance(other, PackageReference):
return NotImplemented
return self._cmp_key() == other._cmp_key()
def _cmp_key(self):
return self.manifest.name, self.manifest.version
@dataclass(eq=False, order=False)
class LocalPackageReference(PackageReference):
path: PurePath
@dataclass(eq=False, order=False)
class RemotePackageReference(PackageReference):
url: str
@dataclass(eq=False, order=False)
class InstalledPackageReference(PackageReference):
pass
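# An illustrative sketch of the ordering defined above: references compare by
# (manifest name, version), so sorting groups packages alphabetically and then
# by version. A SimpleNamespace stands in for Manifest to keep the sketch
# self-contained; the real Manifest dataclass may require more fields.
from types import SimpleNamespace

def _demo_manifest(name, version):
    return SimpleNamespace(name=name, version=version)

_refs = [
    PackageReference(_demo_manifest('zlib', '2.0')),
    PackageReference(_demo_manifest('alpha', '1.0')),
]
assert sorted(_refs)[0].manifest.name == 'alpha'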
| 22.933333
| 56
| 0.73062
|
3490e7adfda905664a9a711afb33f3906899b02b
| 1,979
|
py
|
Python
|
reprounzip-vagrant/setup.py
|
arfon/reprozip
|
d64b8f42220d467a96805338d4c2579a20e0e1db
|
[
"BSD-3-Clause"
] | null | null | null |
reprounzip-vagrant/setup.py
|
arfon/reprozip
|
d64b8f42220d467a96805338d4c2579a20e0e1db
|
[
"BSD-3-Clause"
] | null | null | null |
reprounzip-vagrant/setup.py
|
arfon/reprozip
|
d64b8f42220d467a96805338d4c2579a20e0e1db
|
[
"BSD-3-Clause"
] | null | null | null |
import io
import os
from setuptools import setup
# pip workaround
os.chdir(os.path.abspath(os.path.dirname(__file__)))
# Need to specify encoding for PY3, which has the worst unicode handling ever
with io.open('README.rst', encoding='utf-8') as fp:
description = fp.read()
setup(name='reprounzip-vagrant',
version='1.1',
packages=['reprounzip', 'reprounzip.unpackers',
'reprounzip.unpackers.vagrant'],
entry_points={
'reprounzip.unpackers': [
'vagrant = reprounzip.unpackers.vagrant:setup']},
namespace_packages=['reprounzip', 'reprounzip.unpackers'],
install_requires=[
'reprounzip>=1.1',
'rpaths>=0.8',
'paramiko'],
description="Allows the ReproZip unpacker to create virtual machines",
author="Remi Rampin, Fernando Chirigati, Dennis Shasha, Juliana Freire",
author_email='dev@reprozip.org',
maintainer="Remi Rampin",
maintainer_email='remi@rampin.org',
url='https://www.reprozip.org/',
project_urls={
'Homepage': 'https://github.com/ViDA-NYU/reprozip',
'Documentation': 'https://docs.reprozip.org/',
'Examples': 'https://examples.reprozip.org/',
'Say Thanks': 'https://saythanks.io/to/remram44',
'Source': 'https://github.com/ViDA-NYU/reprozip',
'Tracker': 'https://github.com/ViDA-NYU/reprozip/issues',
},
long_description=description,
license='BSD-3-Clause',
keywords=['reprozip', 'reprounzip', 'reproducibility', 'provenance',
'vida', 'nyu', 'vagrant'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Topic :: System :: Archiving'])
| 38.803922
| 78
| 0.615968
|
e478dedd23541c89560b7642c3b2ac874b389386
| 11,912
|
py
|
Python
|
source/conf.py
|
MohsenQazi/ood-documentation
|
9323a7d0aa6d935528e10c19692b2d4efcc93a7f
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
source/conf.py
|
MohsenQazi/ood-documentation
|
9323a7d0aa6d935528e10c19692b2d4efcc93a7f
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
source/conf.py
|
MohsenQazi/ood-documentation
|
9323a7d0aa6d935528e10c19692b2d4efcc93a7f
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Open OnDemand documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 10 17:38:37 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import datetime
year = str(datetime.datetime.now().year)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.githubpages',
'sphinxcontrib.httpdomain',
'sphinxcontrib.plantuml',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Open OnDemand'
#copyright = u'2017-2019, Ohio Supercomputer Center'
copyright = u'2017-' + year + ', Ohio Supercomputer Center'
author = u'Ohio Supercomputer Center'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.0'
# The full version, including alpha/beta/rc tags.
release = u'2.0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'logo_only': True
}
# Context sets the variables used to render the templates
# See sphinx_rtd_theme/breadcrumbs.html
#
# thank you http://docs.readthedocs.io/en/latest/vcs.html#github
html_context = {
'display_github': True,
'github_user': 'OSC',
'github_repo': 'ood-documentation',
'github_version': os.environ.get('TRAVIS_BRANCH', 'develop'),
'conf_py_path': '/source/',
'versions' : [
['latest', '/latest/'],
['1.8', '/release-1.8/'],
['1.7', '/release-1.7/'],
['1.6', '/release-1.6/'],
['1.5', '/release-1.5/'],
['1.4', '/release-1.4/'],
['1.3', '/release-1.3/'],
['1.2', '/release-1.2/'],
['1.1', '/release-1.1/'],
['1.0', '/release-1.0/'],
['develop', '/develop/']
]
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Open OnDemand v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
html_logo = '_static/logo.svg'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenOnDemanddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'OpenOnDemand.tex', u'Open OnDemand Documentation',
u'Ohio Supercomputer Center', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'openondemand', u'Open OnDemand Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'OpenOnDemand', u'Open OnDemand Documentation',
author, 'OpenOnDemand', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# If true, figures, tables and code-blocks are automatically numbered if they
# have a caption. At same time, the numref role is enabled.
numfig = True
# -- Custom configuration -------------------------------------------------
# Ignore anchors when checking links (fixes the dynamic anchors in GitHub)
linkcheck_anchors = False
from jinja2 import Environment, StrictUndefined
# Use Jinja to parse RST files first
def rstjinja(app, docname, source):
"""
Render our pages as a jinja template for fancy templating goodness.
"""
jinja_env = Environment(undefined=StrictUndefined)
src = source[0]
rendered = jinja_env.from_string(src).render(doc_context)
source[0] = rendered
def setup(app):
app.connect('source-read', rstjinja)
app.add_stylesheet('css/custom.css')
# Context used for jinja template
doc_context = {
'ondemand_version': '2.0',
}
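# A self-contained sketch of what the source-read hook does to a page: the raw
# RST is rendered through Jinja with doc_context before Sphinx parses it. The
# sample source line is illustrative only.
_sample_source = ["This documentation covers Open OnDemand {{ ondemand_version }}."]
rstjinja(None, "demo-page", _sample_source)
# _sample_source[0] is now "This documentation covers Open OnDemand 2.0."
# With StrictUndefined, a typo such as {{ ondemand_verison }} fails the build
# instead of rendering silently as empty text.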
| 28.982968
| 80
| 0.690984
|
71ef1d69dee1b885e5d1316c93cffb2aa659908e
| 1,134
|
py
|
Python
|
federated.py
|
eriksore/sdn
|
16eaa6a28bcbf957974e8339ea70724e604f5da9
|
[
"MIT"
] | null | null | null |
federated.py
|
eriksore/sdn
|
16eaa6a28bcbf957974e8339ea70724e604f5da9
|
[
"MIT"
] | null | null | null |
federated.py
|
eriksore/sdn
|
16eaa6a28bcbf957974e8339ea70724e604f5da9
|
[
"MIT"
] | null | null | null |
from mininet.net import Mininet
from mininet.node import UserSwitch, OVSKernelSwitch
from mininet.topo import Topo
from mininet.log import lg
from mininet.util import irange
import sys
flush = sys.stdout.flush
class FederatedNet( Topo ):
"Topology for a federated network."
def __init__( self, N, **params ):
Topo.__init__( self, **params )
hosts1 = [ self.addHost( 'h%d' % n )
for n in irange( 1, 2 ) ]
hosts2 = [ self.addHost( 'h%d' % n )
for n in irange( 3, 4 ) ]
hosts3 = [ self.addHost( 'h%d' % n )
for n in irange( 5, 6 ) ]
hosts4 = [ self.addHost( 'h%d' % n )
for n in irange( 7, 8 ) ]
switches = [ self.addSwitch( 's%s' % s )
for s in irange( 1, 6 ) ]
        # unpack the switches so the links below can reference them by name
        s1, s2, s3, s4, s5, s6 = switches
        for h in hosts1:
            self.addLink( s2, h )
        for h in hosts2:
            self.addLink( s3, h )
        for h in hosts3:
            self.addLink( s5, h )
        for h in hosts4:
            self.addLink( s6, h )
        self.addLink( s1, s2 )
        self.addLink( s1, s4 )
        self.addLink( s1, s3 )
        self.addLink( s4, s5 )
        self.addLink( s4, s6 )
if __name__ == '__main__':
lg.setLogLevel( 'info' )
hostCount = 8
FederatedNet( hostCount )
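    # A hedged sketch of how this topology might actually be started (the
    # original __main__ block only constructs the Topo object). Running it
    # requires a live Mininet environment and root privileges, and the switch
    # class choice is an assumption, so it is left commented out:
    # net = Mininet( topo=FederatedNet( hostCount ), switch=OVSKernelSwitch )
    # net.start()
    # net.pingAll()
    # net.stop()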
| 23.625
| 52
| 0.604938
|
731dbb7b3c314a99569a856b588bcc7fc0643f86
| 268
|
py
|
Python
|
tests/test_constant_parameter.py
|
tonybaloney/inline
|
e8e523e7f3f934c8939104208d53f3526b08c421
|
[
"MIT"
] | 6
|
2021-12-20T08:53:02.000Z
|
2021-12-23T14:23:38.000Z
|
tests/test_constant_parameter.py
|
tonybaloney/inline
|
e8e523e7f3f934c8939104208d53f3526b08c421
|
[
"MIT"
] | null | null | null |
tests/test_constant_parameter.py
|
tonybaloney/inline
|
e8e523e7f3f934c8939104208d53f3526b08c421
|
[
"MIT"
] | null | null | null |
from pyinline import inline
import logging
log = logging.getLogger(__name__)
@inline
def log_error(msg: str, exception: Exception):
log.error(msg, exception, exc_info=True)
try:
x = 1 / 0
except Exception as e:
log_error("Could not divide number", e)
| 16.75
| 46
| 0.716418
|
9e2a64645977d6b7c981b117ee8e3e68093f9ffc
| 714
|
py
|
Python
|
sayn/logging/console_logger.py
|
robin-173/sayn
|
d1cf36b92fad6a1798b57ad80abb22e8386e0e86
|
[
"Apache-2.0"
] | 105
|
2020-04-23T17:04:34.000Z
|
2022-03-18T15:47:52.000Z
|
sayn/logging/console_logger.py
|
robin-173/sayn
|
d1cf36b92fad6a1798b57ad80abb22e8386e0e86
|
[
"Apache-2.0"
] | 53
|
2020-06-12T14:41:12.000Z
|
2022-01-24T13:04:58.000Z
|
sayn/logging/console_logger.py
|
robin-173/sayn
|
d1cf36b92fad6a1798b57ad80abb22e8386e0e86
|
[
"Apache-2.0"
] | 9
|
2020-04-23T16:56:23.000Z
|
2021-08-16T10:54:48.000Z
|
from .logger import Logger
from .log_formatter import LogFormatter
class ConsoleLogger(Logger):
fmt = LogFormatter(use_colour=True, output_ts=True)
is_debug = True
def __init__(self, debug=True):
self.is_debug = debug
def print(self, s=None):
if s is None:
print()
else:
prefix = " " * self.current_indent
s = s["message"]
if isinstance(s, str):
s = [s]
elif not isinstance(s, list):
raise ValueError("error in logging print")
print(f"{prefix}{s[0]}")
for e in s[1:]:
for l in e.split("\n"):
print(f"{prefix} {l}")
| 26.444444
| 58
| 0.512605
|
5fcc7b9f4177f7127ab1bf25a4285ea6bed90038
| 6,873
|
py
|
Python
|
python/smqtk/utils/bit_utils.py
|
jbeezley/SMQTK
|
e6b00f94be95f39bbca52a7983ac3d6d1f86f847
|
[
"BSD-3-Clause"
] | 1
|
2021-04-10T10:51:26.000Z
|
2021-04-10T10:51:26.000Z
|
python/smqtk/utils/bit_utils.py
|
jbeezley/SMQTK
|
e6b00f94be95f39bbca52a7983ac3d6d1f86f847
|
[
"BSD-3-Clause"
] | 3
|
2021-06-08T22:19:14.000Z
|
2022-03-12T00:46:44.000Z
|
python/smqtk/utils/bit_utils.py
|
DigitalCompanion/SMQTK
|
fc9404b69150ef44f24423844bc80735c0c2b669
|
[
"BSD-3-Clause"
] | null | null | null |
import math
import numpy
# noinspection PyUnresolvedReferences
from six.moves import range
from . import ncr
try:
# noinspection PyUnresolvedReferences
from numba import jit
except (ImportError, TypeError):
# Create passthrough function if numba is not installed.
def jit(func_or_sig):
import types
if isinstance(func_or_sig, (types.FunctionType, types.MethodType)):
return func_or_sig
else:
return lambda *args, **kwds: func_or_sig
def next_perm(v):
"""
Compute the lexicographically next bit permutation
Generates next permutation with a given amount of set bits,
given the previous lexicographical value.
Taken from http://graphics.stanford.edu/~seander/bithacks.html
"""
t = (v | (v - 1)) + 1
    w = t | ((((t & -t) // (v & -v)) >> 1) - 1)
return w
def iter_perms(l, n):
"""
Return an iterator over bit combinations of length ``l`` with ``n`` set
bits.
    Yields nothing if ``n`` <= 0.
:param l: Total bit length to work with. The ``n`` in nCr problem.
:type l: int
:param n: Number of bits to be set in permutations. The ``r`` in nCr
problem.
:type n: int
:return: List of bit vector permutations of the value ``(1<<n)-1`` over
``l`` bits.
:rtype: list[int]
"""
    if n <= 0:
        return
n = min(l, n)
s = (1 << n) - 1
yield s
for _ in range(ncr(l, n) - 1):
s = next_perm(s)
yield s
def neighbor_codes(b, c, d):
"""
Iterate through integers of bit length ``b``, where ``b`` is the number
of bits, that are ``d`` hamming distance away from query code ``c``.
This will yield a number of elements equal to ``nCr(b, d)``.
We expect ``d`` to be the integer hamming distance,
e.g. h(001101, 100101) == 2, not 0.333.
:param b: integer bit length
    :type b: int
:param c: Query small-code integer
:type c: int
:param d: Integer hamming distance
:type d: int
"""
if not d:
yield c
else:
for fltr in iter_perms(b, d):
yield c ^ fltr
@jit
def bit_vector_to_int(v):
"""
Transform a numpy vector representing a sequence of binary bits [0 | >0]
into an integer representation.
This version handles vectors of up to 64bits in size.
:param v: 1D Vector of bits
:type v: numpy.ndarray
:return: Integer equivalent
:rtype: int
"""
c = 0
for b in v:
c = (c << 1) + int(b)
return c
def bit_vector_to_int_large(v):
"""
Transform a numpy vector representing a sequence of binary bits [0 | >0]
into an integer representation.
This function is the special form that can handle very large integers
(>64bit).
:param v: 1D Vector of bits
:type v: numpy.ndarray
:return: Integer equivalent
:rtype: int
"""
c = 0
for b in v:
c = (c << 1) + int(b)
return c
@jit
def int_to_bit_vector(integer, bits=0):
"""
Transform integer into a bit vector, optionally of a specific length.
This version handles vectors of up to 64bits in size.
:raises ValueError: If ``bits`` specified is smaller than the required bits
to represent the given ``integer`` value.
:param integer: integer to convert
:type integer: int
:param bits: Optional fixed number of bits that should be represented by the
vector.
:type bits: Optional specification of the size of returned vector.
:return: Bit vector as numpy array (big endian).
:rtype: numpy.ndarray[bool]
"""
# Can't use math version because floating-point precision runs out after
# about 2^48
# -2 to remove length of '0b' string prefix
size = len(bin(integer)) - 2
if bits and (bits - size) < 0:
raise ValueError("%d bits too small to represent integer value %d."
% (bits, integer))
# Converting integer to array
v = numpy.zeros(bits or size, numpy.bool_)
for i in range(0, size):
v[-(i+1)] = integer & 1
integer >>= 1
return v
def int_to_bit_vector_large(integer, bits=0):
"""
Transform integer into a bit vector, optionally of a specific length.
This function is the special form that can handle very large integers
(>64bit).
:raises ValueError: If ``bits`` specified is smaller than the required bits
to represent the given ``integer`` value.
:param integer: integer to convert
:type integer: int
:param bits: Optional fixed number of bits that should be represented by the
vector.
:type bits: Optional specification of the size of returned vector.
:return: Bit vector as numpy array (big endian).
:rtype: numpy.ndarray[bool]
"""
# Can't use math version because floating-point precision runs out after
# about 2^48
# -2 to remove length of '0b' string prefix
size = len(bin(integer)) - 2
if bits and (bits - size) < 0:
raise ValueError("%d bits too small to represent integer value %d."
% (bits, integer))
# Converting integer to array
v = numpy.zeros(bits or size, numpy.bool_)
for i in range(0, size):
v[-(i+1)] = integer & 1
integer >>= 1
return v
def popcount(v):
"""
Count the number of bits set (number of 1-bits, not 0-bits).
Pure python popcount algorithm adapted implementation at
https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel.
Maximum known stable value that can be passed through this method:
2**256 - 2. See the ``popcount.v_max`` function property.
:param v: Integer to count the set bits of. Must be a 32-bit integer or
less.
:type v: int
:return: Number of set bits in the given integer ``v``.
:rtype: int
"""
# TODO: C implementation of this
# since this version, being in python, isn't faster than counting 1's
# in result of ``bin`` function.
# Cannot take the log of 0.
if not v:
return 0
# T is the number of bits used to represent v to the nearest power of 2
ceil, log = math.ceil, math.log
tp = max(8, int(2**ceil(log(v.bit_length()) / log(2))))
t = 2**tp-1
b = tp // 8
# bit-length constrained
h55 = t//3
h33 = t//15*3
h0f = t//255*15
h01 = t//255
# noinspection PyAugmentAssignment
v = v - ((v >> 1) & h55)
v = (v & h33) + ((v >> 2) & h33)
v = (v + (v >> 4)) & h0f
# Need the extra ``& t`` after the multiplication in order to simulate bit
# truncation as if v were only a tp-bit integer
# Magic 8 represents bits in a byte
return ((v * h01) & t) >> ((b-1) * 8)
# Maximum known stable value that can be passed as ``v``.
popcount.v_max = (2**256) - 2
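if __name__ == "__main__":
    # Illustrative round-trip (added for clarity); relies on the numpy import this
    # module already uses for int_to_bit_vector / int_to_bit_vector_large above.
    demo = numpy.array([1, 0, 1, 1], numpy.bool_)
    assert bit_vector_to_int(demo) == 0b1011 == 11
    assert list(int_to_bit_vector(11, bits=4)) == [True, False, True, True]  # big endian
    assert popcount(11) == 3  # three bits set in 0b1011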
| 26.034091
| 80
| 0.615597
|
e839c418a4dd44b4aa243a4ac8f5efaaf7565d5e
| 6,095
|
py
|
Python
|
dace/transformation/dataflow/mpi.py
|
meshtag/dace
|
e6751ee6a4f6356b47b93065d43cefb3fd54ebaa
|
[
"BSD-3-Clause"
] | 1
|
2022-03-11T13:36:34.000Z
|
2022-03-11T13:36:34.000Z
|
dace/transformation/dataflow/mpi.py
|
meshtag/dace
|
e6751ee6a4f6356b47b93065d43cefb3fd54ebaa
|
[
"BSD-3-Clause"
] | null | null | null |
dace/transformation/dataflow/mpi.py
|
meshtag/dace
|
e6751ee6a4f6356b47b93065d43cefb3fd54ebaa
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Contains the MPITransformMap transformation. """
from dace import dtypes
from dace.sdfg import has_dynamic_map_inputs
from dace.sdfg import utils as sdutil
from dace.sdfg import nodes
from dace.sdfg.sdfg import SDFG
from dace.sdfg.state import SDFGState
from dace.transformation import transformation
from dace.properties import make_properties
@make_properties
class MPITransformMap(transformation.SingleStateTransformation):
""" Implements the MPI parallelization pattern.
Takes a map and makes it an MPI-scheduled map, introduces transients
that keep locally accessed data.
Original SDFG
=============
```
Input1 -                                           Output1
        \                                         /
Input2 --- MapEntry -- Arbitrary R -- MapExit -- Output2
        /                                         \
InputN -                                           OutputN
```
Nothing in R may access other inputs/outputs that are not defined in R
itself and do not go through MapEntry/MapExit
Map must be a one-dimensional map for now.
The range of the map must be a Range object.
Output:
=======
* Add transients for the accessed parts
* The schedule property of Map is set to MPI
* The range of Map is changed to
var = startexpr + p * chunksize ... startexpr + (p + 1) * chunksize
where p is the current rank and P is the total number of ranks,
and chunksize is defined as (endexpr - startexpr) / P, adding the
remaining K iterations to the first K procs.
* For each input InputI, create a new transient transInputI, which
has an attribute that specifies that it needs to be filled with
(possibly) remote data
* Collect all accesses to InputI within R, assume their convex hull is
InputI[rs ... re]
* The transInputI transient will contain InputI[rs ... re]
* Change all accesses to InputI within R to accesses to transInputI
"""
map_entry = transformation.PatternNode(nodes.MapEntry)
@staticmethod
def annotates_memlets():
return True
@classmethod
def expressions(cls):
return [sdutil.node_path_graph(cls.map_entry)]
def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
map_entry = self.map_entry
# Check if the map is one-dimensional
if map_entry.map.range.dims() != 1:
return False
# We cannot transform a map which is already of schedule type MPI
if map_entry.map.schedule == dtypes.ScheduleType.MPI:
return False
# We cannot transform a map which is already inside a MPI map, or in
# another device
schedule_whitelist = [dtypes.ScheduleType.Default, dtypes.ScheduleType.Sequential]
sdict = graph.scope_dict()
parent = sdict[map_entry]
while parent is not None:
if parent.map.schedule not in schedule_whitelist:
return False
parent = sdict[parent]
# Dynamic map ranges not supported (will allocate dynamic memory)
if has_dynamic_map_inputs(graph, map_entry):
return False
# MPI schedules currently do not support WCR
map_exit = graph.exit_node(map_entry)
if any(e.data.wcr for e in graph.out_edges(map_exit)):
return False
return True
def apply(self, graph: SDFGState, sdfg: SDFG):
map_entry = self.map_entry
# Avoiding import loops
from dace.transformation.dataflow.strip_mining import StripMining
from dace.transformation.dataflow.local_storage import InLocalStorage, OutLocalStorage, LocalStorage
rangeexpr = str(map_entry.map.range.num_elements())
stripmine_subgraph = {StripMining.map_entry: self.subgraph[MPITransformMap.map_entry]}
sdfg_id = sdfg.sdfg_id
stripmine = StripMining(sdfg, sdfg_id, self.state_id, stripmine_subgraph, self.expr_index)
stripmine.dim_idx = -1
stripmine.new_dim_prefix = "mpi"
stripmine.tile_size = "(" + rangeexpr + "/__dace_comm_size)"
stripmine.divides_evenly = True
stripmine.apply(graph, sdfg)
# Find all in-edges that lead to the map entry
outer_map = None
edges = [e for e in graph.in_edges(map_entry) if isinstance(e.src, nodes.EntryNode)]
outer_map = edges[0].src
# Add MPI schedule attribute to outer map
outer_map.map._schedule = dtypes.ScheduleType.MPI
# Now create a transient for each array
for e in edges:
if e.data.is_empty():
continue
in_local_storage_subgraph = {
LocalStorage.node_a: graph.node_id(outer_map),
LocalStorage.node_b: self.subgraph[MPITransformMap.map_entry]
}
sdfg_id = sdfg.sdfg_id
in_local_storage = InLocalStorage(sdfg, sdfg_id, self.state_id, in_local_storage_subgraph, self.expr_index)
in_local_storage.array = e.data.data
in_local_storage.apply(graph, sdfg)
# Transform OutLocalStorage for each output of the MPI map
in_map_exit = graph.exit_node(map_entry)
out_map_exit = graph.exit_node(outer_map)
for e in graph.out_edges(out_map_exit):
if e.data.is_empty():
continue
name = e.data.data
outlocalstorage_subgraph = {
LocalStorage.node_a: graph.node_id(in_map_exit),
LocalStorage.node_b: graph.node_id(out_map_exit)
}
sdfg_id = sdfg.sdfg_id
outlocalstorage = OutLocalStorage(sdfg, sdfg_id, self.state_id, outlocalstorage_subgraph, self.expr_index)
outlocalstorage.array = name
outlocalstorage.apply(graph, sdfg)
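# Illustrative application sketch (added; ``sdfg`` is a hypothetical dace.SDFG that
# already contains a one-dimensional map -- not part of this module):
#
#     from dace.transformation.dataflow import MPITransformMap
#     sdfg.apply_transformations(MPITransformMap)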
| 39.836601
| 119
| 0.627728
|
2134d66b35eff7e0a2eba6afb3681ebca6acb992
| 1,472
|
py
|
Python
|
samples/model-builder/import_data_text_sentiment_analysis_sample_test.py
|
nachocano/python-aiplatform
|
1c6b998d9145309d79712f494a2b00b50a9a9bf4
|
[
"Apache-2.0"
] | null | null | null |
samples/model-builder/import_data_text_sentiment_analysis_sample_test.py
|
nachocano/python-aiplatform
|
1c6b998d9145309d79712f494a2b00b50a9a9bf4
|
[
"Apache-2.0"
] | null | null | null |
samples/model-builder/import_data_text_sentiment_analysis_sample_test.py
|
nachocano/python-aiplatform
|
1c6b998d9145309d79712f494a2b00b50a9a9bf4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud.aiplatform import schema
import import_data_text_sentiment_analysis_sample
import test_constants as constants
def test_import_data_text_sentiment_analysis_sample(
mock_sdk_init, mock_get_text_dataset, mock_import_text_dataset
):
import_data_text_sentiment_analysis_sample.import_data_text_sentiment_analysis_sample(
project=constants.PROJECT,
location=constants.LOCATION,
dataset=constants.DATASET_NAME,
src_uris=constants.GCS_SOURCES,
)
mock_sdk_init.assert_called_once_with(
project=constants.PROJECT, location=constants.LOCATION
)
mock_get_text_dataset.assert_called_once_with(
constants.DATASET_NAME,
)
mock_import_text_dataset.assert_called_once_with(
gcs_source=constants.GCS_SOURCES,
import_schema_uri=schema.dataset.ioformat.text.sentiment,
sync=True,
)
| 32
| 90
| 0.771739
|
06bcd87416b1eaf161a42992ee2c2a000a92347a
| 7,143
|
py
|
Python
|
netbox/ipam/migrations/0002_squashed_0046.py
|
TheFlyingCorpse/netbox
|
a226f06b1beb575011d783b202d76cb74d3b1f79
|
[
"Apache-2.0"
] | 4,994
|
2019-07-01T13:15:44.000Z
|
2022-03-31T19:55:45.000Z
|
netbox/ipam/migrations/0002_squashed_0046.py
|
TheFlyingCorpse/netbox
|
a226f06b1beb575011d783b202d76cb74d3b1f79
|
[
"Apache-2.0"
] | 4,045
|
2019-07-01T14:24:09.000Z
|
2022-03-31T16:07:39.000Z
|
netbox/ipam/migrations/0002_squashed_0046.py
|
TheFlyingCorpse/netbox
|
a226f06b1beb575011d783b202d76cb74d3b1f79
|
[
"Apache-2.0"
] | 1,225
|
2019-07-01T15:34:03.000Z
|
2022-03-31T16:47:09.000Z
|
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('dcim', '0003_auto_20160628_1721'),
('virtualization', '0001_virtualization'),
('contenttypes', '0002_remove_content_type_name'),
('ipam', '0001_initial'),
('extras', '0002_custom_fields'),
('tenancy', '0001_initial'),
]
replaces = [
('ipam', '0002_vrf_add_enforce_unique'),
('ipam', '0003_ipam_add_vlangroups'),
('ipam', '0004_ipam_vlangroup_uniqueness'),
('ipam', '0005_auto_20160725_1842'),
('ipam', '0006_vrf_vlan_add_tenant'),
('ipam', '0007_prefix_ipaddress_add_tenant'),
('ipam', '0008_prefix_change_order'),
('ipam', '0009_ipaddress_add_status'),
('ipam', '0010_ipaddress_help_texts'),
('ipam', '0011_rir_add_is_private'),
('ipam', '0012_services'),
('ipam', '0013_prefix_add_is_pool'),
('ipam', '0014_ipaddress_status_add_deprecated'),
('ipam', '0015_global_vlans'),
('ipam', '0016_unicode_literals'),
('ipam', '0017_ipaddress_roles'),
('ipam', '0018_remove_service_uniqueness_constraint'),
('ipam', '0019_virtualization'),
('ipam', '0020_ipaddress_add_role_carp'),
('ipam', '0021_vrf_ordering'),
('ipam', '0022_tags'),
('ipam', '0023_change_logging'),
('ipam', '0024_vrf_allow_null_rd'),
('ipam', '0025_custom_tag_models'),
('ipam', '0026_prefix_ordering_vrf_nulls_first'),
('ipam', '0027_ipaddress_add_dns_name'),
('ipam', '0028_3569_prefix_fields'),
('ipam', '0029_3569_ipaddress_fields'),
('ipam', '0030_3569_vlan_fields'),
('ipam', '0031_3569_service_fields'),
('ipam', '0032_role_description'),
('ipam', '0033_deterministic_ordering'),
('ipam', '0034_fix_ipaddress_status_dhcp'),
('ipam', '0035_drop_ip_family'),
('ipam', '0036_standardize_description'),
('ipam', '0037_ipaddress_assignment'),
('ipam', '0038_custom_field_data'),
('ipam', '0039_service_ports_array'),
('ipam', '0040_service_drop_port'),
('ipam', '0041_routetarget'),
('ipam', '0042_standardize_name_length'),
('ipam', '0043_add_tenancy_to_aggregates'),
('ipam', '0044_standardize_models'),
('ipam', '0045_vlangroup_scope'),
('ipam', '0046_set_vlangroup_scope_types'),
]
operations = [
migrations.AddField(
model_name='service',
name='virtual_machine',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='services', to='virtualization.virtualmachine'),
),
migrations.AddField(
model_name='routetarget',
name='tags',
field=taggit.managers.TaggableManager(through='extras.TaggedItem', to='extras.Tag'),
),
migrations.AddField(
model_name='routetarget',
name='tenant',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='route_targets', to='tenancy.tenant'),
),
migrations.AddField(
model_name='prefix',
name='role',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='prefixes', to='ipam.role'),
),
migrations.AddField(
model_name='prefix',
name='site',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='prefixes', to='dcim.site'),
),
migrations.AddField(
model_name='prefix',
name='tags',
field=taggit.managers.TaggableManager(through='extras.TaggedItem', to='extras.Tag'),
),
migrations.AddField(
model_name='prefix',
name='tenant',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='prefixes', to='tenancy.tenant'),
),
migrations.AddField(
model_name='prefix',
name='vlan',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='prefixes', to='ipam.vlan'),
),
migrations.AddField(
model_name='prefix',
name='vrf',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='prefixes', to='ipam.vrf'),
),
migrations.AddField(
model_name='ipaddress',
name='assigned_object_type',
field=models.ForeignKey(blank=True, limit_choices_to=models.Q(models.Q(models.Q(('app_label', 'dcim'), ('model', 'interface')), models.Q(('app_label', 'virtualization'), ('model', 'vminterface')), _connector='OR')), null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='contenttypes.contenttype'),
),
migrations.AddField(
model_name='ipaddress',
name='nat_inside',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='nat_outside', to='ipam.ipaddress'),
),
migrations.AddField(
model_name='ipaddress',
name='tags',
field=taggit.managers.TaggableManager(through='extras.TaggedItem', to='extras.Tag'),
),
migrations.AddField(
model_name='ipaddress',
name='tenant',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='ip_addresses', to='tenancy.tenant'),
),
migrations.AddField(
model_name='ipaddress',
name='vrf',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='ip_addresses', to='ipam.vrf'),
),
migrations.AddField(
model_name='aggregate',
name='rir',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='aggregates', to='ipam.rir'),
),
migrations.AddField(
model_name='aggregate',
name='tags',
field=taggit.managers.TaggableManager(through='extras.TaggedItem', to='extras.Tag'),
),
migrations.AddField(
model_name='aggregate',
name='tenant',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='aggregates', to='tenancy.tenant'),
),
migrations.AlterUniqueTogether(
name='vlangroup',
unique_together={('scope_type', 'scope_id', 'name'), ('scope_type', 'scope_id', 'slug')},
),
migrations.AlterUniqueTogether(
name='vlan',
unique_together={('group', 'vid'), ('group', 'name')},
),
]
| 44.64375
| 333
| 0.609968
|
5abacb9cf100664d09c678f4c60ca7f588569f87
| 5,185
|
py
|
Python
|
scripts/clean_failed_dav_no_clobber_datasets.py
|
hysds/hysds
|
839d527114e115603ea0a2c4c1b7fe474f7b7b39
|
[
"Apache-2.0"
] | 17
|
2018-04-30T17:53:23.000Z
|
2021-11-10T18:24:24.000Z
|
scripts/clean_failed_dav_no_clobber_datasets.py
|
hysds/hysds
|
839d527114e115603ea0a2c4c1b7fe474f7b7b39
|
[
"Apache-2.0"
] | 54
|
2017-10-17T23:22:53.000Z
|
2022-02-09T22:05:07.000Z
|
scripts/clean_failed_dav_no_clobber_datasets.py
|
hysds/hysds
|
839d527114e115603ea0a2c4c1b7fe474f7b7b39
|
[
"Apache-2.0"
] | 9
|
2018-01-13T01:07:21.000Z
|
2021-02-25T21:21:43.000Z
|
#!/usr/bin/env python
"""
Search for failed jobs with osaka no-clobber errors during dataset publishing
and clean them out of WebDAV if the dataset was not indexed.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import os
import sys
import re
import requests
import json
import logging
import argparse
import types
import traceback
from hysds.celery import app
log_format = (
"[%(asctime)s: %(levelname)s/clean_failed_dav_no_clobber_datasets] %(message)s"
)
logging.basicConfig(format=log_format, level=logging.INFO)
DAV_RE = re.compile(
r"Destination,\s+(davs?)://(.+?/.+/(.+))/.+?, already exists and no-clobber is set"
)
def check_dataset(es_url, id, es_index="grq"):
"""Query for dataset with specified input ID."""
query = {
"query": {
"bool": {
"must": [
{"term": {"_id": id}},
]
}
},
"fields": [],
}
if es_url.endswith("/"):
search_url = "%s%s/_search" % (es_url, es_index)
else:
search_url = "%s/%s/_search" % (es_url, es_index)
r = requests.post(search_url, data=json.dumps(query))
if r.status_code == 200:
result = r.json()
# logging.info("result: %s" % result)
total = result["hits"]["total"]
id = "NONE" if total == 0 else result["hits"]["hits"][0]["_id"]
else:
logging.error("Failed to query %s:\n%s" % (es_url, r.text))
logging.error("query: %s" % json.dumps(query, indent=2))
logging.error("returned: %s" % r.text)
if r.status_code == 404:
total, id = 0, "NONE"
else:
r.raise_for_status()
return total, id
def dataset_exists(es_url, id, es_index="grq"):
"""Return true if dataset id exists."""
total, id = check_dataset(es_url, id, es_index)
if total > 0:
return True
return False
def clean(jobs_es_url, grq_es_url, force=False):
"""Look for failed jobs with osaka no-clobber errors during dataset publishing
and clean them out if the dataset was not indexed."""
# jobs query
jobs_query = {
"query": {
"bool": {
"must": [
{"term": {"status": "job-failed"}},
{
"term": {
"short_error.untouched": "Destination, davs://.....ber is set"
}
},
{
"query_string": {
"query": 'error:"already exists and no-clobber is set"',
"default_operator": "OR",
}
},
]
}
},
"partial_fields": {"_source": {"include": ["error"]}},
}
url_tmpl = "{}/job_status-current/_search?search_type=scan&scroll=10m&size=100"
r = requests.post(url_tmpl.format(jobs_es_url), data=json.dumps(jobs_query))
if r.status_code != 200:
logging.error(
"Failed to query ES. Got status code %d:\n%s"
% (r.status_code, json.dumps(jobs_query, indent=2))
)
r.raise_for_status()
scan_result = r.json()
count = scan_result["hits"]["total"]
scroll_id = scan_result["_scroll_id"]
# get list of results and sort by bucket
results = {}
while True:
r = requests.post("%s/_search/scroll?scroll=10m" % jobs_es_url, data=scroll_id)
res = r.json()
scroll_id = res["_scroll_id"]
if len(res["hits"]["hits"]) == 0:
break
for hit in res["hits"]["hits"]:
error = hit["fields"]["_source"][0]["error"]
# extract dav url and dataset id
match = DAV_RE.search(error)
if not match:
raise RuntimeError("Failed to find DAV url in error: {}".format(error))
proto, prefix, dataset_id = match.groups()
# query if dataset exists in GRQ; then no-clobber happened because of dataset deduplication
if dataset_exists(grq_es_url, dataset_id):
logging.warning(
"Found %s in %s. Not cleaning out from dav."
% (dataset_id, grq_es_url)
)
continue
# remove
ds_url = "%s://%s" % ("https" if proto == "davs" else "http", prefix)
try:
r = requests.delete(ds_url, verify=False)
r.raise_for_status()
except Exception as e:
logging.warning(
"Failed to delete %s: %s" % (ds_url, traceback.format_exc())
)
pass
if __name__ == "__main__":
jobs_es_url = app.conf["JOBS_ES_URL"]
grq_es_url = app.conf["GRQ_ES_URL"]
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-f", "--force", help="force deletion", action="store_true")
args = parser.parse_args()
clean(jobs_es_url, grq_es_url, args.force)
| 31.424242
| 103
| 0.551205
|
67fb58ad9d3b93a7a021063389d9c92117cf0c65
| 1,113
|
py
|
Python
|
tests/test_jinja_templates.py
|
alex-oleshkevich/mailers
|
36836ae750cd190f75c96c5a78896d4ef830260e
|
[
"MIT"
] | 17
|
2020-03-13T13:06:05.000Z
|
2022-03-06T00:17:45.000Z
|
tests/test_jinja_templates.py
|
alex-oleshkevich/mailers
|
36836ae750cd190f75c96c5a78896d4ef830260e
|
[
"MIT"
] | null | null | null |
tests/test_jinja_templates.py
|
alex-oleshkevich/mailers
|
36836ae750cd190f75c96c5a78896d4ef830260e
|
[
"MIT"
] | null | null | null |
import jinja2
import pathlib
from mailers import Email, InMemoryTransport, Mailer, TemplatedEmail
from mailers.plugins.jinja_renderer import JinjaRendererPlugin
THIS_DIR = pathlib.Path(__file__).parent
def test_renders_jinja_templates():
env = jinja2.Environment(loader=jinja2.FileSystemLoader([THIS_DIR / 'templates']))
mailbox = []
mailer = Mailer(InMemoryTransport(mailbox), plugins=[JinjaRendererPlugin(env)], from_address='root@localhost')
mailer.send_sync(TemplatedEmail(html_template='mail.html', text_template='mail.txt', context={'hello': 'world'}))
assert mailbox[0].get_payload()[0].get_content() == 'Text message: world.\n'
assert mailbox[0].get_payload()[1].get_content() == '<b>HTML message: world</b>\n'
def test_ignores_regular_mails():
env = jinja2.Environment(loader=jinja2.FileSystemLoader([THIS_DIR / 'templates']))
mailbox = []
mailer = Mailer(InMemoryTransport(mailbox), plugins=[JinjaRendererPlugin(env)], from_address='root@localhost')
mailer.send_sync(Email(text='Text message.'))
assert mailbox[0].get_content() == 'Text message.\n'
| 38.37931
| 117
| 0.743037
|
1c9aaf3243a9983ce4cd56f3a3076911351814c3
| 34,386
|
py
|
Python
|
discord/utils.py
|
ju/pycord
|
d754bb58f234a3c00b670aacb63d51698bf8f7e6
|
[
"MIT"
] | null | null | null |
discord/utils.py
|
ju/pycord
|
d754bb58f234a3c00b670aacb63d51698bf8f7e6
|
[
"MIT"
] | null | null | null |
discord/utils.py
|
ju/pycord
|
d754bb58f234a3c00b670aacb63d51698bf8f7e6
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import array
import asyncio
import collections.abc
import datetime
import functools
import itertools
import json
import re
import sys
import types
import unicodedata
import warnings
from base64 import b64encode
from bisect import bisect_left
from inspect import isawaitable as _isawaitable
from inspect import signature as _signature
from operator import attrgetter
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Awaitable,
Callable,
Coroutine,
Dict,
ForwardRef,
Generic,
Iterable,
Iterator,
List,
Literal,
Mapping,
NewType,
Optional,
Protocol,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from .errors import HTTPException, InvalidArgument
import orjson
__all__ = (
"oauth_url",
"snowflake_time",
"time_snowflake",
"find",
"get",
"sleep_until",
"utcnow",
"remove_markdown",
"escape_markdown",
"escape_mentions",
"as_chunks",
"format_dt",
"basic_autocomplete",
"generate_snowflake",
)
DISCORD_EPOCH = 1420070400000
class _MissingSentinel:
def __eq__(self, other):
return False
def __bool__(self):
return False
def __repr__(self):
return "..."
MISSING: Any = _MissingSentinel()
class _cached_property:
def __init__(self, function):
self.function = function
self.__doc__ = getattr(function, "__doc__")
def __get__(self, instance, owner):
if instance is None:
return self
value = self.function(instance)
setattr(instance, self.function.__name__, value)
return value
if TYPE_CHECKING:
from typing_extensions import ParamSpec
from .abc import Snowflake
from .commands.context import AutocompleteContext
from .invite import Invite
from .permissions import Permissions
from .template import Template
class _RequestLike(Protocol):
headers: Mapping[str, Any]
cached_property = NewType("cached_property", property)
P = ParamSpec("P")
else:
cached_property = _cached_property
AutocompleteContext = Any
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
_Iter = Union[Iterator[T], AsyncIterator[T]]
class CachedSlotProperty(Generic[T, T_co]):
def __init__(self, name: str, function: Callable[[T], T_co]) -> None:
self.name = name
self.function = function
self.__doc__ = getattr(function, "__doc__")
@overload
def __get__(self, instance: None, owner: Type[T]) -> CachedSlotProperty[T, T_co]:
...
@overload
def __get__(self, instance: T, owner: Type[T]) -> T_co:
...
def __get__(self, instance: Optional[T], owner: Type[T]) -> Any:
if instance is None:
return self
try:
return getattr(instance, self.name)
except AttributeError:
value = self.function(instance)
setattr(instance, self.name, value)
return value
class classproperty(Generic[T_co]):
def __init__(self, fget: Callable[[Any], T_co]) -> None:
self.fget = fget
def __get__(self, instance: Optional[Any], owner: Type[Any]) -> T_co:
return self.fget(owner)
def __set__(self, instance, value) -> None:
raise AttributeError("cannot set attribute")
def cached_slot_property(
name: str,
) -> Callable[[Callable[[T], T_co]], CachedSlotProperty[T, T_co]]:
def decorator(func: Callable[[T], T_co]) -> CachedSlotProperty[T, T_co]:
return CachedSlotProperty(name, func)
return decorator
class SequenceProxy(Generic[T_co], collections.abc.Sequence):
"""Read-only proxy of a Sequence."""
def __init__(self, proxied: Sequence[T_co]):
self.__proxied = proxied
def __getitem__(self, idx: int) -> T_co:
return self.__proxied[idx]
def __len__(self) -> int:
return len(self.__proxied)
def __contains__(self, item: Any) -> bool:
return item in self.__proxied
def __iter__(self) -> Iterator[T_co]:
return iter(self.__proxied)
def __reversed__(self) -> Iterator[T_co]:
return reversed(self.__proxied)
def index(self, value: Any, *args, **kwargs) -> int:
return self.__proxied.index(value, *args, **kwargs)
def count(self, value: Any) -> int:
return self.__proxied.count(value)
def delay_task(delay: float, func: Coroutine):
async def inner_call():
await asyncio.sleep(delay)
try:
await func
except HTTPException:
pass
asyncio.create_task(inner_call())
@overload
def parse_time(timestamp: None) -> None:
...
@overload
def parse_time(timestamp: str) -> datetime.datetime:
...
@overload
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
...
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
if timestamp:
return datetime.datetime.fromisoformat(timestamp)
return None
def copy_doc(original: Callable) -> Callable[[T], T]:
def decorator(overridden: T) -> T:
overridden.__doc__ = original.__doc__
overridden.__signature__ = _signature(original) # type: ignore
return overridden
return decorator
def deprecated(
instead: Optional[str] = None,
) -> Callable[[Callable[P, T]], Callable[P, T]]:
def actual_decorator(func: Callable[P, T]) -> Callable[P, T]:
@functools.wraps(func)
def decorated(*args: P.args, **kwargs: P.kwargs) -> T:
warnings.simplefilter("always", DeprecationWarning) # turn off filter
if instead:
fmt = "{0.__name__} is deprecated, use {1} instead."
else:
fmt = "{0.__name__} is deprecated."
warnings.warn(fmt.format(func, instead), stacklevel=3, category=DeprecationWarning)
warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs)
return decorated
return actual_decorator
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f"https://discord.com/oauth2/authorize?client_id={client_id}"
url += f"&scope={'+'.join(scopes or ('bot',))}"
if permissions is not MISSING:
url += f"&permissions={permissions.value}"
if guild is not MISSING:
url += f"&guild_id={guild.id}"
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += f"&response_type=code&{urlencode({'redirect_uri': redirect_uri})}"
if disable_guild_select:
url += "&disable_guild_select=true"
return url
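# Illustrative call (added; the client id below is hypothetical):
#
#     oauth_url(1234567890, scopes=("bot", "applications.commands"))
#     # -> "https://discord.com/oauth2/authorize?client_id=1234567890&scope=bot+applications.commands"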
def snowflake_time(id: int) -> datetime.datetime:
"""
Parameters
-----------
id: :class:`int`
The snowflake ID.
Returns
--------
:class:`datetime.datetime`
An aware datetime in UTC representing the creation time of the snowflake.
"""
timestamp = ((id >> 22) + DISCORD_EPOCH) / 1000
return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
def time_snowflake(dt: datetime.datetime, high: bool = False) -> int:
"""Returns a numeric snowflake pretending to be created at the given date.
When using as the lower end of a range, use ``time_snowflake(high=False) - 1``
to be inclusive, ``high=True`` to be exclusive.
When using as the higher end of a range, use ``time_snowflake(high=True) + 1``
to be inclusive, ``high=False`` to be exclusive
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
high: :class:`bool`
Whether or not to set the lower 22 bit to high or low.
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
discord_millis = int(dt.timestamp() * 1000 - DISCORD_EPOCH)
return (discord_millis << 22) + (2**22 - 1 if high else 0)
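# Round-trip sketch (added for illustration):
#
#     dt = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
#     snowflake_time(time_snowflake(dt)) == dt                         # the creation instant survives
#     time_snowflake(dt, high=True) == time_snowflake(dt) + 2**22 - 1  # bounds of the same millisecond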
def find(predicate: Callable[[T], Any], seq: Iterable[T]) -> Optional[T]:
"""A helper to return the first element found in the sequence
that meets the predicate. For example: ::
member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)
would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.
If an entry is not found, then ``None`` is returned.
This is different from :func:`py:filter` due to the fact it stops the moment it finds
a valid entry.
Parameters
-----------
predicate
A function that returns a boolean-like result.
seq: :class:`collections.abc.Iterable`
The iterable to search through.
"""
for element in seq:
if predicate(element):
return element
return None
def get(iterable: Iterable[T], **attrs: Any) -> Optional[T]:
r"""A helper that returns the first element in the iterable that meets
all the traits passed in ``attrs``. This is an alternative for
:func:`~discord.utils.find`.
When multiple attributes are specified, they are checked using
logical AND, not logical OR. Meaning they have to meet every
attribute passed in and not one of them.
To have a nested attribute search (i.e. search by ``x.y``) then
pass in ``x__y`` as the keyword argument.
If nothing is found that matches the attributes passed, then
``None`` is returned.
Examples
---------
Basic usage:
.. code-block:: python3
member = discord.utils.get(message.guild.members, name='Foo')
Multiple attribute matching:
.. code-block:: python3
channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)
Nested attribute matching:
.. code-block:: python3
channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')
Parameters
-----------
iterable
An iterable to search through.
\*\*attrs
Keyword arguments that denote attributes to search with.
"""
# global -> local
_all = all
attrget = attrgetter
# Special case the single element call
if len(attrs) == 1:
k, v = attrs.popitem()
pred = attrget(k.replace("__", "."))
for elem in iterable:
if pred(elem) == v:
return elem
return None
converted = [(attrget(attr.replace("__", ".")), value) for attr, value in attrs.items()]
for elem in iterable:
if _all(pred(elem) == value for pred, value in converted):
return elem
return None
async def get_or_fetch(obj, attr: str, id: int, *, default: Any = MISSING):
# Attempts the synchronous cache lookup ``obj.get_{attr}(id)`` first; on a cache
# miss it falls back to ``obj.fetch_{attr}(id)`` (or ``obj._fetch_{attr}(id)``),
# returning ``default`` instead of raising when the fetch fails and a default was given.
getter = getattr(obj, f"get_{attr}")(id)
if getter is None:
try:
getter = await getattr(obj, f"fetch_{attr}")(id)
except AttributeError:
getter = await getattr(obj, f"_fetch_{attr}")(id)
if getter is None:
raise ValueError(f"Could not find {attr} with id {id} on {obj}")
except (HTTPException, ValueError):
if default is not MISSING:
return default
else:
raise
return getter
def _unique(iterable: Iterable[T]) -> List[T]:
return [x for x in dict.fromkeys(iterable)]
def _get_as_snowflake(data: Any, key: str) -> Optional[int]:
try:
value = data[key]
except KeyError:
return None
else:
return value and int(value)
def _get_mime_type_for_image(data: bytes):
if data.startswith(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"):
return "image/png"
elif data[0:3] == b"\xff\xd8\xff" or data[6:10] in (b"JFIF", b"Exif"):
return "image/jpeg"
elif data.startswith((b"\x47\x49\x46\x38\x37\x61", b"\x47\x49\x46\x38\x39\x61")):
return "image/gif"
elif data.startswith(b"RIFF") and data[8:12] == b"WEBP":
return "image/webp"
else:
raise InvalidArgument("Unsupported image type given")
def _bytes_to_base64_data(data: bytes) -> str:
fmt = "data:{mime};base64,{data}"
mime = _get_mime_type_for_image(data)
b64 = b64encode(data).decode("ascii")
return fmt.format(mime=mime, data=b64)
def _to_json(obj: Any) -> str: # type: ignore
return orjson.dumps(obj).decode("utf-8")
_from_json = orjson.loads # type: ignore
def _parse_ratelimit_header(request: Any, *, use_clock: bool = False) -> float:
reset_after: Optional[str] = request.headers.get("X-Ratelimit-Reset-After")
if not use_clock and reset_after:
return float(reset_after)
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers["X-Ratelimit-Reset"]), utc)
return (reset - now).total_seconds()
async def maybe_coroutine(f, *args, **kwargs):
value = f(*args, **kwargs)
if _isawaitable(value):
return await value
else:
return value
async def async_all(gen, *, check=_isawaitable):
for elem in gen:
if check(elem):
elem = await elem
if not elem:
return False
return True
async def sane_wait_for(futures, *, timeout):
ensured = [asyncio.ensure_future(fut) for fut in futures]
done, pending = await asyncio.wait(ensured, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
if len(pending) != 0:
raise asyncio.TimeoutError()
return done
def get_slots(cls: Type[Any]) -> Iterator[str]:
for mro in reversed(cls.__mro__):
try:
yield from mro.__slots__
except AttributeError:
continue
def compute_timedelta(dt: datetime.datetime):
if dt.tzinfo is None:
dt = dt.astimezone()
now = datetime.datetime.now(datetime.timezone.utc)
return max((dt - now).total_seconds(), 0)
async def sleep_until(when: datetime.datetime, result: Optional[T] = None) -> Optional[T]:
"""|coro|
Sleep until a specified time.
If the time supplied is in the past this function will yield instantly.
.. versionadded:: 1.3
Parameters
-----------
when: :class:`datetime.datetime`
The timestamp in which to sleep until. If the datetime is naive then
it is assumed to be local time.
result: Any
If provided is returned to the caller when the coroutine completes.
"""
delta = compute_timedelta(when)
return await asyncio.sleep(delta, result)
def utcnow() -> datetime.datetime:
"""A helper function to return an aware UTC datetime representing the current time.
This should be preferred to :meth:`datetime.datetime.utcnow` since it is an aware
datetime, compared to the naive datetime in the standard library.
.. versionadded:: 2.0
Returns
--------
:class:`datetime.datetime`
The current aware datetime in UTC.
"""
return datetime.datetime.now(datetime.timezone.utc)
def valid_icon_size(size: int) -> bool:
"""Icons must be power of 2 within [16, 4096]."""
return not size & (size - 1) and 4096 >= size >= 16
class SnowflakeList(array.array):
"""Internal data storage class to efficiently store a list of snowflakes.
This should have the following characteristics:
- Low memory usage
- O(n) iteration (obviously)
- O(n log n) initial creation if data is unsorted
- O(log n) search and indexing
- O(n) insertion
"""
__slots__ = ()
if TYPE_CHECKING:
def __init__(self, data: Iterable[int], *, is_sorted: bool = False):
...
def __new__(cls, data: Iterable[int], *, is_sorted: bool = False):
return array.array.__new__(cls, "Q", data if is_sorted else sorted(data)) # type: ignore
def add(self, element: int) -> None:
i = bisect_left(self, element)
self.insert(i, element)
def get(self, element: int) -> Optional[int]:
i = bisect_left(self, element)
return self[i] if i != len(self) and self[i] == element else None
def has(self, element: int) -> bool:
i = bisect_left(self, element)
return i != len(self) and self[i] == element
_IS_ASCII = re.compile(r"^[\x00-\x7f]+$")
def _string_width(string: str, *, _IS_ASCII=_IS_ASCII) -> int:
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = "WFA"
func = unicodedata.east_asian_width
return sum(2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1 for char in string)
def resolve_invite(invite: Union[Invite, str]) -> str:
"""
Resolves an invite from a :class:`~discord.Invite`, URL or code.
Parameters
-----------
invite: Union[:class:`~discord.Invite`, :class:`str`]
The invite.
Returns
--------
:class:`str`
The invite code.
"""
from .invite import Invite # circular import
if isinstance(invite, Invite):
return invite.code
rx = r"(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)"
m = re.match(rx, invite)
if m:
return m.group(1)
return invite
def resolve_template(code: Union[Template, str]) -> str:
"""
Resolves a template code from a :class:`~discord.Template`, URL or code.
.. versionadded:: 1.4
Parameters
-----------
code: Union[:class:`~discord.Template`, :class:`str`]
The code.
Returns
--------
:class:`str`
The template code.
"""
from .template import Template # circular import
if isinstance(code, Template):
return code.code
rx = r"(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)"
m = re.match(rx, code)
if m:
return m.group(1)
return code
_MARKDOWN_ESCAPE_SUBREGEX = "|".join(r"\{0}(?=([\s\S]*((?<!\{0})\{0})))".format(c) for c in ("*", "`", "_", "~", "|"))
# regular expression for finding and escaping links in markdown
# note: technically, brackets are allowed in link text.
# perhaps more concerningly, parentheses are also allowed in link destination.
# this regular expression matches neither of those.
# this page provides a good reference: http://blog.michaelperrin.fr/2019/02/04/advanced-regular-expressions/
_MARKDOWN_ESCAPE_LINKS = r"""
\[ # matches link text
[^\[\]]* # link text can contain anything but brackets
\]
\( # matches link destination
[^\(\)]+ # link destination cannot contain parentheses
\)""" # note 2: make sure this regex is consumed in re.X (extended mode) since it has whitespace and comments
_MARKDOWN_ESCAPE_COMMON = rf"^>(?:>>)?\s|{_MARKDOWN_ESCAPE_LINKS}"
_MARKDOWN_ESCAPE_REGEX = re.compile(
rf"(?P<markdown>{_MARKDOWN_ESCAPE_SUBREGEX}|{_MARKDOWN_ESCAPE_COMMON})",
re.MULTILINE | re.X,
)
_URL_REGEX = r"(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])"
_MARKDOWN_STOCK_REGEX = rf"(?P<markdown>[_\\~|\*`]|{_MARKDOWN_ESCAPE_COMMON})"
def remove_markdown(text: str, *, ignore_links: bool = True) -> str:
"""A helper function that removes markdown characters.
.. versionadded:: 1.7
.. note::
This function is not markdown aware and may remove meaning from the original text. For example,
if the input contains ``10 * 5`` then it will be converted into ``10 5``.
Parameters
-----------
text: :class:`str`
The text to remove markdown from.
ignore_links: :class:`bool`
Whether to leave links alone when removing markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters removed.
"""
def replacement(match):
groupdict = match.groupdict()
return groupdict.get("url", "")
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
def escape_markdown(text: str, *, as_needed: bool = False, ignore_links: bool = True) -> str:
r"""A helper function that escapes Discord's markdown.
Parameters
-----------
text: :class:`str`
The text to escape markdown from.
as_needed: :class:`bool`
Whether to escape the markdown characters as needed. This
means that it does not escape extraneous characters if it's
not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
instead of ``\*\*hello\*\*``. Note however that this can open
you up to some clever syntax abuse. Defaults to ``False``.
ignore_links: :class:`bool`
Whether to leave links alone when escaping markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. This option is not supported with ``as_needed``.
Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters escaped with a slash.
"""
if not as_needed:
def replacement(match):
groupdict = match.groupdict()
is_url = groupdict.get("url")
if is_url:
return is_url
return f"\\{groupdict['markdown']}"
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE | re.X)
else:
text = re.sub(r"\\", r"\\\\", text)
return _MARKDOWN_ESCAPE_REGEX.sub(r"\\\1", text)
def escape_mentions(text: str) -> str:
"""A helper function that escapes everyone, here, role, and user mentions.
.. note::
This does not include channel mentions.
.. note::
For more granular control over what mentions should be escaped
within messages, refer to the :class:`~discord.AllowedMentions`
class.
Parameters
-----------
text: :class:`str`
The text to escape mentions from.
Returns
--------
:class:`str`
The text with the mentions removed.
"""
return re.sub(r"@(everyone|here|[!&]?[0-9]{17,20})", "@\u200b\\1", text)
def _chunk(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
ret = []
n = 0
for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
async def _achunk(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
ret = []
n = 0
async for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
@overload
def as_chunks(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
...
@overload
def as_chunks(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
...
def as_chunks(iterator: _Iter[T], max_size: int) -> _Iter[List[T]]:
"""A helper function that collects an iterator into chunks of a given size.
.. versionadded:: 2.0
Parameters
----------
iterator: Union[:class:`collections.abc.Iterator`, :class:`collections.abc.AsyncIterator`]
The iterator to chunk, can be sync or async.
max_size: :class:`int`
The maximum chunk size.
.. warning::
The last chunk collected may not be as large as ``max_size``.
Returns
--------
Union[:class:`Iterator`, :class:`AsyncIterator`]
A new iterator which yields chunks of a given size.
"""
if max_size <= 0:
raise ValueError("Chunk sizes must be greater than 0.")
if isinstance(iterator, AsyncIterator):
return _achunk(iterator, max_size)
return _chunk(iterator, max_size)
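# Illustrative use (added): chunk a plain iterator into lists of at most 3 items.
#
#     list(as_chunks(iter(range(7)), 3))  # -> [[0, 1, 2], [3, 4, 5], [6]]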
PY_310 = sys.version_info >= (3, 10)
def flatten_literal_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
params = []
literal_cls = type(Literal[0])
for p in parameters:
if isinstance(p, literal_cls):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
def normalise_optional_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
none_cls = type(None)
return tuple(p for p in parameters if p is not none_cls) + (none_cls,)
def evaluate_annotation(
tp: Any,
globals: Dict[str, Any],
locals: Dict[str, Any],
cache: Dict[str, Any],
*,
implicit_str: bool = True,
):
if isinstance(tp, ForwardRef):
tp = tp.__forward_arg__
# ForwardRefs always evaluate their internals
implicit_str = True
if implicit_str and isinstance(tp, str):
if tp in cache:
return cache[tp]
evaluated = eval(tp, globals, locals)
cache[tp] = evaluated
return evaluate_annotation(evaluated, globals, locals, cache)
if hasattr(tp, "__args__"):
implicit_str = True
is_literal = False
args = tp.__args__
if not hasattr(tp, "__origin__"):
if PY_310 and tp.__class__ is types.UnionType: # type: ignore
converted = Union[args] # type: ignore
return evaluate_annotation(converted, globals, locals, cache)
return tp
if tp.__origin__ is Union:
try:
if args.index(type(None)) != len(args) - 1:
args = normalise_optional_params(tp.__args__)
except ValueError:
pass
if tp.__origin__ is Literal:
if not PY_310:
args = flatten_literal_params(tp.__args__)
implicit_str = False
is_literal = True
evaluated_args = tuple(
evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args
)
if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
raise TypeError("Literal arguments must be of type str, int, bool, or NoneType.")
if evaluated_args == args:
return tp
try:
return tp.copy_with(evaluated_args)
except AttributeError:
return tp.__origin__[evaluated_args]
return tp
def resolve_annotation(
annotation: Any,
globalns: Dict[str, Any],
localns: Optional[Dict[str, Any]],
cache: Optional[Dict[str, Any]],
) -> Any:
if annotation is None:
return type(None)
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
locals = globalns if localns is None else localns
if cache is None:
cache = {}
return evaluate_annotation(annotation, globalns, locals, cache)
TimestampStyle = Literal["f", "F", "d", "D", "t", "T", "R"]
def format_dt(dt: datetime.datetime, /, style: Optional[TimestampStyle] = None) -> str:
"""A helper function to format a :class:`datetime.datetime` for presentation within Discord.
This allows for a locale-independent way of presenting data using Discord specific Markdown.
+-------------+----------------------------+-----------------+
| Style | Example Output | Description |
+=============+============================+=================+
| t | 22:57 | Short Time |
+-------------+----------------------------+-----------------+
| T | 22:57:58 | Long Time |
+-------------+----------------------------+-----------------+
| d | 17/05/2016 | Short Date |
+-------------+----------------------------+-----------------+
| D | 17 May 2016 | Long Date |
+-------------+----------------------------+-----------------+
| f (default) | 17 May 2016 22:57 | Short Date Time |
+-------------+----------------------------+-----------------+
| F | Tuesday, 17 May 2016 22:57 | Long Date Time |
+-------------+----------------------------+-----------------+
| R | 5 years ago | Relative Time |
+-------------+----------------------------+-----------------+
Note that the exact output depends on the user's locale setting in the client. The example output
presented is using the ``en-GB`` locale.
.. versionadded:: 2.0
Parameters
-----------
dt: :class:`datetime.datetime`
The datetime to format.
style: :class:`str`
The style to format the datetime with.
Returns
--------
:class:`str`
The formatted string.
"""
if style is None:
return f"<t:{int(dt.timestamp())}>"
return f"<t:{int(dt.timestamp())}:{style}>"
def generate_snowflake(dt: Optional[datetime.datetime] = None) -> int:
"""Returns a numeric snowflake pretending to be created at the given date but more accurate and random than time_snowflake.
If dt is not passed, it makes one from the current time using utcnow.
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
dt = dt or utcnow()
return int(dt.timestamp() * 1000 - DISCORD_EPOCH) << 22 | 0x3FFFFF
V = Union[Iterable[str], Iterable[int], Iterable[float]]
AV = Awaitable[V]
Values = Union[V, Callable[[AutocompleteContext], Union[V, AV]], AV]
AutocompleteFunc = Callable[[AutocompleteContext], AV]
def basic_autocomplete(values: Values) -> AutocompleteFunc:
"""A helper function to make a basic autocomplete for slash commands. This is a pretty standard autocomplete and
will return any options that start with the value from the user, case insensitive. If the ``values`` parameter is callable,
it will be called with the AutocompleteContext.
This is meant to be passed into the :attr:`discord.Option.autocomplete` attribute.
Note
-----
Autocomplete cannot be used for options that have specified choices.
Example
--------
.. code-block:: python3
Option(str, "color", autocomplete=basic_autocomplete(("red", "green", "blue")))
# or
async def autocomplete(ctx):
return "foo", "bar", "baz", ctx.interaction.user.name
Option(str, "name", autocomplete=basic_autocomplete(autocomplete))
.. versionadded:: 2.0
Parameters
-----------
values: Union[Union[Iterable[:class:`.OptionChoice`], Iterable[:class:`str`], Iterable[:class:`int`], Iterable[:class:`float`]], Callable[[:class:`.AutocompleteContext`], Union[Union[Iterable[:class:`str`], Iterable[:class:`int`], Iterable[:class:`float`]], Awaitable[Union[Iterable[:class:`str`], Iterable[:class:`int`], Iterable[:class:`float`]]]]], Awaitable[Union[Iterable[:class:`str`], Iterable[:class:`int`], Iterable[:class:`float`]]]]
Possible values for the option. Accepts an iterable of :class:`str`, a callable (sync or async) that takes a
single argument of :class:`.AutocompleteContext`, or a coroutine. Must resolve to an iterable of :class:`str`.
Returns
--------
Callable[[:class:`.AutocompleteContext`], Awaitable[Union[Iterable[:class:`.OptionChoice`], Iterable[:class:`str`], Iterable[:class:`int`], Iterable[:class:`float`]]]]
A wrapped callback for the autocomplete.
"""
async def autocomplete_callback(ctx: AutocompleteContext) -> V:
_values = values # since we reassign later, python considers it local if we don't do this
if callable(_values):
_values = _values(ctx)
if asyncio.iscoroutine(_values):
_values = await _values
def check(item: Any) -> bool:
item = getattr(item, "name", item)
return str(item).lower().startswith(str(ctx.value or "").lower())
gen = (val for val in _values if check(val))
return iter(itertools.islice(gen, 25))
return autocomplete_callback
| 30.031441
| 447
| 0.619787
|
83ad046c18532151fb86aa37910443fef0ba6ea9
| 2,281
|
py
|
Python
|
build_char_table.py
|
blues-lin/Char-level-CNN-for-Chinese-Text-Classification-in-Keras
|
81c7cdec48d4b17640e4c5465a4a412e338a7e62
|
[
"MIT"
] | 8
|
2017-03-05T04:57:22.000Z
|
2021-03-04T04:29:13.000Z
|
build_char_table.py
|
blues-lin/Char-level-CNN-for-Chinese-Text-Classification-in-Keras
|
81c7cdec48d4b17640e4c5465a4a412e338a7e62
|
[
"MIT"
] | null | null | null |
build_char_table.py
|
blues-lin/Char-level-CNN-for-Chinese-Text-Classification-in-Keras
|
81c7cdec48d4b17640e4c5465a4a412e338a7e62
|
[
"MIT"
] | 1
|
2018-03-07T07:56:33.000Z
|
2018-03-07T07:56:33.000Z
|
"""
Build look-up char table from iterable corpus.
Build labels from training terms.
For traditional Chinese text, this uses corpus.sqlite. The corpus comes from ptt.cc.
"""
import os.path
import sqlite3
import csv
from lib import table_builder
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
CHAR_PATH = os.path.join(BASE_PATH, 'lib/data/charTable.txt')
LABEL_PATH = os.path.join(BASE_PATH, 'lib/data/label.txt')
TRAINING_TERM_PATH = os.path.join(BASE_PATH, 'training_terms.tsv')
# Analyze character used for discard char.
CHAR_USAGE_PATH = os.path.join(BASE_PATH, 'charUsageCount.csv')
CORPUS_PATH = os.path.join(BASE_PATH, 'data/corpus.sqlite')
def buildCharTable(corpus):
discardCharFile = open("discardChar.txt", "r", encoding='utf-8').read()
discardCharSet = set()
for c in discardCharFile:
discardCharSet.add(c)
charDict = dict()
table = table_builder.LookupTableBuilder(CHAR_PATH)
i = 0
for doc in corpus:
text = doc.strip('\n')
i += 1
for c in text:
if c in discardCharSet:
continue
if type(c) is str:
table.addChar(c)
# Analyze char usage count.
if c in charDict:
charDict[c] += 1
else:
charDict[c] = 1
table.saveChar()
# For counting char.
csvfile = open(CHAR_USAGE_PATH, 'w', encoding='utf-8')
fieldnames = ['char', 'number']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for key, value in charDict.items():
writer.writerow({'char': key, 'number': value})
csvfile.close()
def buildLabel(filePath):
f = open(filePath, "r", encoding="utf-8")
labels = set()
for row in f:
r = row.strip("\n").split("\t")
l = r[1].split(" ")
for lab in l:
labels.add(lab)
print("Build labels file: {}".format(labels))
labelFile = open("label.txt", "w", encoding="utf-8")
for label in labels:
labelFile.write(label + "\n")
labelFile.close()
conn = sqlite3.connect(CORPUS_PATH)
cur = conn.cursor()
article = cur.execute("SELECT article from corpus")
docs = (x[0] for x in article)
buildCharTable(docs)
buildLabel(TRAINING_TERM_PATH)
| 30.413333
| 75
| 0.629987
|
68ef3054a7310ebdbd157817fa85a9bba54b2c92
| 613
|
py
|
Python
|
Leetcode/30. Substring with Concatenation of All Words/solution1.py
|
asanoviskhak/Outtalent
|
c500e8ad498f76d57eb87a9776a04af7bdda913d
|
[
"MIT"
] | 51
|
2020-07-12T21:27:47.000Z
|
2022-02-11T19:25:36.000Z
|
Leetcode/30. Substring with Concatenation of All Words/solution1.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | null | null | null |
Leetcode/30. Substring with Concatenation of All Words/solution1.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | 32
|
2020-07-27T13:54:24.000Z
|
2021-12-25T18:12:50.000Z
|
from itertools import permutations
from typing import List
class Solution:
def findSubstring(self, s: str, words: List[str]) -> List[int]:
if not s or not words: return []
if not all([word in s for word in words]): return []
result = set()
for substring in permutations(words):
substring = ''.join(substring)
start = 0
while True:
try:
start = s.index(substring, start)
result.add(start)
start += 1
except ValueError as e:
break
return list(result)
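# Illustrative check (added): the classic LeetCode example for this problem.
print(sorted(Solution().findSubstring("barfoothefoobarman", ["foo", "bar"])))  # [0, 9]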
| 30.65
| 67
| 0.497553
|
78c16b3a0fad1080190a3b7a3abc4c333b746286
| 1,825
|
py
|
Python
|
levantamento_dados/estrelas/identifica_data_criacao_repositorio.py
|
carlosdenner/github
|
09009abe4954bae6ab304af0db9ae6953a2479ac
|
[
"MIT"
] | 1
|
2021-08-23T23:17:18.000Z
|
2021-08-23T23:17:18.000Z
|
levantamento_dados/estrelas/identifica_data_criacao_repositorio.py
|
carlosdenner/github
|
09009abe4954bae6ab304af0db9ae6953a2479ac
|
[
"MIT"
] | null | null | null |
levantamento_dados/estrelas/identifica_data_criacao_repositorio.py
|
carlosdenner/github
|
09009abe4954bae6ab304af0db9ae6953a2479ac
|
[
"MIT"
] | 2
|
2021-06-01T17:52:47.000Z
|
2021-11-22T16:39:53.000Z
|
import json
import datetime
from dateutil import parser
def ler_arquivo_json_tipo_1(nome_arquivo):
with open(nome_arquivo, 'r', encoding='utf8') as f:
return json.load(f)
def gravar_arquivo_json(nome_arquivo, dados):
with open(nome_arquivo, 'w', encoding='utf-8') as f:
json.dump(dados, f, ensure_ascii=False, indent=2, sort_keys=False, separators=(',' , ':'))
def gerar_data_criacao_repositorio(arquivo, arquivo_2):
repo_id_ant = 0
for i in range(len(arquivo_2)):
if arquivo_2[i]['repo_id'] != repo_id_ant:
print(arquivo_2[i]['repo_id'] )
repo_id_ant = arquivo_2[i]['repo_id']
registro = list(filter(lambda x:x["id"] == arquivo_2[i]['repo_id'],arquivo))
data_criacao_utc = registro[0]['created_at']
data_criacao = parser.parse(data_criacao_utc)
data_criacao = datetime.datetime.strftime(data_criacao, "%d-%m-%Y")
arquivo_2[i]['data_criacao'] = data_criacao
else:
arquivo_2[i]['data_criacao'] = ""
return arquivo_2
#================================================================================#
# MAIN #
#================================================================================#
print("Informe o arquivo.json dos repositórios: ")
nome_arquivo_repositorios = input()
print("Informe o nome do arquivo.json: ")
nome_arquivo = input()
arquivo_json_repositorios = ler_arquivo_json_tipo_1(nome_arquivo_repositorios)
arquivo_json = ler_arquivo_json_tipo_1(nome_arquivo)
arquivo_json_saida = gerar_data_criacao_repositorio(arquivo_json_repositorios,arquivo_json)
nome_arquivo_saida = f'saida-1-{str(nome_arquivo)}'
gravar_arquivo_json(nome_arquivo_saida,arquivo_json_saida)
| 35.096154
| 98
| 0.608219
|
43391b778e79c1cfb296b546cc415c1a3e646f6c
| 1,907
|
py
|
Python
|
align_dataset.py
|
moh3n9595/align-face
|
3ad38065ef86bc57452a07c1a060e016586d5b36
|
[
"MIT"
] | 4
|
2019-07-10T12:23:53.000Z
|
2021-05-17T18:20:28.000Z
|
align_dataset.py
|
moh3n9595/align-face
|
3ad38065ef86bc57452a07c1a060e016586d5b36
|
[
"MIT"
] | null | null | null |
align_dataset.py
|
moh3n9595/align-face
|
3ad38065ef86bc57452a07c1a060e016586d5b36
|
[
"MIT"
] | 1
|
2020-09-17T14:37:30.000Z
|
2020-09-17T14:37:30.000Z
|
'''
Author: Mohsen Madani
Date: 5/31/2018
Email: mohsenando@gmail.com
'''
from eye_detector import get_eyes
from utils import rotate_img, rotate_coords
from os import listdir
from os.path import isfile, join
import cv2
import numpy as np
import math
# -- Settings :
W = 400
dataset_path = './clean_dataset/'
new_dataset_path = './align_dataset'
files = [f for f in listdir(dataset_path) if isfile(join(dataset_path, f))]
for file in files:
image = cv2.imread(dataset_path + file)
# Add some extra padding
image = cv2.copyMakeBorder(image, W, W, W, W, cv2.BORDER_CONSTANT)
try:
left_eye, right_eye = get_eyes(image)
except:
continue
	if left_eye is False and right_eye is False:  # get_eyes signals "no eyes found" with two False values
continue
# -- Find slope :
x1 = left_eye[0] + left_eye[2]/2
x2 = right_eye[0] + right_eye[2] / 2
y1 = left_eye[1] + left_eye[3]/2
y2 = right_eye[1] + right_eye[3] / 2
slope = (y2 - y1)/(x2 - x1)
# -- Rotate :
centerX = image.shape[1] / 2
centerY = image.shape[0] / 2
angle = (np.arctan(slope) / np.pi) * 180
image = rotate_img(image, angle)
x1 , y1 = rotate_coords(x1, y1, centerX, centerY, angle)
x2 , y2 = rotate_coords(x2, y2, centerX, centerY, angle)
# -- Resize :
length = math.sqrt(math.pow(x2 - x1,2) + math.pow(y2 - y1,2))
scale = (W / 4) / length
image = cv2.resize(image, None, fx=scale, fy=scale)
x1 = int(x1 * scale)
x2 = int(x2 * scale)
y1 = int(y1 * scale)
y2 = int(y2 * scale)
# -- Crop from center :
centerX = int( ((x2 + x1) / 2) * (1))
centerY = int(((y2 + y1) / 2) * (1))
image = image[(centerY - int(.6*W+1)):(centerY + int(W/0.75 - .6*W+1)), int(centerX - .125*W - .375*W+1):int(centerX + .125*W + (W - .625*W))]
# -- Save :
new_name = join(new_dataset_path, file)
cv2.imwrite(new_name, image)
| 25.092105
| 146
| 0.60409
|
e3fcbe78e76aa862a200b4e3a8ce43144e0f0862
| 9,955
|
py
|
Python
|
tests/backend/xml.py
|
Terrance-forks/python-anyconfig
|
21d7c0e30287569b394972557b5a54fab03bcd5c
|
[
"MIT"
] | 213
|
2015-01-14T22:09:20.000Z
|
2022-02-02T17:23:41.000Z
|
tests/backend/xml.py
|
Terrance-forks/python-anyconfig
|
21d7c0e30287569b394972557b5a54fab03bcd5c
|
[
"MIT"
] | 120
|
2015-03-13T15:47:43.000Z
|
2022-03-31T01:55:34.000Z
|
tests/backend/xml.py
|
Terrance-forks/python-anyconfig
|
21d7c0e30287569b394972557b5a54fab03bcd5c
|
[
"MIT"
] | 34
|
2015-01-12T05:03:30.000Z
|
2021-09-09T14:40:56.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 - 2021 Satoru SATOH <satoru.satoh@gmail.com>
# SPDX-License-Identifier: MIT
#
# pylint: disable=missing-docstring,invalid-name,too-few-public-methods
# pylint: disable=ungrouped-imports,protected-access
import io
import unittest
import anyconfig.backend.xml as TT
import tests.backend.common as TBC
from tests.backend.common import to_bytes
CNF_0 = {'config': {'@attrs': {'val:name': 'foo',
'xmlns': 'http://example.com/ns/cnf',
'xmlns:val': 'http://example.com/ns/cnf/val'},
'val:a': '0',
'val:b': {'@attrs': {'id': 'b0'}, '@text': 'bbb'},
'val:c': None,
'sect0': {'val:d': 'x, y, z'},
'list1': [{'item': '0'}, {'item': '1'}, {'item': '2'}],
'list2': {'@attrs': {'id': 'list2'},
'@children': [{'item': 'i'},
{'item': 'j'}]}}}
class Test_00(unittest.TestCase):
def test_10__namespaces_from_file(self):
ref = {"http://example.com/ns/config": '',
"http://example.com/ns/config/val": "val"}
xmlfile = io.StringIO(TBC.read_from_res("20-00-cnf.xml"))
self.assertEqual(TT._namespaces_from_file(xmlfile), ref)
def test_20__process_elem_text__whitespaces(self):
(elem, dic, subdic) = (TT.ET.XML("<a> </a>"), {}, {})
TT._process_elem_text(elem, dic, subdic)
self.assertTrue(not dic)
self.assertTrue(not subdic)
def test_22__process_elem_text__wo_attrs_and_children(self):
(elem, dic, subdic) = (TT.ET.XML("<a>A</a>"), {}, {})
TT._process_elem_text(elem, dic, subdic, text="#text")
self.assertEqual(dic, {"a": 'A'})
self.assertTrue(not subdic)
def test_22__process_elem_text__wo_attrs_and_children_parse(self):
(elem, dic, subdic) = (TT.ET.XML("<a>A</a>"), {}, {})
TT._process_elem_text(elem, dic, subdic, text="#text",
ac_parse_value=True)
self.assertEqual(dic, {"a": 'A'})
self.assertTrue(not subdic)
(elem, dic, subdic) = (TT.ET.XML("<a>1</a>"), {}, {})
TT._process_elem_text(elem, dic, subdic, text="#text",
ac_parse_value=True)
self.assertEqual(dic, {"a": 1})
self.assertTrue(not subdic)
def test_24__process_elem_text__w_attrs(self):
(elem, dic, subdic) = (TT.ET.XML("<a id='1'>A</a>"), {}, {})
TT._process_elem_text(elem, dic, subdic, text="#text")
self.assertTrue(not dic)
self.assertEqual(subdic, {"#text": 'A'})
def test_24__process_elem_text__w_children(self):
(elem, dic, subdic) = (TT.ET.XML("<a>A<b/></a>"), {}, {})
TT._process_elem_text(elem, dic, subdic, text="#text")
self.assertTrue(not dic)
self.assertEqual(subdic, {"#text": 'A'})
def test_30__process_elem_attrs__wo_text_and_children(self):
(elem, dic, subdic) = (TT.ET.XML("<a id='A'/>"), {}, {})
TT._process_elem_attrs(elem, dic, subdic)
self.assertTrue(not dic)
self.assertEqual(subdic, {"@attrs": {"id": 'A'}})
def test_32__process_elem_attrs__w_text(self):
(elem, dic, subdic) = (TT.ET.XML("<a id='A'>AAA</a>"), {}, {})
TT._process_elem_attrs(elem, dic, subdic)
self.assertTrue(not dic)
self.assertEqual(subdic, {"@attrs": {"id": 'A'}})
def test_34__process_elem_attrs__merge_attrs(self):
(elem, dic, subdic) = (TT.ET.XML("<a id='A'/>"), {}, {})
TT._process_elem_attrs(elem, dic, subdic, merge_attrs=True)
self.assertEqual(dic, {"a": {"id": 'A'}})
self.assertTrue(not subdic)
def test_36__process_elem_attrs__wo_text_and_children_parse(self):
(elem, dic, subdic) = (TT.ET.XML("<a id='1'/>"), {}, {})
TT._process_elem_attrs(elem, dic, subdic, ac_parse_value=True)
self.assertTrue(not dic)
self.assertEqual(subdic, {"@attrs": {"id": 1}})
(elem, dic, subdic) = (TT.ET.XML("<a id='A'/>"), {}, {})
TT._process_elem_attrs(elem, dic, subdic, ac_parse_value=True)
self.assertTrue(not dic)
self.assertEqual(subdic, {"@attrs": {"id": 'A'}})
(elem, dic, subdic) = (TT.ET.XML("<a id='true'/>"), {}, {})
TT._process_elem_attrs(elem, dic, subdic, ac_parse_value=True)
self.assertTrue(not dic)
self.assertEqual(subdic, {"@attrs": {"id": True}})
def test_40__process_children_elems__root(self):
(elem, dic, subdic) = (TT.ET.XML("<list><i>A</i><i>B</i></list>"), {},
{})
TT._process_children_elems(elem, dic, subdic)
self.assertEqual(dic, {"list": [{"i": "A"}, {"i": "B"}]})
self.assertTrue(not subdic)
def test_42__process_children_elems__w_attr(self):
(elem, dic) = (TT.ET.XML("<list id='xyz'><i>A</i><i>B</i></list>"), {})
subdic = {"id": "xyz"}
ref = subdic.copy()
ref.update({"#children": [{"i": "A"}, {"i": "B"}]})
TT._process_children_elems(elem, dic, subdic, children="#children")
self.assertTrue(not dic)
self.assertEqual(subdic, ref, subdic)
def test_44__process_children_elems__w_children_have_unique_keys(self):
(elem, dic, subdic) = (TT.ET.XML("<a><x>X</x><y>Y</y></a>"), {}, {})
TT._process_children_elems(elem, dic, subdic)
self.assertEqual(dic, {"a": {"x": "X", "y": "Y"}})
self.assertTrue(not subdic)
def test_46__process_children_elems__w_merge_attrs(self):
elem = TT.ET.XML("<a z='Z'><x>X</x><y>Y</y></a>")
dic = {"a": {"@attrs": {"z": "Z"}}}
subdic = dic["a"]["@attrs"]
TT._process_children_elems(elem, dic, subdic, merge_attrs=True)
self.assertEqual(dic, {"a": {"x": "X", "y": "Y", "z": "Z"}}, dic)
class Test_00_1(unittest.TestCase):
def _assert_eq_dic_from_snippet(self, snippet, ref, **opts):
self.assertEqual(TT.elem_to_container(TT.ET.XML(snippet), **opts), ref)
def test_10_elem_to_container__None(self):
self.assertEqual(TT.elem_to_container(None), dict())
def test_10_root_to_container__None(self):
self.assertEqual(TT.root_to_container(None), dict())
def test_12_elem_to_container__empty(self):
self._assert_eq_dic_from_snippet("<a/>", dict(a=None))
def test_20_elem_to_container__attrs(self):
ref = dict(a={"@attrs": dict(x='1', y='y')})
self._assert_eq_dic_from_snippet("<a x='1' y='y'/>", ref)
def test_30_elem_to_container__child(self):
ref = dict(a=dict(b="b"))
self._assert_eq_dic_from_snippet("<a><b>b</b></a>", ref)
def test_32_elem_to_container__children__same_keys(self):
ref = {'a': [{'b': '1'}, {'b': '2'}]}
self._assert_eq_dic_from_snippet("<a><b>1</b><b>2</b></a>", ref)
def test_34_elem_to_container__children(self):
ref = {'a': {'b': 'b', 'c': 'c'}}
self._assert_eq_dic_from_snippet("<a><b>b</b><c>c</c></a>", ref)
def test_36_elem_to_container__children__same_keys_w_text(self):
ref = {'a': {'@text': 'aaa', '@children': [{'b': '1'}, {'b': '2'}]}}
self._assert_eq_dic_from_snippet("<a>aaa<b>1</b><b>2</b></a>", ref)
def test_40_elem_to_container__text(self):
self._assert_eq_dic_from_snippet("<a>A</a>", {'a': 'A'})
def test_42_elem_to_container__text_attrs(self):
ref = dict(a={"@attrs": {'x': 'X'}, "@text": "A"})
self._assert_eq_dic_from_snippet("<a x='X'>A</a>", ref)
def test_50_root_to_container__text_attrs_tags(self):
ref = dict(a={"_attrs": {'x': 'X'}, "_text": "A"})
tags = dict(attrs="_attrs", text="_text")
self.assertEqual(TT.root_to_container(TT.ET.XML("<a x='X'>A</a>"),
dict, {}, tags=tags),
ref)
def tree_to_string(tree):
return TT.ET.tostring(tree.getroot())
class Test_00_2(unittest.TestCase):
def test_00_container_to_etree__None(self):
self.assertTrue(TT.container_to_etree(None) is None)
def test_10_container_to_etree__text_attrs(self):
ref = to_bytes('<a x="X" y="Y">A</a>')
obj = dict(a={"@attrs": {'x': 'X', 'y': 'Y'}, "@text": "A"})
res = TT.container_to_etree(obj)
self.assertEqual(tree_to_string(res), ref)
def test_12_container_to_etree__text_attrs_tags(self):
ref = to_bytes('<a x="X" y="Y">A</a>')
obj = dict(a={"_attrs": {'x': 'X', 'y': 'Y'}, "_text": "A"})
tags = dict(attrs="_attrs", text="_text")
res = TT.container_to_etree(obj, tags=tags)
self.assertEqual(tree_to_string(res), ref)
def test_20_container_to_etree__child(self):
ref = to_bytes("<a><b>b</b></a>")
obj = dict(a=dict(b="b"))
res = TT.container_to_etree(obj)
self.assertEqual(tree_to_string(res), ref)
def test_22_container_to_etree__children(self):
ref = to_bytes("<a><b>b</b><c>c</c></a>")
obj = {'a': {'@children': [{'b': 'b'}, {'c': 'c'}]}}
res = TT.container_to_etree(obj)
self.assertEqual(tree_to_string(res), ref)
class HasParserTrait(TBC.HasParserTrait):
psr = TT.Parser()
cnf = CNF_0
cnf_s = to_bytes(TBC.read_from_res("20-10-cnf.xml"))
class Test_10(TBC.Test_10_dumps_and_loads, HasParserTrait):
load_options = dump_options = dict(ac_parse_value=False)
class Test_20(TBC.Test_20_dump_and_load, HasParserTrait):
def test_40_load_w_options(self):
cnf = self.psr.load(self.ioi, ac_parse_value=False)
self._assert_dicts_equal(cnf)
def test_42_dump_with_special_option(self):
ioi = self._to_ioinfo(self.cnf_path)
self.psr.dump(self.cnf, ioi, ac_parse_value=False)
cnf = self.psr.load(self.ioi)
self._assert_dicts_equal(cnf)
# vim:sw=4:ts=4:et:
| 39.82
| 79
| 0.581818
|
3aa8b449428603482ddb0bcc151815370f96525a
| 1,033
|
py
|
Python
|
plugins/install/main.py
|
StyXman/pbt
|
83dac5b896e9037fac17453f21a4d14faa25c09e
|
[
"Apache-2.0"
] | 1
|
2015-07-16T19:14:50.000Z
|
2015-07-16T19:14:50.000Z
|
plugins/install/main.py
|
StyXman/pbt
|
83dac5b896e9037fac17453f21a4d14faa25c09e
|
[
"Apache-2.0"
] | null | null | null |
plugins/install/main.py
|
StyXman/pbt
|
83dac5b896e9037fac17453f21a4d14faa25c09e
|
[
"Apache-2.0"
] | null | null | null |
import pbt
import sys
import os
@pbt.command(name="install")
def install(ctx, args, project):
"""
Works as a wrapper for pip, with some sugar
"""
try:
import pip
except ImportError:
print("You need pip in order to use install, please see "
"http://www.pip-installer.org/en/latest/installing.html")
sys.exit(0)
pipargs = ["install"]
if "-t" in args or "--target" in args:
if "-t" in args:
t = args.index("-t")
else:
t = args.index("--target")
# The destination folder is the next element in the list
folder = args.pop(t+1)
pipargs.append(args.pop(t))
target_path = project.join_path(folder)
ctx.ensure_dir_exists(target_path)
pipargs.append(target_path)
if args:
pipargs += args
        # TODO: add the new dep to the requirements
else:
deps_spec = ["".join(dep) for dep in project.dependencies]
pipargs += deps_spec
pip.main(pipargs)
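# --- Illustrative note (a sketch, not part of the original plugin) ---
# Assuming pbt dispatches "pbt install" to the command above, the two branches mean:
#   pbt install -t vendor requests   -> pip install -t <project>/vendor requests
#   pbt install                      -> pip install <every project dependency>
# The exact CLI wiring depends on how @pbt.command is registered, which is not shown here.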
| 24.595238
| 71
| 0.583737
|
abebd6398fc6deaced99ff177e38946a44626a38
| 1,299
|
py
|
Python
|
Not working/test.py
|
Gayatri-2017/UBCHackathon
|
8e36cc2d44cca77cf83b2c1faef3f4c58b5afa6d
|
[
"MIT"
] | 1
|
2020-02-27T19:24:32.000Z
|
2020-02-27T19:24:32.000Z
|
Not working/test.py
|
Gayatri-2017/UBCHackathon
|
8e36cc2d44cca77cf83b2c1faef3f4c58b5afa6d
|
[
"MIT"
] | null | null | null |
Not working/test.py
|
Gayatri-2017/UBCHackathon
|
8e36cc2d44cca77cf83b2c1faef3f4c58b5afa6d
|
[
"MIT"
] | null | null | null |
import pandas as pd
import re
interest = "introduction"
csv_location = "/Users/apple/Desktop/UBCHackathon_Local/UBCHackathon/ubc_course_calendar_data_new.csv"
data = pd.read_csv(csv_location)
data.dropna(inplace = True)
sub = interest
data["Indexes"]= data["COURSE_DESCRIPTION"].str.contains(sub, case=False)
print("data = \n", data[data["Indexes"]==True])
# disp_str = str(data.loc[data['Indexes'] == True]['COURSE_TITLE'])
df = pd.DataFrame(data[data['Indexes'] == True]['COURSE_TITLE'])
#fil1 = data["Indexes"] == "True"
#print("where clause\n", data.where(fil1, inplace=True))
#print("df = \n", df, "\n\n")
courses_titles = df["COURSE_TITLE"].unique()
print("df[COURSE_TITLE]\n", courses_titles)
# disp_str = str(data[data['Indexes'] == True]['COURSE_TITLE'])
# print("before\n", disp_str)
# disp_str = "<br>".join(data.loc[data['Indexes'] == True]['COURSE_TITLE'])
# disp_str = str(data.loc[data['Indexes'] == True]['COURSE_TITLE'])
# disp_str = "".join(re.split(" ", disp_str))
# print("after split \n", disp_str)
if len(courses_titles) > 0:  # unique() returns an array (possibly empty), never None
# disp_str = re.split("Name", disp_str, flags=re.IGNORECASE)[0]
disp_str = "<br>".join(courses_titles)
else:
disp_str = "No results found for your search interest. Check spelling or try another broader search keyword"
print(disp_str)
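# --- Illustrative sketch (not part of the original script) ---
# The same case-insensitive substring match on a tiny, made-up DataFrame, to show
# what the "Indexes" column computes; the column names mirror the CSV used above.
#   toy = pd.DataFrame({
#       "COURSE_TITLE": ["Intro to CS", "Advanced Algorithms"],
#       "COURSE_DESCRIPTION": ["An introduction to computing.", "Graphs and flows."],
#   })
#   toy["Indexes"] = toy["COURSE_DESCRIPTION"].str.contains("introduction", case=False)
#   toy[toy["Indexes"]]["COURSE_TITLE"].unique()   # -> ['Intro to CS']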
| 41.903226
| 109
| 0.704388
|
a71dd59ebb4ff0f2a3bea3266b594103bb52aeb0
| 383
|
py
|
Python
|
run_generators.py
|
se7endragon/Event_Driven_Stock_Prediction
|
fdb9130f14124960d4488151a7c81fe643582659
|
[
"Apache-2.0"
] | null | null | null |
run_generators.py
|
se7endragon/Event_Driven_Stock_Prediction
|
fdb9130f14124960d4488151a7c81fe643582659
|
[
"Apache-2.0"
] | null | null | null |
run_generators.py
|
se7endragon/Event_Driven_Stock_Prediction
|
fdb9130f14124960d4488151a7c81fe643582659
|
[
"Apache-2.0"
] | null | null | null |
from generators.data_generator import data_generator
from generators.svo_generator import svo_generator
from generators.svo_embedding_generator import svo_embedding_generator
import os
path = os.getcwd()+'/data/'
data_generator.extract_news_titles(path)
svo_generator.extract_relation_triples(path)
svo_embedding_generator.svo_to_word_embedding(path)
print('Preprocess Complete!')
| 31.916667
| 70
| 0.869452
|
7ee28ae39e3cc3b820d86be83f99c882137c3472
| 172
|
py
|
Python
|
remoteprotocols/__init__.py
|
ianchi/remoteprotocols
|
7ed7881087af9de0f7cf8836361ad037cc4466ba
|
[
"MIT"
] | null | null | null |
remoteprotocols/__init__.py
|
ianchi/remoteprotocols
|
7ed7881087af9de0f7cf8836361ad037cc4466ba
|
[
"MIT"
] | null | null | null |
remoteprotocols/__init__.py
|
ianchi/remoteprotocols
|
7ed7881087af9de0f7cf8836361ad037cc4466ba
|
[
"MIT"
] | null | null | null |
"""Main entry point of library"""
# flake8: noqa
from .protocol import DecodeMatch, RemoteCommand, SignalData
from .registry import ProtocolRegistry
__version__ = "0.0.7"
| 24.571429
| 60
| 0.773256
|
9550aff663dfded18ef4e450b36cecf4ea41502c
| 5,970
|
py
|
Python
|
releasenotes/source/conf.py
|
4383/tobiko
|
f8e6916db890021fa17ddbfc5e6007a25093c8cb
|
[
"Apache-2.0"
] | null | null | null |
releasenotes/source/conf.py
|
4383/tobiko
|
f8e6916db890021fa17ddbfc5e6007a25093c8cb
|
[
"Apache-2.0"
] | null | null | null |
releasenotes/source/conf.py
|
4383/tobiko
|
f8e6916db890021fa17ddbfc5e6007a25093c8cb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TOBIKO_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, TOBIKO_DIR)
# -- Project information -----------------------------------------------------
project = 'Tobiko Release Notes'
copyright = "2019, Red Hat"
author = "Tobiko's Team"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Version info
from tobiko import version
release = version.release
# The short X.Y version.
version = version.version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"canonical_url": "https://docs.openstack.org/tobiko/latest/",
"logo_only": False,
"display_version": True,
"prev_next_buttons_location": "top",
"style_external_links": True,
# Toc options
"collapse_navigation": True,
"sticky_navigation": True,
"navigation_depth": 4,
"includehidden": True,
"titles_only": False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TobikoReleaseNotesdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TobikoReleaseNotes.tex', u'Tobiko Release Notes Documentation',
u'Tobiko developers', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tobikoreleasenotes', u'Tobiko Release Notes Documentation',
[u'Tobiko developers'], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TobikoReleaseNotes', u'Tobiko Release Notes Documentation',
u'Tobiko developers', 'TobikoReleaseNotes', 'One line description of project.',
'Miscellaneous'),
]
| 32.27027
| 84
| 0.674037
|
0643a9dc7b00e63295aca70504c6fad320935e06
| 16,252
|
pyw
|
Python
|
BindboxGUI.pyw
|
Kapukw/Bindbox
|
62516a08e4e6b384bbc1bbeef1e47dfc698bb5f3
|
[
"MIT"
] | null | null | null |
BindboxGUI.pyw
|
Kapukw/Bindbox
|
62516a08e4e6b384bbc1bbeef1e47dfc698bb5f3
|
[
"MIT"
] | null | null | null |
BindboxGUI.pyw
|
Kapukw/Bindbox
|
62516a08e4e6b384bbc1bbeef1e47dfc698bb5f3
|
[
"MIT"
] | null | null | null |
import sys
import os
import time
import traceback
from PyQt5 import QtCore, QtGui, QtWidgets, QtSvg
import Utils
import Bindbox
import BindboxGUI_rc
g_sleepMin = 10.0
g_maxMessagesCount = 30
class TimestampWidget(QtWidgets.QWidget):
def __init__(self, timestamp, result):
super(TimestampWidget, self).__init__()
resultLabel = QtSvg.QSvgWidget()
if result == 0:
resultLabel.setFixedSize(QtCore.QSize(20, 16))
resultLabel.load(":/resources/success.svg")
else:
resultLabel.setFixedSize(QtCore.QSize(16, 16))
resultLabel.load(":/resources/error.svg")
timestampLabel = QtWidgets.QLabel()
timestampLabel.setObjectName("timestampLabel")
timestampLabel.setFont(QtGui.QFont("Eurostile", 10, QtGui.QFont.Normal))
timestampLabel.setText(Utils.stringFromTime(timestamp))
lineWidget = QtWidgets.QWidget()
lineWidget.setObjectName("lineWidget")
lineWidget.setFixedHeight(4)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(resultLabel)
layout.addWidget(timestampLabel)
layout.addWidget(lineWidget)
layout.setStretch(2, 1)
layout.setContentsMargins(6, 6, 6, 4)
self.setLayout(layout)
class AppInfoWidget(QtWidgets.QWidget):
def __init__(self, name, result):
super(AppInfoWidget, self).__init__()
appNameLabel = QtWidgets.QLabel()
appNameLabel.setObjectName("appNameLabel")
appNameLabel.setFont(QtGui.QFont("Eurostile", 12, QtGui.QFont.Normal))
appNameLabel.setText(name)
resultSvg = QtSvg.QSvgWidget()
if result == Bindbox.AppSyncResult.HOST_TO_CLOUD:
resultSvg.setFixedSize(QtCore.QSize(30, 16))
resultSvg.load(":/resources/to_cloud.svg")
elif result == Bindbox.AppSyncResult.CLOUD_TO_HOST:
resultSvg.setFixedSize(QtCore.QSize(33, 16))
resultSvg.load(":/resources/to_host.svg")
layout = QtWidgets.QHBoxLayout()
layout.addWidget(appNameLabel)
layout.addStretch()
layout.addWidget(resultSvg)
layout.setContentsMargins(15, 0, 16, 0)
layout.setSpacing(0)
backgroundWidget = QtSvg.QSvgWidget()
backgroundWidget.setFixedSize(316, 32)
backgroundWidget.load(":/resources/item_bg.svg")
backgroundWidget.setLayout(layout)
backgroundLayout = QtWidgets.QHBoxLayout()
backgroundLayout.addWidget(backgroundWidget)
backgroundLayout.setAlignment(QtCore.Qt.AlignLeft)
backgroundLayout.setContentsMargins(28, 3, 0, 3)
self.setLayout(backgroundLayout)
class AppWindow(QtWidgets.QWidget):
qss = """
QWidget#appWindow {
background-color: #373737;
}
QWidget#lineWidget {
background-color: #575757;
}
QPushButton#openConfigButton {
background: none;
border: none;
}
QLabel#appCountLabel,
QLabel#timeToSyncLabel,
QLabel#timestampLabel
{
color: #999999;
}
QLabel#hostNameLabel,
QLabel#appNameLabel
{
color: #ffffff;
}
QListWidget#listWidget {
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #2c2c2c, stop: 0.1 #373737, stop: 0.8 #373737, stop: 1 #2c2c2c);
border: none;
outline: none;
}
QListWidget::item#listWidget,
QListWidget::item:selected#listWidget,
QListWidget::item:selected:active#listWidget,
QListWidget::item:hover#listWidget
{
background: none;
border: none;
}
QScrollBar:vertical {
background: #444444;
border: none;
width: 14px;
margin: 0 0 0 0;
}
QScrollBar::handle:vertical {
background: #818181;
min-height: 40px;
margin: 2 2 2 2;
border-radius: 2px;
}
QScrollBar::handle:disabled:vertical {
background: #505050;
min-height: 40px;
margin: 2 2 2 2;
border-radius: 2px;
}
QScrollBar::add-line:vertical {
border: none;
background: none;
height: 14px;
subcontrol-position: bottom;
subcontrol-origin: margin;
}
QScrollBar::sub-line:vertical {
border: none;
background: none;
height: 14px;
subcontrol-position: top;
subcontrol-origin: margin;
}
QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {
width: 0;
height: 0;
background: none;
}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
background: none;
}
"""
def __init__(self):
super(AppWindow, self).__init__()
QtGui.QFontDatabase().addApplicationFont(":/resources/Eurostile.ttf")
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.createTopWidget()
self.createListWidget()
self.createBottomWidget()
self.setLayout(self.layout)
self.createTrayIcon()
self.setObjectName("appWindow")
self.setStyleSheet(AppWindow.qss)
self.setFixedSize(370, 350)
self.setWindowTitle("Bindbox")
self.setWindowFlags(QtCore.Qt.Popup)
self.startupTime = time.time()
self.lastBeginSyncTime = self.startupTime
self.lastEndSyncTime = self.startupTime
self.startupScript()
def createTrayIcon(self):
showAction = QtWidgets.QAction("&Show", self, triggered=self.show)
quitAction = QtWidgets.QAction("&Quit", self, triggered=self.quitApp)
trayIconMenu = QtWidgets.QMenu(self)
trayIconMenu.addAction(showAction)
trayIconMenu.addSeparator()
trayIconMenu.addAction(quitAction)
self.trayIcon = QtWidgets.QSystemTrayIcon(self)
self.trayIcon.setContextMenu(trayIconMenu)
self.trayIcon.setToolTip("Bindbox")
self.trayIcon.setIcon(QtGui.QIcon(":/resources/app_icon.svg"))
self.trayIcon.activated.connect(self.iconActivated)
self.trayIcon.show()
def quitApp(self):
QtCore.QCoreApplication.instance().quit()
def openAppConfig(self):
os.startfile(Bindbox.getConfigPath())
def createTopWidget(self):
self.openConfigButton = QtWidgets.QPushButton()
self.openConfigButton.setObjectName("openConfigButton")
self.openConfigButton.setFixedSize(QtCore.QSize(32, 32))
self.openConfigButton.clicked.connect(self.openAppConfig)
# SVG icons are buggy
#self.openConfigButton.setIcon(QtGui.QIcon(":/resources/options.svg"))
#self.openConfigButton.setIconSize(QtCore.QSize(32, 32))
svgIcon = QtSvg.QSvgWidget()
svgIcon.setFixedSize(QtCore.QSize(32, 32))
svgIcon.load(":/resources/options.svg")
buttonLayout = QtWidgets.QHBoxLayout()
buttonLayout.setContentsMargins(0, 0, 0, 0)
buttonLayout.setSpacing(0)
buttonLayout.addWidget(svgIcon)
self.openConfigButton.setLayout(buttonLayout)
layout = QtWidgets.QHBoxLayout()
layout.addStretch()
layout.addWidget(self.openConfigButton)
layout.setContentsMargins(6, 6, 6, 6)
layout.setSpacing(0)
widget = QtWidgets.QWidget()
widget.setLayout(layout)
self.layout.addWidget(widget)
def createListWidget(self):
self.listWidget = QtWidgets.QListWidget()
self.listWidget.setObjectName("listWidget")
self.listWidget.setSortingEnabled(True)
self.listWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.listWidget.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.listWidget)
layout.setContentsMargins(3, 0, 3, 0)
layout.setSpacing(0)
widget = QtWidgets.QWidget()
widget.setLayout(layout)
self.layout.addWidget(widget)
def createBottomWidget(self):
self.hostNameLabel = QtWidgets.QLabel(Bindbox.getHostName())
self.hostNameLabel.setObjectName("hostNameLabel")
self.hostNameLabel.setFont(QtGui.QFont("Eurostile", 16, QtGui.QFont.Normal))
self.appCountLabel = QtWidgets.QLabel(Bindbox.getSyncStats())
self.appCountLabel.setObjectName("appCountLabel")
self.appCountLabel.setFont(QtGui.QFont("Eurostile", 12, QtGui.QFont.Normal))
leftLayout = QtWidgets.QVBoxLayout()
leftLayout.addWidget(self.hostNameLabel)
leftLayout.addWidget(self.appCountLabel)
leftLayout.setContentsMargins(0, 0, 0, 0)
leftLayout.setSpacing(0)
leftWidget = QtWidgets.QWidget()
leftWidget.setLayout(leftLayout)
timer = QtCore.QTimer(self)
timer.timeout.connect(self.updateGuiByTimer)
timer.start(1000)
self.timeToSyncLabel = QtWidgets.QLabel()
self.timeToSyncLabel.setObjectName("timeToSyncLabel")
self.timeToSyncLabel.setFont(QtGui.QFont("Eurostile", 12, QtGui.QFont.Normal))
layout = QtWidgets.QHBoxLayout()
layout.addWidget(leftWidget)
layout.addStretch()
layout.addWidget(self.timeToSyncLabel)
layout.setContentsMargins(10, 10, 10, 10)
layout.setSpacing(0)
widget = QtWidgets.QWidget()
widget.setFixedHeight(60)
widget.setLayout(layout)
self.layout.addWidget(widget)
def startupScript(self):
self.workThread = WorkThread()
self.workThread.updateBeginSyncTimeSignal.connect(self.updateBeginSyncTime)
self.workThread.updateEndSyncTimeSignal.connect(self.updateEndSyncTime)
self.workThread.addTimestampSignal.connect(self.addTimestamp)
self.workThread.addAppInfoSignal.connect(self.addAppInfo)
self.workThread.raiseMessageBoxSignal.connect(self.raiseMessageBox)
self.workThread.start()
@Utils.pyqtSlotWithExceptions()
def updateGuiByTimer(self):
remainingTime = self.lastEndSyncTime + g_sleepMin * 60.0 - time.time()
if remainingTime > 0.0:
self.timeToSyncLabel.setText(Utils.stringFromRemainingTime(remainingTime))
else:
self.timeToSyncLabel.setText("...")
@Utils.pyqtSlotWithExceptions()
def stopAllTasks(self):
while self.workThread.isWorking:
print("Wait for sync ending...")
time.sleep(1)
self.workThread.terminate()
print("Sync thread stopped.")
self.trayIcon.hide()
print("App closed.")
def setVisible(self, visible):
if visible:
appWidth = self.width()
appHeigth = self.height()
rightOffset = 16
bottomOffset = 16
availableGeometry = QtWidgets.QApplication.desktop().availableGeometry()
appX = self.trayIcon.geometry().center().x() - appWidth/2
appX = min(appX, availableGeometry.width() - appWidth - rightOffset)
appY = availableGeometry.bottom() - appHeigth - bottomOffset
self.setGeometry(appX, appY, appWidth, appHeigth)
super(AppWindow, self).setVisible(visible)
def iconActivated(self, reason):
if reason in (QtWidgets.QSystemTrayIcon.Trigger, QtWidgets.QSystemTrayIcon.DoubleClick):
self.setVisible(not self.isVisible())
def addListWidgetItem(self, widget):
itemsCount = self.listWidget.count()
if itemsCount >= g_maxMessagesCount:
listWidgetItem = self.listWidget.item(itemsCount-1)
self.listWidget.removeItemWidget(listWidgetItem)
listWidgetItem = self.listWidget.takeItem(itemsCount-1)
else:
listWidgetItem = QtWidgets.QListWidgetItem(self.listWidget)
listWidgetItem.setSizeHint(widget.sizeHint())
self.listWidget.insertItem(0, listWidgetItem)
self.listWidget.setItemWidget(listWidgetItem, widget)
self.listWidget.setCurrentRow(0)
@Utils.pyqtSlotWithExceptions()
def updateBeginSyncTime(self, t):
self.lastBeginSyncTime = t
@Utils.pyqtSlotWithExceptions()
def updateEndSyncTime(self, t):
self.lastEndSyncTime = t
@Utils.pyqtSlotWithExceptions()
def addTimestamp(self, timestamp, result):
self.appCountLabel.setText(Bindbox.getSyncStats())
self.addListWidgetItem(TimestampWidget(timestamp, result))
@Utils.pyqtSlotWithExceptions()
def addAppInfo(self, name, result):
self.addListWidgetItem(AppInfoWidget(name, result))
def raiseMessageBox(self, title, text):
QtWidgets.QMessageBox.critical(None, title, text)
class WorkThread(QtCore.QThread):
updateBeginSyncTimeSignal = QtCore.pyqtSignal('PyQt_PyObject')
updateEndSyncTimeSignal = QtCore.pyqtSignal('PyQt_PyObject')
addTimestampSignal = QtCore.pyqtSignal('PyQt_PyObject', 'PyQt_PyObject')
addAppInfoSignal = QtCore.pyqtSignal('PyQt_PyObject', 'PyQt_PyObject')
raiseMessageBoxSignal = QtCore.pyqtSignal('PyQt_PyObject', 'PyQt_PyObject')
def __init__(self):
self.isWorking = False
return super(WorkThread, self).__init__()
def run(self):
while True:
self.isWorking = True
self.updateBeginSyncTime(time.time())
try:
Bindbox.mainFunction(self.addAppInfo)
except Exception:
self.raiseMessageBox("Sync: Unexpected Error", traceback.format_exc())
syncStatus = 1
else:
syncStatus = 0
finally:
timestamp = time.time()
self.updateEndSyncTime(timestamp)
self.addTimestamp(timestamp, syncStatus)
self.isWorking = False
print("sleep " + str(g_sleepMin) + " min")
self.sleep(int(g_sleepMin * 60))
def updateBeginSyncTime(self, t):
self.updateBeginSyncTimeSignal.emit(t)
def updateEndSyncTime(self, t):
self.updateEndSyncTimeSignal.emit(t)
def addTimestamp(self, timestamp, result):
self.addTimestampSignal.emit(timestamp, result)
def addAppInfo(self, name, result):
self.addAppInfoSignal.emit(name, result)
def raiseMessageBox(self, title, text):
self.raiseMessageBoxSignal.emit(title, text)
class MyApp(QtWidgets.QApplication):
def notify(self, obj, event):
try:
return QtWidgets.QApplication.notify(self, obj, event)
except Exception:
QtWidgets.QMessageBox.critical(None, "C++: Unexpected Error", traceback.format_exc())
return False
def myExcepthook(exctype, value, tback):
QtWidgets.QMessageBox.critical(None, "Hook: Unexpected Error", traceback.format_exc())
sys.__excepthook__(exctype, value, tback)
if __name__ == '__main__':
Utils.winGuiHook()
sys.excepthook = myExcepthook
if not QtWidgets.QSystemTrayIcon.isSystemTrayAvailable():
QtWidgets.QMessageBox.critical(None, "Bindbox", "I couldn't detect any system tray on this system.")
sys.exit(1)
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
app = MyApp(sys.argv)
app.setQuitOnLastWindowClosed(False)
# TODO: handle little time before first sync
window = AppWindow()
app.aboutToQuit.connect(window.stopAllTasks)
try:
exitValue = app.exec_()
except:
exitValue = 1
finally:
sys.exit(exitValue)
| 35.407407
| 111
| 0.634999
|
74c0f2a906aeb860a8dac6766906e502df6e4d50
| 11,253
|
py
|
Python
|
nicos_mlz/refsans/devices/detector.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
nicos_mlz/refsans/devices/detector.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
nicos_mlz/refsans/devices/detector.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Enrico Faulhaber <enrico.faulhaber@frm2.tum.de>
#
# *****************************************************************************
"""Special device for Refsans Fast Detector (Comtec p7888)"""
import os
import numpy as np
from Detector import Detector # pylint: disable=import-error
from IO import Counter # pylint: disable=import-error
from nicos.core import INFO_CATEGORIES, LIVE, SIMULATION, Attach, Override, \
Param, Value, listof, oneof
from nicos.core.constants import POINT, SCAN
from nicos.core.data import DataSinkHandler
from nicos.devices.datasinks import ImageSink
from nicos.devices.generic.detector import ActiveChannel, \
CounterChannelMixin, PassiveChannel, TimerChannelMixin
from nicos.devices.taco.detector import BaseChannel as TacoBaseChannel
from nicos.utils import syncFile
class ComtecCounter(CounterChannelMixin, TacoBaseChannel, PassiveChannel):
taco_class = Counter
parameter_overrides = {
'type': Override(type=oneof('counter'), mandatory=False,
default='counter'),
'mode': Override(type=oneof('normal'), mandatory=False,
default='normal'),
'fmtstr': Override(default='%d'),
}
def doReadMode(self):
return 'normal'
def doWriteMode(self, value):
return 'normal'
def doReadIsmaster(self):
return False
def doWriteIsmaster(self, value):
return False
def valueInfo(self):
return Value(self.name, unit='cts', errors='sqrt',
type='counter', fmtstr='%d'),
class ComtecTimer(TimerChannelMixin, TacoBaseChannel, ActiveChannel):
taco_class = Detector
parameters = {
'binwidth': Param('Binning of timing channels', type=int,
settable=True, chatty=True),
'range': Param('Timing range', type=int, settable=True,
chatty=True),
'prefix': Param('Prefix of datafiles to be written', type=str,
settable=True, chatty=True),
'writelist': Param('Write listfile?', type=bool,
settable=True, chatty=True),
'autoinc': Param('Auto-increment prefix?', type=bool, settable=True,
chatty=True),
'autosave': Param('Auto-save?', type=bool, settable=True,
chatty=True),
}
def doRead(self, maxage=0):
return self._taco_guard(self._dev.read)[3] * 0.001
def doReadIsmaster(self):
return True
def doWriteIsmaster(self, value):
return True # is ALWAYS master
def doReadBinwidth(self):
return int(self._taco_guard(self._dev.deviceQueryResource,
'binwidth')[:-1])
def doWriteBinwidth(self, value):
self.doStop()
self._taco_update_resource('binwidth', '%dL' % value)
def doReadRange(self):
return int(self._taco_guard(self._dev.deviceQueryResource,
'range')[:-1])
def doWriteRange(self, value):
self.doStop()
self._taco_update_resource('range', '%dL' % value)
def doReadPrefix(self):
return self._taco_guard(self._dev.deviceQueryResource, 'prefix')
def doWritePrefix(self, value):
self.doStop()
self._taco_update_resource('prefix', str(value))
def doReadWritelist(self):
return self._taco_guard(self._dev.deviceQueryResource,
'writelist').lower() != 'off'
def doWriteWritelist(self, value):
self.doStop()
self._taco_update_resource('writelist', 'On' if value else 'Off')
def doReadAutoinc(self):
return self._taco_guard(self._dev.deviceQueryResource,
'autoinc').lower() != 'off'
def doWriteAutoinc(self, value):
self.doStop()
self._taco_update_resource('autoinc', 'On' if value else 'Off')
def doReadAutosave(self):
return self._taco_guard(self._dev.deviceQueryResource,
'autosave').lower() != 'off'
def doWriteAutosave(self, value):
self.doStop()
self._taco_update_resource('autosave', 'On' if value else 'Off')
def valueInfo(self):
return Value(self.name, unit='s', errors='next',
type='time', fmtstr='%d'),
class ComtecFilename(TacoBaseChannel, PassiveChannel):
taco_class = Detector
def doRead(self, maxage=0):
# How to obtain the part after the prefix???
return self._taco_guard(self._dev.deviceQueryResource, 'prefix')
def doReadIsmaster(self):
return False
def doWriteIsmaster(self, value):
return False # is NEVER master
def valueInfo(self):
return Value(self.name, unit='', errors='none',
type='filename', fmtstr='%s'),
class ComtecHeaderSinkHandler(DataSinkHandler):
_file = None
def prepare(self):
# obtain filenames /prefixes
# the first entry is normally used as the datafile.
# we use it for the prefix of the det.
# the other entries are normally 'just' the hardlinks to the datafile
# we use the first for the filename and the others for the links.
self.manager.assignCounter(self.dataset)
self.log.warning('tmpl:' + repr(self.sink.filenametemplate)) # XXX: rm
self.log.warning('subdir:' + repr(self.sink.subdir)) # XXX: rm
self.prefix, allfilepaths = self.manager.getFilenames(
self.dataset, self.sink.filenametemplate, self.sink.subdir)
        self.log.warning('allpaths:' + repr(allfilepaths))  # XXX: rm
self.linkpaths = allfilepaths[1:]
self.log.warning('linkpaths:' + repr(self.linkpaths)) # XXX: rm
# set prefix on tacodevice
self.sink._attached_detector.prefix = self.prefix
self._arraydesc = self.detector.arrayInfo()[0]
def putResults(self, quality, results):
# write headerfile
if quality == LIVE:
return
if self.detector.name in results:
result = results[self.detector.name]
if result is None:
return
image = result[1][0]
self.log.debug("results: %r", results)
if not self.linkpaths: # XXX: rm
self.log.warning('no linkpaths set, NOT saving header')
return
self._file = self.manager.createDataFile(
self.dataset, [self.linkpaths[0] + self.prefix + '.header'],
self.sink.subdir)
self.writeHeader(self._file, self.dataset.metainfo, image)
def writeHeader(self, fp, metainfo, image):
fp.seek(0)
fp.write(np.asarray(image).tobytes())
fp.write('\n### NICOS Raw File Header V2.0\n')
fp.write('# detector prefix is %r' % self.prefix)
bycategory = {}
for (device, key), (_, val, unit, category) in metainfo.items():
if category:
bycategory.setdefault(category, []).append(
('%s_%s' % (device.name, key), (val + ' ' + unit).strip()))
for category, catname in INFO_CATEGORIES:
if category not in bycategory:
continue
fp.write('### %s\n' % catname)
for key, value in sorted(bycategory[category]):
fp.write('%25s : %s\n' % (key, value))
# to ease interpreting the data...
fp.write('\n%r\n' % self._arraydesc)
fp.flush()
def end(self):
        if self._file:
            syncFile(self._file)  # flush to disk before closing
            self._file.close()
            self._file = None
        # fish the data file out of the file system and copy it, as self.linkpaths[0]
# pattern is:
# \home\pc\data2\A_username_JJJJ_MM\username_JJJJ_MM-xxx-A1-yyy.lst
# \home\pc\data3\B_username_JJJJ_MM\username_JJJJ_MM-xxx-B1-yyy.cfg
# \home\pc\data3\B_username_JJJJ_MM\username_JJJJ_MM-xxx-B1-yyy.lst
# where A1= A1...A8 and B1=B1..B8 xxx is local scancounter
# idea: treat \home\pc\data as mount_point and _username_JJJJ_MM
# \username_JJJJ_MM-xxx- as prefix
# srcfiles = '/home/pc/data2/A_' + self.prefix + '-A%d-%03d.lst'
# strategy: scan mountpoint for files containing prefix in their name
for basepath in self.sink.fast_basepaths:
for dirpath, _, filenames in os.walk(basepath):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if self.prefix in filepath:
self.log.info('found matching datafile: %r' % filepath)
# Gotcha!
# dstfilename = self.linkpaths[0] + filename
# copy file
# shutil.copyfile(filepath, dstfilename)
# XXX: break?
# link files
        # self.linkpaths contains the target file name and the names of the
# links as a list
# self.manager.linkFiles(self.linkpaths[0], self.linkpaths[1:])
COMTEC_TEMPLATES = [
'_%(session.experiment.users)s_%(year)04d_%(month)02d/'
'%(session.experiment.users)s_%(year)04d_%(month)02d_%(scancounter)03d_',
'%(proposal)s_%(scancounter)s_%(pointcounter)s_%(pointnumber)s_',
]
class ComtecHeaderSink(ImageSink):
"""Base class for sinks that save arrays to "image" files."""
attached_devices = {
'detector': Attach('Fast Detector', ComtecTimer),
}
parameters = {
'fast_basepaths': Param('Mount point(s) of the fast data storage',
type=listof(str), default=['/'],
settable=False),
}
parameter_overrides = {
'settypes': Override(default=[POINT, SCAN]),
'filenametemplate': Override(mandatory=False, settable=False,
userparam=False,
default=COMTEC_TEMPLATES),
'subdir': Override(default='comtec'),
}
handlerclass = ComtecHeaderSinkHandler
def doInit(self, mode):
if mode != SIMULATION:
# XXX: check existence of 'fast_basepath'
pass
| 38.016892
| 79
| 0.598329
|
a33644a4ce2053d0cacb358ad3900daef51fb213
| 9,013
|
py
|
Python
|
tasks/bert/datasets/bert_formatting.py
|
epfml/relaysgd
|
536f809f2a5fed5f5004b3f49857d67462ac89d2
|
[
"MIT"
] | 3
|
2021-10-31T21:00:36.000Z
|
2022-03-03T13:04:16.000Z
|
tasks/bert/datasets/bert_formatting.py
|
epfml/relaysgd
|
536f809f2a5fed5f5004b3f49857d67462ac89d2
|
[
"MIT"
] | null | null | null |
tasks/bert/datasets/bert_formatting.py
|
epfml/relaysgd
|
536f809f2a5fed5f5004b3f49857d67462ac89d2
|
[
"MIT"
] | 2
|
2022-02-19T05:25:09.000Z
|
2022-03-17T15:41:14.000Z
|
# -*- coding: utf-8 -*-
import json
import copy
from tqdm import tqdm
class BertInputFeature(object):
def __init__(self, uid, input_ids, attention_mask, token_type_ids, label):
self.uid = uid
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.gold = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
return copy.deepcopy(self.__dict__)
def to_json_string(self):
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def glue_example_to_feature(
task,
examples,
tokenizer,
max_seq_len,
label_list,
pad_token=0,
pad_token_segment_id=0,
):
"""
task: the name of one of the glue tasks, e.g., mrpc.
examples: raw examples, e.g., common.SentenceExamples.
tokenizer: BERT/ROBERTA tokenizer.
max_seq_len: maximum sequence length of the __word pieces__.
label_list: list of __the type__ of gold labels, e.g., [0, 1].
    mzhao: I made the following __default__ choices to avoid unnecessary options:
        (i) pad the sequence on the right.
        (ii) attention masking:
            1 -> real tokens
            0 -> [PAD]
        (iii) I skip STS-B, the only regression task in GLUE.
"""
assert pad_token == pad_token_segment_id == 0
print(
"[INFO]: using following label set for task {} : {}.".format(task, label_list)
)
# associate each label with an index
label_map = {l: i for i, l in enumerate(label_list)}
features = []
print("[INFO] *** Convert Example to Features ***")
for idx, eg in enumerate(tqdm(examples)):
# inputs:
# input_ids: list[int],
# token_type_ids: list[int] if return_token_type_ids is True (default)
# attention_mask: list[int] if return_attention_mask is True (default)
# overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
# num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
# special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
# NOTE: [SEP] belongs to text_a
if not hasattr(eg, "text_b"): # just for now
setattr(eg, "text_b", None)
inputs = tokenizer.encode_plus(
eg.text_a, eg.text_b, add_special_tokens=True, max_length=max_seq_len
)
        # these values are not padded yet
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
attention_mask = [1] * len(input_ids)
# pad everything to max_seq_len
padding_len = max_seq_len - len(input_ids)
input_ids = input_ids + [pad_token] * padding_len
attention_mask = attention_mask + [0] * padding_len
token_type_ids = token_type_ids + [pad_token_segment_id] * padding_len
assert (
len(input_ids) == len(attention_mask) == len(token_type_ids) == max_seq_len
), "{} - {} - {}".format(
len(input_ids), len(attention_mask), len(token_type_ids)
)
if idx < 2:
print()
print("[DEBUG] *** Example Entries in Dataset ***")
print("[DEBUG] uid: {}".format(eg.uid))
print("[DEBUG] input_ids: {}".format(" ".join([str(x) for x in input_ids])))
print(
"[DEBUG] attention_mask: {}".format(
" ".join([str(x) for x in attention_mask])
)
)
print(
"[DEBUG] token_type_ids: {}".format(
" ".join([str(x) for x in token_type_ids])
)
)
features.append(
BertInputFeature(
uid=eg.uid,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label_map[eg.label],
)
)
return features
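# --- Illustrative usage (a sketch, not part of the original module) ---
# A hedged example of calling glue_example_to_feature; SimpleNamespace stands in for
# the raw example type described in the docstring (anything carrying uid/text_a/label
# and optionally text_b), and the tokenizer name is only an assumption.
#
#   from types import SimpleNamespace
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   examples = [SimpleNamespace(uid="mrpc-0", text_a="A man is eating.",
#                               text_b="Someone eats.", label=1)]
#   feats = glue_example_to_feature("mrpc", examples, tokenizer,
#                                   max_seq_len=128, label_list=[0, 1])
#   feats[0].input_ids[:5], feats[0].gold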
# for POS, the long sentences are few -- when setting msl=128:
# trn sents: [201, 135, 187, 203, 408, 133, 132, 148, 138, 133, 180, 139, 140, 130, 185]
# val sents: [131], tst: [173, 166]. so I will just ignore them.
""" for POS tagging on PTB, I use the last wordpiece to represent the word """
class TaggingBertInputFeature(BertInputFeature):
def __init__(self, uid, input_ids, attention_mask, sent_if_tgt, tags_ids):
super(TaggingBertInputFeature, self).__init__(
uid, input_ids, attention_mask, None, None
)
self.sent_if_tgt = sent_if_tgt
self.tags_ids = tags_ids
# following tags are not considered in tagging on PTB
_skipped_tags = {"-NONE-", "NFP", "AFX"}
def tagging_example_to_feature(which_split, tagged_sents, tokenizer, t2i, msl):
all_fts, toolongs = [], []
for sent_idx, sent in enumerate(tqdm(tagged_sents)):
sent_pieces, sent_piece_tags, sent_if_tgt = [], [], []
for word, tag in sent:
word_pieces = tokenizer.tokenize(word)
piece_tags = ["<PAD>"] * (len(word_pieces) - 1) + [tag]
if tag in _skipped_tags:
piece_if_tgt = [0] * (len(word_pieces) - 1) + [0]
else:
piece_if_tgt = [0] * (len(word_pieces) - 1) + [1]
sent_pieces.extend(word_pieces)
sent_piece_tags.extend(piece_tags)
sent_if_tgt.extend(piece_if_tgt)
if len(sent_pieces) > msl - 2:
# print(sent_pieces)
print("{} > {} in {} ...".format(len(sent_pieces), msl - 2, which_split))
toolongs.append(len(sent_pieces))
sent_pieces, sent_piece_tags, sent_if_tgt = map(
lambda x: x[: (msl - 2)], [sent_pieces, sent_piece_tags, sent_if_tgt]
)
sent_pieces = ["[CLS]"] + sent_pieces + ["[SEP]"]
sent_piece_tags = ["<PAD>"] + sent_piece_tags + ["<PAD>"]
sent_if_tgt = [0] + sent_if_tgt + [0]
bert_inp_ids = tokenizer.convert_tokens_to_ids(sent_pieces)
bert_inp_mask = [1] * len(bert_inp_ids)
tags_ids = [t2i[tag] for tag in sent_piece_tags]
assert len(sent_pieces) == len(sent_if_tgt) == len(tags_ids)
while len(bert_inp_ids) < msl:
bert_inp_ids.append(0)
bert_inp_mask.append(0)
sent_if_tgt.append(0)
tags_ids.append(t2i["<PAD>"])
all_fts.append(
TaggingBertInputFeature(
uid="{}-{}".format(which_split, sent_idx),
input_ids=bert_inp_ids,
attention_mask=bert_inp_mask,
sent_if_tgt=sent_if_tgt,
tags_ids=tags_ids,
)
)
print("[WARN]: {} sentences longer than msl ...".format(len(toolongs)))
return all_fts
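# --- Illustrative walk-through (not part of the original module) ---
# For a tagged word such as ("running", "VBG") that the tokenizer splits into two
# word pieces, say ["run", "##ning"], the loop above emits, per piece:
#   sent_piece_tags : ["<PAD>", "VBG"]   (only the last piece carries the real tag)
#   sent_if_tgt     : [0, 1]             (only the last piece is scored)
# matching the "last wordpiece represents the word" convention stated above.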
# formatting a reading comprehension dataset in the BERT input format
class MultipleChoiceBertInputFeature(object):
def __init__(self, uid, composed, label):
self.uid = uid
self.label = label
self.composed = [
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
for input_ids, attention_mask, token_type_ids in composed
]
def multiplechoice_example_to_feature(
examples, tokenizer, max_seq_len, pad_token=0, pad_token_segment_id=0
):
assert pad_token == pad_token_segment_id == 0
features = []
print("[INFO] *** Convert Example to Features ***")
for idx, eg in enumerate(tqdm(examples)):
composed = []
for choice in eg.choices:
text_a = eg.context
text_b = eg.start_choice + " " + choice
inputs = tokenizer.encode_plus(
text_a, text_b, add_special_tokens=True, max_length=max_seq_len
)
            # these values are not padded -- one could instead set pad_to_max_length=True
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
attention_mask = [1] * len(input_ids)
# pad everything to max_seq_len
padding_len = max_seq_len - len(input_ids)
input_ids = input_ids + [pad_token] * padding_len
attention_mask = attention_mask + [0] * padding_len
token_type_ids = token_type_ids + [pad_token_segment_id] * padding_len
assert (
len(input_ids)
== len(attention_mask)
== len(token_type_ids)
== max_seq_len
), "{} - {} - {}".format(
len(input_ids), len(attention_mask), len(token_type_ids)
)
composed.append((input_ids, attention_mask, token_type_ids))
assert len(composed) == eg.num_choices
features.append(
MultipleChoiceBertInputFeature(
uid=eg.uid, composed=composed, label=eg.label
)
)
return features
| 37.090535
| 124
| 0.593698
|
d8123081e5341d449efcf969c86dc7a5914a4de7
| 850
|
py
|
Python
|
tests/test_metrics/test_metrics_utils.py
|
Pacman1984/etna
|
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
|
[
"Apache-2.0"
] | 326
|
2021-11-18T15:30:50.000Z
|
2022-03-31T09:44:15.000Z
|
tests/test_metrics/test_metrics_utils.py
|
Pacman1984/etna
|
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
|
[
"Apache-2.0"
] | 305
|
2021-11-17T10:28:31.000Z
|
2022-03-31T18:05:03.000Z
|
tests/test_metrics/test_metrics_utils.py
|
Pacman1984/etna
|
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
|
[
"Apache-2.0"
] | 29
|
2021-11-21T12:10:48.000Z
|
2022-03-31T22:55:06.000Z
|
from typing import Tuple
import numpy as np
from etna.datasets import TSDataset
from etna.metrics import MAE
from etna.metrics import MAPE
from etna.metrics import MSE
from etna.metrics.utils import compute_metrics
def test_compute_metrics(train_test_dfs: Tuple[TSDataset, TSDataset]):
"""Check that compute_metrics return correct metrics keys."""
forecast_df, true_df = train_test_dfs
metrics = [MAE("per-segment"), MAE(mode="macro"), MSE("per-segment"), MAPE(mode="macro", eps=1e-5)]
expected_keys = [
"MAE(mode = 'per-segment', )",
"MAE(mode = 'macro', )",
"MSE(mode = 'per-segment', )",
"MAPE(mode = 'macro', eps = 1e-05, )",
]
result = compute_metrics(metrics=metrics, y_true=true_df, y_pred=forecast_df)
np.testing.assert_array_equal(sorted(expected_keys), sorted(result.keys()))
| 35.416667
| 103
| 0.695294
|
7b71a5c0be1dbe033f1332a3e1c71b3fcf644e48
| 31,588
|
py
|
Python
|
webpage/lib/python3.5/site-packages/scipy/spatial/tests/test_qhull.py
|
pseudoPixels/SourceFlow
|
e1738c8b838c71b18598ceca29d7c487c76f876b
|
[
"MIT"
] | 5
|
2017-03-21T13:04:12.000Z
|
2021-07-28T12:38:02.000Z
|
PokerBots_2017/Johnny/scipy/spatial/tests/test_qhull.py
|
surgebiswas/poker
|
019112147a3e6c208c3846ef699fb6ec24a45c30
|
[
"MIT"
] | null | null | null |
PokerBots_2017/Johnny/scipy/spatial/tests/test_qhull.py
|
surgebiswas/poker
|
019112147a3e6c208c3846ef699fb6ec24a45c30
|
[
"MIT"
] | null | null | null |
from __future__ import division, print_function, absolute_import
import os
import copy
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, run_module_suite,
assert_, dec, assert_allclose, assert_array_equal,
assert_raises)
from scipy._lib.six import xrange
import scipy.spatial.qhull as qhull
from scipy.spatial import cKDTree as KDTree
def sorted_tuple(x):
return tuple(sorted(x))
def sorted_unique_tuple(x):
return tuple(np.unique(x))
def assert_unordered_tuple_list_equal(a, b, tpl=tuple):
if isinstance(a, np.ndarray):
a = a.tolist()
if isinstance(b, np.ndarray):
b = b.tolist()
a = list(map(tpl, a))
a.sort()
b = list(map(tpl, b))
b.sort()
assert_equal(a, b)
np.random.seed(1234)
points = [(0,0), (0,1), (1,0), (1,1), (0.5, 0.5), (0.5, 1.5)]
pathological_data_1 = np.array([
[-3.14,-3.14], [-3.14,-2.36], [-3.14,-1.57], [-3.14,-0.79],
[-3.14,0.0], [-3.14,0.79], [-3.14,1.57], [-3.14,2.36],
[-3.14,3.14], [-2.36,-3.14], [-2.36,-2.36], [-2.36,-1.57],
[-2.36,-0.79], [-2.36,0.0], [-2.36,0.79], [-2.36,1.57],
[-2.36,2.36], [-2.36,3.14], [-1.57,-0.79], [-1.57,0.79],
[-1.57,-1.57], [-1.57,0.0], [-1.57,1.57], [-1.57,-3.14],
[-1.57,-2.36], [-1.57,2.36], [-1.57,3.14], [-0.79,-1.57],
[-0.79,1.57], [-0.79,-3.14], [-0.79,-2.36], [-0.79,-0.79],
[-0.79,0.0], [-0.79,0.79], [-0.79,2.36], [-0.79,3.14],
[0.0,-3.14], [0.0,-2.36], [0.0,-1.57], [0.0,-0.79], [0.0,0.0],
[0.0,0.79], [0.0,1.57], [0.0,2.36], [0.0,3.14], [0.79,-3.14],
[0.79,-2.36], [0.79,-0.79], [0.79,0.0], [0.79,0.79],
[0.79,2.36], [0.79,3.14], [0.79,-1.57], [0.79,1.57],
[1.57,-3.14], [1.57,-2.36], [1.57,2.36], [1.57,3.14],
[1.57,-1.57], [1.57,0.0], [1.57,1.57], [1.57,-0.79],
[1.57,0.79], [2.36,-3.14], [2.36,-2.36], [2.36,-1.57],
[2.36,-0.79], [2.36,0.0], [2.36,0.79], [2.36,1.57],
[2.36,2.36], [2.36,3.14], [3.14,-3.14], [3.14,-2.36],
[3.14,-1.57], [3.14,-0.79], [3.14,0.0], [3.14,0.79],
[3.14,1.57], [3.14,2.36], [3.14,3.14],
])
pathological_data_2 = np.array([
[-1, -1], [-1, 0], [-1, 1],
[0, -1], [0, 0], [0, 1],
[1, -1 - np.finfo(np.float_).eps], [1, 0], [1, 1],
])
bug_2850_chunks = [np.random.rand(10, 2),
np.array([[0,0], [0,1], [1,0], [1,1]]) # add corners
]
# same with some additional chunks
bug_2850_chunks_2 = (bug_2850_chunks +
[np.random.rand(10, 2),
0.25 + np.array([[0,0], [0,1], [1,0], [1,1]])])
DATASETS = {
'some-points': np.asarray(points),
'random-2d': np.random.rand(30, 2),
'random-3d': np.random.rand(30, 3),
'random-4d': np.random.rand(30, 4),
'random-5d': np.random.rand(30, 5),
'random-6d': np.random.rand(10, 6),
'random-7d': np.random.rand(10, 7),
'random-8d': np.random.rand(10, 8),
'pathological-1': pathological_data_1,
'pathological-2': pathological_data_2
}
INCREMENTAL_DATASETS = {
'bug-2850': (bug_2850_chunks, None),
'bug-2850-2': (bug_2850_chunks_2, None),
}
def _add_inc_data(name, chunksize):
"""
Generate incremental datasets from basic data sets
"""
points = DATASETS[name]
ndim = points.shape[1]
opts = None
nmin = ndim + 2
if name == 'some-points':
# since Qz is not allowed, use QJ
opts = 'QJ Pp'
elif name == 'pathological-1':
# include enough points so that we get different x-coordinates
nmin = 12
chunks = [points[:nmin]]
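    # The first chunk seeds the incremental triangulation; the remaining points
    # are then added in pieces of `chunksize`.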
for j in xrange(nmin, len(points), chunksize):
chunks.append(points[j:j+chunksize])
new_name = "%s-chunk-%d" % (name, chunksize)
assert new_name not in INCREMENTAL_DATASETS
INCREMENTAL_DATASETS[new_name] = (chunks, opts)
for name in DATASETS:
for chunksize in 1, 4, 16:
_add_inc_data(name, chunksize)
class Test_Qhull(object):
def test_swapping(self):
# Check that Qhull state swapping works
x = qhull._Qhull(b'v',
np.array([[0,0],[0,1],[1,0],[1,1.],[0.5,0.5]]),
b'Qz')
xd = copy.deepcopy(x.get_voronoi_diagram())
y = qhull._Qhull(b'v',
np.array([[0,0],[0,1],[1,0],[1,2.]]),
b'Qz')
yd = copy.deepcopy(y.get_voronoi_diagram())
xd2 = copy.deepcopy(x.get_voronoi_diagram())
x.close()
yd2 = copy.deepcopy(y.get_voronoi_diagram())
y.close()
assert_raises(RuntimeError, x.get_voronoi_diagram)
assert_raises(RuntimeError, y.get_voronoi_diagram)
assert_allclose(xd[0], xd2[0])
assert_unordered_tuple_list_equal(xd[1], xd2[1], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(xd[2], xd2[2], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(xd[3], xd2[3], tpl=sorted_tuple)
assert_array_equal(xd[4], xd2[4])
assert_allclose(yd[0], yd2[0])
assert_unordered_tuple_list_equal(yd[1], yd2[1], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(yd[2], yd2[2], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(yd[3], yd2[3], tpl=sorted_tuple)
assert_array_equal(yd[4], yd2[4])
x.close()
assert_raises(RuntimeError, x.get_voronoi_diagram)
y.close()
assert_raises(RuntimeError, y.get_voronoi_diagram)
class TestUtilities(object):
"""
Check that utility functions work.
"""
def test_find_simplex(self):
# Simple check that simplex finding works
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
# +---+
# |\ 0|
# | \ |
# |1 \|
# +---+
assert_equal(tri.vertices, [[1, 3, 2], [3, 1, 0]])
for p in [(0.25, 0.25, 1),
(0.75, 0.75, 0),
(0.3, 0.2, 1)]:
i = tri.find_simplex(p[:2])
assert_equal(i, p[2], err_msg='%r' % (p,))
j = qhull.tsearch(tri, p[:2])
assert_equal(i, j)
def test_plane_distance(self):
# Compare plane distance from hyperplane equations obtained from Qhull
# to manually computed plane equations
x = np.array([(0,0), (1, 1), (1, 0), (0.99189033, 0.37674127),
(0.99440079, 0.45182168)], dtype=np.double)
p = np.array([0.99966555, 0.15685619], dtype=np.double)
tri = qhull.Delaunay(x)
z = tri.lift_points(x)
pz = tri.lift_points(p)
dist = tri.plane_distance(p)
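        # Recompute each simplex's plane distance by hand from the lifted points
        # and compare it against Qhull's result below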
for j, v in enumerate(tri.vertices):
x1 = z[v[0]]
x2 = z[v[1]]
x3 = z[v[2]]
n = np.cross(x1 - x3, x2 - x3)
n /= np.sqrt(np.dot(n, n))
n *= -np.sign(n[2])
d = np.dot(n, pz - x3)
assert_almost_equal(dist[j], d)
def test_convex_hull(self):
        # Simple check that the convex hull seems to work
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
# +---+
# |\ 0|
# | \ |
# |1 \|
# +---+
assert_equal(tri.convex_hull, [[3, 2], [1, 2], [1, 0], [3, 0]])
def test_volume_area(self):
#Basic check that we get back the correct volume and area for a cube
points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0),
(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
hull = qhull.ConvexHull(points)
assert_allclose(hull.volume, 1., rtol=1e-14,
err_msg="Volume of cube is incorrect")
assert_allclose(hull.area, 6., rtol=1e-14,
err_msg="Area of cube is incorrect")
def test_random_volume_area(self):
        # Test that the volume and area of a random 10-point convex hull are
        # consistent with the output of 'qconvex Qt s FA'
points = np.array([(0.362568364506, 0.472712355305, 0.347003084477),
(0.733731893414, 0.634480295684, 0.950513180209),
(0.511239955611, 0.876839441267, 0.418047827863),
(0.0765906233393, 0.527373281342, 0.6509863541),
(0.146694972056, 0.596725793348, 0.894860986685),
(0.513808585741, 0.069576205858, 0.530890338876),
(0.512343805118, 0.663537132612, 0.037689295973),
(0.47282965018, 0.462176697655, 0.14061843691),
(0.240584597123, 0.778660020591, 0.722913476339),
(0.951271745935, 0.967000673944, 0.890661319684)])
hull = qhull.ConvexHull(points)
assert_allclose(hull.volume, 0.14562013, rtol=1e-07,
err_msg="Volume of random polyhedron is incorrect")
assert_allclose(hull.area, 1.6670425, rtol=1e-07,
err_msg="Area of random polyhedron is incorrect")
def _check_barycentric_transforms(self, tri, err_msg="",
unit_cube=False,
unit_cube_tol=0):
"""Check that a triangulation has reasonable barycentric transforms"""
vertices = tri.points[tri.vertices]
sc = 1/(tri.ndim + 1.0)
centroids = vertices.sum(axis=1) * sc
# Either: (i) the simplex has a `nan` barycentric transform,
# or, (ii) the centroid is in the simplex
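        # The last row of each stored transform holds the offset r; the remaining
        # rows are T^-1, so T^-1 (x - r) gives the first ndim barycentric coordinates.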
def barycentric_transform(tr, x):
ndim = tr.shape[1]
r = tr[:,-1,:]
Tinv = tr[:,:-1,:]
return np.einsum('ijk,ik->ij', Tinv, x - r)
eps = np.finfo(float).eps
c = barycentric_transform(tri.transform, centroids)
olderr = np.seterr(invalid="ignore")
try:
ok = np.isnan(c).all(axis=1) | (abs(c - sc)/sc < 0.1).all(axis=1)
finally:
np.seterr(**olderr)
assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
# Invalid simplices must be (nearly) zero volume
q = vertices[:,:-1,:] - vertices[:,-1,None,:]
volume = np.array([np.linalg.det(q[k,:,:])
for k in range(tri.nsimplex)])
ok = np.isfinite(tri.transform[:,0,0]) | (volume < np.sqrt(eps))
assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
# Also, find_simplex for the centroid should end up in some
# simplex for the non-degenerate cases
j = tri.find_simplex(centroids)
ok = (j != -1) | np.isnan(tri.transform[:,0,0])
assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
if unit_cube:
# If in unit cube, no interior point should be marked out of hull
at_boundary = (centroids <= unit_cube_tol).any(axis=1)
at_boundary |= (centroids >= 1 - unit_cube_tol).any(axis=1)
ok = (j != -1) | at_boundary
assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
def test_degenerate_barycentric_transforms(self):
# The triangulation should not produce invalid barycentric
# transforms that stump the simplex finding
data = np.load(os.path.join(os.path.dirname(__file__), 'data',
'degenerate_pointset.npz'))
points = data['c']
data.close()
tri = qhull.Delaunay(points)
# Check that there are not too many invalid simplices
bad_count = np.isnan(tri.transform[:,0,0]).sum()
assert_(bad_count < 20, bad_count)
# Check the transforms
self._check_barycentric_transforms(tri)
@dec.slow
def test_more_barycentric_transforms(self):
# Triangulate some "nasty" grids
eps = np.finfo(float).eps
npoints = {2: 70, 3: 11, 4: 5, 5: 3}
_is_32bit_platform = np.intp(0).itemsize < 8
for ndim in xrange(2, 6):
            # Generate a uniform grid in the n-d unit cube
x = np.linspace(0, 1, npoints[ndim])
grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T
err_msg = "ndim=%d" % ndim
# Check using regular grid
tri = qhull.Delaunay(grid)
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True)
# Check with eps-perturbations
np.random.seed(1234)
m = (np.random.rand(grid.shape[0]) < 0.2)
grid[m,:] += 2*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
tri = qhull.Delaunay(grid)
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True,
unit_cube_tol=2*eps)
# Check with duplicated data
tri = qhull.Delaunay(np.r_[grid, grid])
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True,
unit_cube_tol=2*eps)
if not _is_32bit_platform:
# test numerically unstable, and reported to fail on 32-bit
# installs
# Check with larger perturbations
np.random.seed(4321)
m = (np.random.rand(grid.shape[0]) < 0.2)
grid[m,:] += 1000*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
tri = qhull.Delaunay(grid)
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True,
unit_cube_tol=1500*eps)
# Check with yet larger perturbations
np.random.seed(4321)
m = (np.random.rand(grid.shape[0]) < 0.2)
grid[m,:] += 1e6*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
tri = qhull.Delaunay(grid)
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True,
unit_cube_tol=1e7*eps)
class TestVertexNeighborVertices(object):
def _check(self, tri):
expected = [set() for j in range(tri.points.shape[0])]
for s in tri.simplices:
for a in s:
for b in s:
if a != b:
expected[a].add(b)
indices, indptr = tri.vertex_neighbor_vertices
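        # CSR-style layout: the first array holds per-vertex offsets into the
        # second array, which lists the neighbouring vertex indices.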
got = []
for j in range(tri.points.shape[0]):
got.append(set(map(int, indptr[indices[j]:indices[j+1]])))
assert_equal(got, expected, err_msg="%r != %r" % (got, expected))
def test_triangle(self):
points = np.array([(0,0), (0,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
self._check(tri)
def test_rectangle(self):
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
self._check(tri)
def test_complicated(self):
points = np.array([(0,0), (0,1), (1,1), (1,0),
(0.5, 0.5), (0.9, 0.5)], dtype=np.double)
tri = qhull.Delaunay(points)
self._check(tri)
class TestDelaunay(object):
"""
Check that triangulation works.
"""
def test_masked_array_fails(self):
masked_array = np.ma.masked_all(1)
assert_raises(ValueError, qhull.Delaunay, masked_array)
def test_array_with_nans_fails(self):
points_with_nan = np.array([(0,0), (0,1), (1,1), (1,np.nan)], dtype=np.double)
assert_raises(ValueError, qhull.Delaunay, points_with_nan)
def test_nd_simplex(self):
        # simple smoke test: triangulate an n-dimensional simplex
for nd in xrange(2, 8):
points = np.zeros((nd+1, nd))
for j in xrange(nd):
points[j,j] = 1.0
points[-1,:] = 1.0
tri = qhull.Delaunay(points)
tri.vertices.sort()
assert_equal(tri.vertices, np.arange(nd+1, dtype=int)[None,:])
assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=int)[None,:])
def test_2d_square(self):
# simple smoke test: 2d square
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
assert_equal(tri.vertices, [[1, 3, 2], [3, 1, 0]])
assert_equal(tri.neighbors, [[-1, -1, 1], [-1, -1, 0]])
def test_duplicate_points(self):
x = np.array([0, 1, 0, 1], dtype=np.float64)
y = np.array([0, 0, 1, 1], dtype=np.float64)
xp = np.r_[x, x]
yp = np.r_[y, y]
# shouldn't fail on duplicate points
tri = qhull.Delaunay(np.c_[x, y])
tri2 = qhull.Delaunay(np.c_[xp, yp])
def test_pathological(self):
# both should succeed
points = DATASETS['pathological-1']
tri = qhull.Delaunay(points)
assert_equal(tri.points[tri.vertices].max(), points.max())
assert_equal(tri.points[tri.vertices].min(), points.min())
points = DATASETS['pathological-2']
tri = qhull.Delaunay(points)
assert_equal(tri.points[tri.vertices].max(), points.max())
assert_equal(tri.points[tri.vertices].min(), points.min())
def test_joggle(self):
# Check that the option QJ indeed guarantees that all input points
# occur as vertices of the triangulation
points = np.random.rand(10, 2)
points = np.r_[points, points] # duplicate input data
tri = qhull.Delaunay(points, qhull_options="QJ Qbb Pp")
assert_array_equal(np.unique(tri.simplices.ravel()),
np.arange(len(points)))
def test_coplanar(self):
# Check that the coplanar point output option indeed works
points = np.random.rand(10, 2)
points = np.r_[points, points] # duplicate input data
tri = qhull.Delaunay(points)
assert_(len(np.unique(tri.simplices.ravel())) == len(points)//2)
assert_(len(tri.coplanar) == len(points)//2)
assert_(len(np.unique(tri.coplanar[:,2])) == len(points)//2)
assert_(np.all(tri.vertex_to_simplex >= 0))
def test_furthest_site(self):
points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
tri = qhull.Delaunay(points, furthest_site=True)
expected = np.array([(1, 4, 0), (4, 2, 0)]) # from Qhull
assert_array_equal(tri.simplices, expected)
def test_incremental(self):
# Test incremental construction of the triangulation
def check(name):
chunks, opts = INCREMENTAL_DATASETS[name]
points = np.concatenate(chunks, axis=0)
obj = qhull.Delaunay(chunks[0], incremental=True,
qhull_options=opts)
for chunk in chunks[1:]:
obj.add_points(chunk)
obj2 = qhull.Delaunay(points)
obj3 = qhull.Delaunay(chunks[0], incremental=True,
qhull_options=opts)
if len(chunks) > 1:
obj3.add_points(np.concatenate(chunks[1:], axis=0),
restart=True)
# Check that the incremental mode agrees with upfront mode
if name.startswith('pathological'):
# XXX: These produce valid but different triangulations.
# They look OK when plotted, but how to check them?
assert_array_equal(np.unique(obj.simplices.ravel()),
np.arange(points.shape[0]))
assert_array_equal(np.unique(obj2.simplices.ravel()),
np.arange(points.shape[0]))
else:
assert_unordered_tuple_list_equal(obj.simplices, obj2.simplices,
tpl=sorted_tuple)
assert_unordered_tuple_list_equal(obj2.simplices, obj3.simplices,
tpl=sorted_tuple)
for name in sorted(INCREMENTAL_DATASETS):
yield check, name
def assert_hulls_equal(points, facets_1, facets_2):
# Check that two convex hulls constructed from the same point set
# are equal
facets_1 = set(map(sorted_tuple, facets_1))
facets_2 = set(map(sorted_tuple, facets_2))
if facets_1 != facets_2 and points.shape[1] == 2:
# The direct check fails for the pathological cases
# --- then the convex hull from Delaunay differs (due
# to rounding error etc.) from the hull computed
# otherwise, by the question whether (tricoplanar)
# points that lie almost exactly on the hull are
# included as vertices of the hull or not.
#
# So we check the result, and accept it if the Delaunay
# hull line segments are a subset of the usual hull.
eps = 1000 * np.finfo(float).eps
for a, b in facets_1:
for ap, bp in facets_2:
t = points[bp] - points[ap]
t /= np.linalg.norm(t) # tangent
n = np.array([-t[1], t[0]]) # normal
# check that the two line segments are parallel
# to the same line
c1 = np.dot(n, points[b] - points[ap])
c2 = np.dot(n, points[a] - points[ap])
if not np.allclose(np.dot(c1, n), 0):
continue
if not np.allclose(np.dot(c2, n), 0):
continue
# Check that the segment (a, b) is contained in (ap, bp)
c1 = np.dot(t, points[a] - points[ap])
c2 = np.dot(t, points[b] - points[ap])
c3 = np.dot(t, points[bp] - points[ap])
if c1 < -eps or c1 > c3 + eps:
continue
if c2 < -eps or c2 > c3 + eps:
continue
# OK:
break
else:
raise AssertionError("comparison fails")
# it was OK
return
assert_equal(facets_1, facets_2)
class TestConvexHull:
def test_masked_array_fails(self):
masked_array = np.ma.masked_all(1)
assert_raises(ValueError, qhull.ConvexHull, masked_array)
def test_array_with_nans_fails(self):
points_with_nan = np.array([(0,0), (1,1), (2,np.nan)], dtype=np.double)
assert_raises(ValueError, qhull.ConvexHull, points_with_nan)
def test_hull_consistency_tri(self):
        # Check that the convex hull returned by qhull and the hull
        # constructed from the ndim Delaunay triangulation agree
def check(name):
points = DATASETS[name]
tri = qhull.Delaunay(points)
hull = qhull.ConvexHull(points)
assert_hulls_equal(points, tri.convex_hull, hull.simplices)
# Check that the hull extremes are as expected
if points.shape[1] == 2:
assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))
else:
assert_equal(np.unique(hull.simplices), hull.vertices)
for name in sorted(DATASETS):
yield check, name
def test_incremental(self):
# Test incremental construction of the convex hull
def check(name):
chunks, _ = INCREMENTAL_DATASETS[name]
points = np.concatenate(chunks, axis=0)
obj = qhull.ConvexHull(chunks[0], incremental=True)
for chunk in chunks[1:]:
obj.add_points(chunk)
obj2 = qhull.ConvexHull(points)
obj3 = qhull.ConvexHull(chunks[0], incremental=True)
if len(chunks) > 1:
obj3.add_points(np.concatenate(chunks[1:], axis=0),
restart=True)
# Check that the incremental mode agrees with upfront mode
assert_hulls_equal(points, obj.simplices, obj2.simplices)
assert_hulls_equal(points, obj.simplices, obj3.simplices)
for name in sorted(INCREMENTAL_DATASETS):
yield check, name
def test_vertices_2d(self):
# The vertices should be in counterclockwise order in 2-D
np.random.seed(1234)
points = np.random.rand(30, 2)
hull = qhull.ConvexHull(points)
assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))
# Check counterclockwiseness
x, y = hull.points[hull.vertices].T
angle = np.arctan2(y - y.mean(), x - x.mean())
assert_(np.all(np.diff(np.unwrap(angle)) > 0))
def test_volume_area(self):
# Basic check that we get back the correct volume and area for a cube
points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0),
(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
tri = qhull.ConvexHull(points)
assert_allclose(tri.volume, 1., rtol=1e-14)
assert_allclose(tri.area, 6., rtol=1e-14)
class TestVoronoi:
def test_masked_array_fails(self):
masked_array = np.ma.masked_all(1)
assert_raises(ValueError, qhull.Voronoi, masked_array)
def test_simple(self):
# Simple case with known Voronoi diagram
points = [(0, 0), (0, 1), (0, 2),
(1, 0), (1, 1), (1, 2),
(2, 0), (2, 1), (2, 2)]
# qhull v o Fv Qbb Qc Qz < dat
output = """
2
5 10 1
-10.101 -10.101
0.5 0.5
1.5 0.5
0.5 1.5
1.5 1.5
2 0 1
3 3 0 1
2 0 3
3 2 0 1
4 4 3 1 2
3 4 0 3
2 0 2
3 4 0 2
2 0 4
0
12
4 0 3 0 1
4 0 1 0 1
4 1 4 1 3
4 1 2 0 3
4 2 5 0 3
4 3 4 1 2
4 3 6 0 2
4 4 5 3 4
4 4 7 2 4
4 5 8 0 4
4 6 7 0 2
4 7 8 0 4
"""
self._compare_qvoronoi(points, output)
def _compare_qvoronoi(self, points, output, **kw):
"""Compare to output from 'qvoronoi o Fv < data' to Voronoi()"""
# Parse output
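        # 'qvoronoi o' prints the dimension, then vertex/region counts, the vertex
        # coordinates and the regions; the trailing 'Fv' section lists the ridges.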
output = [list(map(float, x.split())) for x in output.strip().splitlines()]
nvertex = int(output[1][0])
vertices = list(map(tuple, output[3:2+nvertex])) # exclude inf
nregion = int(output[1][1])
regions = [[int(y)-1 for y in x[1:]]
for x in output[2+nvertex:2+nvertex+nregion]]
nridge = int(output[2+nvertex+nregion][0])
ridge_points = [[int(y) for y in x[1:3]]
for x in output[3+nvertex+nregion:]]
ridge_vertices = [[int(y)-1 for y in x[3:]]
for x in output[3+nvertex+nregion:]]
# Compare results
vor = qhull.Voronoi(points, **kw)
def sorttuple(x):
return tuple(sorted(x))
assert_allclose(vor.vertices, vertices)
assert_equal(set(map(tuple, vor.regions)),
set(map(tuple, regions)))
p1 = list(zip(list(map(sorttuple, ridge_points)), list(map(sorttuple, ridge_vertices))))
p2 = list(zip(list(map(sorttuple, vor.ridge_points.tolist())),
list(map(sorttuple, vor.ridge_vertices))))
p1.sort()
p2.sort()
assert_equal(p1, p2)
def test_ridges(self):
# Check that the ridges computed by Voronoi indeed separate
# the regions of nearest neighborhood, by comparing the result
# to KDTree.
def check(name):
points = DATASETS[name]
tree = KDTree(points)
vor = qhull.Voronoi(points)
for p, v in vor.ridge_dict.items():
# consider only finite ridges
if not np.all(np.asarray(v) >= 0):
continue
ridge_midpoint = vor.vertices[v].mean(axis=0)
d = 1e-6 * (points[p[0]] - ridge_midpoint)
dist, k = tree.query(ridge_midpoint + d, k=1)
assert_equal(k, p[0])
dist, k = tree.query(ridge_midpoint - d, k=1)
assert_equal(k, p[1])
for name in DATASETS:
yield check, name
def test_furthest_site(self):
points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
# qhull v o Fv Qbb Qc Qu < dat
output = """
2
3 5 1
-10.101 -10.101
0.6000000000000001 0.5
0.5 0.6000000000000001
3 0 1 2
2 0 1
2 0 2
0
3 0 1 2
5
4 0 2 0 2
4 0 1 0 1
4 0 4 1 2
4 1 4 0 1
4 2 4 0 2
"""
self._compare_qvoronoi(points, output, furthest_site=True)
def test_incremental(self):
# Test incremental construction of the triangulation
def check(name):
chunks, opts = INCREMENTAL_DATASETS[name]
points = np.concatenate(chunks, axis=0)
obj = qhull.Voronoi(chunks[0], incremental=True,
qhull_options=opts)
for chunk in chunks[1:]:
obj.add_points(chunk)
obj2 = qhull.Voronoi(points)
obj3 = qhull.Voronoi(chunks[0], incremental=True,
qhull_options=opts)
if len(chunks) > 1:
obj3.add_points(np.concatenate(chunks[1:], axis=0),
restart=True)
# -- Check that the incremental mode agrees with upfront mode
assert_equal(len(obj.point_region), len(obj2.point_region))
assert_equal(len(obj.point_region), len(obj3.point_region))
# The vertices may be in different order or duplicated in
# the incremental map
for objx in obj, obj3:
vertex_map = {-1: -1}
for i, v in enumerate(objx.vertices):
for j, v2 in enumerate(obj2.vertices):
if np.allclose(v, v2):
vertex_map[i] = j
def remap(x):
if hasattr(x, '__len__'):
return tuple(set([remap(y) for y in x]))
try:
return vertex_map[x]
except KeyError:
raise AssertionError("incremental result has spurious vertex at %r"
% (objx.vertices[x],))
def simplified(x):
items = set(map(sorted_tuple, x))
if () in items:
items.remove(())
items = [x for x in items if len(x) > 1]
items.sort()
return items
assert_equal(
simplified(remap(objx.regions)),
simplified(obj2.regions)
)
assert_equal(
simplified(remap(objx.ridge_vertices)),
simplified(obj2.ridge_vertices)
)
# XXX: compare ridge_points --- not clear exactly how to do this
for name in sorted(INCREMENTAL_DATASETS):
if INCREMENTAL_DATASETS[name][0][0].shape[1] > 3:
# too slow (testing of the result --- qhull is still fast)
continue
yield check, name
if __name__ == "__main__":
run_module_suite()
| 35.452301 | 96 | 0.52808 |
abe01749a6244c0de09b4fcd567083d3d140bd8a | 15,158 | py | Python | tests/integration/modules/pip.py | preoctopus/salt | aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d | ["Apache-2.0"] | 1 | 2018-09-19T22:42:54.000Z | 2018-09-19T22:42:54.000Z | tests/integration/modules/pip.py | preoctopus/salt | aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d | ["Apache-2.0"] | 1 | 2020-10-21T21:38:49.000Z | 2020-10-21T21:38:49.000Z | tests/integration/modules/pip.py | preoctopus/salt | aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d | ["Apache-2.0"] | 1 | 2019-07-23T13:42:23.000Z | 2019-07-23T13:42:23.000Z |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.integration.modules.pip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import
import os
import pwd
import shutil
import re
import tempfile
# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
@skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
class PipModuleTest(integration.ModuleCase):
def setUp(self):
super(PipModuleTest, self).setUp()
self.venv_test_dir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
self.venv_dir = os.path.join(self.venv_test_dir, 'venv')
for key in os.environ.copy():
if key.startswith('PIP_'):
os.environ.pop(key)
self.pip_temp = os.path.join(self.venv_test_dir, '.pip-temp')
if not os.path.isdir(self.pip_temp):
os.makedirs(self.pip_temp)
os.environ['PIP_SOURCE_DIR'] = os.environ['PIP_BUILD_DIR'] = ''
def pip_successful_install(self, target, expect=('flake8', 'pep8',)):
'''
isolate regex for extracting `successful install` message from pip
'''
expect = set(expect)
expect_str = '|'.join(expect)
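        # Grab the "Successfully installed ..." line from pip's output, then check
        # that every expected package name appears in it.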
success = re.search(
r'^.*Successfully installed\s([^\n]+)(?:Clean.*)?',
target,
re.M | re.S)
success_for = re.findall(
r'({0})(?:-(?:[\d\.-]))?'.format(expect_str),
success.groups()[0]
) if success else []
return expect.issubset(set(success_for))
def test_issue_2087_missing_pip(self):
# Let's create the testing virtualenv
self.run_function('virtualenv.create', [self.venv_dir])
# Let's remove the pip binary
pip_bin = os.path.join(self.venv_dir, 'bin', 'pip')
if not os.path.isfile(pip_bin):
self.skipTest(
                'Failed to find the pip binary in the test virtualenv'
)
os.remove(pip_bin)
        # Let's run the pip-dependent functions
for func in ('pip.freeze', 'pip.list'):
ret = self.run_function(func, bin_env=self.venv_dir)
self.assertIn(
'Command required for \'{0}\' not found: '
'Could not find a `pip` binary in virtualenv'.format(func),
ret
)
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_requirements_as_list_of_chains__sans_no_chown__cwd_set__absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
req1_filename = os.path.join(self.venv_dir, 'requirements1.txt')
req1b_filename = os.path.join(self.venv_dir, 'requirements1b.txt')
req2_filename = os.path.join(self.venv_dir, 'requirements2.txt')
req2b_filename = os.path.join(self.venv_dir, 'requirements2b.txt')
with salt.utils.fopen(req1_filename, 'wb') as f:
f.write('-r requirements1b.txt\n')
with salt.utils.fopen(req1b_filename, 'wb') as f:
f.write('flake8\n')
with salt.utils.fopen(req2_filename, 'wb') as f:
f.write('-r requirements2b.txt\n')
with salt.utils.fopen(req2b_filename, 'wb') as f:
f.write('pep8\n')
this_user = pwd.getpwuid(os.getuid())[0]
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
'pip.install', requirements=requirements_list, user=this_user,
bin_env=self.venv_dir, cwd=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
found = self.pip_successful_install(ret['stdout'])
self.assertTrue(found)
except (AssertionError, TypeError):
import pprint
pprint.pprint(ret)
raise
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_requirements_as_list_of_chains__sans_no_chown__cwd_not_set__absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
req1_filename = os.path.join(self.venv_dir, 'requirements1.txt')
req1b_filename = os.path.join(self.venv_dir, 'requirements1b.txt')
req2_filename = os.path.join(self.venv_dir, 'requirements2.txt')
req2b_filename = os.path.join(self.venv_dir, 'requirements2b.txt')
with salt.utils.fopen(req1_filename, 'wb') as f:
f.write('-r requirements1b.txt\n')
with salt.utils.fopen(req1b_filename, 'wb') as f:
f.write('flake8\n')
with salt.utils.fopen(req2_filename, 'wb') as f:
f.write('-r requirements2b.txt\n')
with salt.utils.fopen(req2b_filename, 'wb') as f:
f.write('pep8\n')
this_user = pwd.getpwuid(os.getuid())[0]
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
'pip.install', requirements=requirements_list, user=this_user,
bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
found = self.pip_successful_install(ret['stdout'])
self.assertTrue(found)
except (AssertionError, TypeError):
import pprint
pprint.pprint(ret)
raise
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_requirements_as_list__sans_no_chown__absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
req1_filename = os.path.join(self.venv_dir, 'requirements.txt')
req2_filename = os.path.join(self.venv_dir, 'requirements2.txt')
with salt.utils.fopen(req1_filename, 'wb') as f:
f.write('flake8\n')
with salt.utils.fopen(req2_filename, 'wb') as f:
f.write('pep8\n')
this_user = pwd.getpwuid(os.getuid())[0]
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
'pip.install', requirements=requirements_list, user=this_user,
bin_env=self.venv_dir
)
found = self.pip_successful_install(ret['stdout'])
try:
self.assertEqual(ret['retcode'], 0)
self.assertTrue(found)
except (AssertionError, TypeError):
import pprint
pprint.pprint(ret)
raise
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_requirements_as_list__sans_no_chown__non_absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
req1_filename = 'requirements.txt'
req2_filename = 'requirements2.txt'
req_cwd = self.venv_dir
req1_filepath = os.path.join(req_cwd, req1_filename)
req2_filepath = os.path.join(req_cwd, req2_filename)
with salt.utils.fopen(req1_filepath, 'wb') as f:
f.write('flake8\n')
with salt.utils.fopen(req2_filepath, 'wb') as f:
f.write('pep8\n')
this_user = pwd.getpwuid(os.getuid())[0]
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
'pip.install', requirements=requirements_list, user=this_user,
bin_env=self.venv_dir, cwd=req_cwd
)
try:
self.assertEqual(ret['retcode'], 0)
found = self.pip_successful_install(ret['stdout'])
self.assertTrue(found)
except (AssertionError, TypeError):
import pprint
pprint.pprint(ret)
raise
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_chained_requirements__sans_no_chown__absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
req1_filename = os.path.join(self.venv_dir, 'requirements.txt')
req2_filename = os.path.join(self.venv_dir, 'requirements2.txt')
with salt.utils.fopen(req1_filename, 'wb') as f:
f.write('-r requirements2.txt')
with salt.utils.fopen(req2_filename, 'wb') as f:
f.write('pep8')
this_user = pwd.getpwuid(os.getuid())[0]
ret = self.run_function(
'pip.install', requirements=req1_filename, user=this_user,
bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn('installed pep8', ret['stdout'])
except (AssertionError, TypeError):
import pprint
pprint.pprint(ret)
raise
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_chained_requirements__sans_no_chown__non_absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
        req_basepath = self.venv_dir
req1_filename = 'requirements.txt'
req2_filename = 'requirements2.txt'
req1_file = os.path.join(self.venv_dir, req1_filename)
req2_file = os.path.join(self.venv_dir, req2_filename)
with salt.utils.fopen(req1_file, 'wb') as f:
f.write('-r requirements2.txt')
with salt.utils.fopen(req2_file, 'wb') as f:
f.write('pep8')
this_user = pwd.getpwuid(os.getuid())[0]
ret = self.run_function(
'pip.install', requirements=req1_filename, user=this_user,
no_chown=False, cwd=req_basepath, bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn('installed pep8', ret['stdout'])
except (AssertionError, TypeError):
import pprint
pprint.pprint(ret)
raise
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_issue_4805_nested_requirements_user_no_chown(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
req1_filename = os.path.join(self.venv_dir, 'requirements.txt')
req2_filename = os.path.join(self.venv_dir, 'requirements2.txt')
with salt.utils.fopen(req1_filename, 'wb') as f:
f.write('-r requirements2.txt')
with salt.utils.fopen(req2_filename, 'wb') as f:
f.write('pep8')
this_user = pwd.getpwuid(os.getuid())[0]
ret = self.run_function(
'pip.install', requirements=req1_filename, user=this_user,
no_chown=True, bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn('installed pep8', ret['stdout'])
except (AssertionError, TypeError):
import pprint
pprint.pprint(ret)
raise
def test_pip_uninstall(self):
# Let's create the testing virtualenv
self.run_function('virtualenv.create', [self.venv_dir])
ret = self.run_function('pip.install', ['pep8'], bin_env=self.venv_dir)
self.assertEqual(ret['retcode'], 0)
self.assertIn('installed pep8', ret['stdout'])
ret = self.run_function(
'pip.uninstall', ['pep8'], bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn('uninstalled pep8', ret['stdout'])
except AssertionError:
import pprint
pprint.pprint(ret)
raise
def test_pip_install_upgrade(self):
# Create the testing virtualenv
self.run_function('virtualenv.create', [self.venv_dir])
ret = self.run_function(
'pip.install', ['pep8==1.3.4'], bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn('installed pep8', ret['stdout'])
except AssertionError:
import pprint
pprint.pprint(ret)
raise
ret = self.run_function(
'pip.install',
['pep8'],
bin_env=self.venv_dir,
upgrade=True
)
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn('installed pep8', ret['stdout'])
except AssertionError:
import pprint
pprint.pprint(ret)
raise
ret = self.run_function(
'pip.uninstall', ['pep8'], bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn('uninstalled pep8', ret['stdout'])
except AssertionError:
import pprint
pprint.pprint(ret)
raise
def test_pip_install_multiple_editables(self):
editables = [
'git+https://github.com/jek/blinker.git#egg=Blinker',
'git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting'
]
# Create the testing virtualenv
self.run_function('virtualenv.create', [self.venv_dir])
ret = self.run_function(
'pip.install', [],
editable='{0}'.format(','.join(editables)),
bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn(
'Successfully installed Blinker SaltTesting', ret['stdout']
)
except AssertionError:
import pprint
pprint.pprint(ret)
raise
def test_pip_install_multiple_editables_and_pkgs(self):
editables = [
'git+https://github.com/jek/blinker.git#egg=Blinker',
'git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting'
]
# Create the testing virtualenv
self.run_function('virtualenv.create', [self.venv_dir])
ret = self.run_function(
'pip.install', ['pep8'],
editable='{0}'.format(','.join(editables)),
bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
for package in ('Blinker', 'SaltTesting', 'pep8'):
self.assertRegexpMatches(
ret['stdout'],
r'(?:.*)(Successfully installed)(?:.*)({0})(?:.*)'.format(package)
)
except AssertionError:
import pprint
pprint.pprint(ret)
raise
def tearDown(self):
super(PipModuleTest, self).tearDown()
if os.path.isdir(self.venv_test_dir):
shutil.rmtree(self.venv_test_dir)
if os.path.isdir(self.pip_temp):
shutil.rmtree(self.pip_temp)
if __name__ == '__main__':
from integration import run_tests
run_tests(PipModuleTest)
| 35.333333 | 98 | 0.601531 |
3ceb48d0a106f0f087e5779e52a273cc4d89ac64 | 1,793 | py | Python | src/machinable/schema.py | machinable-org/machinable | 9d96e942dde05d68699bc7bc0c3d062ee18652ad | ["MIT"] | 23 | 2020-02-28T14:29:04.000Z | 2021-12-23T20:50:54.000Z | src/machinable/schema.py | machinable-org/machinable | 9d96e942dde05d68699bc7bc0c3d062ee18652ad | ["MIT"] | 172 | 2020-02-24T12:12:11.000Z | 2022-03-29T03:08:24.000Z | src/machinable/schema.py | machinable-org/machinable | 9d96e942dde05d68699bc7bc0c3d062ee18652ad | ["MIT"] | 1 | 2020-11-23T22:42:20.000Z | 2020-11-23T22:42:20.000Z |
from typing import TYPE_CHECKING, Dict, Optional
from datetime import datetime
from machinable.types import ComponentType, VersionType
from machinable.utils import (
encode_experiment_id,
generate_experiment_id,
generate_nickname,
generate_seed,
)
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
if TYPE_CHECKING:
from machinable.storage.storage import Storage
class Model(BaseModel):
# morphMany relation to storage
_storage_id: Optional[str] = PrivateAttr(default=None)
_storage_instance: Optional["Storage"] = PrivateAttr(default=None)
class Project(Model):
directory: str
version: VersionType = None
code_version: Optional[dict] = None
code_diff: Optional[str] = None
host_info: Optional[dict] = None
class Experiment(Model):
interface: ComponentType
uses: Dict[str, ComponentType] = {}
experiment_id: str = Field(
default_factory=lambda: encode_experiment_id(generate_experiment_id())
)
timestamp: int = Field(
default_factory=lambda: int(datetime.now().timestamp())
)
seed: int = Field(default_factory=generate_seed)
config: Optional[dict] = None
nickname: str = Field(default_factory=generate_nickname)
derived_from_id: Optional[str] = None
derived_from_timestamp: Optional[int] = None
class Repository(Model):
storage: ComponentType
default_group: Optional[str] = None
class Group(Model):
pattern: str
path: Optional[str] = None
class Execution(Model):
engine: ComponentType
resources: Optional[dict] = None
host: Optional[dict] = None
timestamp: float = Field(default_factory=lambda: datetime.now().timestamp())
class Record(Model):
scope: str
current: dict = {}
    last: Optional[dict] = None
| 25.985507 | 80 | 0.722811 |
6317e99a527290b0ef8230f1e5e4c3ac810508ba | 716 | py | Python | src/979. Distribute Coins in Binary Tree.py | rajshrivastava/LeetCode | dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0 | ["MIT"] | 1 | 2019-12-16T08:18:25.000Z | 2019-12-16T08:18:25.000Z | src/979. Distribute Coins in Binary Tree.py | rajshrivastava/LeetCode | dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0 | ["MIT"] | null | null | null | src/979. Distribute Coins in Binary Tree.py | rajshrivastava/LeetCode | dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0 | ["MIT"] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def distributeCoins(self, root: TreeNode) -> int:
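        # Post-order DFS: each subtree reports its coin balance (coins held minus
        # number of nodes). Every unit of imbalance must cross the edge to the
        # parent, so the answer is the sum of absolute balances over all subtrees.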
def count_Nodes_Coins(node = root):
nonlocal ops
if not node:
return 0
left = count_Nodes_Coins(node.left)
right = count_Nodes_Coins(node.right)
balance = node.val + left + right - 1
ops += abs(balance)
return balance
ops = 0
count_Nodes_Coins()
return ops
| 28.64 | 55 | 0.5 |
0d0d10fc8d2918595eea186faf02c394965e68f5 | 169,938 | py | Python | leveler/leveler.py | StuxieDev/StuxCogs | ce9fa4b2d966f668570f4bde9a652cab9b3c1c0a | ["MIT"] | null | null | null | leveler/leveler.py | StuxieDev/StuxCogs | ce9fa4b2d966f668570f4bde9a652cab9b3c1c0a | ["MIT"] | null | null | null | leveler/leveler.py | StuxieDev/StuxCogs | ce9fa4b2d966f668570f4bde9a652cab9b3c1c0a | ["MIT"] | null | null | null |
import asyncio
import contextlib
import logging
import operator
import os
import platform
import random
import re
import string
import textwrap
import time
from asyncio import TimeoutError
from copy import copy
from datetime import datetime, timedelta
from tabulate import tabulate
from io import BytesIO
from typing import Union
import aiohttp
import discord
import math
import numpy
import scipy
import scipy.cluster
from PIL import Image, ImageDraw, ImageFilter, ImageFont, ImageOps
from discord.utils import find
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo import errors as mongoerrors
from redbot.core.bot import Red
from redbot.core import Config, bank, checks, commands
from redbot.core.data_manager import bundled_data_path, cog_data_path
from redbot.core.utils.chat_formatting import box, pagify
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from redbot.core.utils.predicates import MessagePredicate
log = logging.getLogger("red.StuxCogs.leveler")
async def non_global_bank(ctx):
return not await bank.is_global()
class Leveler(commands.Cog):
"""A level up thing with image generation!"""
def __init__(self, bot: Red):
self.bot = bot
# fonts
self.font_file = f"{bundled_data_path(self)}/font.ttf"
self.font_bold_file = f"{bundled_data_path(self)}/font_bold.ttf"
self.font_unicode_file = f"{bundled_data_path(self)}/unicode.ttf"
self.config = Config.get_conf(self, identifier=2733301001)
default_mongodb = {
"host": "localhost",
"port": 27017,
"username": None,
"password": None,
"db_name": "leveler",
}
default_global = {
"bg_price": 0,
"badge_type": "circles",
"removed_backgrounds": {"profile": [], "rank": [], "levelup": []},
"backgrounds": {"profile": {}, "rank": {}, "levelup": {}},
"xp": [15, 20],
"default_profile": "https://cdn.stux.media/levels/bgs/profile/default.jpg",
"default_rank": "https://cdn.stux.media/levels/bgs/rank/default.jpg",
"default_levelup": "https://cdn.stux.media/levels/bgs/levelup/default.jpg",
"rep_price": 0,
}
default_guild = {
"disabled": False,
"lvl_msg": False,
"mentions": True,
"text_only": False,
"private_lvl_message": False,
"lvl_msg_lock": None,
"msg_credits": 0,
"ignored_channels": [],
}
self.config.init_custom("MONGODB", -1)
self.config.register_custom("MONGODB", **default_mongodb)
self.config.register_global(**default_global)
self.config.register_guild(**default_guild)
self._db_ready = False
self.client = None
self.db = None
self.session = aiohttp.ClientSession(loop=self.bot.loop)
self._message_tasks = []
self._message_task_processor = asyncio.create_task(self.process_tasks())
self._message_task_processor.add_done_callback(self._task_error_logger)
async def initialize(self):
await self._connect_to_mongo()
async def _connect_to_mongo(self):
if self._db_ready:
self._db_ready = False
self._disconnect_mongo()
config = await self.config.custom("MONGODB").all()
log.debug(f"Leveler is connecting to a MongoDB server at: {config}")
try:
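            # db_name is not a client option; it is only used below to select the database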
self.client = AsyncIOMotorClient(**{k: v for k, v in config.items() if not k == "db_name"})
await self.client.server_info()
self.db = self.client[config["db_name"]]
self._db_ready = True
except (
mongoerrors.ServerSelectionTimeoutError,
mongoerrors.ConfigurationError,
mongoerrors.OperationFailure,
) as error:
log.exception(
"Can't connect to the MongoDB server.\nFollow instructions on Git/online to install MongoDB.",
exc_info=error,
)
self.client = None
self.db = None
return self.client
def _disconnect_mongo(self):
if self.client:
self.client.close()
async def cog_check(self, ctx):
if (ctx.command.parent is self.levelerset) or ctx.command is self.levelerset:
return True
return self._db_ready
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
if self._message_task_processor:
self._message_task_processor.cancel()
self._disconnect_mongo()
def _task_error_logger(self, fut):
"""Logs errors in the _message_task_processor task."""
try:
fut.result()
except asyncio.CancelledError:
pass
except Exception as e:
log.critical("The leveler task encountered an unexpected error and has stopped.\n", exc_info=e)
@property
def DEFAULT_BGS(self):
return {
"profile": {
"default": "https://cdn.stux.media/levels/bgs/profile/default.jpg",
"alice": "https://cdn.stux.media/levels/bgs/profile/alice.png",
"abstract": "https://cdn.stux.media/levels/bgs/profile/abstract.png",
"bluestairs": "https://cdn.stux.media/levels/bgs/profile/bluestairs.png",
"lamp": "https://cdn.stux.media/levels/bgs/profile/lamp.jpg",
"coastline": "https://cdn.stux.media/levels/bgs/profile/coastline.jpg",
"redblack": "https://cdn.stux.media/levels/bgs/profile/redblack.jpg",
"iceberg": "https://cdn.stux.media/levels/bgs/profile/iceberg.png",
"miraiglasses": "https://cdn.stux.media/levels/bgs/profile/miraiglasses.png",
"miraikuriyama": "https://cdn.stux.media/levels/bgs/profile/miraikuriyama.png",
"mountaindawn": "https://cdn.stux.media/levels/bgs/profile/mountaindawn.jpg",
"waterlilies": "https://cdn.stux.media/levels/bgs/profile/waterlilies.jpg",
},
"rank": {
"default": "https://cdn.stux.media/levels/bgs/rank/default.jpg",
"aurora": "https://cdn.stux.media/levels/bgs/rank/aurora.jpg",
"nebula": "https://cdn.stux.media/levels/bgs/rank/nebula.jpg",
"mountain": "https://cdn.stux.media/levels/bgs/rank/mountain.jpg",
"city": "https://cdn.stux.media/levels/bgs/rank/city.jpg",
"trans": "https://cdn.stux.media/levels/bgs/rank/trans.jpg",
"lgbt": "https://cdn.stux.media/levels/bgs/rank/lgbt.jpg",
"pride": "https://cdn.stux.media/levels/bgs/rank/pride.jpg",
},
"levelup": {
"default": "https://cdn.stux.media/levels/bgs/levelup/default.jpg"
},
}
async def get_backgrounds(self):
ret = self.DEFAULT_BGS
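        # Start from the bundled defaults, drop backgrounds marked as removed,
        # then merge in any user-added backgrounds.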
removal_dict = await self.config.removed_backgrounds()
for bg_type, removals in removal_dict.items():
for rem in removals:
ret[bg_type].pop(rem, None)
user_backgrounds = await self.config.backgrounds()
for bg_type, update_with in user_backgrounds.items():
ret[bg_type].update(update_with)
return ret
async def delete_background(self, bg_type: str, bg_name: str):
found = False
async with self.config.backgrounds() as bgs:
if bg_name in bgs[bg_type]:
found = True
del bgs[bg_type][bg_name]
try:
_k = self.DEFAULT_BGS[bg_type][bg_name]
except KeyError:
if not found:
raise
else:
async with self.config.removed_backgrounds() as rms:
if bg_name not in rms[bg_type]:
rms[bg_type].append(bg_name)
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.command(name="profile")
@commands.bot_has_permissions(attach_files=True)
@commands.guild_only()
async def profile(self, ctx, *, user: discord.Member = None):
"""Displays a user profile."""
if user is None:
user = ctx.message.author
channel = ctx.message.channel
server = user.guild
curr_time = time.time()
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
# check if disabled
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled!**")
return
# no cooldown for text only
if await self.config.guild(ctx.guild).text_only():
em = await self.profile_text(user, server, userinfo)
await channel.send(embed=em)
else:
async with ctx.channel.typing():
file = await self.draw_profile(user, server)
await channel.send("**User profile for {}**".format(await self._is_mention(user)), file=file)
await self.db.users.update_one(
{"user_id": str(user.id)}, {"$set": {"profile_block": curr_time}}, upsert=True
)
async def profile_text(self, user, server, userinfo):
def test_empty(text):
if not text:
return "None"
else:
return text
em = discord.Embed(colour=user.colour)
em.add_field(name="Title:", value=test_empty(userinfo["title"]))
em.add_field(name="Reps:", value=userinfo["rep"])
global_ranking = await self._find_global_rank(user)
if global_ranking:
em.add_field(name="Global Rank:", value=f"#{global_ranking}")
em.add_field(name="Server Rank:", value=f"#{await self._find_server_rank(user, server)}")
em.add_field(name="Server Level:", value=format(userinfo["servers"][str(server.id)]["level"]))
em.add_field(name="Total Exp:", value=userinfo["total_exp"])
em.add_field(name="Server Exp:", value=await self._find_server_exp(user, server))
u_credits = await bank.get_balance(user)
em.add_field(name="Credits: ", value=f"${u_credits}")
em.add_field(name="Info: ", value=test_empty(userinfo["info"]))
em.add_field(name="Badges: ", value=test_empty(", ".join(userinfo["badges"])).replace("_", " "))
em.set_author(name=f"Profile for {user.name}", url=user.avatar_url)
em.set_thumbnail(url=user.avatar_url)
return em
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.bot_has_permissions(attach_files=True)
@commands.command()
@commands.guild_only()
async def rank(self, ctx, user: discord.Member = None):
"""Displays a user's rank card."""
if user is None:
user = ctx.message.author
channel = ctx.message.channel
server = user.guild
curr_time = time.time()
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
# check if disabled
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled!**")
return
# no cooldown for text only
if await self.config.guild(server).text_only():
em = await self.rank_text(user, server, userinfo)
await channel.send("", embed=em)
else:
async with ctx.typing():
file = await self.draw_rank(user, server)
await ctx.send(f"**Ranking & Statistics for {await self._is_mention(user)}**", file=file)
await self.db.users.update_one(
{"user_id": str(user.id)}, {"$set": {"rank_block".format(server.id): curr_time}}, upsert=True,
)
async def rank_text(self, user, server, userinfo):
em = discord.Embed(colour=user.colour)
em.add_field(name="Server Rank", value=f"#{await self._find_server_rank(user, server)}")
em.add_field(name="Reps", value=userinfo["rep"])
em.add_field(name="Server Level", value=userinfo["servers"][str(server.id)]["level"])
em.add_field(name="Server Exp", value=await self._find_server_exp(user, server))
em.set_author(name=f"Rank and Statistics for {user.name}", url=user.avatar_url)
em.set_thumbnail(url=user.avatar_url)
return em
# should the user be mentioned based on settings?
async def _is_mention(self, user):
if await self.config.guild(user.guild).mentions():
return user.mention
else:
return user.name
@commands.cooldown(1, 10, commands.BucketType.guild)
@commands.bot_has_permissions(embed_links=True)
@commands.command(usage="[page] [-rep] [-global]")
@commands.guild_only()
async def top(self, ctx, *options):
"""
Displays the leaderboard.
Add the `-global` parameter for global and `-rep` for reputation.
Examples:
`[p]top`
- Displays the server leaderboard
`[p]top -rep`
- Displays the server reputation leaderboard
`[p]top -global`
- Displays the global leaderboard
`[p]top -rep -global`
- Displays the global reputation leaderboard
"""
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled!**")
return
user = ctx.author
server = ctx.guild
q = f"servers.{server.id}"
space = "\N{EN SPACE}"
guild_ids = [str(x.id) for x in ctx.guild.members]
await self._create_user(user, server)
async with ctx.typing():
users = []
user_stat = None
if "-rep" in options and "-global" in options:
title = "Global Rep Leaderboard for {}\n".format(self.bot.user.name)
async for userinfo in self.db.users.find(({"rep": {"$gte": 1}})).sort("rep", -1).limit(300):
await asyncio.sleep(0)
try:
users.append((userinfo["username"], userinfo["rep"]))
except KeyError:
users.append((str(int(userinfo["user_id"])), userinfo["rep"]))
if str(user.id) == userinfo["user_id"]:
user_stat = userinfo["rep"]
board_type = "Rep"
global_rep_rank = await self._find_global_rep_rank(user)
if global_rep_rank:
footer_text = f"Your Rank: {global_rep_rank} {board_type}: {user_stat}"
else:
footer_text = f"{space*40}"
icon_url = self.bot.user.avatar_url
elif "-global" in options:
title = "Global Exp Leaderboard for {}\n".format(self.bot.user.name)
async for userinfo in self.db.users.find(({"total_exp": {"$gte": 100}})).sort("total_exp", -1).limit(
300
):
await asyncio.sleep(0)
try:
users.append((userinfo["username"], userinfo["total_exp"]))
except KeyError:
users.append((str(int(userinfo["user_id"])), userinfo["total_exp"]))
if str(user.id) == userinfo["user_id"]:
user_stat = userinfo["total_exp"]
board_type = "Points"
global_ranking = await self._find_global_rank(user)
if global_ranking:
footer_text = f"Your Rank: {global_ranking} {board_type}: {user_stat}"
else:
footer_text = f"{space*40}"
icon_url = self.bot.user.avatar_url
elif "-rep" in options:
title = "Rep Leaderboard for {}\n".format(server.name)
async for userinfo in self.db.users.find(
{"$and": [{q: {"$exists": "true"}}, {"rep": {"$gte": 1}}]}
).sort("rep", -1):
await asyncio.sleep(0)
if userinfo["user_id"] in guild_ids:
try:
users.append((userinfo["username"], userinfo["rep"]))
except KeyError:
users.append((str(int(userinfo["user_id"])), userinfo["rep"]))
if str(user.id) == userinfo["user_id"]:
user_stat = userinfo["rep"]
board_type = "Rep"
footer_text = "Your Rank: {} {}: {}".format(
await self._find_server_rep_rank(user, server), board_type, user_stat,
)
icon_url = server.icon_url
else:
title = "Exp Leaderboard for {}\n".format(server.name)
async for userinfo in self.db.users.find({q: {"$exists": "true"}}):
await asyncio.sleep(0)
if userinfo["user_id"] in guild_ids:
server_exp = 0
# generate total xp gain for each level gained
for i in range(userinfo["servers"][str(server.id)]["level"]):
await asyncio.sleep(0)
server_exp += self._required_exp(i)
# add current non-completed level exp to count
server_exp += userinfo["servers"][str(server.id)]["current_exp"]
try:
users.append((userinfo["username"], server_exp))
                        except KeyError:
users.append((str(int(userinfo["user_id"])), server_exp))
board_type = "Points"
footer_text = "Your Rank: {} {}: {}".format(
await self._find_server_rank(user, server), board_type, await self._find_server_exp(user, server),
)
icon_url = server.icon_url
sorted_list = sorted(users, key=operator.itemgetter(1), reverse=True)
if not sorted_list:
return await ctx.send("**There are no results to display.**")
# multiple page support
page = 1
per_page = 15
pages = math.ceil(len(sorted_list) / per_page)
for option in options:
if str(option).isdigit():
                    if 1 <= int(option) <= pages:
page = int(str(option))
else:
await ctx.send("**Please enter a valid page number! (1 - {})**".format(str(pages)))
return
break
msg = ""
rank = 1 + per_page * (page - 1)
start_index = per_page * page - per_page
end_index = per_page * page
top_user_value = 8 + len(str(sorted_list[start_index:end_index][0][1])) + 4
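            # Width of the points column, padded to fit the largest value on this page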
async for single_user in self.asyncit(sorted_list[start_index:end_index]):
await asyncio.sleep(0)
label = " "
rank_text = f"{rank:<2}"
label_text = f"{label:<2}"
separator_text = f"{'➤':<3}"
padding = len(rank_text), len(label_text), len(separator_text) + 1
point_text = f"# {'{}: {}'.format(board_type, single_user[1]).ljust(top_user_value, ' ')}"
nam_text = f"{self._truncate_text(single_user[0], 18):<5}\n"
msg += rank_text + label_text + separator_text + point_text + nam_text
rank += 1
separator = "-" * len(footer_text)
rank_pad, level_pad, extra_pad = padding
msg += f"{separator}\n{footer_text}\nPage: {page}/{pages}"
em = discord.Embed(description=box(msg), colour=user.colour)
em.set_author(name=title, icon_url=icon_url)
await ctx.send(embed=em)
@commands.cooldown(1, 30, commands.BucketType.user)
@commands.command()
@commands.guild_only()
async def rep(self, ctx, user: discord.Member = None):
"""Gives a reputation point to a designated player."""
org_user = ctx.author
server = ctx.guild
# creates user if doesn't exist
await self._create_user(org_user, server)
if user:
await self._create_user(user, server)
org_userinfo = await self.db.users.find_one({"user_id": str(org_user.id)})
curr_time = time.time()
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled!**")
return
if user and user.id == org_user.id:
await ctx.send("**You can't give a rep to yourself!**")
return
if user and user.bot:
await ctx.send("**You can't give a rep to a bot!**")
return
if "rep_block" not in org_userinfo:
org_userinfo["rep_block"] = 0
delta = float(curr_time) - float(org_userinfo["rep_block"])
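        # A rep can be given once every 43200 seconds (12 hours)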
if user and delta >= 43200.0 and delta > 0:
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
await self.db.users.update_one({"user_id": str(org_user.id)}, {"$set": {"rep_block": curr_time}})
await self.db.users.update_one({"user_id": str(user.id)}, {"$set": {"rep": userinfo["rep"] + 1}})
await ctx.send("**You have just given {} a reputation point!**".format(await self._is_mention(user)))
else:
# calculate time left
seconds = 43200 - delta
if seconds < 0:
await ctx.send("**You can give a rep!**")
return
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
await ctx.send(
"**You need to wait {} hours, {} minutes, and {} seconds until you can give reputation again!**".format(
int(h), int(m), int(s)
)
)
@commands.cooldown(1, 30, commands.BucketType.user)
@commands.command()
@commands.guild_only()
async def represet(self, ctx):
"""Reset your rep cooldown for a price."""
if await self.config.guild(ctx.guild).disabled():
return await ctx.send("**Leveler commands for this server are disabled!**")
rep_price = await self.config.rep_price()
if rep_price == 0:
return await ctx.send("**Rep resets are not set up. Ask the bot owner to provide a rep reset cost.**")
user = ctx.author
server = ctx.guild
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
if "rep_block" not in userinfo:
userinfo["rep_block"] = 0
curr_time = time.time()
delta = float(curr_time) - float(userinfo["rep_block"])
if delta >= 43200.0 and delta > 0:
return await ctx.send("**You can give a rep without resetting your rep cooldown!**")
if not await bank.can_spend(user, rep_price):
await ctx.send("**Insufficient funds. Rep resets cost: ${}**".format(rep_price))
else:
currency_name = await bank.get_currency_name(ctx.guild)
await ctx.send(
"**{}, you are about to reset your rep cooldown for `{}` {}. Confirm by typing **`yes`.".format(
await self._is_mention(user), rep_price, currency_name
)
)
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=15)
except TimeoutError:
return await ctx.send("**Purchase canceled.**")
if not pred.result:
return await ctx.send("**Purchase canceled.**")
await bank.withdraw_credits(user, rep_price)
await self.db.users.update_one(
{"user_id": str(user.id)}, {"$set": {"rep_block": (float(curr_time) - 43201.0)}}
)
await ctx.send("**You have reset your rep cooldown!**")
@commands.command()
@commands.bot_has_permissions(embed_links=True)
@commands.guild_only()
async def lvlinfo(self, ctx, user: discord.Member = None):
"""Gives more specific details about a user's profile."""
if not user:
user = ctx.author
server = ctx.guild
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled!**")
return
# creates user if doesn't exist
await self._create_user(user, server)
msg = ""
msg += f"Name: {user.name}\n"
msg += f"Title: {userinfo['title']}\n"
msg += f"Reps: {userinfo['rep']}\n"
msg += f"Server Level: {userinfo['servers'][str(server.id)]['level']}\n"
total_server_exp = 0
for i in range(userinfo["servers"][str(server.id)]["level"]):
await asyncio.sleep(0)
total_server_exp += self._required_exp(i)
total_server_exp += userinfo["servers"][str(server.id)]["current_exp"]
msg += f"Server Exp: {total_server_exp}\n"
msg += f"Total Exp: {userinfo['total_exp']}\n"
msg += f"Info: {userinfo['info']}\n"
msg += f"Profile background: {userinfo['profile_background']}\n"
msg += f"Rank background: {userinfo['rank_background']}\n"
msg += f"Levelup background: {userinfo['levelup_background']}\n"
if "profile_info_color" in userinfo.keys() and userinfo["profile_info_color"]:
msg += f"Profile info color: {self._rgb_to_hex(userinfo['profile_info_color'])}\n"
if "profile_exp_color" in userinfo.keys() and userinfo["profile_exp_color"]:
msg += f"Profile exp color: {self._rgb_to_hex(userinfo['profile_exp_color'])}\n"
if "rep_color" in userinfo.keys() and userinfo["rep_color"]:
msg += f"Rep section color: {self._rgb_to_hex(userinfo['rep_color'])}\n"
if "badge_col_color" in userinfo.keys() and userinfo["badge_col_color"]:
msg += f"Badge section color: {self._rgb_to_hex(userinfo['badge_col_color'])}\n"
if "rank_info_color" in userinfo.keys() and userinfo["rank_info_color"]:
msg += f"Rank info color: {self._rgb_to_hex(userinfo['rank_info_color'])}\n"
if "rank_exp_color" in userinfo.keys() and userinfo["rank_exp_color"]:
msg += f"Rank exp color: {self._rgb_to_hex(userinfo['rank_exp_color'])}\n"
if "levelup_info_color" in userinfo.keys() and userinfo["levelup_info_color"]:
msg += f"Level info color: {self._rgb_to_hex(userinfo['levelup_info_color'])}\n"
msg += "Badges: "
msg += ", ".join(userinfo["badges"])
em = discord.Embed(description=msg, colour=user.colour)
em.set_author(name=f"Profile Information for {user.name}", icon_url=user.avatar_url)
await ctx.send(embed=em)
@staticmethod
def _rgb_to_hex(rgb):
rgb = tuple(rgb[:3])
return "#%02x%02x%02x" % rgb
@checks.is_owner()
@commands.group()
async def levelerset(self, ctx):
"""
MongoDB server configuration options.
Use this command in DMs to see current settings.
"""
if not ctx.invoked_subcommand and ctx.channel.type == discord.ChannelType.private:
settings = [
(setting.replace("_", " ").title(), value)
for setting, value in (await self.config.custom("MONGODB").get_raw()).items()
if value
]
await ctx.send(box(tabulate(settings, tablefmt="plain")))
@levelerset.command()
async def host(self, ctx, host: str = "localhost"):
"""Set the MongoDB server host."""
await self.config.custom("MONGODB").host.set(host)
message = await ctx.send(f"MongoDB host set to {host}.\nNow trying to connect to the new host...")
client = await self._connect_to_mongo()
if not client:
return await message.edit(
content=message.content.replace("Now trying to connect to the new host...", "")
+ "Failed to connect. Please try again with a valid host."
)
await message.edit(content=message.content.replace("Now trying to connect to the new host...", ""))
@levelerset.command()
async def port(self, ctx, port: int = 27017):
"""Set the MongoDB server port."""
await self.config.custom("MONGODB").port.set(port)
message = await ctx.send(f"MongoDB port set to {port}.\nNow trying to connect to the new port...")
client = await self._connect_to_mongo()
if not client:
return await message.edit(
content=message.content.replace("Now trying to connect to the new port...", "")
+ "Failed to connect. Please try again with a valid port."
)
await message.edit(content=message.content.replace("Now trying to connect to the new port...", ""))
@levelerset.command(aliases=["creds"])
async def credentials(self, ctx, username: str = None, password: str = None):
"""Set the MongoDB server credentials."""
await self.config.custom("MONGODB").username.set(username)
await self.config.custom("MONGODB").password.set(password)
message = await ctx.send("MongoDB credentials set.\nNow trying to connect...")
client = await self._connect_to_mongo()
if not client:
return await message.edit(
content=message.content.replace("Now trying to connect...", "")
+ "Failed to connect. Please try again with valid credentials."
)
await message.edit(content=message.content.replace("Now trying to connect...", ""))
@levelerset.command()
async def dbname(self, ctx, dbname: str = "leveler"):
"""Set the MongoDB db name."""
await self.config.custom("MONGODB").db_name.set(dbname)
message = await ctx.send("MongoDB db name set.\nNow trying to connect...")
client = await self._connect_to_mongo()
if not client:
return await message.edit(
content=message.content.replace("Now trying to connect...", "")
+ "Failed to connect. Please try again with a valid db name."
)
await message.edit(content=message.content.replace("Now trying to connect...", ""))
@commands.group(name="lvlset")
@commands.guild_only()
async def lvlset(self, ctx):
"""Profile configuration options."""
pass
@lvlset.group(name="profile")
async def profileset(self, ctx):
"""Profile options."""
pass
@lvlset.group(name="rank")
async def rankset(self, ctx):
"""Rank options."""
pass
@lvlset.group(name="levelup")
async def levelupset(self, ctx):
"""Level-up options."""
pass
@profileset.command(name="color")
async def profilecolors(self, ctx, section: str, color: str):
"""
Set colors on the profile card.
**section** can be one of: `exp` `rep` `badge` `info` `all`
`exp` is the experience bar and the xp numbers above the name
`rep` is the bar holding the rep number under the user's profile picture
`badge` is the backdrop of the badge area on the left of the profile
`info` is the backdrop of the text info areas
`all` is a combination of all of the above
**color** can be one of: `default` `white` `auto` or a hex code formatted like `#990000`
`default` will reset all profile parts to the default colors
`white` is used for a greyish transparent white, can be better than #FFFFFF
`auto` automatically chooses the appropriate colors based on the profile background image
"""
user = ctx.author
server = ctx.guild
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
section = section.lower()
default_info_color = (30, 30, 30, 200)
white_info_color = (150, 150, 150, 180)
default_rep = (92, 130, 203, 230)
default_badge = (128, 151, 165, 230)
default_exp = (255, 255, 255, 230)
default_a = 200
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled!**")
return
if await self.config.guild(ctx.guild).text_only():
await ctx.send("**Leveler is in text-only mode.**")
return
# get correct section for self.db query
if section == "rep":
section_name = "rep_color"
elif section == "exp":
section_name = "profile_exp_color"
elif section == "badge":
section_name = "badge_col_color"
elif section == "info":
section_name = "profile_info_color"
elif section == "all":
section_name = "all"
else:
await ctx.send("**Not a valid section. (rep, exp, badge, info, all)**")
return
# get correct color choice
if color == "auto":
if section == "exp":
color_ranks = [random.randint(2, 3)]
elif section == "rep":
color_ranks = [random.randint(2, 3)]
elif section == "badge":
color_ranks = [0] # most prominent color
elif section == "info":
color_ranks = [random.randint(0, 1)]
elif section == "all":
color_ranks = [random.randint(2, 3), random.randint(2, 3), 0, random.randint(0, 2)]
else:
return
hex_colors = await self._auto_color(ctx, userinfo["profile_background"], color_ranks)
set_color = []
for hex_color in hex_colors:
await asyncio.sleep(0)
color_temp = self._hex_to_rgb(hex_color, default_a)
set_color.append(color_temp)
elif color == "white":
set_color = [white_info_color]
elif color == "default":
if section == "exp":
set_color = [default_exp]
elif section == "rep":
set_color = [default_rep]
elif section == "badge":
set_color = [default_badge]
elif section == "info":
set_color = [default_info_color]
elif section == "all":
set_color = [default_exp, default_rep, default_badge, default_info_color]
else:
return
elif self._is_hex(color):
set_color = [self._hex_to_rgb(color, default_a)]
else:
await ctx.send("**Not a valid color. Use** `default`, `white`, **a valid hex code formatted like** `#990000`, **or** `auto` **for automatic**.")
return
if section == "all":
if len(set_color) == 1:
await self.db.users.update_one(
{"user_id": str(user.id)},
{
"$set": {
"profile_exp_color": set_color[0],
"rep_color": set_color[0],
"badge_col_color": set_color[0],
"profile_info_color": set_color[0],
}
},
)
elif color == "default":
await self.db.users.update_one(
{"user_id": str(user.id)},
{
"$set": {
"profile_exp_color": default_exp,
"rep_color": default_rep,
"badge_col_color": default_badge,
"profile_info_color": default_info_color,
}
},
)
elif color == "auto":
await self.db.users.update_one(
{"user_id": str(user.id)},
{
"$set": {
"profile_exp_color": set_color[0],
"rep_color": set_color[1],
"badge_col_color": set_color[2],
"profile_info_color": set_color[3],
}
},
)
await ctx.send("**Colors for profile set.**")
else:
# print("update one")
await self.db.users.update_one({"user_id": str(user.id)}, {"$set": {section_name: set_color[0]}})
await ctx.send("**Color for profile {} set.**".format(section))
@rankset.command(name="color")
@commands.guild_only()
async def rankcolors(self, ctx, section: str, color: str = None):
"""
Set colors on the rank card.
**section** can be one of: `exp` `info` `all`
`exp` is the experience bar around the user's profile picture
`info` is the backdrop of the text info areas
`all` is a combination of all of the above
**color** can be one of: `default` `white` `auto` or a hex code formatted like `#990000`
`default` will reset all rank parts to the default colors
`white` is used for a greyish transparent white, can be better than #FFFFFF
`auto` automatically chooses the appropriate colors based on the rank background image
"""
user = ctx.author
server = ctx.guild
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
section = section.lower()
default_info_color = (30, 30, 30, 200)
white_info_color = (150, 150, 150, 180)
default_exp = (255, 255, 255, 230)
default_rep = (92, 130, 203, 230)
default_badge = (128, 151, 165, 230)
default_a = 200
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled!**")
return
if await self.config.guild(ctx.guild).text_only():
await ctx.send("**Leveler is in text-only mode.**")
return
# get correct section for db query
if section == "exp":
section_name = "rank_exp_color"
elif section == "info":
section_name = "rank_info_color"
elif section == "all":
section_name = "all"
else:
await ctx.send("**Not a valid section. (exp, info, all)**")
return
# get correct color choice
if color == "auto":
if section == "exp":
color_ranks = [random.randint(2, 3)]
elif section == "info":
color_ranks = [random.randint(0, 1)]
elif section == "all":
color_ranks = [random.randint(2, 3), random.randint(0, 1)]
else:
return
hex_colors = await self._auto_color(ctx, userinfo["rank_background"], color_ranks)
set_color = []
for hex_color in hex_colors:
await asyncio.sleep(0)
color_temp = self._hex_to_rgb(hex_color, default_a)
set_color.append(color_temp)
elif color == "white":
set_color = [white_info_color]
elif color == "default":
if section == "exp":
set_color = [default_exp]
elif section == "info":
set_color = [default_info_color]
elif section == "all":
set_color = [default_exp, default_rep, default_badge, default_info_color]
else:
return
elif self._is_hex(color):
set_color = [self._hex_to_rgb(color, default_a)]
else:
await ctx.send("**Not a valid color. Use** `default`, `white`, **a valid hex code formatted like** `#990000`, **or** `auto` **for automatic**.")
return
if section == "all":
if len(set_color) == 1:
await self.db.users.update_one(
{"user_id": str(user.id)},
{"$set": {"rank_exp_color": set_color[0], "rank_info_color": set_color[0]}},
)
elif color == "default":
await self.db.users.update_one(
{"user_id": str(user.id)},
{"$set": {"rank_exp_color": default_exp, "rank_info_color": default_info_color,}},
)
elif color == "auto":
await self.db.users.update_one(
{"user_id": str(user.id)},
{"$set": {"rank_exp_color": set_color[0], "rank_info_color": set_color[1]}},
)
await ctx.send("**Colors for rank set.**")
else:
await self.db.users.update_one({"user_id": str(user.id)}, {"$set": {section_name: set_color[0]}})
await ctx.send("**Color for rank {} set.**".format(section))
@levelupset.command(name="color")
@commands.guild_only()
async def levelupcolors(self, ctx, section: str, color: str = None):
"""
Set colors on your levelup message, if enabled.
**section** can only be: `info`
`info` is the backdrop of the text info areas
**color** can be one of: `default` `white` `auto` or a hex code formatted like `#990000`
`default` will reset levelup colors to the default colors
`white` is used for a greyish transparent white, can be better than #FFFFFF
`auto` automatically chooses the appropriate colors based on the levelup background image
"""
user = ctx.author
server = ctx.guild
# the only color customizable section on a levelup message is the "info" area, maybe more in the future
section = "info"
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
section = section.lower()
default_info_color = (30, 30, 30, 200)
white_info_color = (150, 150, 150, 180)
default_a = 200
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled!**")
return
if await self.config.guild(ctx.guild).text_only():
await ctx.send("**Leveler is in text-only mode.**")
return
# get correct color choice
if color == "auto":
if section == "info":
color_ranks = [random.randint(0, 1)]
else:
return
hex_colors = await self._auto_color(ctx, userinfo["levelup_background"], color_ranks)
set_color = []
for hex_color in hex_colors:
await asyncio.sleep(0)
color_temp = self._hex_to_rgb(hex_color, default_a)
set_color.append(color_temp)
elif color == "white":
set_color = [white_info_color]
elif color == "default":
if section == "info":
set_color = [default_info_color]
else:
return
elif self._is_hex(color):
set_color = [self._hex_to_rgb(color, default_a)]
else:
await ctx.send("**Not a valid color. Use** `default`, `white`, **a valid hex code formatted like** `#990000`, **or** `auto` **for automatic**.")
return
await self.db.users.update_one({"user_id": str(user.id)}, {"$set": {section_name: set_color[0]}})
await ctx.send("**Color for level-up {} set.**".format(section))
# uses k-means algorithm to find color from bg, rank is abundance of color, descending
async def _auto_color(self, ctx, url: str, ranks):
phrases = ["Calculating colors...", "Reticulating Splines..."] # in case I want more
await ctx.send("**{}**".format(random.choice(phrases)))
clusters = 10
async with self.session.get(url) as r:
image = await r.content.read()
with open(f"{cog_data_path(self)}/temp_auto.png", "wb") as f:
f.write(image)
im = Image.open(f"{cog_data_path(self)}/temp_auto.png").convert("RGBA")
im = im.resize((290, 290)) # resized to reduce time
ar = numpy.asarray(im)
shape = ar.shape
        ar = ar.reshape(numpy.prod(shape[:2]), shape[2])
codes, dist = scipy.cluster.vq.kmeans(ar.astype(float), clusters)
vecs, dist = scipy.cluster.vq.vq(ar, codes) # assign codes
        counts, bins = numpy.histogram(vecs, len(codes))  # count occurrences per cluster
# sort counts
freq_index = []
index = 0
for count in counts:
await asyncio.sleep(0)
freq_index.append((index, count))
index += 1
sorted_list = sorted(freq_index, key=operator.itemgetter(1), reverse=True)
colors = []
for rank in ranks:
await asyncio.sleep(0)
            color_index = min(rank, len(codes) - 1)  # clamp rank to a valid index
peak = codes[sorted_list[color_index][0]] # gets the original index
peak = peak.astype(int)
colors.append("".join(format(c, "02x") for c in peak))
return colors # returns array
# converts hex to rgb
@staticmethod
def _hex_to_rgb(hex_num: str, a: int):
h = hex_num.lstrip("#")
# if only 3 characters are given
if len(str(h)) == 3:
expand = "".join([x * 2 for x in str(h)])
h = expand
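        # split the 6-character hex string into R, G and B pairs and parse each pair as base-16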
colors = [int(h[i : i + 2], 16) for i in (0, 2, 4)]
colors.append(a)
return tuple(colors)
# dampens the color given a parameter
@staticmethod
def _moderate_color(rgb, a, moderate_num):
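        # push each RGB channel toward the middle by moderate_num and append a fixed alpha of 230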
new_colors = []
for color in rgb[:3]:
if color > 128:
color -= moderate_num
else:
color += moderate_num
new_colors.append(color)
new_colors.append(230)
return tuple(new_colors)
@profileset.command()
@commands.guild_only()
async def info(self, ctx, *, info):
"""Set your user info."""
user = ctx.author
server = ctx.guild
# creates user if doesn't exist
await self._create_user(user, server)
max_char = 150
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled.**")
return
if len(info) < max_char:
await self.db.users.update_one({"user_id": str(user.id)}, {"$set": {"info": info}})
await ctx.send("**Your info section has been successfully set!**")
else:
await ctx.send("**Your description has too many characters! Must be <{}**".format(max_char))
@levelupset.command(name="bg")
@commands.guild_only()
async def levelbg(self, ctx, *, image_name: str):
"""Set your level background."""
user = ctx.author
server = ctx.guild
backgrounds = await self.get_backgrounds()
# creates user if doesn't exist
await self._create_user(user, server)
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled.**")
return
if await self.config.guild(ctx.guild).text_only():
await ctx.send("**Leveler is in text-only mode.**")
return
if image_name in backgrounds["levelup"].keys():
if await self._process_purchase(ctx):
await self.db.users.update_one(
{"user_id": str(user.id)}, {"$set": {"levelup_background": backgrounds["levelup"][image_name]}},
)
await ctx.send(
"**Your new level-up background has been successfully set!\nCalculate matching colors next by using** `{}lvlset levelup color info auto`".format(
ctx.prefix
)
)
else:
await ctx.send(f"That is not a valid bg. See available bgs at `{ctx.prefix}backgrounds levelup`")
@profileset.command(name="bg")
@commands.guild_only()
async def profilebg(self, ctx, *, image_name: str):
"""Set your profile background."""
user = ctx.author
server = ctx.guild
backgrounds = await self.get_backgrounds()
# creates user if doesn't exist
await self._create_user(user, server)
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled.**")
return
if await self.config.guild(ctx.guild).text_only():
await ctx.send("**Leveler is in text-only mode.**")
return
if image_name in backgrounds["profile"].keys():
if await self._process_purchase(ctx):
await self.db.users.update_one(
{"user_id": str(user.id)}, {"$set": {"profile_background": backgrounds["profile"][image_name]}},
)
await ctx.send(
"**Your new profile background has been successfully set!\nCalculate matching colors next by using** `{}lvlset profile color all auto`".format(
ctx.prefix
)
)
else:
await ctx.send(f"That is not a valid bg. See available bgs at `{ctx.prefix}backgrounds profile`")
@rankset.command(name="bg")
@commands.guild_only()
async def rankbg(self, ctx, *, image_name: str):
"""Set your rank background."""
user = ctx.author
server = ctx.guild
backgrounds = await self.get_backgrounds()
# creates user if doesn't exist
await self._create_user(user, server)
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled.**")
return
if await self.config.guild(ctx.guild).text_only():
await ctx.send("**Leveler is in text-only mode.**")
return
if image_name in backgrounds["rank"].keys():
if await self._process_purchase(ctx):
await self.db.users.update_one(
{"user_id": str(user.id)}, {"$set": {"rank_background": backgrounds["rank"][image_name]}},
)
await ctx.send(
"**Your new rank background has been successfully set!\nCalculate matching colors next by using** `{}lvlset rank color all auto`".format(
ctx.prefix
)
)
else:
await ctx.send(f"That is not a valid bg. See available bgs at `{ctx.prefix}backgrounds rank`")
@profileset.command()
@commands.guild_only()
async def title(self, ctx, *, title):
"""Set your title."""
user = ctx.author
server = ctx.guild
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
max_char = 20
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled.**")
return
if len(title) < max_char:
userinfo["title"] = title
await self.db.users.update_one({"user_id": str(user.id)}, {"$set": {"title": title}})
await ctx.send("**Your title has been successfully set!**")
else:
await ctx.send("**Your title has too many characters! Must be <{}**".format(max_char))
@checks.admin_or_permissions(manage_guild=True)
@commands.group()
@commands.guild_only()
async def lvladmin(self, ctx):
"""Admin settings."""
pass
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(embed_links=True)
@lvladmin.group(invoke_without_command=True)
async def overview(self, ctx, guild_id: int = None):
"""A list of settings."""
num_users = await self.db.users.count_documents({})
default_profile = await self.config.default_profile()
default_rank = await self.config.default_rank()
default_levelup = await self.config.default_levelup()
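        # bot owners may pass a guild_id to inspect another server's settings; everyone else sees the current guild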
if guild_id is not None:
if ctx.author.id in self.bot.owner_ids:
guild_data = await self.config.guild_from_id(guild_id).all()
g = self.bot.get_guild(guild_id)
else:
guild_data = await self.config.guild(ctx.guild).all()
g = ctx.guild
else:
guild_data = await self.config.guild(ctx.guild).all()
g = ctx.guild
msg = "`Guild Settings`\n"
msg += "**Leveler on {}:** {}\n".format(ctx.guild.name if not g else g.name, "Disabled" if guild_data['disabled'] else "Enabled")
msg += "**Mentions on {}:** {}\n".format(ctx.guild.name if not g else g.name, "Enabled" if guild_data['mentions'] else "Disabled")
msg += "**Public Level Messages:** {}\n".format("Enabled" if guild_data['lvl_msg'] else "Disabled")
msg += "**Private Level Messages:** {}\n".format("Enabled" if guild_data['private_lvl_message'] else "Disabled")
msg += "**Channel Locks:** {}\n".format(ctx.guild.get_channel(guild_data['lvl_msg_lock']))
msg += "\n`Bot Owner Only Settings`\n"
msg += "**Background Price:** {}\n".format(await self.config.bg_price())
msg += "**Rep Reset Price:** {}\n".format(await self.config.rep_price())
msg += "**Badge Type:** {}\n".format(await self.config.badge_type())
msg += "**Default Profile Background:** {}\n".format(default_profile)
msg += "**Default Rank Background:** {}\n".format(default_rank)
msg += "**Default Levelup Background:** {}\n".format(default_levelup)
if ctx.author.id in self.bot.owner_ids:
msg += "\n**Servers:** {}\n".format(len(self.bot.guilds))
msg += "**Unique Users:** {}\n".format(num_users)
em = discord.Embed(description=msg, colour=await ctx.embed_color())
em.set_author(name="Settings Overview for {}".format(g.name))
await ctx.send(embed=em)
@lvladmin.command()
@checks.is_owner()
@commands.check(non_global_bank)
@commands.guild_only()
async def msgcredits(self, ctx, currency: int = 0):
"""Credits per message logged. Default = 0"""
channel = ctx.channel
server = ctx.guild
if currency < 0 or currency > 1000:
await ctx.send("**Please enter a valid number (0 - 1000)**".format(channel.name))
return
await self.config.guild(server).msg_credits.set(currency)
await ctx.send("**Credits per message logged set to `{}`.**".format(currency))
@lvladmin.command()
@commands.guild_only()
async def ignorechannel(self, ctx, channel: discord.TextChannel = None):
"""Blocks exp gain in the given channel.
Use command with no channel to see list of ignored channels."""
server = ctx.guild
if channel is None:
channels = [
                server.get_channel(c).mention
for c in await self.config.guild(server).ignored_channels()
if server.get_channel(c)
]
await ctx.send("**Ignored channels:** \n" + ("\n".join(channels) or "No ignored channels set."))
return
if channel.id in await self.config.guild(server).ignored_channels():
async with self.config.guild(server).ignored_channels() as channels:
channels.remove(channel.id)
await ctx.send(f"**Messages in {channel.mention} will give exp now.**")
else:
async with self.config.guild(server).ignored_channels() as channels:
channels.append(channel.id)
await ctx.send(f"**Messages in {channel.mention} will not give exp now.**")
@lvladmin.command(name="lock")
@commands.guild_only()
async def lvlmsglock(self, ctx, channel: discord.TextChannel = None):
"""Locks levelup messages to one channel. Use with no channel to disable."""
server = ctx.guild
if not channel:
await self.config.guild(server).lvl_msg_lock.set(None)
await ctx.send("**Level-up message lock disabled.**")
else:
await self.config.guild(server).lvl_msg_lock.set(channel.id)
await ctx.send("**Level-up messages locked to `#{}`**".format(channel.name))
async def _process_purchase(self, ctx):
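        # if backgrounds cost money, ask the buyer to confirm and withdraw the price; returns True when the change may proceed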
user = ctx.author
# server = ctx.guild
bg_price = await self.config.bg_price()
if bg_price != 0:
if not await bank.can_spend(user, bg_price):
await ctx.send("**Insufficient funds. Backgrounds changes cost: ${}**".format(bg_price))
return False
else:
await ctx.send(
"**{}, you are about to buy a background for `{}`. Confirm by typing** `yes`.".format(
await self._is_mention(user), bg_price
)
)
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=15)
except TimeoutError:
await ctx.send("**Purchase canceled.**")
return False
if pred.result is True:
await bank.withdraw_credits(user, bg_price)
return True
else:
await ctx.send("**Purchase canceled.**")
return False
else:
return True
async def _give_chat_credit(self, user, server):
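        # per-message credits are only paid out when the bank is server-local (non-global)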
msg_credits = await self.config.guild(server).msg_credits()
if msg_credits and not await bank.is_global():
await bank.deposit_credits(user, msg_credits)
@checks.is_owner()
@lvladmin.command()
@commands.guild_only()
async def setbgprice(self, ctx, price: int):
"""Set a price for background changes."""
if price < 0:
await ctx.send("**That is not a valid background price.**")
else:
await self.config.bg_price.set(price)
await ctx.send(f"**Background price set to: `{price}`!**")
@checks.is_owner()
@lvladmin.command()
@commands.guild_only()
async def setrepprice(self, ctx, price: int):
"""Set a price for rep resets."""
if price < 0:
await ctx.send("**That is not a valid rep reset price.**")
else:
await self.config.rep_price.set(price)
await ctx.send(f"**Rep reset price set to: `{price}`!**")
@checks.is_owner()
@lvladmin.command()
@commands.guild_only()
async def setlevel(self, ctx, user: discord.Member, level: int):
"""Set a user's level. (What a cheater C:)."""
server = user.guild
channel = ctx.channel
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled.**")
return
if level < 0:
await ctx.send("**Please enter a positive number.**")
return
if level > 10000:
await ctx.send("**Please enter a number that is less than 10,000.**")
return
# get rid of old level exp
old_server_exp = 0
for i in range(userinfo["servers"][str(server.id)]["level"]):
await asyncio.sleep(0)
old_server_exp += self._required_exp(i)
userinfo["total_exp"] -= old_server_exp
userinfo["total_exp"] -= userinfo["servers"][str(server.id)]["current_exp"]
# add in new exp
total_exp = self._level_exp(level)
userinfo["servers"][str(server.id)]["current_exp"] = 0
userinfo["servers"][str(server.id)]["level"] = level
userinfo["total_exp"] += total_exp
await self.db.users.update_one(
{"user_id": str(user.id)},
{
"$set": {
"servers.{}.level".format(server.id): level,
"servers.{}.current_exp".format(server.id): 0,
"total_exp": userinfo["total_exp"],
}
},
)
await ctx.send("**{}'s Level has been set to `{}`.**".format(await self._is_mention(user), level))
await self._handle_levelup(user, userinfo, server, channel)
@checks.is_owner()
@lvladmin.command()
@commands.guild_only()
async def setrep(self, ctx, user: discord.Member, rep_level: int):
"""Set a user's rep level. (What a cheater C:)."""
server = user.guild
channel = ctx.channel
if rep_level < 0:
await ctx.send("**Please enter a positive number.**")
return
if rep_level > 99999:
await ctx.send("**Please use a number that is smaller than 100,000.**")
return
# creates user if doesn't exist
await self._create_user(user, server)
if await self.config.guild(ctx.guild).disabled():
await ctx.send("**Leveler commands for this server are disabled.**")
return
await self.db.users.update_one({"user_id": str(user.id)}, {"$set": {"rep": rep_level}})
await ctx.send("**{}'s rep has been set to `{}`.**".format(await self._is_mention(user), rep_level))
@checks.is_owner()
@lvladmin.command()
@commands.guild_only()
async def xpban(self, ctx, days: int, *, user: Union[discord.Member, int, None]):
"""Ban user from getting experience."""
if isinstance(user, int):
try:
user = await self.bot.fetch_user(user)
except (discord.HTTPException, discord.NotFound):
user = None
if user is None:
await ctx.send_help()
return
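        # store the unblock time as a unix timestamp: now plus the requested number of days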
chat_block = time.time() + timedelta(days=days).total_seconds()
try:
await self.db.users.update_one({"user_id": str(user.id)}, {"$set": {"chat_block": chat_block}})
except Exception as exc:
await ctx.send("Unable to add chat block: {}".format(exc))
else:
await ctx.tick()
@checks.is_owner()
@lvladmin.command()
@commands.guild_only()
async def mention(self, ctx):
"""Toggle mentions on messages."""
if await self.config.guild(ctx.guild).mentions():
await self.config.guild(ctx.guild).mentions.set(False)
await ctx.send("**Mentions disabled.**")
else:
await self.config.guild(ctx.guild).mentions.set(True)
await ctx.send("**Mentions enabled.**")
async def _valid_image_url(self, url):
try:
async with self.session.get(url) as r:
image = await r.read()
return Image.open(BytesIO(image)).convert("RGBA")
except Exception as exc:
log.exception(
"Something went wrong while trying to get a badge image or convert it: ", exc_info=exc,
)
return None
@checks.admin_or_permissions(manage_guild=True)
@lvladmin.command()
@commands.guild_only()
async def toggle(self, ctx):
"""Toggle most leveler commands on the current server."""
server = ctx.guild
if await self.config.guild(server).disabled():
await self.config.guild(server).disabled.set(False)
await ctx.send("**Leveler enabled on `{}`.**".format(server.name))
else:
await self.config.guild(server).disabled.set(True)
await ctx.send("**Leveler disabled on `{}`.**".format(server.name))
@checks.admin_or_permissions(manage_guild=True)
@lvladmin.command()
@commands.guild_only()
async def textonly(self, ctx):
"""Toggle text-based messages on the server."""
server = ctx.guild
if await self.config.guild(server).text_only():
await self.config.guild(server).text_only.set(False)
await ctx.send("**Text-only messages disabled for `{}`.**".format(server.name))
else:
await self.config.guild(server).text_only.set(True)
await ctx.send("**Text-only messages enabled for `{}`.**".format(server.name))
@checks.admin_or_permissions(manage_guild=True)
@lvladmin.command(name="alerts")
@commands.guild_only()
async def lvlalert(self, ctx):
"""Toggle level-up messages on the server."""
server = ctx.guild
# user = ctx.author
if await self.config.guild(server).lvl_msg():
await self.config.guild(server).lvl_msg.set(False)
await ctx.send("**Level-up alerts disabled for `{}`.**".format(server.name))
else:
await self.config.guild(server).lvl_msg.set(True)
await ctx.send("**Level-up alerts enabled for `{}`.**".format(server.name))
@checks.admin_or_permissions(manage_guild=True)
@lvladmin.command(name="private")
@commands.guild_only()
async def lvlprivate(self, ctx):
"""Toggles if level alert is a private message to the user."""
server = ctx.guild
if await self.config.guild(server).private_lvl_message():
await self.config.guild(server).private_lvl_message.set(False)
await ctx.send("**Private level-up alerts disabled for `{}`.**".format(server.name))
else:
await self.config.guild(server).private_lvl_message.set(True)
await ctx.send("**Private level-up alerts enabled for `{}`.**".format(server.name))
@lvladmin.command()
@checks.is_owner()
async def xp(self, ctx, min_xp: int = None, max_xp: int = None):
"""Set the range for the xp given on each successful xp gain.
Leaving the entries blank will reset the xp to the default."""
if not (min_xp and max_xp):
await self.config.xp.set([15, 20])
return await ctx.send("XP given has been reset to the default range of 15-20 xp per message.")
elif not max_xp:
return await ctx.send(f"Enter the values as a range: `{ctx.prefix}lvladmin xp 15 20`")
elif (max_xp or min_xp) > 1000:
return await ctx.send(
"Don't you think that number is a bit high? That might break things. Try something under 1k xp."
)
elif min_xp >= max_xp:
return await ctx.send("The minimum xp amount needs to be less than the maximum xp amount.")
elif (min_xp or max_xp) <= 0:
return await ctx.send("The xp amounts can't be zero or less.")
else:
await self.config.xp.set([min_xp, max_xp])
await ctx.send(f"XP given has been set to a range of {min_xp} to {max_xp} xp per message.")
@commands.group()
@commands.guild_only()
async def badge(self, ctx):
"""Badge configuration options."""
pass
@badge.command(name="available")
@commands.bot_has_permissions(embed_links=True)
@commands.guild_only()
async def badge_available(self, ctx):
"""Get a list of available badges."""
ids = [
("global", "Global", self.bot.user.avatar_url),
(ctx.guild.id, ctx.guild.name, ctx.guild.icon_url),
]
global_list = []
server_list = []
for serverid, servername, icon_url in ids:
await asyncio.sleep(0)
msg = ""
server_badge_info = await self.db.badges.find_one({"server_id": str(serverid)})
if server_badge_info:
server_badges = server_badge_info["badges"]
if len(server_badges) >= 1:
for badgename in server_badges:
await asyncio.sleep(0)
badgeinfo = server_badges[badgename]
if badgeinfo["price"] == -1:
price = "Non-purchasable"
elif badgeinfo["price"] == 0:
price = "Free"
else:
price = badgeinfo["price"]
msg += "**• {}** ({}) - {}\n".format(badgename, price, badgeinfo["description"])
else:
msg = "None."
else:
msg = "None."
total_pages = len(list(pagify(msg, ["\n"], page_length=1500)))
page_num = 1
for page in pagify(msg, ["\n"], page_length=1500):
em = discord.Embed(colour=await ctx.embed_color(), description=page)
em.set_author(name="{}".format(servername), icon_url=icon_url)
em.set_footer(text="Page {} of {}".format(page_num, total_pages))
page_num += 1
if serverid == "global":
global_list.append(em)
else:
server_list.append(em)
for embed in global_list + server_list:
await ctx.send(embed=embed)
@badge.command(name="list")
@commands.bot_has_permissions(embed_links=True)
@commands.guild_only()
async def listuserbadges(self, ctx, user: discord.Member = None):
"""
List all of a user's badges.
0 or -1 on the priority number means the badge is not visible on the profile.
"""
if user is None:
user = ctx.author
server = ctx.guild
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
userinfo = await self._badge_convert_dict(userinfo)
# sort
priority_badges = []
for badgename in userinfo["badges"].keys():
badge = userinfo["badges"][badgename]
priority_num = badge["priority_num"]
priority_badges.append((badge, priority_num))
sorted_badges = sorted(priority_badges, key=operator.itemgetter(1), reverse=True)
badge_ranks = ""
counter = 1
for badge, priority_num in sorted_badges:
badge_ranks += "**{}. {}** ({}) [{}] **—** {}\n".format(
counter, badge["badge_name"], badge["server_name"], priority_num, badge["description"],
)
counter += 1
if not badge_ranks:
badge_ranks = "None"
total_pages = len(list(pagify(badge_ranks, ["\n"], page_length=1500)))
embeds = []
counter = 1
for page in pagify(badge_ranks, ["\n"], page_length=1500):
em = discord.Embed(colour=user.colour)
em.description = page
em.set_author(name="Badges for {}".format(user.name), icon_url=user.avatar_url)
em.set_footer(text="Page {} of {}".format(counter, total_pages))
embeds.append(em)
counter += 1
if len(embeds) == 1:
await ctx.send(embed=embeds[0])
else:
await menu(ctx, embeds, DEFAULT_CONTROLS)
@badge.command(name="buy")
@commands.guild_only()
async def badge_buy(self, ctx, name: str, global_badge: str = None):
"""
Buy a badge.
Use `-global` after the badge name to specify a global badge.
"""
user = ctx.author
server = ctx.guild
if global_badge == "-global":
serverid = "global"
else:
serverid = server.id
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
userinfo = await self._badge_convert_dict(userinfo)
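        # owned badges are keyed as "<badge name>_<server id>" ("global" for global badges)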
server_badge_info = await self.db.badges.find_one({"server_id": str(serverid)})
if server_badge_info:
server_badges = server_badge_info["badges"]
if name in server_badges:
if "{}_{}".format(name, str(serverid)) not in userinfo["badges"].keys():
badge_info = server_badges[name]
if badge_info["price"] == -1:
await ctx.send("**That badge is not purchasable.**".format(name))
elif badge_info["price"] == 0:
userinfo["badges"][f"{name}_{serverid}"] = server_badges[name]
await self.db.users.update_one(
{"user_id": userinfo["user_id"]}, {"$set": {"badges": userinfo["badges"]}},
)
await ctx.send("**`{}` has been obtained.**".format(name))
else:
await ctx.send(
'**{}, you are about to buy the `{}` badge for `{}`. Confirm by typing** `yes`'.format(
await self._is_mention(user), name, badge_info["price"]
)
)
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=15)
except TimeoutError:
return await ctx.send("**Purchase canceled.**")
if pred.result is False:
await ctx.send("**Purchase canceled.**")
return
else:
if badge_info["price"] <= await bank.get_balance(user):
await bank.withdraw_credits(user, badge_info["price"])
userinfo["badges"]["{}_{}".format(name, str(serverid))] = server_badges[name]
await self.db.users.update_one(
{"user_id": userinfo["user_id"]}, {"$set": {"badges": userinfo["badges"]}},
)
await ctx.send(
"**You have bought the `{}` badge for `{}`.\nSet it on your profile by using** `{}badge set` **next.**".format(
name, badge_info["price"], ctx.prefix
)
)
elif await bank.get_balance(user) < badge_info["price"]:
await ctx.send(
"**Not enough money! Need `{}` more.**".format(
badge_info["price"] - await bank.get_balance(user)
)
)
else:
await ctx.send("**{}, you already have this badge!**".format(user.name))
else:
await ctx.send("**The badge `{}` does not exist. List badges with** `{}badge available`.".format(name, ctx.prefix))
else:
await ctx.send("**The badge `{}` does not exist in the global badge list. List badges with** `{}badge available`.".format(ctx.prefix))
@badge.command(name="set")
@commands.guild_only()
async def badge_set(self, ctx, name: str, priority_num: int):
"""
Set a badge on the profile.
`priority_num` is a priority number based on:
`-1`\t\t: Invisible on profile card
`0`\t\t: Not on profile card
`1 - 5000`\t\t: Priority level. `1` is the least priority (last on badge display), `5000` is first.
"""
user = ctx.author
server = ctx.guild
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
userinfo = await self._badge_convert_dict(userinfo)
if priority_num < -1 or priority_num > 5000:
await ctx.send("**Invalid priority number! -1 to 5000.**")
return
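        # for/else: the else branch after this loop only runs if no owned badge matched the given name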
for badge in userinfo["badges"]:
await asyncio.sleep(0)
if userinfo["badges"][badge]["badge_name"] == name:
userinfo["badges"][badge]["priority_num"] = priority_num
await self.db.users.update_one(
{"user_id": userinfo["user_id"]}, {"$set": {"badges": userinfo["badges"]}}
)
await ctx.send(
"**The `{}` badge priority has been set to `{}`!**".format(
userinfo["badges"][badge]["badge_name"], priority_num
)
)
break
else:
await ctx.send("**You don't have that badge!**")
async def _badge_convert_dict(self, userinfo):
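        # make sure the user's badges field exists and is a dict keyed by "<name>_<server id>"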
if "badges" not in userinfo or not isinstance(userinfo["badges"], dict):
await self.db.users.update_one({"user_id": userinfo["user_id"]}, {"$set": {"badges": {}}})
return await self.db.users.find_one({"user_id": userinfo["user_id"]})
@checks.mod_or_permissions(manage_roles=True)
@badge.command(name="add")
@commands.guild_only()
async def badge_add(self, ctx, name: str, badge_image_url: str, border_color: str, price: int, *, description: str):
"""
Add a badge.
`name`: The name for your badge. Use one word.
`badge_image_url`: The image url for the badge. Make sure it is on a permanent hosting service like imgur or similar.
`border_color`: A hex code for color, formatted like `#990000`.
`price`:
`-1` Non-purchaseable.
`0` Free.
Otherwise it will be the number provided for price.
`description`: A description for the badge.
Use `-global` after your description to make the badge global, if you are the bot owner.
"""
user = ctx.author
server = ctx.guild
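        # badges can only be added in servers with at least a few non-bot members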
required_members = 5
members = len([member for member in server.members if not member.bot])
if members < required_members:
await ctx.send("**You may only add badges in servers with {}+ non-bot members.**".format(required_members))
return
if "-global" in description and user.id in self.bot.owner_ids:
description = description.replace(" -global", "")
serverid = "global"
servername = "global"
else:
serverid = server.id
servername = server.name
if "." in name:
await ctx.send("**Name cannot contain `.`**")
return
if not await self._valid_image_url(badge_image_url):
await ctx.send("**Background is not valid. Enter hex or image url!**")
return
if not self._is_hex(border_color):
await ctx.send("**Border color is not valid!**")
return
if price < -1:
await ctx.send("**Price is not valid!**")
return
if price > 9223372036854775807:
# max economy balance
await ctx.send("**Price needs to be lower!**")
return
if len(description.split(" ")) > 40:
await ctx.send("**Description is too long! <=40**")
return
badges = await self.db.badges.find_one({"server_id": str(serverid)})
if not badges:
await self.db.badges.insert_one({"server_id": str(serverid), "badges": {}})
badges = await self.db.badges.find_one({"server_id": str(serverid)})
new_badge = {
"badge_name": name,
"bg_img": badge_image_url,
"price": price,
"description": description,
"border_color": border_color,
"server_id": str(serverid),
"server_name": servername,
"priority_num": 0,
}
if name not in badges["badges"].keys():
# create the badge regardless
badges["badges"][name] = new_badge
await self.db.badges.update_one({"server_id": str(serverid)}, {"$set": {"badges": badges["badges"]}})
await ctx.send("**`{}` badge added in `{}` server.**".format(name, servername))
else:
# update badge in the server
badges["badges"][name] = new_badge
await self.db.badges.update_one({"server_id": serverid}, {"$set": {"badges": badges["badges"]}})
            # go through all users and update their stored copy of this badge.
            # The full badge data is copied onto each user so profile rendering doesn't need extra lookups.
async for user in self.db.users.find({}):
await asyncio.sleep(0)
try:
user = await self._badge_convert_dict(user)
userbadges = user["badges"]
badge_name = "{}_{}".format(name, serverid)
if badge_name in userbadges.keys():
user_priority_num = userbadges[badge_name]["priority_num"]
new_badge["priority_num"] = user_priority_num # maintain old priority number set by user
userbadges[badge_name] = new_badge
await self.db.users.update_one({"user_id": user["user_id"]}, {"$set": {"badges": userbadges}})
                except Exception:
                    # skip users whose badge data can't be updated
                    pass
await ctx.send("**The `{}` badge has been updated.**".format(name))
@checks.is_owner()
@badge.command(name="type")
@commands.guild_only()
async def badge_type(self, ctx, name: str):
"""Circles or bars."""
valid_types = ["circles", "bars"]
if name.lower() not in valid_types:
await ctx.send("**That is not a valid badge type!**")
return
await self.config.badge_type.set(name.lower())
await ctx.send("**Badge type set to `{}`.**".format(name.lower()))
@checks.mod_or_permissions(manage_roles=True)
@badge.command(name="delete", aliases=["remove"])
@commands.guild_only()
async def badge_delete(self, ctx, *, name: str):
"""
Delete a badge.
Use `-global` after the badge name to specify a global badge.
"""
user = ctx.author
server = ctx.guild
if "-global" in name and user.id in self.bot.owner_ids:
name = name.replace(" -global", "")
serverid = "global"
else:
serverid = server.id
# creates user if doesn't exist
await self._create_user(user, server)
if await self.config.guild(server).disabled():
await ctx.send("**Leveler commands for this server are disabled.**")
return
serverbadges = await self.db.badges.find_one({"server_id": str(serverid)})
if name in serverbadges["badges"].keys():
del serverbadges["badges"][name]
await self.db.badges.update_one(
{"server_id": serverbadges["server_id"]}, {"$set": {"badges": serverbadges["badges"]}},
)
# remove the badge if there
async for user_info_temp in self.db.users.find({}):
try:
user_info_temp = await self._badge_convert_dict(user_info_temp)
badge_name = "{}_{}".format(name, serverid)
if badge_name in user_info_temp["badges"].keys():
del user_info_temp["badges"][badge_name]
await self.db.users.update_one(
{"user_id": user_info_temp["user_id"]}, {"$set": {"badges": user_info_temp["badges"]}},
)
except Exception as exc:
log.error(f"Unable to delete badge {name} from {user_info_temp['user_id']}: {exc}")
await ctx.send("**The `{}` badge has been removed.**".format(name))
else:
await ctx.send("**That badge does not exist.**")
@checks.mod_or_permissions(manage_roles=True)
@badge.command(name="give")
@commands.guild_only()
async def badge_give(self, ctx, user: discord.Member, name: str, global_badge: str = None):
"""
Give a user a badge.
Use `-global` after the badge name to specify a global badge.
"""
org_user = ctx.message.author
server = ctx.guild
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
userinfo = await self._badge_convert_dict(userinfo)
if await self.config.guild(server).disabled():
await ctx.send("**Leveler commands for this server are disabled.**")
return
if global_badge == "-global":
badgeserver = "global"
else:
badgeserver = ctx.guild.id
serverbadges = await self.db.badges.find_one({"server_id": str(badgeserver)})
if serverbadges:
badges = serverbadges["badges"]
else:
badges = None
badge_name = "{}_{}".format(name, server.id)
if not badges:
await ctx.send("**That badge doesn't exist in this server!**")
return
        elif badge_name in userinfo["badges"].keys():
await ctx.send("**{} already has that badge!**".format(await self._is_mention(user)))
return
else:
try:
userinfo["badges"][badge_name] = badges[name]
await self.db.users.update_one({"user_id": str(user.id)}, {"$set": {"badges": userinfo["badges"]}})
await ctx.send(
"**{} has just given {} the `{}` badge!**".format(
await self._is_mention(org_user), await self._is_mention(user), name
)
)
except KeyError:
await ctx.send("**That badge doesn't exist in this server!**")
@checks.mod_or_permissions(manage_roles=True)
@badge.command(name="take")
@commands.guild_only()
async def badge_take(self, ctx, user: discord.Member, name: str):
"""Take a user's badge."""
org_user = ctx.author
server = ctx.guild
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
userinfo = await self._badge_convert_dict(userinfo)
if await self.config.guild(server).disabled():
await ctx.send("**Leveler commands for this server are disabled.**")
return
serverbadges = await self.db.badges.find_one({"server_id": str(server.id)})
badges = serverbadges["badges"]
badge_name = "{}_{}".format(name, server.id)
if name not in badges:
await ctx.send("**That badge doesn't exist in this server!**")
elif badge_name not in userinfo["badges"]:
await ctx.send("**{} does not have that badge!**".format(await self._is_mention(user)))
else:
del userinfo["badges"][badge_name]
await self.db.users.update_one({"user_id": str(user.id)}, {"$set": {"badges": userinfo["badges"]}})
await ctx.send(
"**{} has taken the `{}` badge from {}! :upside_down:**".format(
await self._is_mention(org_user), name, await self._is_mention(user)
)
)
@checks.mod_or_permissions(manage_roles=True)
@badge.command(name="link")
@commands.guild_only()
async def badge_link(self, ctx, badge_name: str, level: int):
"""Associate a badge with a level."""
server = ctx.guild
serverbadges = await self.db.badges.find_one({"server_id": str(server.id)})
if serverbadges is None:
await ctx.send("**This server does not have any badges!**")
return
if badge_name not in serverbadges["badges"].keys():
await ctx.send("**Please make sure the `{}` badge exists!**".format(badge_name))
return
else:
server_linked_badges = await self.db.badgelinks.find_one({"server_id": str(server.id)})
if not server_linked_badges:
new_server = {"server_id": str(server.id), "badges": {badge_name: str(level)}}
await self.db.badgelinks.insert_one(new_server)
else:
server_linked_badges["badges"][badge_name] = str(level)
await self.db.badgelinks.update_one(
{"server_id": str(server.id)}, {"$set": {"badges": server_linked_badges["badges"]}},
)
await ctx.send("**The `{}` badge has been linked to level `{}`.**".format(badge_name, level))
@checks.admin_or_permissions(manage_roles=True)
@badge.command(name="unlink")
@commands.guild_only()
async def badge_unlink(self, ctx, *, badge_name: str):
"""Unlink a badge/level association."""
server = ctx.guild
server_linked_badges = await self.db.badgelinks.find_one({"server_id": str(server.id)})
badge_links = server_linked_badges["badges"]
if badge_name in badge_links.keys():
await ctx.send("**Badge/Level association `{}`/`{}` removed.**".format(badge_name, badge_links[badge_name]))
del badge_links[badge_name]
await self.db.badgelinks.update_one({"server_id": str(server.id)}, {"$set": {"badges": badge_links}})
else:
await ctx.send("**The `{}` badge is not linked to any levels!**".format(badge_name))
@checks.mod_or_permissions(manage_roles=True)
@badge.command(name="listlinks")
@commands.bot_has_permissions(embed_links=True)
@commands.guild_only()
async def badge_list(self, ctx):
"""List badge/level associations."""
server = ctx.guild
server_badges = await self.db.badgelinks.find_one({"server_id": str(server.id)})
em = discord.Embed(colour=await ctx.embed_color())
em.set_author(name="Current Badge - Level Links for {}".format(server.name), icon_url=server.icon_url)
if server_badges is None or "badges" not in server_badges or server_badges["badges"] == {}:
msg = "None"
else:
badges = server_badges["badges"]
msg = "**Badge** → Level\n"
for badge in badges.keys():
await asyncio.sleep(0)
msg += "**• {} →** {}\n".format(badge, badges[badge])
em.description = msg
await ctx.send(embed=em)
@commands.group()
@commands.guild_only()
@checks.mod_or_permissions(manage_roles=True)
async def role(self, ctx):
"""Role configuration."""
pass
@role.command(name="link")
@commands.guild_only()
async def linkrole(self, ctx, role_name: str, level: int, remove_role=None):
"""Associate a role with a level. Removes previous role if given."""
server = ctx.guild
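        # remove_role, if given, will be taken from the member when the linked role is awarded at that level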
role_obj = discord.utils.find(lambda r: r.name == role_name, server.roles)
remove_role_obj = discord.utils.find(lambda r: r.name == remove_role, server.roles)
if role_obj is None or (remove_role is not None and remove_role_obj is None):
if remove_role is None:
await ctx.send("**Please make sure the `{}` role exists!**".format(role_name))
else:
await ctx.send("**Please make sure the `{}` and/or `{}` roles exist!**".format(role_name, remove_role))
else:
server_roles = await self.db.roles.find_one({"server_id": str(server.id)})
if not server_roles:
new_server = {
"server_id": str(server.id),
"roles": {role_name: {"level": str(level), "remove_role": remove_role}},
}
await self.db.roles.insert_one(new_server)
else:
if role_name not in server_roles["roles"]:
server_roles["roles"][role_name] = {}
server_roles["roles"][role_name]["level"] = str(level)
server_roles["roles"][role_name]["remove_role"] = remove_role
await self.db.roles.update_one(
{"server_id": str(server.id)}, {"$set": {"roles": server_roles["roles"]}}
)
if remove_role is None:
await ctx.send("**The `{}` role has been linked to level `{}`**".format(role_name, level))
else:
await ctx.send(
"**The `{}` role has been linked to level `{}`. "
"Will also remove `{}` role.**".format(role_name, level, remove_role)
)
@role.command(name="unlink")
@commands.guild_only()
async def unlinkrole(self, ctx, *, role_name: str):
"""Unlink a role/level association."""
server = ctx.guild
server_roles = await self.db.roles.find_one({"server_id": str(server.id)})
roles = server_roles["roles"]
if role_name in roles:
await ctx.send("**Role/Level association `{}`/`{}` removed.**".format(role_name, roles[role_name]["level"]))
del roles[role_name]
await self.db.roles.update_one({"server_id": str(server.id)}, {"$set": {"roles": roles}})
else:
await ctx.send("**The `{}` role is not linked to any levels!**".format(role_name))
@role.command(name="listlinks")
@commands.bot_has_permissions(embed_links=True)
@commands.guild_only()
async def listrole(self, ctx):
"""List role/level associations."""
server = ctx.guild
# user = ctx.author
server_roles = await self.db.roles.find_one({"server_id": str(server.id)})
em = discord.Embed(colour=await ctx.embed_color())
em.set_author(name="Current Role - Level Links for {}".format(server.name), icon_url=server.icon_url)
if server_roles is None or "roles" not in server_roles or server_roles["roles"] == {}:
msg = "None"
else:
roles = server_roles["roles"]
msg = "**Role** → Level\n"
for role in roles:
await asyncio.sleep(0)
if roles[role]["remove_role"] is not None:
msg += "**• {} →** {} (Removes: {})\n".format(
role, roles[role]["level"], roles[role]["remove_role"]
)
else:
msg += "**• {} →** {}\n".format(role, roles[role]["level"])
em.description = msg
await ctx.send(embed=em)
@lvladmin.group(name="bg")
async def lvladminbg(self, ctx):
"""Background configuration."""
pass
@checks.is_owner()
@lvladminbg.command()
@commands.guild_only()
async def addprofilebg(self, ctx, name: str, url: str):
"""Add a profile background. Proportions: (290px x 290px)"""
backgrounds = await self.get_backgrounds()
if name in backgrounds["profile"].keys():
await ctx.send("**That profile background name already exists!**")
elif not await self._valid_image_url(url):
await ctx.send("**That is not a valid image url!**")
else:
async with self.config.backgrounds() as backgrounds:
backgrounds["profile"][name] = url
await ctx.send("**New profile background (`{}`) added.**".format(name))
@checks.is_owner()
@lvladminbg.command()
@commands.guild_only()
async def addrankbg(self, ctx, name: str, url: str):
"""Add a rank background. Proportions: (360px x 100px)"""
backgrounds = await self.get_backgrounds()
if name in backgrounds["profile"].keys():
await ctx.send("**That rank background name already exists!**")
elif not await self._valid_image_url(url):
await ctx.send("**That is not a valid image url!**")
else:
async with self.config.backgrounds() as backgrounds:
backgrounds["rank"][name] = url
await ctx.send("**New rank background (`{}`) added.**".format(name))
@checks.is_owner()
@lvladminbg.command()
@commands.guild_only()
async def addlevelbg(self, ctx, name: str, url: str):
"""Add a level-up background. Proportions: (175px x 65px)"""
backgrounds = await self.get_backgrounds()
if name in backgrounds["levelup"].keys():
await ctx.send("**That level-up background name already exists!**")
elif not await self._valid_image_url(url):
await ctx.send("**That is not a valid image url!**")
else:
async with self.config.backgrounds() as backgrounds:
backgrounds["levelup"][name] = url
await ctx.send("**New level-up background (`{}`) added.**".format(name))
@checks.is_owner()
@lvladminbg.command()
@commands.guild_only()
async def setcustombg(self, ctx, bg_type: str, user_id: str, img_url: str):
"""Set one-time custom profile background"""
valid_types = ["profile", "rank", "levelup"]
type_input = bg_type.lower()
if type_input not in valid_types:
await ctx.send("**Please choose a valid type: `profile`, `rank`, `levelup`.")
return
# test if valid user_id
userinfo = await self.db.users.find_one({"user_id": str(user_id)})
if not userinfo:
await ctx.send("**That is not a valid user id!**")
return
if not await self._valid_image_url(img_url):
await ctx.send("**That is not a valid image url!**")
return
await self.db.users.update_one(
{"user_id": str(user_id)}, {"$set": {"{}_background".format(type_input): img_url}}
)
await ctx.send("**User {} custom {} background set.**".format(user_id, bg_type))
@checks.is_owner()
@lvladminbg.command()
@commands.guild_only()
async def defaultprofilebg(self, ctx, name: str):
"""Set a profile background as the new default profile background for new users.
The profile bg must be in the existing profile background list.
Does not convert existing users to the new default."""
bgs = await self.get_backgrounds()
if name in bgs["profile"].keys():
await self.config.default_profile.set(bgs["profile"][name])
return await ctx.send("**The profile background (`{}`) has been set as the new default.**".format(name))
else:
return await ctx.send("**That profile background name doesn't exist.**")
@checks.is_owner()
@lvladminbg.command()
@commands.guild_only()
async def defaultrankbg(self, ctx, name: str):
"""Set a rank background as the new default rank background for new users.
The rank bg must be in the existing rank background list.
Does not convert existing users to the new default."""
bgs = await self.get_backgrounds()
if name in bgs["rank"].keys():
await self.config.default_rank.set(bgs["rank"][name])
return await ctx.send("**The rank background (`{}`) has been set as the new default.**".format(name))
else:
return await ctx.send("**That rank background name doesn't exist.**")
@checks.is_owner()
@lvladminbg.command()
@commands.guild_only()
async def defaultlevelbg(self, ctx, name: str):
"""Set a levelup background as the new default levelup background for new users.
The levelup bg must be in the existing levelup background list.
Does not convert existing users to the new default."""
bgs = await self.get_backgrounds()
if name in bgs["levelup"].keys():
await self.config.default_levelup.set(bgs["levelup"][name])
return await ctx.send("**The levelup background (`{}`) has been set as the new default.**".format(name))
else:
return await ctx.send("**That levelup background name doesn't exist.**")
@checks.is_owner()
@lvladminbg.command()
@commands.guild_only()
async def delprofilebg(self, ctx, name: str):
"""Delete a profile background."""
backgrounds = await self.get_backgrounds()
if len(backgrounds["profile"]) == 1:
return await ctx.send(
"**Add more profile backgrounds with** `{}lvladmin bg addprofilebg` **before removing the last one!**".format(
ctx.prefix
)
)
default_profile = await self.config.default_profile()
try:
if backgrounds["profile"][name] == default_profile:
msg = (
"**That profile background is currently set as the default.**\n"
"Use `{}lvladmin bg defaultprofilebg` to set a new default profile background.\n"
"Then run `{}lvladmin bg delprofilebg {}` again once you have set the new default."
).format(ctx.prefix, ctx.prefix, name)
return await ctx.send(msg)
else:
await self.delete_background("profile", name)
except KeyError:
return await ctx.send("**That profile background name doesn't exist.**")
else:
return await ctx.send("**The profile background (`{}`) has been deleted.**".format(name))
@checks.is_owner()
@lvladminbg.command()
@commands.guild_only()
async def delrankbg(self, ctx, name: str):
"""Delete a rank background."""
backgrounds = await self.get_backgrounds()
if len(backgrounds["rank"]) == 1:
return await ctx.send(
"**Add more rank backgrounds with** `{}lvladmin bg addrankbg` **before removing the last one!**".format(
ctx.prefix
)
)
default_rank = await self.config.default_rank()
try:
if backgrounds["rank"][name] == default_rank:
msg = (
"**That rank background is currently set as the default.**\n"
"Use `{}lvladmin bg defaultrankbg` to set a new default rank background.\n"
"Then run `{}lvladmin bg delrankbg {}` again once you have set the new default."
).format(ctx.prefix, ctx.prefix, name)
return await ctx.send(msg)
else:
await self.delete_background("rank", name)
except KeyError:
return await ctx.send("**That profile background name doesn't exist.**")
else:
return await ctx.send("**The profile background (`{}`) has been deleted.**".format(name))
@checks.is_owner()
@lvladminbg.command()
@commands.guild_only()
async def dellevelbg(self, ctx, name: str):
"""Delete a level background."""
backgrounds = await self.get_backgrounds()
if len(backgrounds["levelup"]) == 1:
return await ctx.send(
"**Add more level up backgrounds with** `{}lvladmin bg addlevelbg` **before removing the last one!**".format(
ctx.prefix
)
)
default_levelup = await self.config.default_levelup()
try:
if backgrounds["levelup"][name] == default_levelup:
msg = (
"**That levelup background is currently set as the default.**\n"
"Use `{}lvladmin bg defaultlevelbg` to set a new default levelup background.\n"
"Then run `{}lvladmin bg dellevelbg {}` again once you have set the new default."
).format(ctx.prefix, ctx.prefix, name)
return await ctx.send(msg)
else:
await self.delete_background("levelup", name)
except KeyError:
return await ctx.send("**That profile background name doesn't exist.**")
else:
return await ctx.send("**The profile background (`{}`) has been deleted.**".format(name))
@commands.command(name="backgrounds")
@commands.bot_has_permissions(embed_links=True)
@commands.guild_only()
async def disp_backgrounds(self, ctx, background_type):
"""
Displays available backgrounds.
Valid background types are: `profile`, `rank`, or `levelup`.
"""
server = ctx.guild
backgrounds = await self.get_backgrounds()
if await self.config.guild(server).disabled():
await ctx.send("**Leveler commands for this server are disabled!**")
return
em = discord.Embed(colour=await ctx.embed_color())
if background_type.lower() == "profile":
em.set_author(
name="Profile Backgrounds for {}".format(self.bot.user.name), icon_url=self.bot.user.avatar_url,
)
bg_key = "profile"
elif background_type.lower() == "rank":
em.set_author(
name="Rank Backgrounds for {}".format(self.bot.user.name), icon_url=self.bot.user.avatar_url,
)
bg_key = "rank"
elif background_type.lower() == "levelup":
em.set_author(
name="Level Up Backgrounds for {}".format(self.bot.user.name), icon_url=self.bot.user.avatar_url,
)
bg_key = "levelup"
else:
bg_key = None
if bg_key:
embeds = []
total = len(backgrounds[bg_key])
cnt = 1
for bg in sorted(backgrounds[bg_key].keys()):
await asyncio.sleep(0)
em = discord.Embed(
title=bg,
color=await ctx.embed_color(),
url=backgrounds[bg_key][bg],
description=f"Background {cnt}/{total}",
)
em.set_image(url=backgrounds[bg_key][bg])
embeds.append(em)
cnt += 1
await menu(ctx, embeds, DEFAULT_CONTROLS)
else:
await ctx.send("**Invalid Background Type. (profile, rank, levelup)**")
async def draw_profile(self, user, server):
if not self._db_ready:
return
font_file = f"{bundled_data_path(self)}/font.ttf"
font_bold_file = f"{bundled_data_path(self)}/font_bold.ttf"
font_unicode_file = f"{bundled_data_path(self)}/unicode.ttf"
# name_fnt = ImageFont.truetype(font_bold_file, 22, encoding="utf-8")
header_u_fnt = ImageFont.truetype(font_unicode_file, 18, encoding="utf-8")
# title_fnt = ImageFont.truetype(font_file, 18, encoding="utf-8")
sub_header_fnt = ImageFont.truetype(font_bold_file, 14, encoding="utf-8")
# badge_fnt = ImageFont.truetype(font_bold_file, 10, encoding="utf-8")
exp_fnt = ImageFont.truetype(font_bold_file, 14, encoding="utf-8")
# large_fnt = ImageFont.truetype(font_bold_file, 33, encoding="utf-8")
level_label_fnt = ImageFont.truetype(font_bold_file, 22, encoding="utf-8")
general_info_fnt = ImageFont.truetype(font_bold_file, 15, encoding="utf-8")
general_info_u_fnt = ImageFont.truetype(font_unicode_file, 12, encoding="utf-8")
rep_fnt = ImageFont.truetype(font_bold_file, 26, encoding="utf-8")
text_fnt = ImageFont.truetype(font_bold_file, 12, encoding="utf-8")
text_u_fnt = ImageFont.truetype(font_unicode_file, 8, encoding="utf-8")
# credit_fnt = ImageFont.truetype(font_bold_file, 10, encoding="utf-8")
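        # helper: writes text one character at a time, falling back to the unicode
        # font for any glyph outside basic ASCII/punctuation so mixed-script names render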
def _write_unicode(text, init_x, y, font, unicode_font, fill):
write_pos = init_x
for char in text:
if char.isalnum() or char in string.punctuation or char in string.whitespace:
draw.text((write_pos, y), "{}".format(char), font=font, fill=fill)
write_pos += font.getsize(char)[0]
else:
draw.text((write_pos, y), "{}".format(char), font=unicode_font, fill=fill)
write_pos += unicode_font.getsize(char)[0]
# get urls
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
await self._badge_convert_dict(userinfo)
bg_url = userinfo["profile_background"]
# profile_url = user.avatar_url
# create image objects
# bg_image = Image
# profile_image = Image
async with self.session.get(bg_url) as r:
image = await r.content.read()
profile_background = BytesIO(image)
profile_avatar = BytesIO()
try:
await user.avatar_url.save(profile_avatar, seek_begin=True)
except discord.HTTPException:
profile_avatar = f"{bundled_data_path(self)}/defaultavatar.png"
bg_image = Image.open(profile_background).convert("RGBA")
profile_image = Image.open(profile_avatar).convert("RGBA")
# set canvas
bg_color = (255, 255, 255, 0)
result = Image.new("RGBA", (290, 290), bg_color)
process = Image.new("RGBA", (290, 290), bg_color)
# draw
draw = ImageDraw.Draw(process)
# puts in background
bg_image = bg_image.resize((290, 290), Image.ANTIALIAS)
bg_image = bg_image.crop((0, 0, 290, 290))
result.paste(bg_image, (0, 0))
# draw filter
draw.rectangle([(0, 0), (290, 290)], fill=(0, 0, 0, 10))
# draw transparent overlay
vert_pos = 110
left_pos = 70
right_pos = 285
title_height = 22
# gap = 3
# determines rep section color
if "rep_color" not in userinfo.keys() or not userinfo["rep_color"]:
rep_fill = (92, 130, 203, 230)
else:
rep_fill = tuple(userinfo["rep_color"])
# determines badge section color, should be behind the titlebar
if "badge_col_color" not in userinfo.keys() or not userinfo["badge_col_color"]:
badge_fill = (128, 151, 165, 230)
else:
badge_fill = tuple(userinfo["badge_col_color"])
if "profile_info_color" in userinfo.keys():
info_color = tuple(userinfo["profile_info_color"])
else:
info_color = (30, 30, 30, 220)
draw.rectangle([(left_pos - 20, vert_pos + title_height), (right_pos, 156)], fill=info_color) # title box
draw.rectangle([(100, 159), (285, 212)], fill=info_color) # general content
draw.rectangle([(100, 215), (285, 285)], fill=info_color) # info content
# stick in credits if needed
# if bg_url in bg_credits.keys():
# credit_text = " ".join("Background by {}".format(bg_credits[bg_url]))
# credit_init = 290 - credit_fnt.getsize(credit_text)[0]
# draw.text((credit_init, 0), credit_text, font=credit_fnt, fill=(0,0,0,100))
draw.rectangle(
[(5, vert_pos), (right_pos, vert_pos + title_height)], fill=(230, 230, 230, 230)
) # name box in front
# draw level circle
multiplier = 8
lvl_circle_dia = 104
circle_left = 1
circle_top = 42
raw_length = lvl_circle_dia * multiplier
# create mask
mask = Image.new("L", (raw_length, raw_length), 0)
draw_thumb = ImageDraw.Draw(mask)
draw_thumb.ellipse((0, 0) + (raw_length, raw_length), fill=255, outline=0)
# drawing level bar calculate angle
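        # the exp arc sweeps 360 degrees * (current_exp / exp required for next level),
        # measured from the top of the circle (hence the -90 degree start angle below)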
start_angle = -90 # from top instead of 3oclock
angle = (
int(
360
* (
userinfo["servers"][str(server.id)]["current_exp"]
/ self._required_exp(userinfo["servers"][str(server.id)]["level"])
)
)
+ start_angle
)
# level outline
lvl_circle = Image.new("RGBA", (raw_length, raw_length))
draw_lvl_circle = ImageDraw.Draw(lvl_circle)
draw_lvl_circle.ellipse(
[0, 0, raw_length, raw_length],
fill=(badge_fill[0], badge_fill[1], badge_fill[2], 180),
outline=(255, 255, 255, 250),
)
# determines exp bar color
if "profile_exp_color" not in userinfo.keys() or not userinfo["profile_exp_color"]:
exp_fill = (255, 255, 255, 230)
else:
exp_fill = tuple(userinfo["profile_exp_color"])
draw_lvl_circle.pieslice(
[0, 0, raw_length, raw_length], start_angle, angle, fill=exp_fill, outline=(255, 255, 255, 255),
)
# put on level bar circle
lvl_circle = lvl_circle.resize((lvl_circle_dia, lvl_circle_dia), Image.ANTIALIAS)
lvl_bar_mask = mask.resize((lvl_circle_dia, lvl_circle_dia), Image.ANTIALIAS)
process.paste(lvl_circle, (circle_left, circle_top), lvl_bar_mask)
# draws boxes
draw.rectangle([(5, 133), (100, 285)], fill=badge_fill) # badges
draw.rectangle([(10, 138), (95, 168)], fill=rep_fill) # reps
total_gap = 10
# border = int(total_gap / 2)
profile_size = lvl_circle_dia - total_gap
# raw_length = profile_size * multiplier
# put in profile picture
total_gap = 6
border = int(total_gap / 2)
profile_size = lvl_circle_dia - total_gap
mask = mask.resize((profile_size, profile_size), Image.ANTIALIAS)
profile_image = profile_image.resize((profile_size, profile_size), Image.ANTIALIAS)
process.paste(profile_image, (circle_left + border, circle_top + border), mask)
# write label text
white_color = (240, 240, 240, 255)
light_color = (160, 160, 160, 255)
head_align = 105
_write_unicode(
self._truncate_text(self._name(user, 22), 22),
head_align,
vert_pos + 3,
level_label_fnt,
header_u_fnt,
(110, 110, 110, 255),
) # NAME
_write_unicode(userinfo["title"], head_align, 136, level_label_fnt, header_u_fnt, white_color)
# draw level box
level_right = 290
level_left = level_right - 78
draw.rectangle(
[(level_left, 0), (level_right, 21)], fill=(badge_fill[0], badge_fill[1], badge_fill[2], 160),
) # box
lvl_text = "LEVEL {}".format(userinfo["servers"][str(server.id)]["level"])
if badge_fill == (128, 151, 165, 230):
lvl_color = white_color
else:
lvl_color = self._contrast(badge_fill, rep_fill, exp_fill)
draw.text(
(self._center(level_left + 2, level_right, lvl_text, level_label_fnt), 2),
lvl_text,
font=level_label_fnt,
fill=(lvl_color[0], lvl_color[1], lvl_color[2], 255),
) # Level #
rep_text = "{} REP".format(userinfo["rep"])
draw.text(
(self._center(7, 100, rep_text, rep_fnt), 144), rep_text, font=rep_fnt, fill=white_color,
)
exp_text = "{}/{}".format(
userinfo["servers"][str(server.id)]["current_exp"],
self._required_exp(userinfo["servers"][str(server.id)]["level"]),
) # Exp
exp_color = exp_fill
draw.text((105, 99), exp_text, font=exp_fnt, fill=(exp_color[0], exp_color[1], exp_color[2], 255)) # Exp Text
# determine info text color
dark_text = (35, 35, 35, 230)
info_text_color = self._contrast(info_color, light_color, dark_text)
# lvl_left = 100
label_align = 105
_write_unicode("Rank:", label_align, 165, general_info_fnt, general_info_u_fnt, info_text_color)
draw.text((label_align, 180), "Exp:", font=general_info_fnt, fill=info_text_color) # Exp
draw.text((label_align, 195), "Credits:", font=general_info_fnt, fill=info_text_color) # Credits
# local stats
num_local_align = 172
# local_symbol = "\U0001F3E0 "
if "linux" in platform.system().lower():
local_symbol = "\U0001F3E0 "
else:
local_symbol = "S "
s_rank_txt = local_symbol + self._truncate_text(f"#{await self._find_server_rank(user, server)}", 8)
_write_unicode(
s_rank_txt,
num_local_align - general_info_u_fnt.getsize(local_symbol)[0],
165,
general_info_fnt,
general_info_u_fnt,
info_text_color,
) # Rank
s_exp_txt = self._truncate_text(f"{await self._find_server_exp(user, server)}", 8)
_write_unicode(s_exp_txt, num_local_align, 180, general_info_fnt, general_info_u_fnt, info_text_color) # Exp
credits = await bank.get_balance(user)
credit_txt = "${}".format(credits)
draw.text(
(num_local_align, 195), self._truncate_text(credit_txt, 18), font=general_info_fnt, fill=info_text_color,
) # Credits
# global stats
num_align = 230
if "linux" in platform.system().lower():
global_symbol = "\U0001F30E "
fine_adjust = 1
else:
global_symbol = "G "
fine_adjust = 0
global_rank = await self._find_global_rank(user)
rank_number = global_rank if global_rank else "1000+"
rank_txt = global_symbol + self._truncate_text(f"#{rank_number}", 8)
exp_txt = self._truncate_text(f"{userinfo['total_exp']}", 8)
_write_unicode(
rank_txt,
num_align - general_info_u_fnt.getsize(global_symbol)[0] + fine_adjust,
165,
general_info_fnt,
general_info_u_fnt,
info_text_color,
) # Rank
_write_unicode(exp_txt, num_align, 180, general_info_fnt, general_info_u_fnt, info_text_color) # Exp
draw.text((105, 220), "Info Box", font=sub_header_fnt, fill=white_color) # Info Box
margin = 105
offset = 238
for line in textwrap.wrap(userinfo["info"], width=42):
await asyncio.sleep(0)
# draw.text((margin, offset), line, font=text_fnt, fill=(70,70,70,255))
_write_unicode(line, margin, offset, text_fnt, text_u_fnt, info_text_color)
offset += text_fnt.getsize(line)[1] + 2
# sort badges
priority_badges = []
for badgename in userinfo["badges"].keys():
await asyncio.sleep(0)
badge = userinfo["badges"][badgename]
priority_num = badge["priority_num"]
if priority_num != 0 and priority_num != -1:
priority_badges.append((badge, priority_num))
sorted_badges = sorted(priority_badges, key=operator.itemgetter(1), reverse=True)
# TODO: simplify this. it shouldn't be this complicated... sacrifices conciseness for customizability
if await self.config.badge_type() == "circles":
# circles require antialiasing
vert_pos = 171
right_shift = 0
left = 9 + right_shift
# right = 52 + right_shift
size = 27
total_gap = 4 # /2
hor_gap = 3
vert_gap = 2
border_width = int(total_gap / 2)
mult = [
(0, 0),
(1, 0),
(2, 0),
(0, 1),
(1, 1),
(2, 1),
(0, 2),
(1, 2),
(2, 2),
(0, 3),
(1, 3),
(2, 3),
]
i = 0
for pair in sorted_badges[:12]:
try:
coord = (
left + int(mult[i][0]) * int(hor_gap + size),
vert_pos + int(mult[i][1]) * int(vert_gap + size),
)
badge = pair[0]
bg_color = badge["bg_img"]
border_color = badge["border_color"]
multiplier = 6 # for antialiasing
raw_length = size * multiplier
# draw mask circle
mask = Image.new("L", (raw_length, raw_length), 0)
draw_thumb = ImageDraw.Draw(mask)
draw_thumb.ellipse((0, 0) + (raw_length, raw_length), fill=255, outline=0)
# check image
badge_image = await self._valid_image_url(bg_color)
if not badge_image:
continue
badge_image = badge_image.resize((raw_length, raw_length), Image.ANTIALIAS)
# structured like this because if border = 0, still leaves outline.
if border_color:
square = Image.new("RGBA", (raw_length, raw_length), border_color)
# put border on ellipse/circle
output = ImageOps.fit(square, (raw_length, raw_length), centering=(0.5, 0.5))
output = output.resize((size, size), Image.ANTIALIAS)
outer_mask = mask.resize((size, size), Image.ANTIALIAS)
process.paste(output, coord, outer_mask)
# put on ellipse/circle
output = ImageOps.fit(badge_image, (raw_length, raw_length), centering=(0.5, 0.5))
output = output.resize((size - total_gap, size - total_gap), Image.ANTIALIAS)
inner_mask = mask.resize((size - total_gap, size - total_gap), Image.ANTIALIAS)
process.paste(
output, (coord[0] + border_width, coord[1] + border_width), inner_mask,
)
else:
# put on ellipse/circle
output = ImageOps.fit(badge_image, (raw_length, raw_length), centering=(0.5, 0.5))
output = output.resize((size, size), Image.ANTIALIAS)
outer_mask = mask.resize((size, size), Image.ANTIALIAS)
process.paste(output, coord, outer_mask)
                except Exception:  # skip badges whose image can't be fetched or drawn
pass
i += 1
elif await self.config.badge_type() == "bars":
vert_pos = 187
i = 0
for pair in sorted_badges[:5]:
badge = pair[0]
bg_color = badge["bg_img"]
border_color = badge["border_color"]
left_pos = 10
right_pos = 95
total_gap = 4
border_width = int(total_gap / 2)
bar_size = (85, 15)
# check image
badge_image = await self._valid_image_url(bg_color)
if not badge_image:
continue
if border_color is not None:
draw.rectangle(
[(left_pos, vert_pos + i * 17), (right_pos, vert_pos + 15 + i * 17)],
fill=border_color,
outline=border_color,
) # border
badge_image = badge_image.resize(
(bar_size[0] - total_gap + 1, bar_size[1] - total_gap + 1), Image.ANTIALIAS,
)
process.paste(
badge_image, (left_pos + border_width, vert_pos + border_width + i * 17),
)
else:
badge_image = badge_image.resize(bar_size, Image.ANTIALIAS)
process.paste(badge_image, (left_pos, vert_pos + i * 17))
vert_pos += 3 # spacing
i += 1
image_object = BytesIO()
result = Image.alpha_composite(result, process)
result.save(image_object, format="PNG")
image_object.seek(0)
return discord.File(image_object, f"profile_{user.id}_{server.id}_{int(datetime.now().timestamp())}.png")
# returns color that contrasts better in background
def _contrast(self, bg_color, color1, color2):
color1_ratio = self._contrast_ratio(bg_color, color1)
color2_ratio = self._contrast_ratio(bg_color, color2)
if color1_ratio >= color2_ratio:
return color1
else:
return color2
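    # _luminance/_contrast_ratio approximate WCAG-style contrast: luminance uses the
    # Rec. 709 channel weights (0.2126 R, 0.7152 G, 0.0722 B) on raw 0-255 values, and
    # the ratio compares the lighter and darker luminance (each offset by 0.05)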
@staticmethod
def _luminance(color):
# convert to greyscale
luminance = float((0.2126 * color[0]) + (0.7152 * color[1]) + (0.0722 * color[2]))
return luminance
def _contrast_ratio(self, bgcolor, foreground):
f_lum = float(self._luminance(foreground) + 0.05)
bg_lum = float(self._luminance(bgcolor) + 0.05)
if bg_lum > f_lum:
return bg_lum / f_lum
else:
return f_lum / bg_lum
# returns a string with possibly a nickname
def _name(self, user, max_length):
if user.name == user.display_name:
return user.name
else:
return "{} ({})".format(
user.name, self._truncate_text(user.display_name, max_length - len(user.name) - 3), max_length,
)
@staticmethod
async def _add_dropshadow(image, offset=(4, 4), background=0x000, shadow=0x0F0, border=3, iterations=5):
totalWidth = image.size[0] + abs(offset[0]) + 2 * border
totalHeight = image.size[1] + abs(offset[1]) + 2 * border
back = Image.new(image.mode, (totalWidth, totalHeight), background)
# Place the shadow, taking into account the offset from the image
shadowLeft = border + max(offset[0], 0)
shadowTop = border + max(offset[1], 0)
back.paste(shadow, [shadowLeft, shadowTop, shadowLeft + image.size[0], shadowTop + image.size[1]])
n = 0
while n < iterations:
back = back.filter(ImageFilter.BLUR)
n += 1
# Paste the input image onto the shadow backdrop
imageLeft = border - min(offset[0], 0)
imageTop = border - min(offset[1], 0)
back.paste(image, (imageLeft, imageTop))
return back
async def draw_rank(self, user, server):
# fonts
# font_file = f"{bundled_data_path(self)}/font.ttf"
font_bold_file = f"{bundled_data_path(self)}/font_bold.ttf"
font_unicode_file = f"{bundled_data_path(self)}/unicode.ttf"
name_fnt = ImageFont.truetype(font_bold_file, 22)
header_u_fnt = ImageFont.truetype(font_unicode_file, 18)
# sub_header_fnt = ImageFont.truetype(font_bold_file, 14)
# badge_fnt = ImageFont.truetype(font_bold_file, 12)
# large_fnt = ImageFont.truetype(font_bold_file, 33)
level_label_fnt = ImageFont.truetype(font_bold_file, 22)
general_info_fnt = ImageFont.truetype(font_bold_file, 15)
# general_info_u_fnt = ImageFont.truetype(font_unicode_file, 11)
# credit_fnt = ImageFont.truetype(font_bold_file, 10)
def _write_unicode(text, init_x, y, font, unicode_font, fill):
write_pos = init_x
for char in text:
if char.isalnum() or char in string.punctuation or char in string.whitespace:
draw.text((write_pos, y), char, font=font, fill=fill)
write_pos += font.getsize(char)[0]
else:
draw.text((write_pos, y), "{}".format(char), font=unicode_font, fill=fill)
write_pos += unicode_font.getsize(char)[0]
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
# get urls
bg_url = userinfo["rank_background"]
server_icon_url = server.icon_url_as(format="png", size=256)
# guild icon image
if not server_icon_url._url:
server_icon = f"{bundled_data_path(self)}/defaultguildicon.png"
else:
server_icon = BytesIO()
try:
await server_icon_url.save(server_icon, seek_begin=True)
except discord.HTTPException:
server_icon = f"{bundled_data_path(self)}/defaultguildicon.png"
# rank bg image
async with self.session.get(bg_url) as r:
image = await r.content.read()
rank_background = BytesIO(image)
# user icon image
rank_avatar = BytesIO()
try:
await user.avatar_url.save(rank_avatar, seek_begin=True)
except discord.HTTPException:
rank_avatar = f"{bundled_data_path(self)}/defaultavatar.png"
# set all to RGBA
bg_image = Image.open(rank_background).convert("RGBA")
profile_image = Image.open(rank_avatar).convert("RGBA")
server_icon_image = Image.open(server_icon).convert("RGBA")
# set canvas
width = 360
height = 100
bg_color = (255, 255, 255, 0)
result = Image.new("RGBA", (width, height), bg_color)
process = Image.new("RGBA", (width, height), bg_color)
# puts in background
bg_image = bg_image.resize((width, height), Image.ANTIALIAS)
bg_image = bg_image.crop((0, 0, width, height))
result.paste(bg_image, (0, 0))
# draw
draw = ImageDraw.Draw(process)
# draw transparent overlay
vert_pos = 5
left_pos = 70
right_pos = width - vert_pos
title_height = 22
gap = 3
draw.rectangle(
[(left_pos - 20, vert_pos), (right_pos, vert_pos + title_height)], fill=(230, 230, 230, 230),
) # title box
content_top = vert_pos + title_height + gap
content_bottom = 100 - vert_pos
if "rank_info_color" in userinfo.keys():
info_color = tuple(userinfo["rank_info_color"])
info_color = (
info_color[0],
info_color[1],
info_color[2],
160,
) # increase transparency
else:
info_color = (30, 30, 30, 160)
draw.rectangle(
[(left_pos - 20, content_top), (right_pos, content_bottom)], fill=info_color, outline=(180, 180, 180, 180),
) # content box
# stick in credits if needed
# if bg_url in bg_credits.keys():
# credit_text = " ".join("{}".format(bg_credits[bg_url]))
# draw.text((2, 92), credit_text, font=credit_fnt, fill=(0,0,0,190))
# draw level circle
multiplier = 6
lvl_circle_dia = 94
circle_left = 15
circle_top = int((height - lvl_circle_dia) / 2)
raw_length = lvl_circle_dia * multiplier
# create mask
mask = Image.new("L", (raw_length, raw_length), 0)
draw_thumb = ImageDraw.Draw(mask)
draw_thumb.ellipse((0, 0) + (raw_length, raw_length), fill=255, outline=0)
# drawing level bar calculate angle
start_angle = -90 # from top instead of 3oclock
angle = (
int(
360
* (
userinfo["servers"][str(server.id)]["current_exp"]
/ self._required_exp(userinfo["servers"][str(server.id)]["level"])
)
)
+ start_angle
)
lvl_circle = Image.new("RGBA", (raw_length, raw_length))
draw_lvl_circle = ImageDraw.Draw(lvl_circle)
draw_lvl_circle.ellipse([0, 0, raw_length, raw_length], fill=(180, 180, 180, 180), outline=(255, 255, 255, 220))
# determines exp bar color
if "rank_exp_color" not in userinfo.keys() or not userinfo["rank_exp_color"]:
exp_fill = (255, 255, 255, 230)
else:
exp_fill = tuple(userinfo["rank_exp_color"])
draw_lvl_circle.pieslice(
[0, 0, raw_length, raw_length], start_angle, angle, fill=exp_fill, outline=(255, 255, 255, 230),
)
# put on level bar circle
lvl_circle = lvl_circle.resize((lvl_circle_dia, lvl_circle_dia), Image.ANTIALIAS)
lvl_bar_mask = mask.resize((lvl_circle_dia, lvl_circle_dia), Image.ANTIALIAS)
process.paste(lvl_circle, (circle_left, circle_top), lvl_bar_mask)
# draws mask
total_gap = 10
border = int(total_gap / 2)
profile_size = lvl_circle_dia - total_gap
raw_length = profile_size * multiplier
# put in profile picture
output = ImageOps.fit(profile_image, (raw_length, raw_length), centering=(0.5, 0.5))
output.resize((profile_size, profile_size), Image.ANTIALIAS)
mask = mask.resize((profile_size, profile_size), Image.ANTIALIAS)
profile_image = profile_image.resize((profile_size, profile_size), Image.ANTIALIAS)
process.paste(profile_image, (circle_left + border, circle_top + border), mask)
# draw level box
level_left = 274
level_right = right_pos
draw.rectangle([(level_left, vert_pos), (level_right, vert_pos + title_height)], fill="#AAA") # box
lvl_text = "LEVEL {}".format(userinfo["servers"][str(server.id)]["level"])
draw.text(
(self._center(level_left, level_right, lvl_text, level_label_fnt), vert_pos + 3),
lvl_text,
font=level_label_fnt,
fill=(110, 110, 110, 255),
) # Level #
# labels text colors
white_text = (240, 240, 240, 255)
dark_text = (35, 35, 35, 230)
label_text_color = self._contrast(info_color, white_text, dark_text)
# draw text
grey_color = (110, 110, 110, 255)
# white_color = (230, 230, 230, 255)
# put in server picture
server_size = content_bottom - content_top - 10
server_border_size = server_size + 4
radius = 20
light_border = (150, 150, 150, 180)
dark_border = (90, 90, 90, 180)
border_color = self._contrast(info_color, light_border, dark_border)
draw_server_border = Image.new(
"RGBA", (server_border_size * multiplier, server_border_size * multiplier), border_color,
)
draw_server_border = self._add_corners(draw_server_border, int(radius * multiplier / 2))
draw_server_border = draw_server_border.resize((server_border_size, server_border_size), Image.ANTIALIAS)
server_icon_image = server_icon_image.resize(
(server_size * multiplier, server_size * multiplier), Image.ANTIALIAS
)
server_icon_image = self._add_corners(server_icon_image, int(radius * multiplier / 2) - 10)
server_icon_image = server_icon_image.resize((server_size, server_size), Image.ANTIALIAS)
process.paste(
draw_server_border, (circle_left + profile_size + 2 * border + 8, content_top + 3), draw_server_border,
)
process.paste(
server_icon_image, (circle_left + profile_size + 2 * border + 10, content_top + 5), server_icon_image,
)
# name
left_text_align = 130
_write_unicode(
self._truncate_text(self._name(user, 20), 20),
left_text_align - 12,
vert_pos + 3,
name_fnt,
header_u_fnt,
grey_color,
) # Name
# divider bar
draw.rectangle([(187, 45), (188, 85)], fill=(160, 160, 160, 220))
# labels
label_align = 200
draw.text((label_align, 38), "Server Rank:", font=general_info_fnt, fill=label_text_color) # Server Rank
draw.text((label_align, 58), "Server Exp:", font=general_info_fnt, fill=label_text_color) # Server Exp
draw.text((label_align, 78), "Credits:", font=general_info_fnt, fill=label_text_color) # Credit
# info
right_text_align = 290
rank_txt = f"#{await self._find_server_rank(user, server)}"
draw.text(
(right_text_align, 38), self._truncate_text(rank_txt, 12), font=general_info_fnt, fill=label_text_color,
) # Rank
exp_txt = f"{await self._find_server_exp(user, server)}"
draw.text(
(right_text_align, 58), self._truncate_text(exp_txt, 12), font=general_info_fnt, fill=label_text_color,
) # Exp
credits = await bank.get_balance(user)
credit_txt = f"${credits}"
draw.text(
(right_text_align, 78), self._truncate_text(credit_txt, 12), font=general_info_fnt, fill=label_text_color,
) # Credits
image_object = BytesIO()
result = Image.alpha_composite(result, process)
result.save(image_object, format="PNG")
image_object.seek(0)
return discord.File(image_object, f"rank_{user.id}_{server.id}_{int(datetime.now().timestamp())}.png")
@staticmethod
def _add_corners(im, rad, multiplier=6):
raw_length = rad * 2 * multiplier
circle = Image.new("L", (raw_length, raw_length), 0)
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, raw_length, raw_length), fill=255)
circle = circle.resize((rad * 2, rad * 2), Image.ANTIALIAS)
alpha = Image.new("L", im.size, 255)
w, h = im.size
alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))
alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))
alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))
alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))
im.putalpha(alpha)
return im
async def draw_levelup(self, user, server):
if not self._db_ready:
return
font_bold_file = f"{bundled_data_path(self)}/font_bold.ttf"
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
# get urls
bg_url = userinfo["levelup_background"]
# profile_url = user.avatar_url
# create image objects
# bg_image = Image
# profile_image = Image
async with self.session.get(bg_url) as r:
image = await r.content.read()
level_background = BytesIO(image)
level_avatar = BytesIO()
try:
await user.avatar_url.save(level_avatar, seek_begin=True)
except discord.HTTPException:
level_avatar = f"{bundled_data_path(self)}/defaultavatar.png"
bg_image = Image.open(level_background).convert("RGBA")
profile_image = Image.open(level_avatar).convert("RGBA")
# set canvas
width = 175
height = 65
bg_color = (255, 255, 255, 0)
result = Image.new("RGBA", (width, height), bg_color)
process = Image.new("RGBA", (width, height), bg_color)
# draw
draw = ImageDraw.Draw(process)
# puts in background
bg_image = bg_image.resize((width, height), Image.ANTIALIAS)
bg_image = bg_image.crop((0, 0, width, height))
result.paste(bg_image, (0, 0))
# draw transparent overlay
if "levelup_info_color" in userinfo.keys():
info_color = tuple(userinfo["levelup_info_color"])
info_color = (
info_color[0],
info_color[1],
info_color[2],
150,
) # increase transparency
else:
info_color = (30, 30, 30, 150)
draw.rectangle([(38, 5), (170, 60)], fill=info_color) # info portion
# draw level circle
multiplier = 6
lvl_circle_dia = 60
circle_left = 4
circle_top = int((height - lvl_circle_dia) / 2)
raw_length = lvl_circle_dia * multiplier
# create mask
mask = Image.new("L", (raw_length, raw_length), 0)
draw_thumb = ImageDraw.Draw(mask)
draw_thumb.ellipse((0, 0) + (raw_length, raw_length), fill=255, outline=0)
# drawing level bar calculate angle
# start_angle = -90 # from top instead of 3oclock
lvl_circle = Image.new("RGBA", (raw_length, raw_length))
draw_lvl_circle = ImageDraw.Draw(lvl_circle)
draw_lvl_circle.ellipse([0, 0, raw_length, raw_length], fill=(255, 255, 255, 220), outline=(255, 255, 255, 220))
# put on level bar circle
lvl_circle = lvl_circle.resize((lvl_circle_dia, lvl_circle_dia), Image.ANTIALIAS)
lvl_bar_mask = mask.resize((lvl_circle_dia, lvl_circle_dia), Image.ANTIALIAS)
process.paste(lvl_circle, (circle_left, circle_top), lvl_bar_mask)
# draws mask
total_gap = 6
border = int(total_gap / 2)
profile_size = lvl_circle_dia - total_gap
raw_length = profile_size * multiplier
# put in profile picture
output = ImageOps.fit(profile_image, (raw_length, raw_length), centering=(0.5, 0.5))
# output = output.resize((profile_size, profile_size), Image.ANTIALIAS)
mask = mask.resize((profile_size, profile_size), Image.ANTIALIAS)
profile_image = profile_image.resize((profile_size, profile_size), Image.ANTIALIAS)
process.paste(profile_image, (circle_left + border, circle_top + border), mask)
# fonts
# level_fnt2 = ImageFont.truetype(font_bold_file, 19)
level_fnt = ImageFont.truetype(font_bold_file, 26)
# write label text
white_text = (240, 240, 240, 255)
dark_text = (35, 35, 35, 230)
level_up_text = self._contrast(info_color, white_text, dark_text)
lvl_text = "LEVEL {}".format(userinfo["servers"][str(server.id)]["level"])
draw.text(
(self._center(50, 170, lvl_text, level_fnt), 22), lvl_text, font=level_fnt, fill=level_up_text,
) # Level Number
image_object = BytesIO()
result = Image.alpha_composite(result, process)
result.save(image_object, format="PNG")
image_object.seek(0)
return discord.File(image_object, f"levelup_{user.id}_{server.id}_{int(datetime.now().timestamp())}.png")
@commands.Cog.listener("on_message_without_command")
async def _handle_on_message(self, message):
server = message.guild
user = message.author
if not server or user.bot:
return
if await self.config.guild(server).disabled():
return
self._message_tasks.append([user, server, message]) # Add to task list
async def process_tasks(self): # Run all tasks and resets task list
log.debug("_process_tasks is starting for batch xp writing")
log.debug(f"DB ready state: {self._db_ready}")
await self.bot.wait_until_red_ready()
with contextlib.suppress(asyncio.CancelledError):
while True:
if not self._db_ready:
log.debug("_process_tasks has exited early because db is not ready")
await asyncio.sleep(5)
log.debug("_process_tasks is trying to connect again")
continue
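                # snapshot the queued (user, server, message) entries and reset the shared
                # list so messages that arrive during this batch are kept for the next pass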
tasks = copy(self._message_tasks)
self._message_tasks = []
for a in tasks:
try:
await self._process_user_on_message(*a)
await asyncio.sleep(0.1)
except asyncio.CancelledError:
raise asyncio.CancelledError
except Exception as err:
log.error(
f"Error while giving XP to {a[0]}({a[0].id}) in {a[1]}({a[1].id})", exc_info=err,
)
log.debug("Process task sleeping for 30 seconds")
await asyncio.sleep(30)
async def _process_user_on_message(self, user, server, message): # Process a users message
if not self._db_ready:
log.debug("process_user_on_message has exited early because db is not ready")
return
text = message.content
curr_time = time.time()
log.debug(f"Processing {user} {server}")
prefix = await self.bot.command_prefix(self.bot, message)
# creates user if doesn't exist, bots are not logged.
userinfo = await self._create_user(user, server)
if not userinfo:
return
user_id = f"{user.id}"
userinfo = await self.db.users.find_one({"user_id": user_id})
# check if chat_block exists
if "chat_block" not in userinfo:
userinfo["chat_block"] = 0
if "last_message" not in userinfo:
userinfo["last_message"] = 0
await asyncio.sleep(0)
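        # XP is only awarded when every condition holds: 120s since the user's last award,
        # the message isn't a command invocation, it's longer than 10 characters, it isn't
        # a repeat of their previous message, and the channel isn't on the ignore list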
if all(
[
float(curr_time) - float(userinfo["chat_block"]) >= 120,
not any(text.startswith(x) for x in prefix),
len(message.content) > 10,
message.content != userinfo["last_message"],
message.channel.id not in await self.config.guild(server).ignored_channels(),
]
):
log.debug(f"{user} {server}'s message qualifies for xp awarding")
await asyncio.sleep(0)
xp = await self.config.xp()
await self._process_exp(message, userinfo, random.randint(xp[0], xp[1]))
await asyncio.sleep(0)
await self._give_chat_credit(user, server)
else:
log.debug(f"{user} {server}'s message DOES NOT qualify for xp awarding")
async def _process_exp(self, message, userinfo, exp: int):
if not self._db_ready:
log.debug("process_exp has exited early because db is not ready")
return
server = message.guild
channel = message.channel
user = message.author
# add to total exp
required = self._required_exp(userinfo["servers"][str(server.id)]["level"])
try:
await self.db.users.update_one(
{"user_id": str(user.id)}, {"$set": {"total_exp": userinfo["total_exp"] + exp}}
)
await asyncio.sleep(0)
except Exception as e:
log.warning(f"Could not add XP to {user}!\n", exc_info=e)
if userinfo["servers"][str(server.id)]["current_exp"] + exp >= required:
await asyncio.sleep(0)
userinfo["servers"][str(server.id)]["level"] += 1
await self.db.users.update_one(
{"user_id": str(user.id)},
{
"$set": {
f"servers.{server.id}.level": userinfo["servers"][str(server.id)]["level"],
f"servers.{server.id}.current_exp": userinfo["servers"][str(server.id)]["current_exp"]
+ exp
- required,
"chat_block": time.time(),
"last_message": message.content,
}
},
)
await asyncio.sleep(0)
await self._handle_levelup(user, userinfo, server, channel)
else:
await self.db.users.update_one(
{"user_id": str(user.id)},
{
"$set": {
f"servers.{server.id}.current_exp": userinfo["servers"][str(server.id)]["current_exp"] + exp,
"chat_block": time.time(),
"last_message": message.content,
}
},
)
log.debug("process_exp has written the exp")
async def _handle_levelup(self, user, userinfo, server, channel):
if not self._db_ready:
log.debug("_handle_levelup has exited early because db is not ready")
return
# channel lock implementation
lock_channel_id = await self.config.guild(server).lvl_msg_lock()
if lock_channel_id:
lock_channel = self.bot.get_channel(lock_channel_id)
if not lock_channel:
await self.config.guild(server).lvl_msg_lock.set(None)
else:
channel = lock_channel
server_identifier = "" # super hacky
name = await self._is_mention(user) # also super hacky
# private message takes precedent, of course
if await self.config.guild(server).private_lvl_message():
server_identifier = f" on {server.name}"
channel = user
name = "You"
new_level = str(userinfo["servers"][str(server.id)]["level"])
self.bot.dispatch("leveler_levelup", user, new_level)
# add to appropriate role if necessary
# try:
server_roles = await self.db.roles.find_one({"server_id": str(server.id)})
await asyncio.sleep(0)
if server_roles is not None:
for role in server_roles["roles"].keys():
await asyncio.sleep(0)
if int(server_roles["roles"][role]["level"]) == int(new_level):
await asyncio.sleep(0)
add_role = discord.utils.get(server.roles, name=role)
if add_role is not None:
await asyncio.sleep(0)
try:
await user.add_roles(add_role, reason="Levelup")
except discord.Forbidden:
await channel.send("Levelup role adding failed: Missing Permissions")
except discord.HTTPException:
await channel.send("Levelup role adding failed")
remove_role = discord.utils.get(server.roles, name=server_roles["roles"][role]["remove_role"])
if remove_role is not None:
await asyncio.sleep(0)
try:
await user.remove_roles(remove_role, reason="Levelup")
except discord.Forbidden:
await channel.send("Levelup role removal failed: Missing Permissions")
except discord.HTTPException:
await channel.send("Levelup role removal failed")
try:
server_linked_badges = await self.db.badgelinks.find_one({"server_id": str(server.id)})
await asyncio.sleep(0)
if server_linked_badges is not None:
for badge_name in server_linked_badges["badges"]:
await asyncio.sleep(0)
if int(server_linked_badges["badges"][badge_name]) == int(new_level):
server_badges = await self.db.badges.find_one({"server_id": str(server.id)})
await asyncio.sleep(0)
if server_badges is not None and badge_name in server_badges["badges"].keys():
await asyncio.sleep(0)
userinfo_db = await self.db.users.find_one({"user_id": str(user.id)})
new_badge_name = "{}_{}".format(badge_name, server.id)
userinfo_db["badges"][new_badge_name] = server_badges["badges"][badge_name]
await self.db.users.update_one(
{"user_id": str(user.id)}, {"$set": {"badges": userinfo_db["badges"]}},
)
except Exception as exc:
await channel.send(f"Error. Badge was not given: {exc}")
if await self.config.guild(server).lvl_msg(): # if lvl msg is enabled
if await self.config.guild(server).text_only():
if all(
[channel.permissions_for(server.me).send_messages, channel.permissions_for(server.me).embed_links,]
):
async with channel.typing():
em = discord.Embed(
description="**{} just gained a level{}! (LEVEL {})**".format(
name, server_identifier, new_level
),
colour=user.colour,
)
await channel.send(embed=em)
else:
if all(
[channel.permissions_for(server.me).send_messages, channel.permissions_for(server.me).attach_files,]
):
async with channel.typing():
file = await self.draw_levelup(user, server)
await channel.send(
"**{} just gained a level{}!**".format(name, server_identifier), file=file,
)
async def _find_server_rank(self, user, server):
if not self._db_ready:
return
targetid = str(user.id)
users = []
q = f"servers.{server.id}"
guild_ids = [str(x.id) for x in server.members]
async for userinfo in self.db.users.find({q: {"$exists": "true"}}):
await asyncio.sleep(0)
if userinfo["user_id"] in guild_ids:
try:
server_exp = 0
userid = userinfo["user_id"]
for i in range(userinfo["servers"][str(server.id)]["level"]):
server_exp += self._required_exp(i)
server_exp += userinfo["servers"][str(server.id)]["current_exp"]
users.append((userid, server_exp))
                except Exception:
pass
sorted_list = sorted(users, key=operator.itemgetter(1), reverse=True)
rank = 1
for a_user in sorted_list:
await asyncio.sleep(0)
if a_user[0] == targetid:
return rank
rank += 1
async def _find_server_rep_rank(self, user, server):
if not self._db_ready:
return
users = []
q = f"servers.{server.id}"
guild_ids = [str(x.id) for x in server.members]
async for userinfo in self.db.users.find({"$and": [{q: {"$exists": "true"}}, {"rep": {"$gte": 1}}]}).sort(
"rep", -1
):
await asyncio.sleep(0)
if userinfo["user_id"] in guild_ids:
users.append((userinfo["user_id"], userinfo["rep"]))
sorted_list = sorted(users, key=operator.itemgetter(1), reverse=True)
rank = 1
for a_user in sorted_list:
await asyncio.sleep(0)
if a_user[0] == str(user.id):
return rank
rank += 1
async def _find_server_exp(self, user, server):
if not self._db_ready:
return
server_exp = 0
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
try:
for i in range(userinfo["servers"][str(server.id)]["level"]):
await asyncio.sleep(0)
server_exp += self._required_exp(i)
server_exp += userinfo["servers"][str(server.id)]["current_exp"]
return server_exp
        except Exception:
return server_exp
async def _find_global_rank(self, user):
if not self._db_ready:
return
users = []
async for userinfo in self.db.users.find(({"total_exp": {"$gte": 10}})).sort("total_exp", -1).limit(1000):
await asyncio.sleep(0)
try:
users.append((userinfo["user_id"], userinfo["total_exp"]))
except KeyError:
pass
sorted_list = sorted(users, key=operator.itemgetter(1), reverse=True)
rank = 1
for stats in sorted_list:
await asyncio.sleep(0)
if stats[0] == str(user.id):
return rank
rank += 1
async def _find_global_rep_rank(self, user):
if not self._db_ready:
return
users = []
async for userinfo in self.db.users.find(({"rep": {"$gte": 1}})).sort("rep", -1).limit(1000):
await asyncio.sleep(0)
try:
userid = userinfo["user_id"]
users.append((userid, userinfo["rep"]))
except KeyError:
pass
sorted_list = sorted(users, key=operator.itemgetter(1), reverse=True)
rank = 1
for stats in sorted_list:
await asyncio.sleep(0)
if stats[0] == str(user.id):
return rank
rank += 1
# handles user creation, adding new server, blocking
async def _create_user(self, user, server):
if not self._db_ready:
return
# backgrounds = await self.get_backgrounds() ... This wasn't used here
try:
user_id = f"{user.id}"
userinfo = await self.db.users.find_one({"user_id": user_id})
if not userinfo:
default_profile = await self.config.default_profile()
default_rank = await self.config.default_rank()
default_levelup = await self.config.default_levelup()
new_account = {
"user_id": user_id,
"username": user.name,
"servers": {},
"total_exp": 0,
"profile_background": default_profile,
"rank_background": default_rank,
"levelup_background": default_levelup,
"title": "",
"info": "I am a mysterious person.",
"rep": 0,
"badges": {},
"active_badges": {},
"rep_color": [],
"badge_col_color": [],
"rep_block": 0,
"chat_block": 0,
"last_message": "",
"profile_block": 0,
"rank_block": 0,
}
await self.db.users.insert_one(new_account)
userinfo = await self.db.users.find_one({"user_id": user_id})
if "username" not in userinfo or userinfo["username"] != user.name:
await self.db.users.update_one({"user_id": user_id}, {"$set": {"username": user.name}}, upsert=True)
if "servers" not in userinfo or str(server.id) not in userinfo["servers"]:
await self.db.users.update_one(
{"user_id": user_id},
{"$set": {f"servers.{server.id}.level": 0, f"servers.{server.id}.current_exp": 0,}},
upsert=True,
)
return userinfo
except AttributeError as err:
log.debug("error in user creation", exc_info=err)
except Exception as err:
log.debug("error in user creation", exc_info=err)
async def asyncit(self, iterable):
for i in iterable:
yield i
await asyncio.sleep(0)
@staticmethod
def _truncate_text(text, max_length):
if len(text) > max_length:
if text.strip("$").isdigit():
text = int(text.strip("$"))
return "${:.2E}".format(text)
return text[: max_length - 3] + "..."
return text
    # finds the pixel to center the text
@staticmethod
def _center(start, end, text, font):
dist = end - start
width = font.getsize(text)[0]
start_pos = start + ((dist - width) / 2)
return int(start_pos)
# calculates required exp for next level
@staticmethod
def _required_exp(level: int):
if level < 0:
return 0
return 139 * level + 65
@staticmethod
def _level_exp(level: int):
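        # closed form of sum(_required_exp(i) for i in range(level)):
        # sum(139*i + 65, i = 0..level-1) = 65*level + 139*level*(level-1)/2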
return level * 65 + 139 * level * (level - 1) // 2
@staticmethod
def _find_level(total_exp):
# this is specific to the function above
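        # inverse of _level_exp: solving 69.5*L^2 - 4.5*L - total_exp = 0 with the
        # quadratic formula gives L = (9 + sqrt(81 + 1112*total_exp)) / 278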
return int((1 / 278) * (9 + math.sqrt(81 + 1112 * total_exp)))
@staticmethod
def char_in_font(unicode_char, font):
for cmap in font["cmap"].tables:
if cmap.isUnicode():
if ord(unicode_char) in cmap.cmap:
return True
return False
@staticmethod
def _is_hex(color: str):
if color is not None and len(color) != 4 and len(color) != 7:
return False
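        # accepts shorthand (#abc) or full (#aabbcc) hex color strings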
reg_ex = r"^#(?:[0-9a-fA-F]{3}){1,2}$"
return re.search(reg_ex, str(color))
@checks.is_owner()
@lvladmin.group()
@commands.guild_only()
async def convert(self, ctx):
"""Conversion commands."""
pass
@checks.is_owner()
@convert.command(name="mee6levels")
@commands.guild_only()
async def mee6convertlevels(self, ctx, pages: int):
"""Convert Mee6 levels.
Each page returns 999 users at most.
This command must be run in a channel in the guild to be converted."""
if await self.config.guild(ctx.guild).mentions():
msg = (
"**{}, levelup mentions are on in this server.**\n"
"The bot will ping every user that will be leveled up through this process if you continue.\n"
"Reply with `yes` if you want this conversion to continue.\n"
"If not, reply with `no` and then run `{}lvladmin mention` to turn off mentions before running this command again."
).format(ctx.author.display_name, ctx.prefix)
await ctx.send(msg)
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=15)
            except asyncio.TimeoutError:
return await ctx.send("**Timed out waiting for a response.**")
if pred.result is False:
return await ctx.send("**Command cancelled.**")
failed = 0
for i in range(pages):
await asyncio.sleep(0)
async with self.session.get(
f"https://mee6.xyz/api/plugins/levels/leaderboard/{ctx.guild.id}?page={i}&limit=999"
) as r:
if r.status == 200:
data = await r.json()
else:
return await ctx.send("No data was found within the Mee6 API.")
for userdata in data["players"]:
await asyncio.sleep(0)
# _handle_levelup requires a Member
user = ctx.guild.get_member(int(userdata["id"]))
if not user:
failed += 1
continue
level = userdata["level"]
server = ctx.guild
channel = ctx.channel
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
# get rid of old level exp
old_server_exp = 0
for _i in range(userinfo["servers"][str(server.id)]["level"]):
await asyncio.sleep(0)
old_server_exp += self._required_exp(_i)
userinfo["total_exp"] -= old_server_exp
userinfo["total_exp"] -= userinfo["servers"][str(server.id)]["current_exp"]
# add in new exp
total_exp = self._level_exp(level)
userinfo["servers"][str(server.id)]["current_exp"] = 0
userinfo["servers"][str(server.id)]["level"] = level
userinfo["total_exp"] += total_exp
await self.db.users.update_one(
{"user_id": str(user.id)},
{
"$set": {
"servers.{}.level".format(server.id): level,
"servers.{}.current_exp".format(server.id): 0,
"total_exp": userinfo["total_exp"],
}
},
)
await self._handle_levelup(user, userinfo, server, channel)
await ctx.send(f"{failed} users could not be found and were skipped.")
@checks.is_owner()
@convert.command(name="mee6ranks")
@commands.guild_only()
async def mee6convertranks(self, ctx):
"""Convert Mee6 role rewards.
This command must be run in a channel in the guild to be converted."""
async with self.session.get(f"https://mee6.xyz/api/plugins/levels/leaderboard/{ctx.guild.id}") as r:
if r.status == 200:
data = await r.json()
else:
return await ctx.send("No data was found within the Mee6 API.")
server = ctx.guild
remove_role = None
for role in data["role_rewards"]:
await asyncio.sleep(0)
role_name = role["role"]["name"]
level = role["rank"]
role_obj = discord.utils.find(lambda rol: rol.name == role_name, server.roles)
if role_obj is None:
await ctx.send("**Please make sure the `{}` roles exist!**".format(role_name))
else:
server_roles = await self.db.roles.find_one({"server_id": str(server.id)})
if not server_roles:
new_server = {
"server_id": str(server.id),
"roles": {role_name: {"level": str(level), "remove_role": remove_role}},
}
await self.db.roles.insert_one(new_server)
else:
if role_name not in server_roles["roles"]:
server_roles["roles"][role_name] = {}
server_roles["roles"][role_name]["level"] = str(level)
server_roles["roles"][role_name]["remove_role"] = remove_role
await self.db.roles.update_one(
{"server_id": str(server.id)}, {"$set": {"roles": server_roles["roles"]}}
)
await ctx.send("**The `{}` role has been linked to level `{}`**".format(role_name, level))
@checks.is_owner()
@convert.command(name="tatsulevels")
@commands.guild_only()
async def tatsumakiconvertlevels(self, ctx):
"""Convert Tatsumaki levels.
This command must be run in a channel in the guild to be converted."""
token = await self.bot.get_shared_api_tokens("tatsumaki")
tatsu_token = token.get("api_key", False)
if not tatsu_token:
return await ctx.send(f"You do not have a valid Tatsumaki API key set up. "
f"If you have a key, you can set it via `{ctx.clean_prefix}set api tatsumaki api_key <api_key_here>`\n"
f"Keys are not currently available if you do not have one already as the API is in the process of being revamped.")
if await self.config.guild(ctx.guild).mentions():
msg = (
"**{}, levelup mentions are on in this server.**\n"
"The bot will ping every user that will be leveled up through this process if you continue.\n"
"Reply with `yes` if you want this conversion to continue.\n"
"If not, reply with `no` and then run `{}lvladmin mention` to turn off mentions before running this command again."
).format(ctx.author.display_name, ctx.prefix)
await ctx.send(msg)
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=15)
            except asyncio.TimeoutError:
return await ctx.send("**Timed out waiting for a response.**")
if pred.result is False:
return await ctx.send("**Command cancelled.**")
failed = 0
await asyncio.sleep(0)
async with self.session.get(
f"https://api.tatsumaki.xyz/guilds/{ctx.guild.id}/leaderboard?limit&=-1"
) as r:
if r.status == 200:
data = await r.json()
else:
return await ctx.send("No data was found within the Tastumaki API.")
for userdata in data:
if userdata is None:
continue
await asyncio.sleep(0)
# _handle_levelup requires a Member
user = ctx.guild.get_member(int(userdata["user_id"]))
if not user:
failed += 1
continue
level = self._find_level(userdata["score"])
server = ctx.guild
channel = ctx.channel
# creates user if doesn't exist
await self._create_user(user, server)
userinfo = await self.db.users.find_one({"user_id": str(user.id)})
# get rid of old level exp
old_server_exp = 0
for _i in range(userinfo["servers"][str(server.id)]["level"]):
await asyncio.sleep(0)
old_server_exp += self._required_exp(_i)
userinfo["total_exp"] -= old_server_exp
userinfo["total_exp"] -= userinfo["servers"][str(server.id)]["current_exp"]
# add in new exp
total_exp = self._level_exp(level)
userinfo["servers"][str(server.id)]["current_exp"] = 0
userinfo["servers"][str(server.id)]["level"] = level
userinfo["total_exp"] += total_exp
await self.db.users.update_one(
{"user_id": str(user.id)},
{
"$set": {
"servers.{}.level".format(server.id): level,
"servers.{}.current_exp".format(server.id): 0,
"total_exp": userinfo["total_exp"],
}
},
)
await self._handle_levelup(user, userinfo, server, channel)
await ctx.send(f"{failed} users could not be found and were skipped.")
| 43.000506
| 165
| 0.561664
|
bf0ee2f77da4fa9a88dbc92ea9d528660f594d40
| 1,177
|
py
|
Python
|
domain/entities/message_pool.py
|
singnet/snet-converter-services
|
346b26f8281944a9f47d4bdd1eba54c8fb43e799
|
[
"MIT"
] | null | null | null |
domain/entities/message_pool.py
|
singnet/snet-converter-services
|
346b26f8281944a9f47d4bdd1eba54c8fb43e799
|
[
"MIT"
] | 1
|
2022-03-21T04:43:48.000Z
|
2022-03-21T04:43:48.000Z
|
domain/entities/message_pool.py
|
singnet/snet-converter-services
|
346b26f8281944a9f47d4bdd1eba54c8fb43e799
|
[
"MIT"
] | 4
|
2021-11-30T04:32:59.000Z
|
2022-03-23T07:20:53.000Z
|
from datetime import date
from constants.entity import MessagePoolEntities
from utils.general import datetime_to_str
class MessagePool:
def __init__(self, row_id: int, id: str, name: str, message_group_id: str, is_enabled: bool, created_by: str,
created_at: date, updated_at: date):
self.row_id = row_id
self.id = id
self.name = name
self.message_group_id = message_group_id
self.is_enabled = is_enabled
self.created_by = created_by
self.created_at = datetime_to_str(created_at)
self.updated_at = datetime_to_str(updated_at)
def to_dict(self):
return {
MessagePoolEntities.ROW_ID.value: self.row_id,
MessagePoolEntities.ID.value: self.id,
MessagePoolEntities.NAME.value: self.name,
MessagePoolEntities.MESSAGE_GROUP_ID.value: self.message_group_id,
MessagePoolEntities.IS_ENABLED.value: self.is_enabled,
MessagePoolEntities.CREATED_BY.value: self.created_by,
MessagePoolEntities.CREATED_AT.value: self.created_at,
MessagePoolEntities.UPDATED_AT.value: self.updated_at,
}
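# Hypothetical usage sketch (names are illustrative, not part of this module):
#     pool = MessagePool(1, "uuid", "pool-name", "group-uuid", True, "admin", created_at, updated_at)
#     response_payload = pool.to_dict()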
| 37.967742
| 113
| 0.685641
|
ce3239e836ee709ccc2addc45f5298ebba7684da
| 1,099
|
py
|
Python
|
setup.py
|
lqdc/pefile_py3
|
548fd976a75f0df63500206e8131107b5a63ebeb
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
lqdc/pefile_py3
|
548fd976a75f0df63500206e8131107b5a63ebeb
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
lqdc/pefile_py3
|
548fd976a75f0df63500206e8131107b5a63ebeb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import pefile
import os
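# keep macOS from packing AppleDouble (._*) resource-fork files into the sdist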
os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
os.environ['COPYFILE_DISABLE'] = 'true'
setup(name = 'pefile',
version = pefile.__version__,
description = 'Python PE parsing module',
author = pefile.__author__,
author_email = pefile.__contact__,
install_requires = ['entropy'],
url = 'http://code.google.com/p/pefile/',
download_url = 'http://pefile.googlecode.com/files/pefile-%s.tar.gz' % pefile.__version__,
platforms = ['any'],
classifiers = ['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'],
long_description = "\n".join(pefile.__doc__.split('\n')),
py_modules = ['pefile', 'peutils'],
packages = ['ordlookup'] )
| 32.323529
| 94
| 0.676979
|
cc239ac6337482d9d6876887bc6058e6c0a3a074
| 1,541
|
py
|
Python
|
piccolo_api/jwt_auth/endpoints.py
|
aminalaee/piccolo_api
|
432ff760d013fc3976a9b80a5185c95746d1be0d
|
[
"MIT"
] | null | null | null |
piccolo_api/jwt_auth/endpoints.py
|
aminalaee/piccolo_api
|
432ff760d013fc3976a9b80a5185c95746d1be0d
|
[
"MIT"
] | null | null | null |
piccolo_api/jwt_auth/endpoints.py
|
aminalaee/piccolo_api
|
432ff760d013fc3976a9b80a5185c95746d1be0d
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from abc import abstractproperty
from datetime import datetime, timedelta
import typing as t
import jwt
from starlette.endpoints import HTTPEndpoint
from starlette.exceptions import HTTPException
from starlette.responses import JSONResponse
from starlette.requests import Request
from piccolo.apps.user.tables import BaseUser
class JWTLoginBase(HTTPEndpoint):
@abstractproperty
def _auth_table(self) -> t.Type[BaseUser]:
raise NotImplementedError
@abstractproperty
def _secret(self) -> str:
raise NotImplementedError
@abstractproperty
def _expiry(self) -> timedelta:
raise NotImplementedError
async def post(self, request: Request) -> JSONResponse:
body = await request.json()
username = body.get("username", None)
password = body.get("password", None)
user_id = await self._auth_table.login(
username=username, password=password
)
if not user_id:
raise HTTPException(status_code=401, detail="Login failed")
expiry = datetime.now() + self._expiry
payload = jwt.encode({"user_id": user_id, "exp": expiry}, self._secret)
return JSONResponse({"token": payload})
def jwt_login(
secret: str,
auth_table: t.Type[BaseUser] = BaseUser,
expiry: timedelta = timedelta(days=1),
) -> t.Type[JWTLoginBase]:
class JWTLogin(JWTLoginBase):
_auth_table = auth_table
_secret = secret
_expiry = expiry
return JWTLogin
| 26.568966
| 79
| 0.692408
|
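The login endpoint above signs a PyJWT token whose payload carries user_id and exp, and jwt_login() simply closes over the secret, user table, and expiry to produce a configured HTTPEndpoint subclass. The sketch below shows the encode/decode round trip behind that token; the secret and user id are made-up values, and HS256 is assumed because it is PyJWT's default algorithm for string secrets.

# Sketch of the token round trip behind the endpoint above (illustrative values only).
from datetime import datetime, timedelta
import jwt

SECRET = "example-secret"
expiry = datetime.now() + timedelta(days=1)

token = jwt.encode({"user_id": 42, "exp": expiry}, SECRET)

# Decoding verifies the signature and raises jwt.ExpiredSignatureError once exp has passed.
claims = jwt.decode(token, SECRET, algorithms=["HS256"])
print(claims["user_id"])  # -> 42

Mounting the factory's return value is ordinary Starlette routing, for example Route("/login/", jwt_login(secret=SECRET)), although exercising BaseUser.login also requires a configured Piccolo database.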
b1fbe69014097ce3b625f50ab90117f1e4a96842
| 8,302
|
py
|
Python
|
tsrc/cli/push_gitlab.py
|
pdecat/tsrc
|
472778a473a31a1dc1093f9a5487facbd0bd8643
|
[
"BSD-3-Clause"
] | null | null | null |
tsrc/cli/push_gitlab.py
|
pdecat/tsrc
|
472778a473a31a1dc1093f9a5487facbd0bd8643
|
[
"BSD-3-Clause"
] | null | null | null |
tsrc/cli/push_gitlab.py
|
pdecat/tsrc
|
472778a473a31a1dc1093f9a5487facbd0bd8643
|
[
"BSD-3-Clause"
] | 1
|
2020-09-03T13:34:42.000Z
|
2020-09-03T13:34:42.000Z
|
""" Entry point for tsrc push """
import argparse
import itertools
import textwrap
from typing import cast, Any, List, Optional, Set # noqa
from gitlab import Gitlab
from gitlab.v4.objects import Group, User, Project, ProjectMergeRequest # noqa
from gitlab.exceptions import GitlabGetError
import cli_ui as ui
import tsrc
from tsrc.cli.push import RepositoryInfo
WIP_PREFIX = "WIP: "
class UserNotFound(tsrc.Error):
def __init__(self, username: str) -> None:
self.username = username
super().__init__("No user found with this username : %s" % self.username)
class TooManyUsers(tsrc.Error):
def __init__(self, max_users: int) -> None:
self.max_users = max_users
super().__init__("More than %s users found" % self.max_users)
class AmbiguousUser(tsrc.Error):
def __init__(self, query: str) -> None:
self.query = query
super().__init__("Found more that one user matching query: %s" % self.query)
class NoGitLabToken(tsrc.Error):
def __init__(self) -> None:
message = textwrap.dedent(
"""\
Could not find GitLab token in tsrc config file
Please check https://tankerhq.github.io/tsrc/ref/formats/#tsrcyml_format
for details\
"""
)
super().__init__(message)
class FeatureNotAvailable(tsrc.Error):
def __init__(self, feature: str) -> None:
self.name = feature
message = (
"The '%s' feature is not available on your GitLab installation" % self.name
)
super().__init__(message)
def get_token() -> str:
config = tsrc.parse_tsrc_config()
try:
res = config["auth"]["gitlab"]["token"]
return cast(str, res)
except KeyError:
raise NoGitLabToken() from None
def wipify(title: str) -> str:
if not title.startswith(WIP_PREFIX):
return WIP_PREFIX + title
else:
return title
def unwipify(title: str) -> str:
if title.startswith(WIP_PREFIX):
return title[len(WIP_PREFIX) :]
else:
return title
class PushAction(tsrc.cli.push.PushAction):
def __init__(
self,
repository_info: RepositoryInfo,
args: argparse.Namespace,
gitlab_api: Optional[Gitlab] = None,
) -> None:
super().__init__(repository_info, args)
self.gitlab_api = gitlab_api
self.group = None # type: Optional[Group]
self.project = None # type: Optional[Project]
self.review_candidates = [] # type: List[User]
def _get_group(self, group_name: str) -> Optional[Group]:
assert self.gitlab_api
try:
return self.gitlab_api.groups.get(group_name)
except GitlabGetError as e:
if e.response_code == 404:
return None
else:
raise
def check_gitlab_feature(self, name: str) -> None:
assert self.gitlab_api
# Note: don't worry, the http request is cached under the hood by
# the python-gitlab library. This should be fine
features = self.gitlab_api.features.list()
names = [x.name for x in features]
if name not in names:
raise FeatureNotAvailable(name)
def setup_service(self) -> None:
if not self.gitlab_api:
token = get_token()
self.gitlab_api = Gitlab(
self.repository_info.repository_login_url, private_token=token
)
assert self.project_name
self.project = self.gitlab_api.projects.get(self.project_name)
group_name = self.project_name.split("/")[0]
self.group = self._get_group(group_name)
def handle_assignee(self) -> User:
assert self.requested_assignee
return self.get_reviewer_by_username(self.requested_assignee)
def handle_reviewers(self) -> List[User]:
self.check_gitlab_feature("multiple_merge_request_assignees")
res = list()
for requested_username in self.args.reviewers:
username = requested_username.strip()
approver = self.get_reviewer_by_username(username)
res.append(approver)
return res
def get_reviewer_by_username(self, username: str) -> User:
assert self.project
in_project = self.get_users_matching(self.project.members, username)
if self.group:
in_group = self.get_users_matching(self.group.members, username)
else:
in_group = list()
candidates = list()
seen = set() # type: Set[int]
for user in itertools.chain(in_project, in_group):
if user.id in seen:
continue
candidates.append(user)
seen.add(user.id)
if not candidates:
raise UserNotFound(username)
if len(candidates) > 1:
raise AmbiguousUser(username)
return candidates[0]
def get_users_matching(self, members: Any, query: str) -> List[User]:
res = members.list(active=True, query=query, per_page=100, as_list=False)
if res.next_page:
raise TooManyUsers(100)
return cast(List[User], res)
def post_push(self) -> None:
merge_request = self.ensure_merge_request()
assert self.gitlab_api
if self.args.close:
ui.info_2("Closing merge request #%s" % merge_request.iid)
merge_request.state_event = "close"
merge_request.save()
return
assignee = None
if self.requested_assignee:
assignee = self.handle_assignee()
if assignee:
ui.info_2("Assigning to", assignee.username)
title = self.handle_title(merge_request)
merge_request.title = title
merge_request.remove_source_branch = True
if self.requested_target_branch:
merge_request.target_branch = self.requested_target_branch
if assignee:
merge_request.assignee_id = assignee.id
if self.args.reviewers:
approvers = self.handle_reviewers()
if approvers:
ui.info_2(
"Requesting approvals from", ", ".join(x.name for x in approvers)
)
merge_request.approvals.set_approvers([x.id for x in approvers])
merge_request.save()
if self.args.accept:
merge_request.merge(merge_when_pipeline_succeeds=True)
ui.info(ui.green, "::", ui.reset, "See merge request at", merge_request.web_url)
def handle_title(self, merge_request: ProjectMergeRequest) -> str:
        # If explicitly set, use it
if self.requested_title:
return self.requested_title
else:
# Else change the title if we need to
title = merge_request.title # type: str
if self.args.ready:
return unwipify(title)
if self.args.wip:
return wipify(title)
return title
def find_merge_request(self) -> Optional[ProjectMergeRequest]:
assert self.remote_branch
assert self.project
res = self.project.mergerequests.list(
state="opened", source_branch=self.remote_branch, all=True
)
if len(res) >= 2:
raise tsrc.Error(
"Found more than one opened merge request with the same branch"
)
if not res:
return None
return res[0]
def create_merge_request(self) -> ProjectMergeRequest:
assert self.project
if self.requested_target_branch:
target_branch = self.requested_target_branch
else:
target_branch = self.project.default_branch
assert self.remote_branch
return self.project.mergerequests.create(
{
"source_branch": self.remote_branch,
"target_branch": target_branch,
"title": self.remote_branch,
}
)
def ensure_merge_request(self) -> ProjectMergeRequest:
merge_request = self.find_merge_request()
if merge_request:
ui.info_2("Found existing merge request: !%s" % merge_request.iid)
return merge_request
else:
return self.create_merge_request()
| 32.814229
| 88
| 0.615635
|
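One detail worth noting in the file above: get_reviewer_by_username() merges the project and group member listings with itertools.chain and a seen-set keyed on user id, then treats zero matches and multiple distinct matches as errors. A small stand-alone sketch of that dedup-and-validate step, with plain dicts standing in for python-gitlab User objects:

# Sketch of the duplicate-free merge used by get_reviewer_by_username above.
import itertools

in_project = [{"id": 1, "username": "alice"}]
in_group = [{"id": 1, "username": "alice"}]  # same user listed again via the group

candidates = []
seen = set()
for user in itertools.chain(in_project, in_group):
    if user["id"] in seen:
        continue
    candidates.append(user)
    seen.add(user["id"])

if not candidates:
    raise LookupError("no user found")        # UserNotFound in the file above
if len(candidates) > 1:
    raise LookupError("ambiguous username")   # AmbiguousUser in the file above

print(candidates)  # the duplicate entry coming from the group listing is dropped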
5d139187634ef241474fa9e650422f41e4ddebfb
| 10,237
|
py
|
Python
|
pirates/piratesgui/BarSelectionMenu.py
|
ksmit799/POTCO-PS
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 8
|
2017-01-24T04:33:29.000Z
|
2020-11-01T08:36:24.000Z
|
pirates/piratesgui/BarSelectionMenu.py
|
ksmit799/Pirates-Online-Remake
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 1
|
2017-03-02T18:05:17.000Z
|
2017-03-14T06:47:10.000Z
|
pirates/piratesgui/BarSelectionMenu.py
|
ksmit799/Pirates-Online-Remake
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 11
|
2017-03-02T18:46:07.000Z
|
2020-11-01T08:36:26.000Z
|
# File: B (Python 2.4)
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPGlobals
from pirates.piratesgui import GuiPanel
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.reputation import ReputationGlobals
from pirates.battle import WeaponGlobals
from pirates.economy import EconomyGlobals
from pirates.economy.EconomyGlobals import *
from pirates.piratesbase import Freebooter
from pirates.inventory import ItemGlobals
class BarSelectionMenu(GuiPanel.GuiPanel):
notify = DirectNotifyGlobal.directNotify.newCategory('BarSelectionMenu')
ICON_WIDTH = 0.13
HEIGHT = 0.14999999999999999
SelectionDelay = 0.59999999999999998
def __init__(self, items, command = None):
GuiPanel.GuiPanel.__init__(self, None, 1.0, self.HEIGHT, showClose = 0)
self.items = items
self.icons = []
self.hotkeys = []
self.repMeters = []
self.choice = 0
self.command = command
self.hideTask = None
card = loader.loadModel('models/textureCards/selectionGui')
texCard = card.find('**/main_gui_general_box_over')
self.cursor = DirectFrame(parent = self, state = DGG.DISABLED, relief = None, frameSize = (0, 0.080000000000000002, 0, 0.080000000000000002), pos = (0.080000000000000002, 0, 0.070000000000000007), geom = texCard, geom_scale = 0.12)
self.cursor.setTransparency(1)
self.cursor.resetFrameSize()
card.removeNode()
self.initialiseoptions(BarSelectionMenu)
self.card = loader.loadModel('models/gui/gui_icons_weapon')
self.accept('escape', self._BarSelectionMenu__handleCancel)
self.loadWeaponButtons()
self.hide()
def loadWeaponButtons(self):
for hotkey in self.hotkeys:
hotkey.destroy()
self.hotkeys = []
for icon in self.icons:
icon.destroy()
self.icons = []
for repMeter in self.repMeters:
repMeter.destroy()
self.repMeters = []
self['frameSize'] = (0, self.ICON_WIDTH * len(self.items) + 0.040000000000000001, 0, self.HEIGHT)
self.setX(-((self.ICON_WIDTH * len(self.items) + 0.040000000000000001) / 2.0))
topGui = loader.loadModel('models/gui/toplevel_gui')
kbButton = topGui.find('**/keyboard_button')
for i in range(len(self.items)):
if self.items[i]:
category = WeaponGlobals.getRepId(self.items[i][0])
icon = DirectFrame(parent = self, state = DGG.DISABLED, relief = None, frameSize = (0, 0.080000000000000002, 0, 0.080000000000000002), pos = (self.ICON_WIDTH * i + 0.080000000000000002, 0, 0.082000000000000003))
icon.setTransparency(1)
hotkeyText = 'F%s' % self.items[i][1]
hotkey = DirectFrame(parent = icon, state = DGG.DISABLED, relief = None, text = hotkeyText, text_align = TextNode.ACenter, text_scale = 0.044999999999999998, text_pos = (0, 0), text_fg = PiratesGuiGlobals.TextFG2, text_shadow = PiratesGuiGlobals.TextShadow, image = kbButton, image_scale = 0.059999999999999998, image_pos = (0, 0, 0.01), image_color = (0.5, 0.5, 0.34999999999999998, 1), pos = (0, 0, 0.080000000000000002))
self.hotkeys.append(hotkey)
category = WeaponGlobals.getRepId(self.items[i][0])
if Freebooter.getPaidStatus(base.localAvatar.getDoId()) or Freebooter.allowedFreebooterWeapon(category):
asset = ItemGlobals.getIcon(self.items[i][0])
if asset:
texCard = self.card.find('**/%s' % asset)
icon['geom'] = texCard
icon['geom_scale'] = 0.080000000000000002
icon.resetFrameSize()
self.icons.append(icon)
else:
texCard = topGui.find('**/pir_t_gui_gen_key_subscriber*')
icon['geom'] = texCard
icon['geom_scale'] = 0.20000000000000001
icon.resetFrameSize()
self.icons.append(icon)
repMeter = DirectWaitBar(parent = icon, relief = DGG.SUNKEN, state = DGG.DISABLED, borderWidth = (0.002, 0.002), range = 0, value = 0, frameColor = (0.23999999999999999, 0.23999999999999999, 0.20999999999999999, 1), barColor = (0.80000000000000004, 0.80000000000000004, 0.69999999999999996, 1), pos = (-0.050000000000000003, 0, -0.052499999999999998), hpr = (0, 0, 0), frameSize = (0.0050000000000000001, 0.095000000000000001, 0, 0.012500000000000001))
self.repMeters.append(repMeter)
inv = base.localAvatar.getInventory()
if inv:
repValue = inv.getReputation(category)
(level, leftoverValue) = ReputationGlobals.getLevelFromTotalReputation(category, repValue)
max = ReputationGlobals.getReputationNeededToLevel(category, level)
repMeter['range'] = max
repMeter['value'] = leftoverValue
def selectPrev(self):
if len(self.items) < 1:
return None
self.show()
if len(self.items) > 1:
keepTrying = True
else:
keepTrying = False
while keepTrying:
keepTrying = False
self.choice = self.choice - 1
if self.choice < 0 or self.choice > len(self.items) - 1:
self.choice = len(self.items) - 1
if not Freebooter.getPaidStatus(base.localAvatar.getDoId()):
if self.items[self.choice]:
category = WeaponGlobals.getRepId(self.items[self.choice][0])
if not Freebooter.allowedFreebooterWeapon(category):
keepTrying = True
else:
keepTrying = True
self.items[self.choice]
self.cursor.setPos(self.ICON_WIDTH * self.choice + 0.080000000000000002, 0, 0.071999999999999995)
taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
self.hideTask = taskMgr.doMethodLater(self.SelectionDelay, self.confirmSelection, 'BarSelectHideTask' + str(self.getParent()), extraArgs = [])
def selectNext(self):
if len(self.items) < 1:
return None
self.show()
if len(self.items) > 1:
keepTrying = True
else:
keepTrying = False
while keepTrying:
keepTrying = False
self.choice = self.choice + 1
if self.choice > len(self.items) - 1:
self.choice = 0
if not Freebooter.getPaidStatus(base.localAvatar.getDoId()):
category = WeaponGlobals.getRepId(self.items[self.choice][0])
if not Freebooter.allowedFreebooterWeapon(category):
keepTrying = True
Freebooter.allowedFreebooterWeapon(category)
self.cursor.setPos(self.ICON_WIDTH * self.choice + 0.080000000000000002, 0, 0.071999999999999995)
taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
self.hideTask = taskMgr.doMethodLater(self.SelectionDelay, self.confirmSelection, 'BarSelectHideTask' + str(self.getParent()), extraArgs = [])
def selectChoice(self, weaponId):
if len(self.items) < 1:
return None
if weaponId not in self.items:
return None
self.show()
self.choice = self.items.index(weaponId)
self.cursor.setPos(self.ICON_WIDTH * self.choice + 0.080000000000000002, 0, 0.071999999999999995)
taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
self.hideTask = taskMgr.doMethodLater(self.SelectionDelay * 2, self.hide, 'BarSelectHideTask' + str(self.getParent()), extraArgs = [])
def confirmSelection(self):
self.hide()
if self.command and self.choice < len(self.items):
self.command(self.items[self.choice][0], self.items[self.choice][1], fromWheel = 1)
def update(self, items):
if self.items != items:
self.items = items
self.loadWeaponButtons()
def updateRep(self, category, value):
for i in range(len(self.items)):
repId = WeaponGlobals.getRepId(self.items[i][0])
if repId == category:
(level, leftoverValue) = ReputationGlobals.getLevelFromTotalReputation(category, value)
max = ReputationGlobals.getReputationNeededToLevel(category, level)
if len(self.repMeters) - 1 >= i:
self.repMeters[i]['range'] = max
self.repMeters[i]['value'] = leftoverValue
len(self.repMeters) - 1 >= i
def destroy(self):
if hasattr(self, 'destroyed'):
return None
self.destroyed = 1
taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
self.ignore('escape')
for icon in self.icons:
icon.destroy()
icon = None
self.icons = []
if self.card:
self.card.removeNode()
self.card = None
GuiPanel.GuiPanel.destroy(self)
def _BarSelectionMenu__handleCancel(self):
taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
self.hide()
for item in self.items:
if item and localAvatar.currentWeaponId == item[0]:
index = self.items.index(item)
self.choice = index
return None
continue
def hide(self):
if hasattr(base, 'localAvatar'):
if hasattr(localAvatar.guiMgr.combatTray, 'skillTray'):
localAvatar.guiMgr.combatTray.skillTray.show()
GuiPanel.GuiPanel.hide(self)
| 43.194093
| 468
| 0.598027
|
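selectNext() and selectPrev() above walk the weapon list with wrap-around and keep advancing while the current entry is one a free (non-paid) player may not use. Stripped of the Panda3D GUI code, the core loop looks roughly like the sketch below; allowed() is an illustrative predicate standing in for the Freebooter paid-status checks, and, as in the original, the loop would spin forever if no item were allowed.

# Rough sketch of the wrap-around "next selectable item" loop from selectNext above.
items = ["cutlass", "pistol", "grenade", "voodoo_doll"]

def allowed(item: str) -> bool:
    # Illustrative restriction; the real check combines paid status and weapon category.
    return item != "grenade"

def select_next(choice: int) -> int:
    keep_trying = len(items) > 1
    while keep_trying:
        keep_trying = False
        choice = (choice + 1) % len(items)
        if not allowed(items[choice]):
            keep_trying = True
    return choice

print(select_next(1))  # skips index 2 ("grenade") and lands on index 3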
997cb518a502a972e8451aeb1077e55a7d95d3dc
| 6,719
|
py
|
Python
|
trading_calendars/exchange_calendar_xlon.py
|
quantrocket-llc/trading-calendars
|
b72630cbcb288601c62e61ebe002a9043f9a3112
|
[
"Apache-2.0"
] | 1
|
2020-07-25T06:18:30.000Z
|
2020-07-25T06:18:30.000Z
|
trading_calendars/exchange_calendar_xlon.py
|
quantrocket-llc/trading-calendars
|
b72630cbcb288601c62e61ebe002a9043f9a3112
|
[
"Apache-2.0"
] | 13
|
2021-04-13T06:49:23.000Z
|
2022-03-31T00:08:10.000Z
|
trading_calendars/exchange_calendar_xlon.py
|
quantrocket-llc/trading-calendars
|
b72630cbcb288601c62e61ebe002a9043f9a3112
|
[
"Apache-2.0"
] | 3
|
2020-03-05T23:38:14.000Z
|
2021-12-12T00:31:36.000Z
|
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
import pandas as pd
from pandas.tseries.holiday import (
MO,
DateOffset,
EasterMonday,
GoodFriday,
Holiday,
previous_friday,
weekend_to_monday,
)
from pytz import timezone
from .common_holidays import (
boxing_day,
christmas,
new_years_day,
weekend_boxing_day,
weekend_christmas,
)
from .trading_calendar import HolidayCalendar, TradingCalendar
# Regular Holidays
# ----------------
# New Year's Day
LSENewYearsDay = new_years_day(observance=weekend_to_monday)
# Early May bank holiday has two exceptions based on the 50th and 75th
# anniversary of VE-Day
# 1995-05-01 Early May bank holiday removed for VE-day 50th anniversary
# 2020-05-04 Early May bank holiday removed for VE-day 75th anniversary
# Early May bank holiday pre-1995
MayBank_pre_1995 = Holiday(
"Early May Bank Holiday",
month=5,
offset=DateOffset(weekday=MO(1)),
day=1,
end_date=pd.Timestamp("1994-12-31"),
)
# Early May bank holiday post-1995 and pre-2020
MayBank_post_1995_pre_2020 = Holiday(
"Early May Bank Holiday",
month=5,
offset=DateOffset(weekday=MO(1)),
day=1,
start_date=pd.Timestamp("1996-01-01"),
end_date=pd.Timestamp("2019-12-31"),
)
# Early May bank holiday post 2020
MayBank_post_2020 = Holiday(
"Early May Bank Holiday",
month=5,
offset=DateOffset(weekday=MO(1)),
day=1,
start_date=pd.Timestamp("2021-01-01"),
)
# Spring bank holiday has two exceptions based on the Golden & Diamond Jubilee
# 2002-05-27 Spring bank holiday removed for Golden Jubilee
# 2012-05-28 Spring bank holiday removed for Diamond Jubilee
# Spring bank holiday
SpringBank_pre_2002 = Holiday(
"Spring Bank Holiday",
month=5,
day=31,
offset=DateOffset(weekday=MO(-1)),
end_date=pd.Timestamp("2001-12-31"),
)
SpringBank_post_2002_pre_2012 = Holiday(
"Spring Bank Holiday",
month=5,
day=31,
offset=DateOffset(weekday=MO(-1)),
start_date=pd.Timestamp("2003-01-01"),
end_date=pd.Timestamp("2011-12-31"),
)
SpringBank_post_2012 = Holiday(
"Spring Bank Holiday",
month=5,
day=31,
offset=DateOffset(weekday=MO(-1)),
start_date=pd.Timestamp("2013-01-01"),
)
# Summer bank holiday
SummerBank = Holiday(
"Summer Bank Holiday",
month=8,
day=31,
offset=DateOffset(weekday=MO(-1)),
)
Christmas = christmas()
WeekendChristmas = weekend_christmas()
BoxingDay = boxing_day()
WeekendBoxingDay = weekend_boxing_day()
# Early Closes
# ------------
# If Christmas Eve falls on a weekday, that day is a half day.
# If it falls on a weekend, the preceding Friday is a half day.
ChristmasEve = Holiday(
"Christmas Eve",
month=12,
day=24,
observance=previous_friday,
)
# New Year's eve (or the preceding Friday if it falls on a weekend)
# is a half day. Except for 1999-12-31, when the Queen declared a
# bank holiday.
NewYearsEvePre1999 = Holiday(
"New Year's Eve",
month=12,
day=31,
observance=previous_friday,
end_date=pd.Timestamp("1999-01-01"),
)
NewYearsEvePost2000 = Holiday(
"New Year's Eve",
month=12,
day=31,
observance=previous_friday,
start_date=pd.Timestamp("2000-01-01"),
)
class XLONExchangeCalendar(TradingCalendar):
"""
Exchange calendar for the London Stock Exchange (XLON).
Open Time: 8:00 AM, GMT
Close Time: 4:30 PM, GMT
Regularly-Observed Holidays:
- New Years Day (observed on first business day on/after)
- Good Friday
- Easter Monday
- Early May Bank Holiday (first Monday in May)
- Spring Bank Holiday (last Monday in May)
    - Summer Bank Holiday (last Monday in August)
- Christmas Day
- Dec. 27th (if Christmas is on a weekend)
- Boxing Day
- Dec. 28th (if Boxing Day is on a weekend)
Early Closes:
- Christmas Eve
- New Year's Eve
"""
regular_early_close = time(12, 30)
name = "XLON"
country_code = "GB"
tz = timezone("Europe/London")
open_times = ((None, time(8, 1)),)
close_times = ((None, time(16, 30)),)
@property
def regular_holidays(self):
return HolidayCalendar(
[
LSENewYearsDay,
GoodFriday,
EasterMonday,
MayBank_pre_1995,
MayBank_post_1995_pre_2020,
MayBank_post_2020,
SpringBank_pre_2002,
SpringBank_post_2002_pre_2012,
SpringBank_post_2012,
SummerBank,
Christmas,
WeekendChristmas,
BoxingDay,
WeekendBoxingDay,
]
)
@property
def adhoc_holidays(self):
return [
# VE-Day Anniversary
pd.Timestamp("1995-05-08", tz="UTC"), # 50th Anniversary
pd.Timestamp("2020-05-08", tz="UTC"), # 75th Anniversary
# Queen Elizabeth II Jubilees
# Silver Jubilee
pd.Timestamp("1977-06-07", tz="UTC"),
# Golden Jubilee
pd.Timestamp("2002-06-03", tz="UTC"),
pd.Timestamp("2002-06-04", tz="UTC"),
# Diamond Jubilee
pd.Timestamp("2012-06-04", tz="UTC"),
pd.Timestamp("2012-06-05", tz="UTC"),
# Royal Weddings
# Wedding Day of Princess Anne and Mark Phillips
pd.Timestamp("1973-11-14", tz="UTC"),
# Wedding Day of Prince Charles and Diana Spencer
pd.Timestamp("1981-07-29", tz="UTC"),
# Wedding Day of Prince William and Catherine Middleton
pd.Timestamp("2011-04-29", tz="UTC"),
# Miscellaneous
# Eve of 3rd Millenium A.D.
pd.Timestamp("1999-12-31", tz="UTC"),
]
@property
def special_closes(self):
return [
(
self.regular_early_close,
HolidayCalendar(
[
ChristmasEve,
NewYearsEvePre1999,
NewYearsEvePost2000,
]
),
)
]
| 26.983936
| 78
| 0.624647
|
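The calendar above is assembled from pandas.tseries.holiday rules plus a short list of ad-hoc closures; each Holiday object turns a recurrence description into concrete dates. A quick way to see what one of these rules produces, copying the SpringBank_post_2012 definition from the file (the query range is arbitrary):

# Resolve one of the Holiday rules above to concrete dates (rule copied from SpringBank_post_2012).
import pandas as pd
from pandas.tseries.holiday import MO, DateOffset, Holiday

spring_bank = Holiday(
    "Spring Bank Holiday",
    month=5,
    day=31,
    offset=DateOffset(weekday=MO(-1)),
    start_date=pd.Timestamp("2013-01-01"),
)

# Prints the last Monday of May for each year in the requested range.
print(spring_bank.dates(pd.Timestamp("2013-01-01"), pd.Timestamp("2015-12-31")))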
e16aaeda942a9674b7961745ac3e32c66b6f513c
| 53,758
|
py
|
Python
|
tests/forte/pipeline_test.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
tests/forte/pipeline_test.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
tests/forte/pipeline_test.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for Pipeline.
"""
import os
import unittest
import tempfile
import shutil
from dataclasses import dataclass
from typing import Any, Dict, Iterator, Optional, Set, List
from unittest.mock import MagicMock
import numpy as np
from ddt import ddt, data, unpack
from forte.common import ProcessExecutionException, ProcessorConfigError
from forte.common.exception import ValidationError
from forte.common.configuration import Config
from forte.data.base_pack import PackType
from forte.data.base_reader import PackReader, MultiPackReader
from forte.data.batchers import (
ProcessingBatcher,
FixedSizeRequestDataPackBatcher,
)
from forte.data.caster import MultiPackBoxer
from forte.data.data_pack import DataPack
from forte.data.multi_pack import MultiPack
from forte.data.ontology.top import Generics, Annotation
from forte.data.readers import PlainTextReader, StringReader, OntonotesReader
from forte.data.selector import (
FirstPackSelector,
NameMatchSelector,
SinglePackSelector,
AllPackSelector,
)
from forte.evaluation.base import Evaluator
from forte.pipeline import Pipeline
from forte.processors.base import (
PackProcessor,
RequestPackingProcessor,
MultiPackProcessor,
)
from forte.processors.base.batch_processor import (
Predictor,
)
from forte.processors.misc import PeriodSentenceSplitter
from forte.utils import get_full_module_name
from ft.onto.base_ontology import Token, Sentence, EntityMention, RelationLink
data_samples_root = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
*([os.path.pardir] * 2),
"data_samples",
)
)
onto_specs_samples_root = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
*([os.path.pardir] * 1),
"forte",
"data",
"ontology",
"test_specs",
)
)
@dataclass
class NewType(Generics):
"""A dummy generic type to check the correctness of pipeline execution."""
value: Optional[str] = None
def __init__(self, pack, value):
super().__init__(pack)
self.value = value
class NothingSelector(SinglePackSelector):
"""Select no pack from the :class:`~forte.data.multi_pack.MultiPack`"""
def select(self, m_pack: MultiPack) -> Iterator[DataPack]:
yield from []
class SentenceReader(PackReader):
"""A simple sentence reader for pipeline tests."""
def __init__(self):
super().__init__()
self.count = 0
def _collect(self, file_path) -> Iterator[Any]: # type: ignore
return iter([file_path])
def _cache_key_function(self, text_file: str) -> str:
return os.path.basename(text_file)
def text_replace_operation(self, text: str):
return []
def _parse_pack(self, file_path: str) -> Iterator[DataPack]:
with open(file_path, "r", encoding="utf8") as doc:
for line in doc:
pack = DataPack(file_path)
line = line.strip()
if len(line) == 0:
continue
pack.set_text(line)
Sentence(pack, 0, len(line))
self.count += 1
yield pack
class MultiPackSentenceReader(MultiPackReader):
"""A simple sentence reader for pipeline tests. This creates a multipack
with only one pack inside."""
def __init__(self):
super().__init__()
self.count = 0
def _collect(self, file_path) -> Iterator[Any]: # type: ignore
return iter([file_path])
def _cache_key_function(self, text_file: str) -> str:
return os.path.basename(text_file)
def text_replace_operation(self, text: str):
return []
def _parse_pack(self, file_path: str) -> Iterator[DataPack]: # type: ignore
with open(file_path, "r", encoding="utf8") as doc:
for line in doc:
line = line.strip()
if len(line) == 0:
continue
m_pack = MultiPack()
pack = m_pack.add_pack("pack")
pack.set_text(line)
Sentence(pack, 0, len(line))
self.count += 1
yield m_pack # type: ignore
class MultiPackCopier(MultiPackProcessor):
"""
Create a new pack inside the multi pack, make a copy of the first pack.
"""
def _process(self, input_pack: MultiPack):
pack = input_pack.add_pack("copy")
pack.set_text(input_pack.get_pack_at(0).text)
class DummyRelationExtractor(RequestPackingProcessor):
r"""A dummy relation extractor.
Note that to use :class:`DummyRelationExtractor`, the :attr:`ontology` of
:class:`Pipeline` must be an ontology that includes
``ft.onto.base_ontology.Sentence``.
"""
@classmethod
def define_batcher(cls) -> ProcessingBatcher:
return FixedSizeRequestDataPackBatcher()
@classmethod
def default_configs(cls) -> Dict[str, Any]:
return {
"batcher": {
"context_type": "ft.onto.base_ontology.Sentence",
"requests": {
"ft.onto.base_ontology.Token": [],
"ft.onto.base_ontology.EntityMention": {
"fields": ["ner_type", "tid"]
},
},
}
}
def predict(self, data_batch: Dict) -> Dict[str, List[Any]]:
entities_span = data_batch["EntityMention"]["span"]
entities_tid = data_batch["EntityMention"]["tid"]
pred: Dict = {
"RelationLink": {
"parent.tid": [],
"child.tid": [],
"rel_type": [],
}
}
for tid, entity in zip(entities_tid, entities_span):
parent = []
child = []
rel_type = []
entity_num = len(entity)
for i in range(entity_num):
for j in range(i + 1, entity_num):
parent.append(tid[i])
child.append(tid[j])
rel_type.append("dummy_relation")
pred["RelationLink"]["parent.tid"].append(np.array(parent))
pred["RelationLink"]["child.tid"].append(np.array(child))
pred["RelationLink"]["rel_type"].append(np.array(rel_type))
return pred
def pack(
self,
pack: PackType,
predict_results: Dict[str, List[Any]],
context: Optional[Annotation] = None,
):
# pass
# def pack(self, data_pack: DataPack, output_dict: Optional[Dict] =
# None):
r"""Add corresponding fields to data_pack"""
if predict_results is None:
return
for i in range(len(predict_results["RelationLink"]["parent.tid"])):
for j in range(
len(predict_results["RelationLink"]["parent.tid"][i])
):
link = RelationLink(pack)
link.rel_type = predict_results["RelationLink"]["rel_type"][i][
j
]
parent: EntityMention = pack.get_entry( # type: ignore
predict_results["RelationLink"]["parent.tid"][i][j]
)
link.set_parent(parent)
child: EntityMention = pack.get_entry( # type: ignore
predict_results["RelationLink"]["child.tid"][i][j]
)
link.set_child(child)
class DummyEvaluator(Evaluator):
"""This evaluator does nothing, just for test purpose."""
def consume_next(self, pred_pack: PackType, ref_pack: PackType):
pass
def get_result(self) -> Any:
pass
class DummyPackProcessor(PackProcessor):
def __init__(self):
super().__init__()
# Use to test the initialization behavior.
self.initialize_count = 0
def initialize(self, resources, configs):
super().initialize(resources, configs)
if "successor" in configs["test"] and "test" not in configs["test"]:
raise ProcessorConfigError(
'"test" is necessary as the first '
'step for "successor" in config '
"for test case purpose."
)
self.initialize_count += 1
def _process(self, input_pack: DataPack):
entries = list(input_pack.get_entries_of(NewType))
if len(entries) == 0:
NewType(pack=input_pack, value="[PACK]")
else:
entry = entries[0] # type: ignore
entry.value += "[PACK]"
@classmethod
def default_configs(cls) -> Dict[str, Any]:
return {"test": "test, successor"}
class DummyFixedSizeBatchProcessor(RequestPackingProcessor):
def __init__(self):
super().__init__()
self.counter = 0
def initialize(self, resources, configs: Optional[Config]):
super().initialize(resources, configs)
def predict(self, data_batch: Dict):
self.counter += 1
return data_batch
def pack(
self,
pack: DataPack,
predict_results: Optional[Dict],
context: Optional[Annotation] = None,
):
entries = list(pack.get_entries_of(NewType))
if len(entries) == 0:
NewType(pack=pack, value="[BATCH]")
else:
entry = entries[0] # type: ignore
entry.value += "[BATCH]"
class DummyModel:
"""Dummy Model."""
def __call__(self, batch):
"""Dummy model does nothing."""
pass
class DummyPredictor(Predictor):
"""Dummy Predictor."""
def predict(self, batch):
return {}
@ddt
class PredictorPipelineTest(unittest.TestCase):
@data(2, 4, 8)
def test_pipeline_different_batch_size_chain_predictor(self, batch_size):
"""Tests a chain of Batch->Pack->Batch with different batch sizes."""
data_path = data_samples_root + "/random_texts/0.txt"
pipeline = Pipeline[DataPack]()
pipeline.set_reader(SentenceReader())
pipeline.initialize()
text_extractor_name = "forte.data.extractors.AttributeExtractor"
text_extractor_config = {
"need_pad": True,
"entry_type": "ft.onto.base_ontology.Token",
"attribute": "text",
}
model = DummyModel()
predictor = DummyPredictor()
predictor_config = {
"context_type": "ft.onto.base_ontology.Sentence",
"feature_scheme": {
"text_tag": {
"extractor": {
"class_name": text_extractor_name,
"config": text_extractor_config,
},
"type": "data_input",
},
},
"batcher": {"batch_size": batch_size},
}
predictor.load(model)
nlp = Pipeline[DataPack]()
reader = SentenceReader()
nlp.set_reader(reader)
nlp.add(predictor, config=predictor_config)
nlp.add(DummyEvaluator())
nlp.initialize()
text_extractor = predictor._request["schemes"]["text_tag"]["extractor"]
for pack in pipeline.process_dataset(data_path):
for instance in pack.get(Sentence):
text_extractor.update_vocab(pack, instance)
num_packs = 0
for _ in nlp.process_dataset(data_path):
num_packs += 1
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@ddt
class PipelineTest(unittest.TestCase):
def test_process_next(self):
# Define and config the Pipeline
nlp = Pipeline[DataPack]()
nlp.set_reader(OntonotesReader())
dummy = DummyRelationExtractor()
config = {"batcher": {"batch_size": 5}}
nlp.add(dummy, config=config)
nlp.initialize()
dataset_path = os.path.join(data_samples_root, "ontonotes/00")
# get processed pack from dataset
for pack in nlp.process_dataset(dataset_path):
# get sentence from pack
for sentence in pack.get(Sentence):
sent_text = sentence.text
# second method to get entry in a sentence
tokens = [token.text for token in pack.get(Token, sentence)]
self.assertEqual(sent_text, " ".join(tokens))
def test_pipeline_invalid_config(self):
        # Test an invalid config
nlp = Pipeline[DataPack]()
reader = SentenceReader()
nlp.set_reader(reader)
dummy = DummyPackProcessor()
config = {"test": "successor"}
nlp.add(dummy, config=config)
with self.assertRaises(ProcessorConfigError):
nlp.initialize()
def test_pipeline_pack_processor(self):
"""Tests a pack processor only."""
nlp = Pipeline[DataPack]()
reader = SentenceReader()
nlp.set_reader(reader)
dummy = DummyPackProcessor()
nlp.add(dummy)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[PACK]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
def test_pipeline_batch_processor(self):
"""Tests a batch processor only."""
nlp = Pipeline[DataPack]()
reader = SentenceReader()
nlp.set_reader(reader)
dummy = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": 4,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy, config=config)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[BATCH]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@data(2, 4, 8)
def test_pipeline_different_batch_size_chain(self, batch_size):
"""Tests a chain of Batch->Pack->Batch with different batch sizes."""
nlp = Pipeline[DataPack]()
reader = SentenceReader()
nlp.set_reader(reader)
dummy1 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy1, config=config)
dummy2 = DummyPackProcessor()
nlp.add(component=dummy2)
dummy3 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size * 2,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy3, config=config)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[BATCH][PACK][BATCH]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@data(4, 8, 16)
def test_pipeline_pack_batch_pack_chain(self, batch_size):
"""Tests a chain of Pack->Batch->Pack."""
nlp = Pipeline[DataPack]()
reader = SentenceReader()
nlp.set_reader(reader)
dummy1 = DummyPackProcessor()
nlp.add(component=dummy1)
dummy2 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy2, config=config)
dummy3 = DummyPackProcessor()
nlp.add(component=dummy3)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[PACK][BATCH][PACK]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@data((2, 3), (4, 5), (8, 9), (3, 2), (5, 4), (9, 8))
@unpack
def test_pipeline_batch_pack_batch_diff_size(
self, batch_size1, batch_size2
):
# Tests a chain of Batch->Pack->Batch with different batch sizes.
nlp = Pipeline[DataPack]()
reader = SentenceReader()
nlp.set_reader(reader)
dummy1 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size1,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy1, config=config)
dummy2 = DummyPackProcessor()
nlp.add(component=dummy2)
dummy3 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size2,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy3, config=config)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[BATCH][PACK][BATCH]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@data((2, 3, 4), (4, 5, 3), (8, 9, 7))
@unpack
def test_pipeline_three_stack_batch_diff_size(
self, batch_size1, batch_size2, batch_size3
):
# Tests a chain of Batch->Batch->Batch with different batch sizes.
nlp = Pipeline[DataPack]()
reader = SentenceReader()
nlp.set_reader(reader)
dummy1 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size1,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy1, config=config)
dummy2 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size2,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy2, config=config)
dummy3 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size3,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy3, config=config)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[BATCH][BATCH][BATCH]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@data((2, 3, 4), (4, 5, 3), (8, 9, 7))
@unpack
def test_pipeline_three_stack_diff_size_batch_pack_chain(
self, batch_size1, batch_size2, batch_size3
):
# Tests a chain of Batch->Batch->Batch->Pack with different batch sizes.
nlp = Pipeline[DataPack]()
reader = SentenceReader()
nlp.set_reader(reader)
dummy1 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size1,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy1, config=config)
dummy2 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size2,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy2, config=config)
dummy3 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size3,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy3, config=config)
dummy4 = DummyPackProcessor()
nlp.add(component=dummy4)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[BATCH][BATCH][BATCH][PACK]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@ddt
class MultiPackPipelineTest(unittest.TestCase):
def test_process_multi_next(self):
from forte.data.readers import OntonotesReader
# Define and config the Pipeline
nlp = Pipeline[DataPack]()
nlp.set_reader(OntonotesReader())
pack_name = "test_pack"
nlp.add(MultiPackBoxer(), {"pack_name": pack_name})
nlp.add(
DummyRelationExtractor(),
config={"batcher": {"batch_size": 5}},
selector=NameMatchSelector(),
selector_config={"select_name": pack_name},
)
nlp.initialize()
dataset_path = data_samples_root + "/ontonotes/00"
# get processed pack from dataset
m_pack: MultiPack
for m_pack in nlp.process_dataset(dataset_path):
pack = m_pack.get_pack(pack_name)
# get sentence from pack
for sentence in pack.get(Sentence):
sent_text = sentence.text
# second method to get entry in a sentence
tokens = [token.text for token in pack.get(Token, sentence)]
self.assertEqual(sent_text, " ".join(tokens))
def test_pipeline_multipack_reader(self):
"""Tests a pack processor only."""
nlp = Pipeline[MultiPack]()
reader = MultiPackSentenceReader()
nlp.set_reader(reader)
dummy = DummyPackProcessor()
nlp.add(dummy, selector=FirstPackSelector())
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_pack("pack").get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[PACK]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
def test_pipeline_multipack_selector(self):
"""Tests a batch processor only."""
nlp = Pipeline[MultiPack]()
reader = MultiPackSentenceReader()
nlp.set_reader(reader)
dummy = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": 4,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy, config=config, selector=FirstPackSelector())
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_pack("pack").get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[BATCH]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@data(1, 2, 3)
def test_one_batch_processor(self, batch_size):
nlp = Pipeline[DataPack]()
nlp.set_reader(StringReader())
batch_processor = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(PeriodSentenceSplitter())
nlp.add(batch_processor, config=config)
nlp.initialize()
sentences = [
"This tool is called Forte. The goal of this project to "
"help you build NLP pipelines. NLP has never been made "
"this easy before."
]
pack = nlp.process(sentences)
sent_len = len(list(pack.get(Sentence)))
self.assertEqual(
batch_processor.counter,
(sent_len // batch_size + (sent_len % batch_size > 0)),
)
@data(1, 2, 3)
def test_two_batch_processors(self, batch_size):
nlp = Pipeline[DataPack]()
nlp.set_reader(PlainTextReader())
dummy1 = DummyFixedSizeBatchProcessor()
dummy2 = DummyFixedSizeBatchProcessor()
nlp.add(PeriodSentenceSplitter())
nlp.add(
dummy1,
config={
"batcher": {
"batch_size": batch_size,
"context_type": "ft.onto.base_ontology.Sentence",
}
},
)
nlp.add(
dummy2,
config={
"batcher": {
"batch_size": 2 * batch_size,
"context_type": "ft.onto.base_ontology.Sentence",
}
},
)
nlp.initialize()
data_path = os.path.join(data_samples_root, "random_texts")
pack = nlp.process(data_path)
sent_len = len(list(pack.get(Sentence)))
self.assertEqual(
dummy1.counter,
(sent_len // batch_size + (sent_len % batch_size > 0)),
)
self.assertEqual(
dummy2.counter,
(sent_len // (2 * batch_size) + (sent_len % (2 * batch_size) > 0)),
)
@data(2, 4, 8)
def test_pipeline_multipack_batch_pack_batch_double_size(self, batch_size):
"""Tests a chain of Batch->Pack->Batch with different batch sizes."""
nlp = Pipeline[MultiPack]()
reader = MultiPackSentenceReader()
nlp.set_reader(reader)
dummy1 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy1, config=config, selector=FirstPackSelector())
dummy2 = DummyPackProcessor()
nlp.add(component=dummy2, selector=FirstPackSelector())
dummy3 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size * 2,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy3, config=config, selector=FirstPackSelector())
nlp.initialize()
data_path = os.path.join(data_samples_root, "random_texts", "0.txt")
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_pack("pack").get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[BATCH][PACK][BATCH]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@data(4, 8, 16)
def test_pipeline_multipack_pack_batch_pack_chain(self, batch_size):
"""Tests a chain of Pack->Batch->Pack."""
nlp = Pipeline[MultiPack]()
reader = MultiPackSentenceReader()
nlp.set_reader(reader)
dummy1 = DummyPackProcessor()
nlp.add(component=dummy1, selector=FirstPackSelector())
dummy2 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy2, config=config, selector=FirstPackSelector())
dummy3 = DummyPackProcessor()
nlp.add(component=dummy3, selector=FirstPackSelector())
nlp.initialize()
data_path = os.path.join(data_samples_root, "random_texts", "0.txt")
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_pack("pack").get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[PACK][BATCH][PACK]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@data((2, 3), (4, 5), (8, 9), (3, 2), (5, 4), (9, 8))
@unpack
def test_pipeline_multipack_batch_pack_batch_diff_size(
self, batch_size1, batch_size2
):
# Tests a chain of Batch->Pack->Batch with different batch sizes.
nlp = Pipeline[MultiPack]()
reader = MultiPackSentenceReader()
nlp.set_reader(reader)
dummy1 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size1,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy1, config=config, selector=FirstPackSelector())
dummy2 = DummyPackProcessor()
nlp.add(component=dummy2, selector=FirstPackSelector())
dummy3 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size2,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy3, config=config, selector=FirstPackSelector())
nlp.initialize()
data_path = os.path.join(data_samples_root, "random_texts", "0.txt")
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_pack("pack").get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[BATCH][PACK][BATCH]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@data((2, 3, 4), (4, 5, 3), (8, 9, 7))
@unpack
def test_pipeline_multipack_three_stack_batch_diff(
self, batch_size1, batch_size2, batch_size3
):
# Tests a chain of Batch->Batch->Batch with different batch sizes.
nlp = Pipeline[MultiPack]()
reader = MultiPackSentenceReader()
nlp.set_reader(reader)
dummy1 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size1,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy1, config=config, selector=FirstPackSelector())
dummy2 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size2,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy2, config=config, selector=FirstPackSelector())
dummy3 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size3,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy3, config=config, selector=FirstPackSelector())
nlp.initialize()
data_path = os.path.join(data_samples_root, "random_texts", "0.txt")
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_pack("pack").get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[BATCH][BATCH][BATCH]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
@data((2, 3, 4), (4, 5, 3), (8, 9, 7))
@unpack
def test_pipeline_multipack_three_stack_batch_diff_size_pack_chain(
self, batch_size1, batch_size2, batch_size3
):
# Tests a chain of Batch->Batch->Batch->Pack with different batch sizes.
nlp = Pipeline[MultiPack]()
reader = MultiPackSentenceReader()
nlp.set_reader(reader)
dummy1 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size1,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy1, config=config, selector=FirstPackSelector())
dummy2 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size2,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy2, config=config, selector=FirstPackSelector())
dummy3 = DummyFixedSizeBatchProcessor()
config = {
"batcher": {
"batch_size": batch_size3,
"context_type": "ft.onto.base_ontology.Sentence",
},
}
nlp.add(component=dummy3, config=config, selector=FirstPackSelector())
dummy4 = DummyPackProcessor()
nlp.add(component=dummy4, selector=FirstPackSelector())
nlp.initialize()
data_path = os.path.join(data_samples_root, "random_texts", "0.txt")
num_packs = 0
for pack in nlp.process_dataset(data_path):
types = list(pack.get_pack("pack").get_entries_of(NewType))
num_packs += 1
self.assertEqual(len(types), 1)
self.assertEqual(types[0].value, "[BATCH][BATCH][BATCH][PACK]")
# check that all packs are yielded
self.assertEqual(num_packs, reader.count)
def test_empty_selector(self):
"""
        Test that a selector that doesn't select anything performs well in the
pipeline.
"""
for pack in (
Pipeline()
.set_reader(MultiPackSentenceReader())
.add(DummyPackProcessor(), selector=NothingSelector())
.initialize()
.process_dataset(
os.path.join(data_samples_root, "random_texts", "0.txt")
)
):
# Because no packs are selected, we do not have any entries added.
self.assertTrue(pack.get_pack("pack").num_generics_entries == 0)
def test_caster_all_selector(self):
"""
        Test that the caster and the all-pack selector work well together.
        The caster converts a single pack into a multi pack, and the pack
        copier then creates a new pack inside it. The all-pack selector selects
        all the packs from the multi pack. This test makes sure this pipeline
        works OK.
"""
mp: MultiPack
for mp in (
Pipeline()
.set_reader(SentenceReader())
.add(MultiPackBoxer())
.add(MultiPackCopier())
.add(DummyPackProcessor(), selector=AllPackSelector())
.initialize()
.process_dataset(
os.path.join(data_samples_root, "random_texts", "0.txt")
)
):
num_pack = 0
for pack in mp.packs:
num_pack += 1
entries = list(pack.get(NewType))
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0].value, "[PACK]")
self.assertEqual(num_pack, 2)
class DummySentenceReaderOne(SentenceReader):
def record(self, record_meta: Dict[str, Set[str]]):
record_meta["Sentence"] = {"1", "2", "3"}
class DummySentenceReaderTwo(SentenceReader):
def record(self, record_meta: Dict[str, Set[str]]):
record_meta["ft.onto.example_ontology.Word"] = {
"string_features",
"word_forms",
"token_ranks",
}
class DummyPackProcessorOne(DummyPackProcessor):
def record(self, record_meta: Dict[str, Set[str]]):
record_meta["Token"] = {"1", "2"}
record_meta["Document"] = {"2"}
def expected_types_and_attributes(self):
expectation: Dict[str, Set[str]] = {"Sentence": {"1", "2", "3"}}
return expectation
class DummyPackProcessorTwo(DummyPackProcessor):
def record(self, record_meta: Dict[str, Set[str]]):
record_meta["Token"] = {"1", "2"}
record_meta["Document"] = {"2"}
def expected_types_and_attributes(self):
expectation: Dict[str, Set[str]] = {"Document": {"1", "2", "3", "4"}}
return expectation
class DummyPackProcessorThree(DummyPackProcessor):
def expected_types_and_attributes(self):
expectation: Dict[str, Set[str]] = {
"ft.onto.example_import_ontology.Token": {"pos", "lemma"}
}
return expectation
class DummyPackProcessorFour(DummyPackProcessor):
def _process(self, input_pack: DataPack):
for sentence in input_pack.get(entry_type=Sentence):
scores = {"a": 0.1, "b": 0.9}
sentence.ab = scores
class DummyPackProcessorFive(DummyPackProcessor):
def _process(self, input_pack: DataPack):
for sentence in input_pack.get(entry_type=Sentence):
scores = 0.1
sentence.classification = scores
class DummyEvaluatorOne(Evaluator):
"""This evaluator does nothing, just for test purpose."""
def pred_pack_record(self, record_meta: Dict[str, Set[str]]):
record_meta["Token"] = {"1", "2"}
def consume_next(self, pred_pack: PackType, ref_pack: PackType):
pred_pack_expectation: Dict[str, Set[str]] = {
"Sentence": {"1", "2", "3"}
}
ref_pack_expectation: Dict[str, Set[str]] = {
"Sentence": {"1", "2", "3"}
}
self.expected_types_and_attributes(
pred_pack_expectation, ref_pack_expectation
)
self.check_record(pred_pack, ref_pack)
self.writes_record(pred_pack, ref_pack)
def get_result(self):
pass
class DummyEvaluatorTwo(Evaluator):
"""This evaluator does nothing, just for test purpose."""
def pred_pack_record(self, record_meta: Dict[str, Set[str]]):
record_meta["Token"] = {"1", "2"}
def consume_next(self, pred_pack: PackType, ref_pack: PackType):
pred_pack_expectation: Dict[str, Set[str]] = {
"Sentence": {"1", "2", "3"}
}
ref_pack_expectation: Dict[str, Set[str]] = {
"Document": {"1", "2", "3"}
}
self.expected_types_and_attributes(
pred_pack_expectation, ref_pack_expectation
)
self.check_record(pred_pack, ref_pack)
self.writes_record(pred_pack, ref_pack)
def get_result(self):
pass
class DummyEvaluatorThree(Evaluator):
"""This evaluator does nothing, just for test purpose."""
def consume_next(self, pred_pack: PackType, ref_pack: PackType):
pred_pack_expectation: Dict[str, Set[str]] = {
"ft.onto.example_import_ontology.Token": {"pos", "lemma"}
}
ref_pack_expectation: Dict[str, Set[str]] = {
"ft.onto.example_import_ontology.Token": {"pos", "lemma"}
}
self.expected_types_and_attributes(
pred_pack_expectation, ref_pack_expectation
)
self.check_record(pred_pack, ref_pack)
self.writes_record(pred_pack, ref_pack)
def get_result(self):
pass
class DummyEvaluatorFour(Evaluator):
"""This evaluator does nothing, just for test purpose."""
def pred_pack_record(self, record_meta: Dict[str, Set[str]]):
record_meta["Token"] = {"1", "2"}
def consume_next(self, pred_pack: PackType, ref_pack: PackType):
pred_pack_expectation: Dict[str, Set[str]] = {
"Sentence": {"1", "2", "3"}
}
ref_pack_expectation: Dict[str, Set[str]] = {
"Sentence": {"1", "2", "3"}
}
self.expected_types_and_attributes(
pred_pack_expectation, ref_pack_expectation
)
self.check_record(pred_pack, ref_pack)
self.writes_record(pred_pack, ref_pack)
def get_result(self):
return "Reference name of DummyEvaluatorFour is ref_dummy"
class RecordCheckPipelineTest(unittest.TestCase):
def test_pipeline_reader_record_writing(self):
"""Tests reader record writing"""
nlp = Pipeline[DataPack](enforce_consistency=True)
reader = DummySentenceReaderOne()
nlp.set_reader(reader)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
pack = nlp.process(data_path)
self.assertEqual(pack._meta.record["Sentence"], {"1", "2", "3"})
def test_pipeline_processor_record_writing(self):
"""Tests the processor record writing"""
nlp = Pipeline[DataPack](enforce_consistency=True)
reader = DummySentenceReaderOne()
nlp.set_reader(reader)
dummy = DummyPackProcessorOne()
nlp.add(dummy)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
pack = nlp.process(data_path)
self.assertEqual(pack._meta.record["Sentence"], {"1", "2", "3"})
self.assertEqual(pack._meta.record["Token"], {"1", "2"})
self.assertEqual(pack._meta.record["Document"], {"2"})
def test_pipeline_processor_record_checking_mismatching_error(self):
"""Tests the behavior of processor raising error exception
and behavior of set enforce_consistency for the pipeline"""
nlp = Pipeline[DataPack](enforce_consistency=True)
reader = DummySentenceReaderOne()
nlp.set_reader(reader)
dummy = DummyPackProcessorTwo()
nlp.add(dummy)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
with self.assertRaises(ProcessExecutionException):
nlp.process(data_path)
nlp.enforce_consistency(enforce=False)
nlp.initialize()
nlp.process(data_path)
def test_pipeline_evaluator_record_writing(self):
"""Tests the evaluator record writing"""
nlp = Pipeline[DataPack](enforce_consistency=True)
reader = DummySentenceReaderOne()
nlp.set_reader(reader)
dummy = DummyEvaluatorOne()
nlp.add(dummy)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
pack = nlp.process(data_path)
self.assertEqual(pack._meta.record["Sentence"], {"1", "2", "3"})
self.assertEqual(pack._meta.record["Token"], {"1", "2"})
def test_pipeline_evaluator_record_checking_mismatching_error(self):
"""Tests the behavior of evaluator raising error exception"""
nlp = Pipeline[DataPack](enforce_consistency=True)
reader = DummySentenceReaderOne()
nlp.set_reader(reader)
dummy = DummyEvaluatorTwo()
nlp.add(dummy)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
with self.assertRaises(ProcessExecutionException):
nlp.process(data_path)
def test_reuse_processor(self):
# Create a basic pipeline of multi packs that each contain two packs (by copying)
nlp = (
Pipeline()
.set_reader(SentenceReader())
.add(MultiPackBoxer())
.add(MultiPackCopier())
)
# Create one shared instance of this processor
dummy = DummyPackProcessor()
nlp.add(
dummy,
config={"test": "dummy1"},
selector=NameMatchSelector(),
selector_config={"select_name": "default"},
)
# This will not add the component successfully because the processor is
# initialized.
with self.assertRaises(ProcessorConfigError):
nlp.add(dummy, config={"test": "dummy2"})
# This will add the component, with a different selector
nlp.add(
dummy,
selector=NameMatchSelector(),
selector_config={"select_name": "copy"},
)
nlp.initialize()
# Check that the two processors have the same name.
self.assertEqual(
nlp.components[2].name, get_full_module_name(DummyPackProcessor)
)
self.assertEqual(
nlp.components[3].name, get_full_module_name(DummyPackProcessor)
)
# Check that the two processors are also the same instance.
self.assertEqual(nlp.components[2], nlp.components[3])
# Check that the initialization is only done once, here the count
# will only be 1.
self.assertEqual(nlp.components[2].initialize_count, 1)
self.assertEqual(nlp.components[3].initialize_count, 1)
# Check that the configuration is not changed by the second insertion.
self.assertEqual(nlp.components[3].configs.test, "dummy1")
# Run it once to make sure it can run.
dataset_path = os.path.join(data_samples_root, "random_texts", "0.txt")
nlp.run(dataset_path)
# Check that initialization will be false after `run`, because it
# calls the `finish` function of all components.
self.assertFalse(nlp.components[2].is_initialized)
self.assertFalse(nlp.components[3].is_initialized)
# Check that we are able to re-initialize the pipeline.
nlp.initialize() # initialize the first time.
nlp.initialize() # re-initialize.
# Check the name again after re-initialize.
self.assertEqual(
nlp.components[2].name, get_full_module_name(DummyPackProcessor)
)
self.assertEqual(
nlp.components[3].name, get_full_module_name(DummyPackProcessor)
)
# Obtain the results from the multipack.
mp: MultiPack = nlp.process(dataset_path)
pack: DataPack = mp.get_pack("default")
pack_copy: DataPack = mp.get_pack("copy")
# Check that both packs are processed by the DummyPackProcessor once,
# because we use different selectors.
self.assertEqual(pack.get_single(NewType).value, "[PACK]")
self.assertEqual(pack_copy.get_single(NewType).value, "[PACK]")
def test_pipeline_processor_subclass_type_checking(self):
r"""Tests the processor record subclass type checking for processor with
pipeline initialized with ontology specification file"""
onto_specs_file_path = os.path.join(
onto_specs_samples_root, "example_merged_ontology.json"
)
nlp = Pipeline[DataPack](
ontology_file=onto_specs_file_path, enforce_consistency=True
)
reader = DummySentenceReaderTwo()
nlp.set_reader(reader)
dummy = DummyPackProcessorThree()
nlp.add(dummy)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
pack = nlp.process(data_path)
self.assertEqual(
pack._meta.record,
{
"ft.onto.example_ontology.Word": {
"string_features",
"word_forms",
"token_ranks",
}
},
)
def test_pipeline_evaluator_subclass_type_checking(self):
r"""Tests the processor record subclass type checking for evaluator with
pipeline initialized with ontology specification file"""
onto_specs_file_path = os.path.join(
onto_specs_samples_root, "example_merged_ontology.json"
)
nlp = Pipeline[DataPack](
ontology_file=onto_specs_file_path, enforce_consistency=True
)
reader = DummySentenceReaderTwo()
nlp.set_reader(reader)
dummy = DummyEvaluatorThree()
nlp.add(dummy)
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
pack = nlp.process(data_path)
self.assertEqual(
pack._meta.record,
{
"ft.onto.example_ontology.Word": {
"string_features",
"word_forms",
"token_ranks",
}
},
)
def test_pipeline_processor_get_eval_result_by_ref_name(self):
"""Tests to get the processor result by it's reference name"""
nlp = Pipeline[DataPack](enforce_consistency=True)
reader = DummySentenceReaderOne()
nlp.set_reader(reader)
dummy = DummyEvaluatorFour()
nlp.add(dummy, ref_name="ref_dummy")
nlp.initialize()
data_path = data_samples_root + "/random_texts/0.txt"
pack = nlp.process(data_path)
self.assertEqual(
nlp.get_component("ref_dummy").get_result(),
"Reference name of DummyEvaluatorFour is ref_dummy",
)
def test_pipeline_processor_invalid_ref_name(self):
"""Tests to get the processor result by it's reference name"""
nlp = Pipeline[DataPack](enforce_consistency=True)
reader = DummySentenceReaderOne()
nlp.set_reader(reader)
dummy = DummyEvaluatorFour()
nlp.add(dummy, ref_name="ref_dummy")
dummy1 = DummyEvaluatorOne()
with self.assertRaises(ValidationError):
nlp.add(dummy1, ref_name="ref_dummy")
nlp.initialize()
class ExportPipelineTest(unittest.TestCase):
def setUp(self) -> None:
self.nlp = Pipeline()
self.nlp.set_reader(SentenceReader())
self.export_env_var = "FORTE_EXPORT_PATH"
def test_do_nothing(self):
r"""Should do nothing if FORTE_EXPORT_PATH is not set"""
self.nlp.save = MagicMock()
self.nlp.export()
self.nlp.save.assert_not_called()
def test_default_export(self):
r"""Should export pipeline if FORTE_EXPORT_PATH is set"""
temp_export_dir = tempfile.TemporaryDirectory()
os.environ[self.export_env_var] = temp_export_dir.name
ppl_export_path = self.nlp.export()
self.assertTrue(os.path.isfile(ppl_export_path))
def test_auto_create_export_dir(self):
r"""Test if auto-gen dir if it does not exist"""
temp_export_dir = tempfile.TemporaryDirectory()
export_dir = os.path.join(temp_export_dir.name, "randome_dir")
os.environ[self.export_env_var] = export_dir
ppl_export_path = self.nlp.export()
self.assertTrue(os.path.isfile(ppl_export_path))
def test_named_export(self):
r"""Test the export name"""
temp_export_dir = tempfile.TemporaryDirectory()
os.environ[self.export_env_var] = temp_export_dir.name
export_name = "test-commit"
self.nlp.export(export_name)
ppl_export_path = os.path.join(
temp_export_dir.name, f"{export_name}.yml"
)
self.assertTrue(os.path.isfile(ppl_export_path))
def test_two_default_export(self):
r"""Test two exported pipeline with default name"""
temp_export_dir = tempfile.TemporaryDirectory()
os.environ[self.export_env_var] = temp_export_dir.name
export_path_1 = self.nlp.export()
export_path_2 = self.nlp.export()
self.assertEqual(len(os.listdir(temp_export_dir.name)), 2)
self.assertTrue(os.path.isfile(export_path_1))
self.assertTrue(os.path.isfile(export_path_2))
def test_conflict_export(self):
r"""Test conflicting pipeline exporting"""
temp_export_dir = tempfile.TemporaryDirectory()
os.environ[self.export_env_var] = temp_export_dir.name
export_name = "test-export"
self.nlp.export(export_name)
with self.assertRaises(ValueError):
self.nlp.export(export_name)
def test_two_named_export(self):
r"""Test two named pipeline exporting"""
temp_export_dir = tempfile.TemporaryDirectory()
os.environ[self.export_env_var] = temp_export_dir.name
export_name_1 = "test-export-1"
export_name_2 = "test-export-2"
self.nlp.export(export_name_1)
self.nlp.export(export_name_2)
export_path_1 = os.path.join(
temp_export_dir.name, f"{export_name_1}.yml"
)
export_path_2 = os.path.join(
temp_export_dir.name, f"{export_name_2}.yml"
)
self.assertTrue(os.path.isfile(export_path_1))
self.assertTrue(os.path.isfile(export_path_2))
def tearDown(self) -> None:
if self.export_env_var in os.environ:
del os.environ[self.export_env_var]
if __name__ == "__main__":
unittest.main()
| 34.504493
| 80
| 0.600804
|
0801d3dca72ff1c72d68b0c358bef190ef3ef699
| 599
|
py
|
Python
|
Pd patch and scripts/listener.py
|
Apolotary/NetsendPD
|
8a407a8c0b33b0352b2b1a4877f68f1ffbd6c00a
|
[
"MIT"
] | 1
|
2016-06-03T05:46:01.000Z
|
2016-06-03T05:46:01.000Z
|
Pd patch and scripts/listener.py
|
Apolotary/NetsendPD
|
8a407a8c0b33b0352b2b1a4877f68f1ffbd6c00a
|
[
"MIT"
] | null | null | null |
Pd patch and scripts/listener.py
|
Apolotary/NetsendPD
|
8a407a8c0b33b0352b2b1a4877f68f1ffbd6c00a
|
[
"MIT"
] | null | null | null |
from zeroconf import raw_input, ServiceBrowser, Zeroconf
import socket
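# Browse the local network over mDNS for "_netsendpd._udp" services and print
# a line whenever a service is added or removed, until the user presses enter.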
class MyListener(object):
def removeService(self, zeroconf, type, name):
print("Service %s removed" % (name,))
def addService(self, zeroconf, type, name):
info = zeroconf.getServiceInfo(type, name)
print("Service %s added, service info: %s %s" % (name, info.name, socket.inet_ntoa(info.address)))
zeroconf = Zeroconf()
listener = MyListener()
browser = ServiceBrowser(zeroconf, "_netsendpd._udp.local.", listener)
try:
raw_input("Press enter to exit...\n\n")
finally:
zeroconf.close()
| 31.526316
| 106
| 0.69783
|
038ddef0c2e56da50d496bc159b36f480bebe911
| 599
|
py
|
Python
|
sphinx-sources/Examples/Commands/PipFFT.py
|
jccmak/lightpipes
|
1a296fe08bdd97fc9a0e11f92bab25c85f68e57d
|
[
"BSD-3-Clause"
] | 132
|
2017-03-15T15:28:46.000Z
|
2022-03-09T00:28:25.000Z
|
sphinx-sources/Examples/Commands/PipFFT.py
|
jccmak/lightpipes
|
1a296fe08bdd97fc9a0e11f92bab25c85f68e57d
|
[
"BSD-3-Clause"
] | 63
|
2017-01-26T15:46:55.000Z
|
2022-01-25T04:50:59.000Z
|
sphinx-sources/Examples/Commands/PipFFT.py
|
jccmak/lightpipes
|
1a296fe08bdd97fc9a0e11f92bab25c85f68e57d
|
[
"BSD-3-Clause"
] | 37
|
2017-02-17T16:11:38.000Z
|
2022-01-25T18:03:47.000Z
|
from LightPipes import *
import matplotlib.pyplot as plt
size=15*mm
wavelength=1*um
N=150
z=1*m
R=3*mm
Rf=1.5*mm
seed=7
MaxPhase=1.5
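# Start from a uniform field, clip it with a circular aperture and a random phase
# screen, propagate a distance z with Fresnel, then spatially filter the field in
# the Fourier plane (PipFFT plus a smaller aperture) and compare the two intensities.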
F=Begin(size,wavelength,N);
F=CircAperture(R,0,0,F);
F=RandomPhase(seed,MaxPhase,F);
F=Fresnel(z,F);
I0=Intensity(0,F);
F=PipFFT(1,F);
F=CircAperture(Rf,0,0,F);
F=PipFFT(-1,F);
I1=Intensity(1,F);
fig=plt.figure(figsize=(10,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.imshow(I0,cmap='rainbow'); ax1.axis('off'); ax1.set_title('Unfiltered intensity')
ax2.imshow(I1,cmap='rainbow'); ax2.axis('off'); ax2.set_title('Filtered intensity')
plt.show()
| 19.966667
| 85
| 0.714524
|
5378e86f70c9c7ed7f9ea4dc1e62bf6c49a5b37f
| 3,413
|
py
|
Python
|
access_face_vision/source/camera.py
|
accessai/access-face-vision
|
04469ebc03ac9644a44bbdb90951f1821dca0f6d
|
[
"Apache-2.0"
] | 3
|
2019-07-19T17:59:19.000Z
|
2019-07-21T16:07:43.000Z
|
access_face_vision/source/camera.py
|
accessai/access-face-vision
|
04469ebc03ac9644a44bbdb90951f1821dca0f6d
|
[
"Apache-2.0"
] | 9
|
2019-07-19T17:38:11.000Z
|
2022-03-11T23:53:13.000Z
|
access_face_vision/source/camera.py
|
accessai/access-face-vision
|
04469ebc03ac9644a44bbdb90951f1821dca0f6d
|
[
"Apache-2.0"
] | 1
|
2019-07-21T16:07:54.000Z
|
2019-07-21T16:07:54.000Z
|
import signal
from time import time, sleep
import math
import cv2
from access_face_vision import utils
from access_face_vision.component import AccessComponent
from access_face_vision.access_logger import get_logger
class Camera(AccessComponent):
def __init__(self, cmd_args, out_queue, log_que, log_level, kill_app, draw_frames=False):
super(Camera, self).__init__(capture,
cmd_args=cmd_args,
out_queue=out_queue,
log_que=log_que,
log_level=log_level,
kill_app=kill_app,
draw_frames=draw_frames)
def capture(cmd_args, out_queue, log_que, log_level, kill_proc, kill_app, draw_frames):
logger = get_logger(log_que, log_level)
device = cmd_args.camera_url if cmd_args.camera_url != '' else cmd_args.camera_index
REQUIRED_FPS = cmd_args.fps
CAMERA_WAIT = cmd_args.camera_wait
img_dim = (cmd_args.img_width, cmd_args.img_height)
NUM_FRAME_TO_SKIP = 2
logger.info('Acquiring camera. Please wait...')
sleep(CAMERA_WAIT)
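# img_red_factor is the ratio used to shrink the RGB copy of each frame
# that is pushed to the output queue for downstream consumers.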
factor = cmd_args.img_red_factor
cap = cv2.VideoCapture(device)
logger.info('Camera acquired')
cap.set(cv2.CAP_PROP_FRAME_WIDTH, img_dim[0])
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, img_dim[1])
logger.info("Capturing Images with dimension: {}".format(img_dim))
def exit_gracefully(signum, frame):
kill_app.value = 1
logger.warning('Terminating camera process due to kill signal')
cap.release()
cv2.destroyAllWindows()
utils.clean_queue(out_queue)
signal.signal(signal.SIGINT, exit_gracefully)
signal.signal(signal.SIGTERM, exit_gracefully)
skip_count= 0
tik = time()
count=0
if cap.isOpened():
logger.info('Camera opened')
else:
logger.error('Unable to open camera')
while cap.isOpened():
if kill_proc.value > 0 or kill_app.value > 0:
logger.warning('Breaking camera process loop')
break
ret, frame = cap.read()
tok = time()
count +=1
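# Roughly every two seconds, estimate the camera's actual FPS and recompute
# how many frames to skip so that about REQUIRED_FPS frames are forwarded per second.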
if (tok-tik) > 2.0:
tik = time()
camera_fps = math.ceil(count / 2)
NUM_FRAME_TO_SKIP = math.ceil((camera_fps - REQUIRED_FPS) / REQUIRED_FPS)
count=0
if ret is True:
cv2.flip(frame, 1, frame)
if skip_count >= NUM_FRAME_TO_SKIP:
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
red_frame_rgb = cv2.resize(frame_rgb, (int(frame.shape[1] * factor), int(frame.shape[0] * factor)))
out_queue.put({'cap_time': time(), 'raw_frame': frame,
'small_rgb_frame': red_frame_rgb, 'factor': factor}, block=True, timeout=5)
skip_count=0
if draw_frames:
logger.info("Required frame size {}. Captured size {}".format(img_dim, frame.shape))
cv2.imshow('CameraFeed', frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
skip_count += 1
else:
break
kill_app.value = 1
cap.release()
utils.clean_queue(out_queue)
cv2.destroyAllWindows()
logger.info('Exiting from Camera Process')
| 33.135922
| 115
| 0.59625
|
30bf378a5a5aad17ddb190315250c16fe2123069
| 135
|
py
|
Python
|
textattack/constraints/grammaticality/__init__.py
|
tahmid-kazi/TextAttack
|
cc6f9af25df0f14a23ca11f8380b83c160aeb5bc
|
[
"MIT"
] | 1
|
2020-12-04T18:05:44.000Z
|
2020-12-04T18:05:44.000Z
|
textattack/constraints/grammaticality/__init__.py
|
tahmid-kazi/TextAttack
|
cc6f9af25df0f14a23ca11f8380b83c160aeb5bc
|
[
"MIT"
] | null | null | null |
textattack/constraints/grammaticality/__init__.py
|
tahmid-kazi/TextAttack
|
cc6f9af25df0f14a23ca11f8380b83c160aeb5bc
|
[
"MIT"
] | null | null | null |
from . import language_models
from .language_tool import LanguageTool
from .part_of_speech import PartOfSpeech
from .cola import COLA
| 22.5
| 40
| 0.844444
|
edcc4626acba4faf33d6ded9b81857357293b4f6
| 935
|
py
|
Python
|
azure-iot-hub/azure/iot/hub/protocol/models/interface.py
|
olivakar/azure-iot-sdk-python
|
d8f2403030cf94510d381d8d5ac37af6e8d306f8
|
[
"MIT"
] | null | null | null |
azure-iot-hub/azure/iot/hub/protocol/models/interface.py
|
olivakar/azure-iot-sdk-python
|
d8f2403030cf94510d381d8d5ac37af6e8d306f8
|
[
"MIT"
] | null | null | null |
azure-iot-hub/azure/iot/hub/protocol/models/interface.py
|
olivakar/azure-iot-sdk-python
|
d8f2403030cf94510d381d8d5ac37af6e8d306f8
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Interface(Model):
"""Interface.
:param name: Full name of digital twin interface.
:type name: str
:param properties: List of all properties in an interface.
:type properties: dict[str, ~protocol.models.Property]
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"properties": {"key": "properties", "type": "{Property}"},
}
def __init__(self, **kwargs):
super(Interface, self).__init__(**kwargs)
self.name = kwargs.get("name", None)
self.properties = kwargs.get("properties", None)
| 32.241379
| 76
| 0.542246
|
b5c456a5fcccaac9a49bcdbd3112af861154a385
| 340
|
py
|
Python
|
src/vovaProject/urls.py
|
vova-lantsov-dev/django-library
|
a424416d7688cba1bdb2b6b918b3ae345d21932b
|
[
"MIT"
] | null | null | null |
src/vovaProject/urls.py
|
vova-lantsov-dev/django-library
|
a424416d7688cba1bdb2b6b918b3ae345d21932b
|
[
"MIT"
] | null | null | null |
src/vovaProject/urls.py
|
vova-lantsov-dev/django-library
|
a424416d7688cba1bdb2b6b918b3ae345d21932b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("main.urls")),
]
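# In development, let Django itself serve user-uploaded media files.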
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 26.153846
| 80
| 0.75
|
3a50749479507dff61e3b7a2099e7d9a726be4ab
| 204
|
py
|
Python
|
pvfit/modeling/double_diode/__init__.py
|
markcampanelli/pvfit
|
e526f3757279a3f8c41295aab94c1575ee9485fc
|
[
"MIT"
] | 4
|
2019-09-17T15:19:17.000Z
|
2021-11-24T03:21:03.000Z
|
pvfit/modeling/double_diode/__init__.py
|
markcampanelli/pvfit-m
|
6e2b9e0797b40de8eb587ec50102421cd2071b9e
|
[
"MIT"
] | 10
|
2019-08-09T15:40:51.000Z
|
2019-09-06T06:06:20.000Z
|
pvfit/modeling/double_diode/__init__.py
|
markcampanelli/pvfit-m
|
6e2b9e0797b40de8eb587ec50102421cd2071b9e
|
[
"MIT"
] | 2
|
2019-08-13T18:44:01.000Z
|
2019-08-14T13:42:32.000Z
|
import logging
logging.warning("PVfit's pvit.modeling.double_diode package is in the alpha stage of development. "
"It's API can and will change and the algorithms are not well tested.")
| 40.8
| 99
| 0.72549
|
067060bd07031c801036c50bd02a72a082d4316d
| 1,557
|
py
|
Python
|
datafactory/__init__.py
|
righ/datafactory
|
8299df4e29472381ccfe91535fdecf8e97a46d32
|
[
"Apache-2.0"
] | 2
|
2015-07-09T08:49:32.000Z
|
2015-09-04T13:43:40.000Z
|
datafactory/__init__.py
|
righ/datafactory
|
8299df4e29472381ccfe91535fdecf8e97a46d32
|
[
"Apache-2.0"
] | 1
|
2020-06-06T13:12:39.000Z
|
2020-06-06T13:12:39.000Z
|
datafactory/__init__.py
|
righ/datafactory
|
8299df4e29472381ccfe91535fdecf8e97a46d32
|
[
"Apache-2.0"
] | 1
|
2020-11-06T08:11:51.000Z
|
2020-11-06T08:11:51.000Z
|
# coding: utf-8
"""generate testdata.
"""
__author__ = "righ"
__author_email__ = "righ.m9@gmail.com"
__version__ = "1.0.0"
__license__ = "Apache License 2.0"
from .containers.dict import DictContainer
from .containers.list import ListContainer
from .containers.iter import IterContainer
from .models.list import ListModel
from .models.dict import DictModel
from .fields.choice import ChoiceField
from .fields.pickout import PickoutField
from .fields.cycle import CycleField
from .fields.hashof import HashOfField
from .fields.increment import IncrementField
from .fields.sequence import SequenceField
from .formatters.string import StringFormatter
from .formatters.json import JsonFormatter
from .formatters.pickle import PickleFormatter
from .formatters.csv import CsvFormatter
from .api.special import BLANK, ESCAPE
Model = DictModel
Container = ListContainer
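# Lookup tables mapping short names to the available container, model, field and formatter classes.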
CONTAINERS = {
"list": ListContainer,
"dict": DictContainer,
"iter": IterContainer,
}
MODELS = {
"list": ListModel,
"dict": DictModel,
}
PATTERNS = {
"choice": ChoiceField,
"pickout": PickoutField,
"cycle": CycleField,
"hashof": HashOfField,
"increment": IncrementField,
"sequence": SequenceField,
}
FORMATTERS = {
"string": StringFormatter,
"json": JsonFormatter,
"pickle": PickleFormatter,
"csv": CsvFormatter,
}
IncField = IncrementField
SeqField = SequenceField
# Container is ListContainer
Container = ListContainer
try:
del containers, models, fields, formatters, exceptions, api
except NameError:
pass
| 20.76
| 63
| 0.747592
|
a6d2bd920a466f321c4426dfbd3c33f7be358067
| 694
|
py
|
Python
|
app/core/migrations/0003_tag.py
|
Sahinovic/Ucenje
|
41e6c7621e1433051541f9170ce68284d14673a3
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_tag.py
|
Sahinovic/Ucenje
|
41e6c7621e1433051541f9170ce68284d14673a3
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_tag.py
|
Sahinovic/Ucenje
|
41e6c7621e1433051541f9170ce68284d14673a3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2021-03-24 23:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20210320_2305'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.916667
| 118
| 0.622478
|
e968f2f715ed7c93e32acb4bb3677c7e21680141
| 411
|
py
|
Python
|
main.py
|
rebeling/monday-morning-mite
|
95fbd6d64e3578c183b097e9caeff974c2250afe
|
[
"MIT"
] | null | null | null |
main.py
|
rebeling/monday-morning-mite
|
95fbd6d64e3578c183b097e9caeff974c2250afe
|
[
"MIT"
] | null | null | null |
main.py
|
rebeling/monday-morning-mite
|
95fbd6d64e3578c183b097e9caeff974c2250afe
|
[
"MIT"
] | null | null | null |
from mite_api import data_of_mite
from results import overview
# 1. get the entries from mite, default at=last_week
# 1.1 augment the data entries, parse note for ids and urls
# and access issue in specified source
user_data = data_of_mite()
print('retrieved')
# 2. calculate velocity
from velocity import correlations
user_data = correlations(user_data)
# 3. prepare and show data
# overview(user_data)
| 25.6875
| 59
| 0.778589
|
4a2e67895c275c24b37ae5d9e7e6dd89dd825946
| 132
|
py
|
Python
|
tools/deployment/lsst_nb_deploy/__init__.py
|
michitaro/nublado
|
777968007884bfff3938de6d4159f2b46affc3ff
|
[
"MIT"
] | null | null | null |
tools/deployment/lsst_nb_deploy/__init__.py
|
michitaro/nublado
|
777968007884bfff3938de6d4159f2b46affc3ff
|
[
"MIT"
] | null | null | null |
tools/deployment/lsst_nb_deploy/__init__.py
|
michitaro/nublado
|
777968007884bfff3938de6d4159f2b46affc3ff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from .deploy import LSSTNotebookAspectDeployment, standalone
from ._version import version_info, __version__
| 33
| 60
| 0.840909
|
9394886e7dd0ea22cda628750f8b373552e1f0f9
| 51,803
|
py
|
Python
|
diofant/core/mul.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | 1
|
2021-08-22T09:34:15.000Z
|
2021-08-22T09:34:15.000Z
|
diofant/core/mul.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/core/mul.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | null | null | null |
import operator
from collections import defaultdict
from functools import reduce
from .basic import Basic
from .cache import cacheit
from .compatibility import default_sort_key
from .logic import _fuzzy_group, fuzzy_and, fuzzy_not
from .operations import AssocOp
from .singleton import S
from .sympify import sympify
# internal marker to indicate:
# "there are still non-commutative objects -- don't forget to process them"
class NC_Marker:
is_Order = False
is_Mul = False
is_Number = False
is_Poly = False
is_commutative = False
def _unevaluated_Mul(*args):
"""Return a well-formed unevaluated Mul: Numbers are collected and
put in slot 0, any arguments that are Muls will be flattened, and args
are sorted. Use this when args have changed but you still want to return
an unevaluated Mul.
Examples
========
>>> a = _unevaluated_Mul(*[Float(3.0), x, Integer(2)])
>>> a.args[0]
6.00000000000000
>>> a.args[1]
x
Two unevaluated Muls with the same arguments will
always compare as equal during testing:
>>> m = _unevaluated_Mul(sqrt(2), sqrt(3))
>>> m == _unevaluated_Mul(sqrt(3), sqrt(2))
True
>>> u = Mul(sqrt(3), sqrt(2), evaluate=False)
>>> m == _unevaluated_Mul(u)
True
>>> m == Mul(*m.args)
False
"""
args = list(args)
newargs = []
ncargs = []
co = S.One
while args:
a = args.pop()
if a.is_Mul:
c, nc = a.args_cnc()
args.extend(c)
if nc:
ncargs.append(Mul._from_args(nc))
elif a.is_Number:
co *= a
else:
newargs.append(a)
newargs.sort(key=default_sort_key)
if co is not S.One:
newargs.insert(0, co)
if ncargs:
newargs.append(Mul._from_args(ncargs))
return Mul._from_args(newargs)
class Mul(AssocOp):
is_Mul = True
identity = S.One
@classmethod
def flatten(cls, seq):
"""Return commutative, noncommutative and order arguments by
combining related terms.
Notes
=====
* In an expression like ``a*b*c``, Python processes this through diofant
as ``Mul(Mul(a, b), c)``. This can have undesirable consequences.
- Sometimes terms are not combined as one would like:
{c.f. https://github.com/sympy/sympy/issues/4596}
>>> 2*(x + 1) # this is the 2-arg Mul behavior
2*x + 2
>>> y*(x + 1)*2
2*y*(x + 1)
>>> 2*(x + 1)*y # 2-arg result will be obtained first
y*(2*x + 2)
>>> Mul(2, x + 1, y) # all 3 args simultaneously processed
2*y*(x + 1)
>>> 2*((x + 1)*y) # parentheses can control this behavior
2*y*(x + 1)
Powers with compound bases may not find a single base to
combine with unless all arguments are processed at once.
Post-processing may be necessary in such cases.
{c.f. https://github.com/sympy/sympy/issues/5728}
>>> a = sqrt(x*sqrt(y))
>>> a**3
(x*sqrt(y))**(3/2)
>>> Mul(a, a, a)
(x*sqrt(y))**(3/2)
>>> a*a*a
x*sqrt(y)*sqrt(x*sqrt(y))
>>> _.subs({a.base: z}).subs({z: a.base})
(x*sqrt(y))**(3/2)
- If more than two terms are being multiplied then all the
previous terms will be re-processed for each new argument.
So if each of ``a``, ``b`` and ``c`` were :class:`Mul`
expression, then ``a*b*c`` (or building up the product
with ``*=``) will process all the arguments of ``a`` and
``b`` twice: once when ``a*b`` is computed and again when
``c`` is multiplied.
Using ``Mul(a, b, c)`` will process all arguments once.
* The results of Mul are cached according to arguments, so flatten
will only be called once for ``Mul(a, b, c)``. If you can
structure a calculation so the arguments are most likely to be
repeats then this can save time in computing the answer. For
example, say you had a Mul, M, that you wished to divide by ``d[i]``
and multiply by ``n[i]`` and you suspect there are many repeats
in ``n``. It would be better to compute ``M*n[i]/d[i]`` rather
than ``M/d[i]*n[i]`` since every time n[i] is a repeat, the
product, ``M*n[i]`` will be returned without flattening -- the
cached value will be returned. If you divide by the ``d[i]``
first (and those are more unique than the ``n[i]``) then that will
create a new Mul, ``M/d[i]`` the args of which will be traversed
again when it is multiplied by ``n[i]``.
{c.f. https://github.com/sympy/sympy/issues/5706}
This consideration is moot if the cache is turned off.
The validity of the above notes depends on the implementation
details of Mul and flatten which may change at any time. Therefore,
you should only consider them when your code is highly performance
sensitive.
"""
from ..series.order import Order
rv = None
if len(seq) == 2:
a, b = seq
if b.is_Rational:
a, b = b, a
assert a is not S.One
if not a.is_zero and a.is_Rational:
r, b = b.as_coeff_Mul()
if b.is_Add:
if r is not S.One: # 2-arg hack
if a*r is S.One:
rv = [b], [], None
else:
# leave the Mul as a Mul
rv = [cls(a*r, b, evaluate=False)], [], None
elif b.is_commutative:
r, b = b.as_coeff_Add()
bargs = [_keep_coeff(a, bi) for bi in Add.make_args(b)]
bargs.sort(key=default_sort_key)
ar = a*r
if ar:
bargs.insert(0, ar)
bargs = [Add._from_args(bargs)]
rv = bargs, [], None
if rv:
return rv
# apply associativity, separate commutative part of seq
c_part = [] # out: commutative factors
nc_part = [] # out: non-commutative factors
nc_seq = []
# standalone term e.g. 3 * ...
coeff = S.One
# (base, exp) e.g. (x, n) for x**n
c_powers = []
# (num-base, exp) e.g. (3, y) for ... * 3**y * ...
num_exp = []
neg1e = S.Zero # exponent on -1 extracted from Number-based Pow and I
# (num-base, Rat-exp) e.g. (3, 1/2) for ... * 3**(1/2) * ...
pnum_rat = defaultdict(list)
order_symbols = None
# --- PART 1 ---
#
# "collect powers and coeff":
#
# o coeff
# o c_powers
# o num_exp
# o neg1e
# o pnum_rat
#
# NOTE: this is optimized for all-objects-are-commutative case
for o in seq:
# O(x)
if o.is_Order:
o, order_symbols = o.as_expr_variables(order_symbols)
# Mul([...])
if o.is_Mul:
if o.is_commutative:
seq.extend(o.args) # XXX zerocopy?
else:
# NCMul can have commutative parts as well
for q in o.args:
if q.is_commutative:
seq.append(q)
else:
nc_seq.append(q)
# append non-commutative marker, so we don't forget to
# process scheduled non-commutative objects
seq.append(NC_Marker)
continue
# 3
elif o.is_Number:
if o is nan or coeff is zoo and o is S.Zero:
# we know for sure the result will be nan
return [nan], [], None
if coeff.is_Number: # it could be zoo
coeff *= o
if coeff is nan:
# we know for sure the result will be nan
return [nan], [], None
o # XXX "peephole" optimization, http://bugs.python.org/issue2506
continue
elif o is zoo:
if not coeff:
# 0 * zoo = NaN
return [nan], [], None
if coeff is zoo:
# zoo * zoo = zoo
return [zoo], [], None
coeff = zoo
continue
elif o is I:
neg1e += S.Half
continue
elif o.is_commutative:
# o = b**e
b, e = o.as_base_exp()
if o.has(Order):
b, e = o, S.One
# e.g. 3**y
elif o.is_Pow:
if b.is_Number:
# get all the factors with numeric base so they can be
# combined below, but don't combine negatives unless
# the exponent is an integer
if e.is_Rational:
if e.is_Integer:
coeff *= Pow(b, e) # it is an unevaluated power
continue
elif e.is_negative: # also a sign of an unevaluated power
seq.append(Pow(b, e))
continue
elif b.is_negative:
neg1e += e
b = -b
if b is not S.One:
pnum_rat[b].append(e)
o # XXX "peephole" optimization, http://bugs.python.org/issue2506
continue
elif b.is_positive or e.is_integer:
num_exp.append((b, e))
continue
elif b is I and e.is_Rational:
neg1e += e/2
continue
c_powers.append((b, e))
# NON-COMMUTATIVE
# TODO: Make non-commutative exponents not combine automatically
else:
if o is not NC_Marker:
nc_seq.append(o)
# process nc_seq (if any)
while nc_seq:
o = nc_seq.pop(0)
if not nc_part:
nc_part.append(o)
continue
# try to combine last terms: a**b * a**c -> a**(b+c)
o1 = nc_part.pop()
b1, e1 = o1.as_base_exp()
b2, e2 = o.as_base_exp()
new_exp = e1 + e2
# Only allow powers to combine if the new exponent is
# not an Add. This allows things like a**2*a**3 == a**5
# if a.is_commutative == False, but prohibits
# a**x*a**y and x**a*x**b from combining (x,y commute).
if b1 == b2 and (not new_exp.is_Add) and not (o.has(Order) or o1.has(Order)):
o12 = b1 ** new_exp
# now o12 could be a commutative object
if o12.is_commutative:
seq.append(o12)
continue
else:
nc_seq.insert(0, o12)
else:
nc_part.append(o1)
nc_part.append(o)
# We do want a combined exponent if it would not be an Add, such as
# x**y * x**(2*y) -> x**(3*y)
# We determine if two exponents have the same term by using
# as_coeff_Mul.
#
# Unfortunately, this isn't smart enough to consider combining into
# exponents that might already be adds, so things like:
# x**(z - y) * x**y will be left alone. This is because checking every possible
# combination can slow things down.
# gather exponents of common bases...
def _gather(c_powers):
new_c_powers = []
common_b = {} # b:e
for b, e in c_powers:
co = e.as_coeff_Mul()
common_b.setdefault(b, {}).setdefault(co[1], []).append(co[0])
for b, d in common_b.items():
for di, li in d.items():
d[di] = Add(*li)
for b, e in common_b.items():
for t, c in e.items():
new_c_powers.append((b, c*t))
return new_c_powers
# in c_powers
c_powers = _gather(c_powers)
# and in num_exp
num_exp = _gather(num_exp)
# --- PART 2 ---
#
# o process collected powers (x**0 -> 1; x**1 -> x; otherwise Pow)
# o combine collected powers (2**x * 3**x -> 6**x)
# with numeric base
# ................................
# now we have:
# - coeff:
# - c_powers: (b, e)
# - num_exp: (2, e)
# - pnum_rat: {(1/3, [1/3, 2/3, 1/4])}
# x**0 -> 1; x**1 -> x
for b, e in c_powers:
if e is S.One:
assert not b.is_Number
c_part.append(b)
elif e is not S.Zero:
c_part.append(Pow(b, e))
# 2**x * 3**x -> 6**x
# exp: Mul(num-bases), e.g. x: 6 for ... * 2**x * 3**x * ...
inv_exp_dict = defaultdict(list)
for b, e in num_exp:
inv_exp_dict[e].append(b)
for e, b in inv_exp_dict.items():
inv_exp_dict[e] = cls(*b)
c_part.extend([Pow(b, e) for e, b in inv_exp_dict.items() if e])
# b, e -> e' = sum(e), b
# {(1/5, [1/3]), (1/2, [1/12, 1/4])} -> {(1/3, [1/5, 1/2])}
comb_e = defaultdict(list)
for b, e in pnum_rat.items():
comb_e[Add(*e)].append(b)
del pnum_rat
# process them, reducing exponents to values less than 1
# and updating coeff if necessary else adding them to
# num_rat for further processing
num_rat = []
for e, b in comb_e.items():
b = cls(*b)
if e.denominator == 1:
coeff *= Pow(b, e)
continue
if e.numerator > e.denominator:
e_i, ep = divmod(e.numerator, e.denominator)
coeff *= Pow(b, e_i)
e = Rational(ep, e.denominator)
num_rat.append((b, e))
del comb_e
# extract gcd of bases in num_rat
# 2**(1/3)*6**(1/4) -> 2**(1/3+1/4)*3**(1/4)
pnew = defaultdict(list)
i = 0 # steps through num_rat which may grow
while i < len(num_rat):
bi, ei = num_rat[i]
grow = []
for j in range(i + 1, len(num_rat)):
bj, ej = num_rat[j]
g = bi.gcd(bj)
if g is not S.One:
# 4**r1*6**r2 -> 2**(r1+r2) * 2**r1 * 3**r2
# this might have a gcd with something else
e = ei + ej
if e.denominator == 1:
coeff *= Pow(g, e)
else:
if e.numerator > e.denominator:
e_i, ep = divmod(e.numerator, e.denominator) # change e in place
coeff *= Pow(g, e_i)
e = Rational(ep, e.denominator)
grow.append((g, e))
# update the jth item
num_rat[j] = (bj/g, ej)
# update bi that we are checking with
bi = bi/g
if bi is S.One:
break
if bi is not S.One:
obj = Pow(bi, ei)
if obj.is_Number:
coeff *= obj
else:
# changes like sqrt(12) -> 2*sqrt(3)
for obj in Mul.make_args(obj):
if obj.is_Number:
coeff *= obj
else:
assert obj.is_Pow
bi, ei = obj.args
pnew[ei].append(bi)
num_rat.extend(grow)
i += 1
# combine bases of the new powers
for e, b in pnew.items():
pnew[e] = cls(*b)
# handle -1 and I
if neg1e:
# treat I as (-1)**(1/2) and compute -1's total exponent
p, q = neg1e.as_numer_denom()
# if the integer part is odd, extract -1
n, p = divmod(p, q)
if n % 2:
coeff = -coeff
# if it's a multiple of 1/2 extract I
if q == 2:
c_part.append(I)
elif p:
# see if there is any positive base this power of
# -1 can join
neg1e = Rational(p, q)
for e, b in pnew.items():
if e == neg1e and b.is_positive:
pnew[e] = -b
break
else:
# keep it separate; we've already evaluated it as
# much as possible so evaluate=False
c_part.append(Pow(S.NegativeOne, neg1e, evaluate=False))
# add all the pnew powers
c_part.extend([Pow(b, e) for e, b in pnew.items()])
# oo, -oo
if coeff in (oo, -oo):
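# With an infinite coefficient, drop positive finite factors and fold the
# signs of negative ones into the coefficient.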
def _handle_for_oo(c_part, coeff_sign):
new_c_part = []
for t in c_part:
if t.is_positive:
continue
if t.is_negative:
coeff_sign *= -1
continue
new_c_part.append(t)
return new_c_part, coeff_sign
c_part, coeff_sign = _handle_for_oo(c_part, 1)
nc_part, coeff_sign = _handle_for_oo(nc_part, coeff_sign)
coeff *= coeff_sign
# zoo
if coeff is zoo:
# zoo might be
# infinite_real + bounded_im
# bounded_real + infinite_im
# infinite_real + infinite_im
# and non-zero real or imaginary will not change that status.
c_part = [c for c in c_part if not (c.is_nonzero and
c.is_extended_real is not None)]
nc_part = [c for c in nc_part if not (c.is_nonzero and
c.is_extended_real is not None)]
# 0
elif coeff is S.Zero:
# we know for sure the result will be 0 except the multiplicand
# is infinity
if any(c.is_finite is False for c in c_part):
return [nan], [], order_symbols
return [coeff], [], order_symbols
# check for straggling Numbers that were produced
_new = []
for i in c_part:
if i.is_Number:
coeff *= i
else:
_new.append(i)
c_part = _new
# order commutative part canonically
c_part.sort(key=default_sort_key)
# current code expects coeff to be always in slot-0
if coeff is not S.One:
c_part.insert(0, coeff)
# we are done
if (not nc_part and len(c_part) == 2 and c_part[0].is_Number and
c_part[1].is_Add):
# 2*(1+a) -> 2 + 2 * a
coeff = c_part[0]
c_part = [Add(*[coeff*f for f in c_part[1].args])]
return c_part, nc_part, order_symbols
def _eval_power(self, e):
# don't break up NC terms: (A*B)**3 != A**3*B**3, it is A*B*A*B*A*B
cargs, nc = self.args_cnc(split_1=False)
if e.is_Integer:
return Mul(*[Pow(b, e, evaluate=False) for b in cargs]) * \
Pow(Mul._from_args(nc), e, evaluate=False)
p = Pow(self, e, evaluate=False)
if e.is_Rational or e.is_Float:
return p._eval_expand_power_base()
return p
@classmethod
def class_key(cls):
"""Nice order of classes."""
return 4, 0, cls.__name__
def _eval_evalf(self, prec):
c, m = self.as_coeff_Mul()
if c is S.NegativeOne:
if m.is_Mul:
rv = -AssocOp._eval_evalf(m, prec)
else:
mnew = m._eval_evalf(prec)
if mnew is not None:
m = mnew
rv = -m
else:
rv = AssocOp._eval_evalf(self, prec)
return rv
@cacheit
def as_two_terms(self):
"""Return head and tail of self.
This is the most efficient way to get the head and tail of an
expression.
- if you want only the head, use self.args[0];
- if you want to process the arguments of the tail then use
self.as_coef_mul() which gives the head and a tuple containing
the arguments of the tail when treated as a Mul.
- if you want the coefficient when self is treated as an Add
then use self.as_coeff_add()[0]
>>> (3*x*y).as_two_terms()
(3, x*y)
"""
args = self.args
if len(args) == 2:
return args
else:
return args[0], self._new_rawargs(*args[1:])
@cacheit
def as_coeff_mul(self, *deps, **kwargs):
"""Return the tuple (c, args) where self is written as a Mul.
See Also
========
diofant.core.expr.Expr.as_coeff_mul
"""
rational = kwargs.pop('rational', True)
if deps:
l1 = []
l2 = []
for f in self.args:
if f.has(*deps):
l2.append(f)
else:
l1.append(f)
return self._new_rawargs(*l1), tuple(l2)
args = self.args
if args[0].is_Number:
if not rational or args[0].is_Rational:
return args[0], args[1:]
elif args[0].is_negative:
return S.NegativeOne, (-args[0],) + args[1:]
return S.One, args
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product."""
coeff, args = self.args[0], self.args[1:]
if coeff.is_Number:
if not rational or coeff.is_Rational:
if len(args) == 1:
return coeff, args[0]
else:
return coeff, self._new_rawargs(*args)
elif coeff.is_negative:
return S.NegativeOne, self._new_rawargs(*((-coeff,) + args))
return S.One, self
def as_real_imag(self, deep=True, **hints):
"""Returns real and imaginary parts of self
See Also
========
diofant.core.expr.Expr.as_real_imag
"""
from .function import expand_mul
from ..functions import Abs, im, re
other = []
coeffr = []
coeffi = []
addterms = S.One
for a in self.args:
if a.is_extended_real:
coeffr.append(a)
elif a.is_imaginary:
coeffi.append(a)
elif a.is_commutative:
# search for complex conjugate pairs:
for i, x in enumerate(other):
if x == a.conjugate():
coeffr.append(Abs(x)**2)
del other[i]
break
else:
if a.is_Add:
addterms *= a
else:
other.append(a)
else:
other.append(a)
m = self.func(*other)
if hints.get('ignore') == m:
return
if len(coeffi) % 2:
imco = im(coeffi.pop(0))
# all other pairs make a real factor; they will be
# put into reco below
else:
imco = S.Zero
reco = self.func(*(coeffr + coeffi))
r, i = (reco*re(m), reco*im(m))
if addterms == 1:
if m == 1:
if imco is S.Zero:
return reco, S.Zero
else:
return S.Zero, reco*imco
if imco is S.Zero:
return r, i
return -imco*i, imco*r
addre, addim = expand_mul(addterms, deep=False).as_real_imag()
if imco is S.Zero:
return r*addre - i*addim, i*addre + r*addim
else:
r, i = -imco*i, imco*r
return r*addre - i*addim, r*addim + i*addre
@staticmethod
def _expandsums(sums):
"""
Helper function for _eval_expand_mul.
sums must be a list of instances of Basic.
"""
L = len(sums)
if L == 1:
return sums[0].args
terms = []
left = Mul._expandsums(sums[:L//2])
right = Mul._expandsums(sums[L//2:])
terms = [Mul(a, b) for a in left for b in right]
added = Add(*terms)
return Add.make_args(added) # it may have collapsed down to one term
def _eval_expand_mul(self, **hints):
from ..simplify import fraction
# Handle things like 1/(x*(x + 1)), which are automatically converted
# to 1/x*1/(x + 1)
expr = self
n, d = fraction(expr)
if d.is_Mul:
n, d = [i._eval_expand_mul(**hints) if i.is_Mul else i
for i in (n, d)]
expr = n/d
if not expr.is_Mul:
return expr
plain, sums, rewrite = [], [], False
for factor in expr.args:
if factor.is_Add:
sums.append(factor)
rewrite = True
else:
if factor.is_commutative:
plain.append(factor)
else:
sums.append(Basic(factor)) # Wrapper
if not rewrite:
return expr
else:
plain = self.func(*plain)
terms = self.func._expandsums(sums)
args = []
for term in terms:
t = self.func(plain, term)
if t.is_Mul and any(a.is_Add for a in t.args):
t = t._eval_expand_mul()
args.append(t)
return Add(*args)
@cacheit
def _eval_derivative(self, s):
args = list(self.args)
terms = []
for i in range(len(args)):
d = args[i].diff(s)
if d:
terms.append(self.func(*(args[:i] + [d] + args[i + 1:])))
return Add(*terms)
def _matches_simple(self, expr, repl_dict):
# handle (w*3)._matches('x*5') -> {w: x*5/3}
coeff, terms = self.as_coeff_Mul()
terms = Mul.make_args(terms)
if len(terms) == 1:
newexpr = self.__class__._combine_inverse(expr, coeff)
return terms[0]._matches(newexpr, repl_dict)
return
def _matches(self, expr, repl_dict={}):
"""Helper method for match().
See Also
========
diofant.core.basic.Basic.matches
"""
expr = sympify(expr)
if self.is_commutative and expr.is_commutative:
return AssocOp._matches_commutative(self, expr, repl_dict)
elif self.is_commutative is not expr.is_commutative:
return
c1, nc1 = self.args_cnc()
c2, nc2 = expr.args_cnc()
repl_dict = repl_dict.copy()
if c1:
if not c2:
c2 = [1]
a = self.func(*c1)
if isinstance(a, AssocOp):
repl_dict = a._matches_commutative(self.func(*c2), repl_dict)
else:
repl_dict = a._matches(self.func(*c2), repl_dict)
if repl_dict:
a = self.func(*nc1)
if not isinstance(a, self.func):
repl_dict = a._matches(self.func(*nc2), repl_dict)
else:
raise NotImplementedError
return repl_dict or None
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs/rhs, but treats arguments like symbols, so things like
oo/oo return 1, instead of a nan.
"""
if lhs == rhs:
return S.One
def check(l, r):
if l.is_Float and r.is_comparable:
# if both objects are added to 0 they will share the same "normalization"
# and are more likely to compare the same. Since Add(foo, 0) will not allow
# the 0 to pass, we use __add__ directly.
return l.__add__(0) == r.evalf(strict=False).__add__(0)
return False
if check(lhs, rhs) or check(rhs, lhs):
return S.One
if lhs.is_Mul and rhs.is_Mul:
a = list(lhs.args)
b = [1]
for x in rhs.args:
if x in a:
a.remove(x)
elif -x in a:
a.remove(-x)
b.append(-1)
else:
b.append(x)
return lhs.func(*a)/rhs.func(*b)
return lhs/rhs
def as_powers_dict(self):
"""Return self as a dictionary of factors with each factor being
treated as a power.
See Also
========
diofant.core.expr.Expr.as_powers_dict
"""
d = defaultdict(int)
for term in self.args:
b, e = term.as_base_exp()
d[b] += e
return d
def _eval_as_numer_denom(self):
"""expression -> a/b -> a, b
See Also
========
diofant.core.expr.Expr.as_numer_denom
"""
# don't use _from_args to rebuild the numerators and denominators
# as the order is not guaranteed to be the same once they have
# been separated from each other
numers, denoms = list(zip(*[f.as_numer_denom() for f in self.args]))
return self.func(*numers), self.func(*denoms)
def as_base_exp(self):
"""Return base and exp of self.
See Also
========
diofant.core.expr.Expr.as_base_exp
"""
e1 = None
bases = []
nc = 0
for m in self.args:
b, e = m.as_base_exp()
if not b.is_commutative:
nc += 1
if e1 is None:
e1 = e
elif e != e1 or nc > 1:
return self, S.One
bases.append(b)
return self.func(*bases), e1
def _eval_is_polynomial(self, syms):
return all(term._eval_is_polynomial(syms) for term in self.args)
def _eval_is_rational_function(self, syms):
return all(term._eval_is_rational_function(syms) for term in self.args)
def _eval_is_algebraic_expr(self, syms):
return all(term._eval_is_algebraic_expr(syms) for term in self.args)
def _eval_is_commutative(self):
return _fuzzy_group(a.is_commutative for a in self.args)
def _eval_is_finite(self):
return _fuzzy_group(a.is_finite for a in self.args)
def _eval_is_complex(self):
return _fuzzy_group((a.is_complex for a in self.args), quick_exit=True)
def _eval_is_infinite(self):
if any(a.is_infinite for a in self.args):
if any(not a.is_nonzero for a in self.args):
return
return True
def _eval_is_rational(self):
r = _fuzzy_group((a.is_rational for a in self.args), quick_exit=True)
if r:
return r
elif r is False:
return self.is_zero
def _eval_is_algebraic(self):
r = _fuzzy_group((a.is_algebraic for a in self.args), quick_exit=True)
if r:
return r
elif r is False:
return self.is_zero
def _eval_is_zero(self):
if any(a.is_zero for a in self.args):
if all(a.is_finite for a in self.args):
return True
elif all(a.is_nonzero for a in self.args):
return False
def _eval_is_integer(self):
is_rational = self.is_rational
if is_rational:
n, d = self.as_numer_denom()
if d is S.One:
return True
elif d == 2:
return n.is_even
else:
return is_rational
def _eval_is_polar(self):
if all(arg.is_polar or arg.is_positive for arg in self.args):
return True
def _eval_is_extended_real(self):
real = True
zero = one_neither = False
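# Each purely imaginary factor flips the running `real` flag; zeros and
# factors that are complex but not real are handled by the branches below.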
for t in self.args:
if t.is_finite and not t.is_complex:
return t.is_complex
elif t.is_imaginary or t.is_extended_real:
if t.is_imaginary:
real = not real
z = t.is_zero
if not z and zero is False:
zero = z
elif z:
if all(a.is_finite for a in self.args):
return True
return
elif t.is_complex and t.is_real is False:
if one_neither:
return # complex terms might cancel
one_neither = True
else:
return
if one_neither: # self is a+I*b or I*b
if real:
return zero # real*self is like self: neither is real
elif zero is False:
return real # can't be trumped by 0
elif real:
return real # doesn't matter what zero is
def _eval_is_imaginary(self):
obj = I*self
if obj.is_Mul:
return fuzzy_and([obj._eval_is_extended_real(),
obj._eval_is_finite()])
else:
return obj.is_real
def _eval_is_hermitian(self):
hermitian = True
one_nc = zero = False
for t in self.args:
if not t.is_commutative:
if one_nc:
return
one_nc = True
if t.is_antihermitian or t.is_hermitian:
if t.is_antihermitian:
hermitian = not hermitian
z = t.is_zero
if not z and zero is False:
zero = z
elif z:
if self.is_finite:
return True
return
else:
return
if zero is False or hermitian:
return hermitian
def _eval_is_antihermitian(self):
if self.is_zero:
return False
elif self.is_nonzero:
return (I*self).is_hermitian
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a:
if all(x.is_rational and x.is_nonzero
for x in self.args if x != t):
return True
return
elif a is None:
return
return False
def _eval_is_positive(self):
"""Return True if self is positive, False if not, and None if it
cannot be determined.
This algorithm is non-recursive and works by keeping track of the
sign which changes when a negative or nonpositive is encountered.
Whether a nonpositive or nonnegative is seen is also tracked since
the presence of these makes it impossible to return True, but
possible to return False if the end result is nonpositive. e.g.
pos * neg * nonpositive -> pos or zero -> None is returned
pos * neg * nonnegative -> neg or zero -> False is returned
"""
sign = 1
saw_NON = False
for t in self.args:
if t.is_positive:
continue
elif t.is_negative:
sign = -sign
elif t.is_zero:
if self.is_finite:
return False
else:
return
elif t.is_nonpositive:
sign = -sign
saw_NON = True
elif t.is_nonnegative:
saw_NON = True
else:
return
if sign == 1 and saw_NON is False:
return True
if sign < 0:
return False
def _eval_is_negative(self):
obj = -self
if obj.is_Mul:
return obj._eval_is_positive()
else:
return obj.is_positive
def _eval_is_odd(self):
is_integer = self.is_integer
if is_integer:
r, acc = True, 1
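# The product is known to be even (so not odd) once some factor is even or
# two consecutive factors have opposite parity, i.e. their sum (acc + t) is odd.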
for t in self.args:
if not t.is_integer:
return
elif t.is_even or (acc + t).is_odd:
r = False
elif r is False:
pass
elif r and t.is_odd is None:
r = None
acc = t
return r
else:
return is_integer
def _eval_is_even(self):
is_integer = self.is_integer
if is_integer:
return fuzzy_not(self.is_odd)
elif is_integer is False:
return False
def _eval_subs(self, old, new):
from . import Integer
from ..functions.elementary.complexes import sign
from ..ntheory.factor_ import multiplicity
from ..simplify.powsimp import powdenest
from ..simplify.radsimp import fraction
if not old.is_Mul:
return
# try to keep the replacement literal so -2*x doesn't replace 4*x
if old.args[0].is_Number and old.args[0] < 0:
if self.args[0].is_Number:
if self.args[0] < 0:
return self._subs(-old, -new)
return
def base_exp(a):
# if I and -1 are in a Mul, they get both end up with
# a -1 base (see issue sympy/sympy#6421); all we want here are the
# true Pow separated into base and exponent
if a.is_Pow:
return a.as_base_exp()
return a, S.One
def breakup(eq):
"""break up powers of eq when treated as a Mul::
b**(Rational*e) -> b**e, Rational
commutatives come back as a dictionary {b**e: Rational}
noncommutatives come back as a list [(b**e, Rational)]
"""
c, nc = defaultdict(int), []
for a in Mul.make_args(eq):
a = powdenest(a)
b, e = base_exp(a)
if e is not S.One:
co, _ = e.as_coeff_mul()
b = Pow(b, e/co)
e = co
if a.is_commutative:
c[b] += e
else:
nc.append([b, e])
return c, nc
def rejoin(b, co):
"""
Put rational back with exponent; in general this is not ok, but
since we took it from the exponent for analysis, it's ok to put
it back.
"""
b, e = base_exp(b)
return Pow(b, e*co)
def ndiv(a, b):
"""if b divides a in an extractive way (like 1/4 divides 1/2
but not vice versa, and 2/5 does not divide 1/3) then return
the integer number of times it divides, else return 0.
"""
if not b.denominator % a.denominator or not a.denominator % b.denominator:
return int(a/b)
return 0
# give Muls in the denominator a chance to be changed (see issue sympy/sympy#5651)
# rv will be the default return value
rv = None
n, d = fraction(self)
self2 = self
if d is not S.One:
self2 = n._subs(old, new)/d._subs(old, new)
if not self2.is_Mul:
return self2._subs(old, new)
if self2 != self:
rv = self2
# Now continue with regular substitution.
# handle the leading coefficient and use it to decide if anything
# should even be started; we always know where to find the Rational
# so it's a quick test
co_self = self2.args[0]
co_old = old.args[0]
co_xmul = None
if co_old.is_Rational and co_self.is_Rational:
# if coeffs are the same there will be no updating to do
# below after breakup() step; so skip (and keep co_xmul=None)
if co_old != co_self:
co_xmul = co_self.extract_multiplicatively(co_old)
elif co_old.is_Rational:
return rv
# break self and old into factors
c, nc = breakup(self2)
old_c, old_nc = breakup(old)
# update the coefficients if we had an extraction
# e.g. if co_self were 2*(3/35*x)**2 and co_old = 3/5
# then co_self in c is replaced by (3/5)**2 and co_residual
# is 2*(1/7)**2
if co_xmul and co_xmul.is_Rational and abs(co_old) != 1:
mult = Integer(multiplicity(abs(co_old), co_self))
c.pop(co_self)
if co_old in c:
c[co_old] += mult
else:
c[co_old] = mult
co_residual = co_self/co_old**mult
else:
co_residual = 1
# do quick tests to see if we can't succeed
ok = True
if len(old_nc) > len(nc):
# more non-commutative terms
ok = False
elif len(old_c) > len(c):
# more commutative terms
ok = False
elif {i[0] for i in old_nc} - {i[0] for i in nc}:
# unmatched non-commutative bases
ok = False
elif set(old_c) - set(c):
# unmatched commutative terms
ok = False
elif any(sign(c[b]) != sign(old_c[b]) for b in old_c):
# differences in sign
ok = False
if not ok:
return rv
if not old_c:
cdid = None
else:
rat = []
for (b, old_e) in old_c.items():
c_e = c[b]
rat.append(ndiv(c_e, old_e))
if not rat[-1]:
return rv
cdid = min(rat)
if not old_nc:
ncdid = None
for i in range(len(nc)):
nc[i] = rejoin(*nc[i])
else:
ncdid = 0 # number of nc replacements we did
take = len(old_nc) # how much to look at each time
limit = cdid or oo # max number that we can take
failed = [] # failed terms will need subs if other terms pass
i = 0
while limit and i + take <= len(nc):
hit = False
# the bases must be equivalent in succession, and
# the powers must be extractively compatible on the
# first and last factor but equal in between.
rat = []
for j in range(take):
if nc[i + j][0] != old_nc[j][0]:
break
elif j == 0:
rat.append(ndiv(nc[i + j][1], old_nc[j][1]))
elif j == take - 1:
rat.append(ndiv(nc[i + j][1], old_nc[j][1]))
elif nc[i + j][1] != old_nc[j][1]:
break
else:
rat.append(1)
j += 1
else:
ndo = min(rat)
if ndo:
if take == 1:
assert cdid
ndo = min(cdid, ndo)
nc[i] = Pow(new, ndo)*rejoin(nc[i][0],
nc[i][1] - ndo*old_nc[0][1])
else:
ndo = 1
# the left residual
l = rejoin(nc[i][0], nc[i][1] - ndo*old_nc[0][1])
# eliminate all middle terms
mid = new
# the right residual (which may be the same as the middle if take == 2)
ir = i + take - 1
r = (nc[ir][0], nc[ir][1] - ndo*old_nc[-1][1])
if r[1]:
if i + take < len(nc):
nc[i:i + take] = [l*mid, r]
else:
r = rejoin(*r)
nc[i:i + take] = [l*mid*r]
else:
# there was nothing left on the right
nc[i:i + take] = [l*mid]
limit -= ndo
ncdid += ndo
hit = True
if not hit:
# do the subs on this failing factor
failed.append(i)
i += 1
else:
if not ncdid:
return rv
# although we didn't fail, certain nc terms may have
# failed so we rebuild them after attempting a partial
# subs on them
failed.extend(range(i, len(nc)))
for i in failed:
nc[i] = rejoin(*nc[i]).subs({old: new})
# rebuild the expression
if cdid is None:
do = ncdid
elif ncdid is None:
do = cdid
else:
do = min(ncdid, cdid)
margs = []
for b in c:
if b in old_c:
# calculate the new exponent
e = c[b] - old_c[b]*do
margs.append(rejoin(b, e))
else:
margs.append(rejoin(b.subs({old: new}), c[b]))
if cdid and not ncdid:
# in case we are replacing commutative with non-commutative,
# we want the new term to come at the front just like the
# rest of this routine
margs = [Pow(new, cdid)] + margs
return co_residual*self2.func(*margs)*self2.func(*nc)
def _eval_nseries(self, x, n, logx):
from ..simplify import powsimp
terms = [t.nseries(x, n=n, logx=logx) for t in self.args]
return powsimp(self.func(*terms).expand(), combine='exp', deep=True)
def _eval_as_leading_term(self, x):
return self.func(*[t.as_leading_term(x) for t in self.args])
def _eval_conjugate(self):
return self.func(*[t.conjugate() for t in self.args])
def _eval_transpose(self):
return self.func(*[t.transpose() for t in self.args[::-1]])
def _eval_adjoint(self):
return self.func(*[t.adjoint() for t in self.args[::-1]])
def as_content_primitive(self, radical=False):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> (-3*sqrt(2)*(2 - 2*sqrt(2))).as_content_primitive()
(6, -sqrt(2)*(-sqrt(2) + 1))
See Also
========
diofant.core.expr.Expr.as_content_primitive
"""
coef = S.One
args = []
for i, a in enumerate(self.args):
c, p = a.as_content_primitive(radical=radical)
coef *= c
if p is not S.One:
args.append(p)
# don't use self._from_args here to reconstruct args
# since there may be identical args now that should be combined
# e.g. (2+2*x)*(3+3*x) should be (6, (1 + x)**2) not (6, (1+x)*(1+x))
return coef, self.func(*args)
def as_ordered_factors(self, order=None):
"""Transform an expression into an ordered list of factors.
Examples
========
>>> (2*x*y*sin(x)*cos(x)).as_ordered_factors()
[2, x, y, sin(x), cos(x)]
"""
cpart, ncpart = self.args_cnc()
cpart.sort(key=lambda expr: expr.sort_key(order=order))
return cpart + ncpart
@property
def _sorted_args(self):
return tuple(self.as_ordered_factors())
def prod(a, start=1):
"""Return product of elements of a. Start with int 1 so if only
ints are included then an int result is returned.
Examples
========
>>> prod(range(3))
0
>>> type(_) is int
True
>>> prod([Integer(2), 3])
6
>>> _.is_Integer
True
You can start the product at something other than 1:
>>> prod([1, 2], 3)
6
"""
return reduce(operator.mul, a, start)
def _keep_coeff(coeff, factors, clear=True, sign=False):
"""Return ``coeff*factors`` unevaluated if necessary.
If ``clear`` is False, do not keep the coefficient as a factor
if it can be distributed on a single factor such that one or
more terms will still have integer coefficients.
If ``sign`` is True, allow a coefficient of -1 to remain factored out.
Examples
========
>>> _keep_coeff(S.Half, x + 2)
(x + 2)/2
>>> _keep_coeff(S.Half, x + 2, clear=False)
x/2 + 1
>>> _keep_coeff(S.Half, (x + 2)*y, clear=False)
y*(x + 2)/2
>>> _keep_coeff(Integer(-1), x + y)
-x - y
>>> _keep_coeff(Integer(-1), x + y, sign=True)
-(x + y)
"""
from . import Integer
if not coeff.is_Number:
if factors.is_Number:
factors, coeff = coeff, factors
else:
return coeff*factors
if coeff is S.One:
return factors
elif coeff is S.NegativeOne and not sign:
return -factors
elif factors.is_Add:
if not clear and coeff.is_Rational and coeff.denominator != 1:
q = Integer(coeff.denominator)
for i in factors.args:
c, t = i.as_coeff_Mul()
r = c/q
if r == int(r):
return coeff*factors
return Mul._from_args((coeff, factors))
elif factors.is_Mul:
margs = list(factors.args)
if margs[0].is_Number:
margs[0] *= coeff
if margs[0] == 1:
margs.pop(0)
else:
margs.insert(0, coeff)
return Mul._from_args(margs)
else:
return coeff*factors
def expand_2arg(e):
from ..simplify.simplify import bottom_up
def do(e):
if e.is_Mul:
c, r = e.as_coeff_Mul()
if c.is_Number and r.is_Add:
return Add(*[c*ri for ri in r.args], evaluate=False)
return e
return bottom_up(e, do)
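# Illustrative note (not part of the original module): expand_2arg only
# distributes a numeric coefficient over a sum, working bottom-up, e.g.
#     expand_2arg(2*(x + y))  ->  2*x + 2*y
# and leaves any deeper structure of the expression untouched.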
from .numbers import I, Rational, nan, oo, zoo
from .power import Pow
from .add import Add
| 32.683281
| 99
| 0.474895
|
9f782d671214bf15a00bc23b4e0a6505faf2a73a
| 1,644
|
py
|
Python
|
python/tests/api/transforms/test_dates.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 2,161
|
2020-05-28T01:20:01.000Z
|
2022-03-31T14:48:04.000Z
|
python/tests/api/transforms/test_dates.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 3,096
|
2020-05-27T20:57:13.000Z
|
2022-03-31T22:55:42.000Z
|
python/tests/api/transforms/test_dates.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 879
|
2020-05-28T01:20:01.000Z
|
2022-03-31T12:48:48.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from iceberg.api.expressions import Literal
from iceberg.api.transforms import Transforms
from iceberg.api.types import DateType
import pytest
@pytest.mark.parametrize("transform_gran,expected", [
(Transforms.year, "2017"),
(Transforms.month, "2017-12"),
(Transforms.day, "2017-12-01")])
def test_date_to_human_string(transform_gran, expected):
type_var = DateType.get()
date = Literal.of("2017-12-01").to(type_var)
assert (transform_gran(DateType.get())
.to_human_string(transform_gran(DateType.get())
.apply(date.value))) == expected
@pytest.mark.parametrize("transform_gran", [
Transforms.year,
Transforms.month,
Transforms.day])
def test_null_human_string(transform_gran):
type_var = DateType.get()
assert transform_gran(type_var).to_human_string(None) == "null"
| 36.533333
| 67
| 0.737226
|
391f400e251abaccfadc664248dfaa5037f257d0
| 5,555
|
py
|
Python
|
docs/conf.py
|
grlee77/conda-build
|
ea99e2dc2fa473039dbeb73d92b3f5d5c59548fe
|
[
"BSD-3-Clause"
] | 1
|
2019-01-15T10:51:38.000Z
|
2019-01-15T10:51:38.000Z
|
docs/conf.py
|
grlee77/conda-build
|
ea99e2dc2fa473039dbeb73d92b3f5d5c59548fe
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
grlee77/conda-build
|
ea99e2dc2fa473039dbeb73d92b3f5d5c59548fe
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
os.chdir('..')
import versioneer
version = versioneer.get_versions()['version']
os.chdir('docs')
# -- Project information -----------------------------------------------------
project = 'conda-build'
copyright = '2018, Anaconda, Inc.'
author = 'Anaconda, Inc.'
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_short_title = "Conda-build"
html_show_sourcelink = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'conda-builddoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'conda-build.tex', 'conda-build Documentation',
'Anaconda, Inc.', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'conda-build', 'conda-build Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'conda-build', 'conda-build Documentation',
author, 'conda-build', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
html_style = 'css/no_fixed_width.css'
| 30.355191
| 79
| 0.647705
|
0f2da30ba9cdf5ac328a8c277a70369d1ce1ad16
| 707
|
py
|
Python
|
Week5/Assignment 3.3.py
|
MXS11/Getting-Started-with-Python
|
47f1e57bae4e26874380d85b9f1956a0c6e136dd
|
[
"MIT"
] | null | null | null |
Week5/Assignment 3.3.py
|
MXS11/Getting-Started-with-Python
|
47f1e57bae4e26874380d85b9f1956a0c6e136dd
|
[
"MIT"
] | null | null | null |
Week5/Assignment 3.3.py
|
MXS11/Getting-Started-with-Python
|
47f1e57bae4e26874380d85b9f1956a0c6e136dd
|
[
"MIT"
] | null | null | null |
#3.3 Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error. If the score is between 0.0 and 1.0, print a grade using the following table:
#Score Grade
#>= 0.9 A
#>= 0.8 B
#>= 0.7 C
#>= 0.6 D
#< 0.6 F
#If the user enters a value out of range, print a suitable error message and exit. For the test, enter a score of 0.85.
#Solution
score = input("Enter Score: ")
result= float(score)
if result > 1.0:
print("erorr")
quit()
elif result < 0.0:
print("erorr")
quit()
elif result >= 0.9:
print("A")
elif result >= 0.8:
print("B")
elif result >= 0.7:
print("C")
elif result >= 0.6:
print("D")
elif result < 0.6:
print("F")
| 21.424242
| 187
| 0.619519
|
44c359dd7a6561c9839f677cc7277bdce94abb90
| 2,930
|
py
|
Python
|
python-crons/lib/mongoConnector.py
|
dantunescost/antunedo
|
b66517605f1231604aba620048eaca963ac2ae2b
|
[
"MIT"
] | null | null | null |
python-crons/lib/mongoConnector.py
|
dantunescost/antunedo
|
b66517605f1231604aba620048eaca963ac2ae2b
|
[
"MIT"
] | null | null | null |
python-crons/lib/mongoConnector.py
|
dantunescost/antunedo
|
b66517605f1231604aba620048eaca963ac2ae2b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
import os
from pymongo import MongoClient
path = os.path.dirname(os.path.abspath(__file__))
config = configparser.ConfigParser()
config.read(path + '''/../config/configuration.cfg''')
def connect_to_mongodb():
client = MongoClient(
'mongodb://' + config['MONGODB']['User'] + ':' + config['MONGODB']['Password'] + '@' + config['MONGODB'][
'Host'] + ':' + config['MONGODB']['Port'] + '/admin')
return client
def pop_offers_already_saved(client, offers):
ids = []
collection = client['antunedo']['raw_data']
for i in offers:
ids.append(i["id"])
res = collection.aggregate([
{
'$match': {
'id': {'$in': ids}
}
},
{
'$project': {
'id': 1
}
},
{
'$group': {
'_id': None,
'ids': {'$push': '$id'}
}
}
])
ids_to_pop = []
for i in res:
ids_to_pop = i['ids']
return ids_to_pop
def get_last_maradona_execution(client):
collection = client['antunedo']['logs']
result = 1564358400
res = collection.aggregate([
{
'$sort': {
'start_time': -1
}
},
{
'$limit': 1
}
])
for i in res:
result = i['start_time']
return result
def update_geographical_filter_options(client, offer):
collection = client['antunedo']['geographical_filter_options']
if 'geo' in offer:
if 'country' in offer['geo']:
collection.update_one(
{'type': 'countries'},
{
'$addToSet': {
'options': {'type': 'country', 'name': offer['geo']['country'], 'country': offer['geo']['country']}
}
}
)
if 'city' in offer['geo']:
collection.update_one(
{'type': 'cities'},
{
'$addToSet': {
'options': {'type': 'city', 'name': offer['geo']['city'], 'country': offer['geo']['country']}
}
}
)
if 'completeGeoInfos' in offer and 'levels' in offer['completeGeoInfos']:
for key in offer['completeGeoInfos']['levels']:
if key in ['L4', 'L5', 'L7', 'L10']:
collection.update_one(
{'type': 'others'},
{
'$addToSet': {
'options': {
'type': key,
'name': offer['completeGeoInfos']['levels'][key],
'country': offer['geo']['country']
}
}
}
)
| 28.173077
| 123
| 0.424573
|
0f3f8dd9464949474aa1514efd1e6b3d65b2f8df
| 1,904
|
py
|
Python
|
tests/test_minting_elders.py
|
cds95/crypto-champions
|
dcb759ae7ffd0c5335d17840bb3eace59e3b1f49
|
[
"MIT"
] | 8
|
2021-03-27T00:52:51.000Z
|
2021-09-17T18:03:43.000Z
|
tests/test_minting_elders.py
|
cds95/crypto-champions
|
dcb759ae7ffd0c5335d17840bb3eace59e3b1f49
|
[
"MIT"
] | 1
|
2021-04-10T13:04:05.000Z
|
2021-04-10T13:04:05.000Z
|
tests/test_minting_elders.py
|
cds95/crypto-champions
|
dcb759ae7ffd0c5335d17840bb3eace59e3b1f49
|
[
"MIT"
] | 4
|
2021-03-31T07:37:56.000Z
|
2021-08-31T11:28:45.000Z
|
import brownie
def test_mint_elder_initial_state_variable_elders_minted(accounts, crypto_champions):
assert crypto_champions.eldersInGame() == 0
def test_mint_first_elder_variable_elders_minted(accounts, crypto_champions, mint_first_elder):
assert crypto_champions.eldersInGame() == 1
def test_mint_elder_owner_initial_state(accounts, crypto_champions):
with brownie.reverts(""):
crypto_champions.getElderOwner(1)
def test_mint_elder_no_affinity(accounts, crypto_champions):
with brownie.reverts("dev: Affinity does not exist."):
crypto_champions.mintElderSpirit(0, 0, "affinity", {"from": accounts[0], "value": crypto_champions.elderMintPrice()})
def test_mint_first_elder_owner(accounts, crypto_champions, mint_first_elder):
assert crypto_champions.getElderOwner(crypto_champions.eldersInGame() - 1) == accounts[0]
def test_mint_max_number_elders(accounts, crypto_champions, mint_max_elders):
assert crypto_champions.eldersInGame() == crypto_champions.MAX_NUMBER_OF_ELDERS()
with brownie.reverts("dev: Max number of elders already minted."):
crypto_champions.mintElderSpirit(0, 0, "affinity", {"from": accounts[0], "value": crypto_champions.elderMintPrice()})
assert crypto_champions.eldersInGame() == crypto_champions.MAX_NUMBER_OF_ELDERS()
def test_mint_elder_insufficient_funds(accounts, crypto_champions):
with brownie.reverts("dev: Insufficient payment."):
crypto_champions.mintElderSpirit(0, 0, "affinity", {"from": accounts[0], "value": crypto_champions.elderMintPrice() - 1})
def test_mint_elder_refund(accounts, crypto_champions, create_affinities):
ethSent = crypto_champions.elderMintPrice() + 1000
tx = crypto_champions.mintElderSpirit(0, 0, "ETH", {"from": accounts[0], "value": ethSent})
assert tx.internal_transfers[0]["to"] == accounts[0]
assert tx.internal_transfers[0]["value"] == 1000
| 46.439024
| 129
| 0.772059
|
95ba6c7382991345fef8a39b38566983dd4299f0
| 13,877
|
py
|
Python
|
DeepCellState.py
|
umarov90/DeepFake
|
e65c72f255817532e8a8a3afe2138ae270477601
|
[
"Apache-2.0"
] | 3
|
2021-01-28T08:08:20.000Z
|
2021-10-30T02:15:54.000Z
|
DeepCellState.py
|
umarov90/DeepCellState
|
e65c72f255817532e8a8a3afe2138ae270477601
|
[
"Apache-2.0"
] | null | null | null |
DeepCellState.py
|
umarov90/DeepCellState
|
e65c72f255817532e8a8a3afe2138ae270477601
|
[
"Apache-2.0"
] | 1
|
2022-03-09T14:56:49.000Z
|
2022-03-09T14:56:49.000Z
|
import argparse
import os
from scipy import stats
import deepfake
from figures import profiles_viz
from CellData import CellData
import numpy as np
import pandas as pd
import random
from shutil import copyfile
def get_options():
parser = argparse.ArgumentParser(description='Version: 1.0')
parser.add_argument('-O', metavar='output', default="DeepCellState_output",
help='Output directory')
parser.add_argument('-CT', metavar='cell types', default="",
type=str, help='Comma separated list of cell types to use in addition to MCF7 and PC3')
parser.add_argument('-PT', metavar='pert type', default="trt_cp",
type=str, help='Perturbation type to be used, defaults to trt_cp')
parser.add_argument('-N', metavar='number of runs', default=1,
type=int, help='Number of models trained for each fold.'
' The model with best validation performance is picked.')
parser.add_argument('-SM', metavar='special models', default=0,
type=int, help='Set to 1 to train drug MoA family models or'
' set to 2 to train external validation model.'
' Defaults to 0, i.e. 10-fold cross-validation.')
args = parser.parse_args()
return args
def test_loss(prediction, ground_truth):
return np.sqrt(np.mean((prediction - ground_truth) ** 2))
def main():
revision_hodos = False
random.seed(0)
np.random.seed(0)
args = get_options()
regul_stren = 2
if args.CT is not None and len(args.CT)>0:
regul_stren = 1
folds_folder = "../data/folds/"
if args.PT == "trt_sh":
folds_folder = "../data/folds_sh+cp/"
if args.SM == 0:
test_folds = range(1, 11)
elif args.SM == 1:
test_folds = ["antibiotics_ids", "adrenergic_ids", "cholinergic_ids",
"5-HT modulator_ids", "TKI_ids", "COX inh._ids",
"histaminergic_ids", "antipsychotic_ids", "GABAergic_ids", "dopaminergic_ids"]
else:
test_folds = ["ext_val"]
regul_stren = 3
input_size = 978
latent_dim = 128
wdir = open("data_dir").read().strip() + args.O
if not os.path.exists(wdir):
os.makedirs(wdir)
os.chdir(wdir)
# copyfile("/home/user/PycharmProjects/DeepFake/deepfake.py", "deepfake.py")
df = pd.read_csv("../data/GSE70138_Broad_LINCS_pert_info.txt", sep="\t")
good = []
tsne_perts = []
tsne_input = []
tsne_latent = []
for r, test_fold in enumerate(test_folds):
test_fold = str(test_fold)
# For Hodos with our data
# regul_stren = 1
# cell_data = CellData("../data/lincs_phase_1_2.tsv", "../Hodos/our_data/hodos_folds_our_data/" + test_fold, None, args.PT)
# For Hodos with their data
# regul_stren = 1
# cell_data = CellData("../Hodos/their_data/hodos_data_large_tensor", "../data/hodos_folds_their_data/" + test_fold, None, "trt_cp", revision=True)
# Normal run
cell_data = CellData("../data/lincs_phase_1_2.tsv", folds_folder + test_fold, "MCF7,PC3," + args.CT, args.PT)
autoencoder, cell_decoders = deepfake.get_best_autoencoder(input_size, latent_dim,
cell_data, test_fold, args.N, regul_stren)
encoder = autoencoder.get_layer("encoder")
results = {}
img_count = 0
seen_perts = []
print("Total test objects: " + str(len(cell_data.test_data)))
all_results = []
good_perts = []
test_trt = "trt_cp"
vectors = []
input_profiles = []
perts_order = []
for i in range(len(cell_data.test_data)):
if i % 100 == 0:
print(str(i) + " - ", end="", flush=True)
test_meta_object = cell_data.test_meta[i]
if test_meta_object[2] != test_trt:
continue
# if test_meta_object[0] not in ["MCF7", "PC3"]:
# continue
if revision_hodos:
if test_meta_object[1] in cell_data.meta_dictionary_pert.keys():
closest, closest_profile, mean_profile, all_profiles = cell_data.get_profile(cell_data.train_data,
cell_data.meta_dictionary_pert[
test_meta_object[1]],
test_meta_object)
elif test_meta_object[1] in cell_data.meta_dictionary_pert_val.keys():
closest, closest_profile, mean_profile, all_profiles = cell_data.get_profile(cell_data.val_data,
cell_data.meta_dictionary_pert_val[
test_meta_object[
1]],
test_meta_object)
else:
all_results.append(str(0) + ", " + str(0) + ", "
+ test_meta_object[0] + ", " + test_meta_object[1] + ", " + str(0))
continue
else:
closest, closest_profile, mean_profile, all_profiles = cell_data.get_profile(cell_data.test_data,
cell_data.meta_dictionary_pert_test[
test_meta_object[1]],
test_meta_object)
if closest_profile is None:
all_results.append(str(0) + ", " + str(0) + ", "
+ test_meta_object[0] + ", " + test_meta_object[1] + ", " + str(0))
continue
# if test_meta_object[1] in seen_perts:
# continue
seen_perts.append(test_meta_object[1])
test_profile = np.asarray([cell_data.test_data[i]])
weights = cell_decoders[cell_data.test_meta[i][0]]
autoencoder.get_layer("decoder").set_weights(weights)
decoded1 = autoencoder.predict(closest_profile)
results["count"] = results.get("count", 0) + 1
results["Our performance is: "] = results.get("Our performance is: ", 0) + test_loss(decoded1, test_profile)
results["Our correlation is: "] = results.get("Our correlation is: ", 0) + \
stats.pearsonr(decoded1.flatten(), test_profile.flatten())[0]
predictions = []
for p in all_profiles:
predictions.append(autoencoder.predict(np.asarray([p])))
special_decoded = np.mean(np.asarray(predictions), axis=0, keepdims=True)
results["Our multi-correlation is: "] = results.get("Our multi-correlation is: ", 0) + \
stats.pearsonr(special_decoded.flatten(), test_profile.flatten())[0]
results["Our multi-performance is: "] = results.get("Our multi-performance is: ", 0) + \
test_loss(special_decoded, test_profile)
decoded1 = autoencoder.predict(mean_profile)
results["Our performance is (mean profile): "] = results.get("Our performance is (mean profile): ",
0) + test_loss(decoded1, test_profile)
results["Our correlation (mean profile): "] = results.get("Our correlation (mean profile): ", 0) + \
stats.pearsonr(decoded1.flatten(), test_profile.flatten())[0]
results["Baseline correlation (mean profile): "] = results.get("Baseline correlation (mean profile): ", 0) + \
stats.pearsonr(mean_profile.flatten(),
test_profile.flatten())[0]
results["Baseline performance (mean profile): "] = results.get("Baseline performance (mean profile): ", 0) + \
test_loss(mean_profile, test_profile)
all_results.append(str(stats.pearsonr(special_decoded.flatten(), test_profile.flatten())[0]) + ", " +
str(stats.pearsonr(mean_profile.flatten(), test_profile.flatten())[0]) + ", "
+ test_meta_object[0] + ", " + test_meta_object[1] + ", " + str(len(all_profiles)))
results["closest profile: "] = results.get("closest profile: ", 0) + test_loss(closest_profile, test_profile)
results["closest profile correlation is: "] = results.get("closest profile correlation is: ", 0) + \
stats.pearsonr(closest_profile.flatten(), test_profile.flatten())[
0]
# bp = stats.pearsonr(mean_profile.flatten(), test_profile.flatten())[0]
# dp = stats.pearsonr(special_decoded.flatten(), test_profile.flatten())[0]
# if dp > 0.4: # and bp < 0.5
# os.makedirs("profiles", exist_ok=True)
# pname = profiles_viz.fix(df.query('pert_id=="' + str(test_meta_object[1]) + '"')["pert_iname"].tolist()[0])
# profiles_viz.draw_profiles(test_profile, special_decoded, closest_profile, pname,
# input_size, "profiles/" + cell_data.test_meta[i][0] + "_" + str(i)
# + "_" + str(dp) + "_" + str(bp) + "_" + pname + ".svg")
# profiles_viz.draw_scatter_profiles(test_profile, special_decoded, closest_profile, pname,
# "profiles/" + cell_data.test_meta[i][0] + "_" + str(i)
# + "_" + str(dp) + "_" + str(bp) + "_" +
# pname + "_scatter.svg")
# tsne_perts.append(["PC3" if test_meta_object[0] == "MCF7" else "MCF7",
# df.query('pert_id=="' + str(test_meta_object[1]) + '"')["pert_iname"].tolist()[0]])
# tsne_input.append(closest_profile.flatten())
# tsne_latent.append(encoder.predict(closest_profile).flatten())
# if test_meta_object[0] == "MCF7":
# good_perts.append([test_meta_object[1], bp])
# np.savetxt("../figures_data/tsne_perts.csv", np.array(tsne_perts), delimiter=',', fmt="%s")
# np.savetxt("../figures_data/tsne_input.csv", np.array(tsne_input), delimiter=',')
# np.savetxt("../figures_data/tsne_latent.csv", np.array(tsne_latent), delimiter=',')
# good_perts.sort(key=lambda x: x[1], reverse=True)
# matrix = np.zeros((len(good_perts), len(good_perts)))
# for i in range(len(good_perts)):
# for j in range(len(good_perts)):
# a = cell_data.get_profile_cell_pert(cell_data.test_data, cell_data.test_meta, "MCF7",
# good_perts[i][0])
# b = cell_data.get_profile_cell_pert(cell_data.test_data, cell_data.test_meta, "PC3",
# good_perts[j][0])
# if a is None or b is None:
# continue
# vector1 = encoder.predict(np.asarray(a))
# vector2 = encoder.predict(np.asarray(b))
# vpcc = stats.pearsonr(vector1.flatten(), vector2.flatten())[0]
# matrix[i][j] = vpcc
# for i in range(len(good_perts)):
# good_perts[i] = df.query('pert_id=="'+str(good_perts[i][0]) + '"')["pert_iname"].tolist()[0]
# df1 = pd.DataFrame(data=matrix, index=good_perts, columns=good_perts)
# df1.to_pickle("../figures_data/latent.p")
print(" Done")
with open("log.txt", 'a+') as f:
for key, value in results.items():
if key == "count":
continue
f.write(key + str(value / results["count"]))
f.write("\n")
performance = str(results["Our performance is: "] / results["count"]) + "\t" + \
str(results["Our correlation is: "] / results["count"]) + "\t" + \
str(results["Our multi-performance is: "] / results["count"]) + "\t" + \
str(results["Our multi-correlation is: "] / results["count"]) + "\t" + \
str(results["closest profile: "] / results["count"]) + "\t" + \
str(results["closest profile correlation is: "] / results["count"]) + "\t" + \
str(results["Baseline correlation (mean profile): "] / results["count"]) + "\t" + \
str(results["Baseline performance (mean profile): "] / results["count"])
with open("final_result.tsv", 'a+') as f:
f.write(str(latent_dim) + "\t" + performance) # str(tr_size) + "\t" +
f.write("\n")
with open("all_results_2", 'a+') as f:
f.write("\n".join(all_results))
f.write("\n")
if __name__ == '__main__':
main()
| 56.640816
| 155
| 0.506594
|
62dd0caf71ae39a9a21f5bbced0c22c8a7504ad5
| 343
|
py
|
Python
|
asper/admin.py
|
anshumanprajapatiap/thasperteam
|
4593584dd8560dbf1e8ac39c8b4135234a87d9f6
|
[
"MIT"
] | 1
|
2020-08-03T15:18:37.000Z
|
2020-08-03T15:18:37.000Z
|
asper/admin.py
|
anshumanprajapatiap/thasperteam
|
4593584dd8560dbf1e8ac39c8b4135234a87d9f6
|
[
"MIT"
] | null | null | null |
asper/admin.py
|
anshumanprajapatiap/thasperteam
|
4593584dd8560dbf1e8ac39c8b4135234a87d9f6
|
[
"MIT"
] | 2
|
2020-05-13T21:39:50.000Z
|
2020-05-14T07:51:13.000Z
|
from django.contrib import admin
from .models import *
from import_export.admin import ImportExportModelAdmin
# Register your models here.
@admin.register(UserDetail, FirstTask,Project, DesignAssignment, DesignSubmit, WebAssignment, WebSubmit,AppAssignment, AppSubmit, UserSkills, UserPort)
class ViewAdmin(ImportExportModelAdmin):
pass
| 34.3
| 151
| 0.830904
|
f1d5113661398df8973ddeed52223c377910df9e
| 1,319
|
py
|
Python
|
nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | 2
|
2018-04-26T12:09:32.000Z
|
2018-04-27T06:36:49.000Z
|
nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | 1
|
2019-11-14T14:16:57.000Z
|
2019-11-14T14:16:57.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..epi import EddyCorrect
def test_EddyCorrect_inputs():
input_map = dict(
args=dict(argstr='%s', ),
environ=dict(
nohash=True,
usedefault=True,
),
in_file=dict(
argstr='%s',
extensions=None,
mandatory=True,
position=0,
),
out_file=dict(
argstr='%s',
extensions=None,
name_source=['in_file'],
name_template='%s_edc',
output_name='eddy_corrected',
position=1,
),
output_type=dict(),
ref_num=dict(
argstr='%d',
mandatory=True,
position=2,
usedefault=True,
),
)
inputs = EddyCorrect.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_EddyCorrect_outputs():
output_map = dict(eddy_corrected=dict(extensions=None, ), )
outputs = EddyCorrect.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 28.673913
| 67
| 0.551175
|
88f776c99b006341ee07b28705c308b119eb4048
| 8,281
|
py
|
Python
|
NMTK_apps/NMTK_server/validation/tool_config_validator.py
|
jrawbits/nmtk-1
|
759781770b5f2464008ceb5376fd3922b1b877fa
|
[
"Unlicense"
] | null | null | null |
NMTK_apps/NMTK_server/validation/tool_config_validator.py
|
jrawbits/nmtk-1
|
759781770b5f2464008ceb5376fd3922b1b877fa
|
[
"Unlicense"
] | null | null | null |
NMTK_apps/NMTK_server/validation/tool_config_validator.py
|
jrawbits/nmtk-1
|
759781770b5f2464008ceb5376fd3922b1b877fa
|
[
"Unlicense"
] | null | null | null |
import json
from NMTK_server import models
import decimal
import collections
import logging
logger = logging.getLogger(__name__)
class ToolConfigValidator(object):
def __init__(self, job, file_config, tool_config):
self.job = job
self.file_config = file_config
self.tool_config = tool_config
self.errors = None
def genToolConfig(self, force=False):
'''
The validation process "fills in" empty spaces with hidden and/or default
settings; here we return the resulting configuration.
'''
if not (force or self.is_valid()):
return None
if not hasattr(self, '_tool_config'):
self._tool_config = self.tool_config
return self._tool_config
def validate(
self,
type,
v,
required,
readonly,
default,
fields=[],
validation={}):
'''
Handle all the validation criteria for the types here.
'''
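# Supported element types (see the branches below): 'property' must name one
# of the available file fields, 'numeric' and 'integer' are parsed and
# range-checked via range_check(), and 'string' only gets a required/empty check.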
error = None
if type == 'property':
if not required and not v:
pass
elif v not in fields:
error = 'Please choose from the available fields'
elif readonly:
if v != default:
error = 'Readonly field must have a value of {0}'.format(
default)
elif type == 'numeric':
if required and not v:
error = 'Please enter a valid numeric value'
elif v:
try:
v = decimal.Decimal(v)
error = self.range_check(v, validation)
except:
error = 'Please enter a valid numeric value'
elif type == 'integer':
if required and not v:
error = 'Please enter a valid integer value'
elif v:
try:
if str(v).isdigit():
v = int(v)
error = self.range_check(v, validation)
else:
error = 'Please enter a valid integer value'
except:
error = 'Please enter a valid integer value'
elif type == 'string':
if required and not v:
error = 'Please enter a valid string value'
return error
def range_check(self, v, validation):
max = validation.get('maximum', None)
min = validation.get('minimum', None)
try:
if min:
min = decimal.Decimal(min)
if max:
max = decimal.Decimal(max)
if min and max and (min > v or max < v):
return 'Value must be between {0} and {1}'.format(min, max)
if min and min > v:
    return 'Value must be greater than {0}'.format(min)
if max and max < v:
    return 'Value must be less than {0}'.format(max)
return None
except:
return 'Tool validation criteria is invalid (contact tool developer)'
def is_valid(self):
self.file_config_parsed = {}
# Convert the URI into proper file objects, otherwise we won't be
# able to get things like the list of valid fields from the namespace
# entries.
logger.debug('File config is %s', self.file_config)
for namespace, uri in self.file_config.iteritems():
if len(uri) > 0:
try:
id = uri.rsplit('/', 2)[-2]
self.file_config_parsed[
namespace] = models.DataFile.objects.get(pk=id)
except:
pass
# Get the configuration for this particular tool.
# config from the user is of the form {'namespace':{'field_name':
# {type: type, value: value} } }
tool_config = self.job.tool.toolconfig.json_config
errors = collections.defaultdict(dict)
for entry in ['input', 'output']:
for config_entry in tool_config.get(entry, []):
namespace_config = self.tool_config.get(
config_entry['namespace'], {})
for item in config_entry.get(
'elements',
[]): # each item here will have name, required, and type keys
# So now we have type and value keys
data = namespace_config.get(item['name'], None)
validate_kwargs = {}
# Verify that the required fields are provided
if data is None:
# if the user didn't provide the config for an item,
# but there is a default, then use the default.
# This would also work for hidden stuff.
if item.get(
'default',
None) and not item.get(
'required',
True):
namespace_config[item['name']] = {
'type': item['type'], 'value': item['default']}
if id(self.tool_config) != id(namespace_config):
self.tool_config[
config_entry['namespace']] = namespace_config
# if it is a required field, but there is no default, then
# we will give them an error message.
elif item.get('required', True):
errors[
config_entry['namespace']][
item['name']] = 'This field is required'
continue
else:
allowed_types = []
validate_kwargs['validation'] = item.get(
'validation', {})
if config_entry['type'].lower() == 'file':
# Get the list of allowable file fields.
validate_kwargs['fields'] = getattr(
self.file_config_parsed.get(
config_entry['namespace']), 'fields', [])
if item['type'] != 'property':
allowed_types.append('property')
allowed_types.append(item['type'])
data_type = data.get('type', None)
value = data.get('value', None)
if not data_type:
errors[
config_entry['namespace']][
item['name']] = 'A valid type must be specified with the config'
continue
else:
error = self.validate(data_type, value,
item.get('required', True),
item.get('readonly', False),
item.get('default', None),
**validate_kwargs)
if error:
errors[
config_entry['namespace']][
item['name']] = error
if len(errors): # Only return errors dict if there are errors.
self.errors = errors
return False
return True
def genJobFiles(self, force=False):
'''
Here we make a list of JobFile objects, one per namespace/datafile
pair in the parsed file configuration.
'''
if not hasattr(self, '_job_files'):
self._job_files = []
if force or self.is_valid():
logger.debug(
'File config parsed is %s',
self.file_config_parsed)
for namespace, datafile in self.file_config_parsed.iteritems():
self._job_files.append(models.JobFile(job=self.job,
datafile=datafile,
namespace=namespace))
return self._job_files
| 42.466667
| 92
| 0.457433
|
5595ac0362ead28f6b05314134705270096c7ea7
| 360
|
py
|
Python
|
compute.py
|
pltrdy/krypton-mining-calculator
|
97f8417e7b911e68105e0333a69d5e31258cf523
|
[
"MIT"
] | null | null | null |
compute.py
|
pltrdy/krypton-mining-calculator
|
97f8417e7b911e68105e0333a69d5e31258cf523
|
[
"MIT"
] | null | null | null |
compute.py
|
pltrdy/krypton-mining-calculator
|
97f8417e7b911e68105e0333a69d5e31258cf523
|
[
"MIT"
] | null | null | null |
import requests
import json
import sys
import time
from api import *
blockTime = get_block_time()
difficulty = get_difficulty()
usd_price = get_usd_price()
data = {
'blockTime': blockTime,
'difficulty': difficulty,
'priceUsd': usd_price,
'lastUpdate': time.time(),
}
with open(sys.argv[1], 'w') as out_file:
    out_file.write('ethereumStats = ' + json.dumps(data) + ';')
| 18.947368
| 73
| 0.683333
|
31871a62a1380485037700c4cdfee9b281b23131
| 1,126
|
py
|
Python
|
src/lvgl/scripts/lv_conf_checker.py
|
sparquay/TTGO_TWatch_Library
|
4f37e5f31f3d02f436183590a056f90a221721b6
|
[
"MIT"
] | 1
|
2020-04-15T15:04:24.000Z
|
2020-04-15T15:04:24.000Z
|
lvgl/scripts/lv_conf_checker.py
|
jc1995wade/mx287_littlevGL
|
e32bb55260dfb5ca87b0a47ffcc19d9de6685c64
|
[
"MIT"
] | null | null | null |
lvgl/scripts/lv_conf_checker.py
|
jc1995wade/mx287_littlevGL
|
e32bb55260dfb5ca87b0a47ffcc19d9de6685c64
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.6
'''
Generates a checker file for lv_conf.h from lv_conf_template.h, providing a default for every value that is not defined
'''
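# Illustrative example of the transformation performed below: a template line
# such as "#define LV_COLOR_DEPTH 16" is emitted wrapped as
#   #ifndef LV_COLOR_DEPTH
#   #define LV_COLOR_DEPTH 16
#   #endif
# so that lv_conf.h only needs to override the values it cares about.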
import re
fin = open("../lv_conf_template.h", "r")
fout = open("../src/lv_conf_checker.h", "w")
fout.write(
'''/**
* GENERATED FILE, DO NOT EDIT IT!
* @file lv_conf_checker.h
* Make sure all the defines of lv_conf.h have a default value
**/
#ifndef LV_CONF_CHECKER_H
#define LV_CONF_CHECKER_H
'''
)
started = 0
for i in fin.read().splitlines():
if not started:
if '#define LV_CONF_H' in i:
started = 1
continue
else:
continue
if '/*--END OF LV_CONF_H--*/' in i: break
r = re.search(r'^ *# *define ([^\s]+).*$', i)
if r:
line = re.sub('\(.*?\)', '', r[1], 1) #remove parentheses from macros
fout.write(
f'#ifndef {line}\n'
f'{i}\n'
'#endif\n'
)
elif re.search('^ *typedef .*;.*$', i):
continue #ignore typedefs to avoid redeclaration
else:
fout.write(f'{i}\n')
fout.write(
'''
#endif /*LV_CONF_CHECKER_H*/
'''
)
fin.close()
fout.close()
| 18.459016
| 94
| 0.563943
|
a90e1741bacbbb6aab236e1e5b8decab84f67a7e
| 2,513
|
py
|
Python
|
Adafree_simpletest.py
|
Richard-Kirby/oscope
|
9f0b849176cafdf805c0f080ed54f87f3c39c54b
|
[
"MIT"
] | null | null | null |
Adafree_simpletest.py
|
Richard-Kirby/oscope
|
9f0b849176cafdf805c0f080ed54f87f3c39c54b
|
[
"MIT"
] | null | null | null |
Adafree_simpletest.py
|
Richard-Kirby/oscope
|
9f0b849176cafdf805c0f080ed54f87f3c39c54b
|
[
"MIT"
] | null | null | null |
# Simple example of reading the MCP3008 analog input channels and printing
# them all out.
# Author: Tony DiCola
# License: Public Domain
import time
import spidev
import os
import csv
# Open SPI bus
spi = spidev.SpiDev()
spi.open(0, 0)
spi.max_speed_hz = 1250000
# Function to read SPI data from MCP3008 chip
# Channel must be an integer 0-7
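# The MCP3008 transaction below sends three bytes over SPI: a start bit (0x01),
# then a byte whose top nibble selects single-ended mode plus the channel
# ((8 + channel) << 4), then a padding byte while the 10-bit result is clocked
# back in the last two bytes (2 bits from adc[1] and 8 bits from adc[2]).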
def ReadChannel(channel):
adc = spi.xfer2([1, (8 + channel) << 4, 0])
data = ((adc[1] & 3) << 8) + adc[2]
return data
# Software SPI configuration:
#CLK = 18
#MISO = 23
#MOSI = 24
#CS = 25
#mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)
# Hardware SPI configuration:
#SPI_PORT = 0
#SPI_DEVICE = 0
#mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
print('Reading MCP3008 values, press Ctrl-C to quit...')
# Print nice channel column headers.
print('| {0:>4} | {1:>4} | {2:>4} | {3:>4} | {4:>4} | {5:>4} | {6:>4} | {7:>4} |'.format(*range(8)))
print('-' * 57)
sample_count = 0
samples = []
# Main program loop.
try:
while True:
# Read all the ADC channel values in a list.
values = [0]*8
#for i in range(8):
# # The read_adc function will get the value of the specified channel (0-7).
# values[i] = float(mcp.read_adc(i)/1024 *3.3)
values[0] = float(ReadChannel(0)/1024 *3.3)
values[1] = float(ReadChannel(1)/1024 *3.3)
#print (values[0], values[1])
#print(values[0])
samples.append((time.time(), values[0], values[1]))
sample_count +=1
# print ("{},{:.3f},{:.3f}" .format(time.time(), values[6], values[7]))
# Print the ADC values.
#print('| {0:>4} | {1:>4} | {2:>4} | {3:>4} | {4:>4} | {5:>4} | {6:>4} | {7:>4} |'.format(*values))
# Pause for half a second.
#time.sleep(0.5)
except KeyboardInterrupt:
spi.close()
print(sample_count)
first = samples.pop(0)
last = samples.pop()
duration = last[0]-first[0]
samples_per_sec = float(sample_count/duration)
print ("First {}, Last {}". format(samples.pop(0), samples.pop()))
print ("Samples per sec {}" .format(samples_per_sec))
with open('/home/pi/oscope/oscope.csv', 'w', newline='') as resultfile:
sample_num = 0
for sample in samples:
str = "{}, {:.3f}, {:.3f}\n".format(sample_num, sample[1], sample[2])
resultfile.write(str)
sample_num +=1
#for sample in samples:
# print (sample)
| 24.163462
| 107
| 0.588938
|
a0099c35b1e2ce61075a8b4a947d454ce9daf539
| 493
|
py
|
Python
|
application/__init__.py
|
numkem/pycollect-web
|
1513d6033d7a865035c70e417f1dfcdf30454892
|
[
"MIT"
] | null | null | null |
application/__init__.py
|
numkem/pycollect-web
|
1513d6033d7a865035c70e417f1dfcdf30454892
|
[
"MIT"
] | null | null | null |
application/__init__.py
|
numkem/pycollect-web
|
1513d6033d7a865035c70e417f1dfcdf30454892
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask.ext.bower import Bower
from flask.ext.pymongo import PyMongo
from flask.ext.login import LoginManager
from flask.ext.bootstrap import Bootstrap
from flask.ext.debugtoolbar import DebugToolbarExtension
import logging
app = Flask('application')
app.config.from_pyfile('../etc/config.cfg')
if app.config['DEBUG']:
toolbar = DebugToolbarExtension(app)
Bower(app)
Bootstrap(app)
mongo = PyMongo(app)
from filters import *
from application.views import *
| 22.409091
| 56
| 0.793103
|
bbaddb91930814906346f9aff0e92ca1c513abc4
| 727
|
py
|
Python
|
cibyl/exceptions/jenkins_job_builder.py
|
snippergoals/cibyl
|
1721450532593c86acaf3aff3c5a60d48213d38b
|
[
"Apache-2.0"
] | null | null | null |
cibyl/exceptions/jenkins_job_builder.py
|
snippergoals/cibyl
|
1721450532593c86acaf3aff3c5a60d48213d38b
|
[
"Apache-2.0"
] | null | null | null |
cibyl/exceptions/jenkins_job_builder.py
|
snippergoals/cibyl
|
1721450532593c86acaf3aff3c5a60d48213d38b
|
[
"Apache-2.0"
] | null | null | null |
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
class JenkinsJobBuilderError(Exception):
"""Represents an error occurring while querying JenkinsJobBuilder."""
| 36.35
| 78
| 0.724897
|
4e5ed334cfdad015ad168b7a5686c8c3632f4677
| 587
|
py
|
Python
|
examples/022_robot_from_github.py
|
weichengyuan1226/MAS-2021_compas_fab_intro
|
55e82fa70aea0c4b6cf5072505f531ea7967f7f3
|
[
"MIT"
] | 64
|
2019-08-07T07:19:06.000Z
|
2022-03-22T16:48:23.000Z
|
examples/022_robot_from_github.py
|
weichengyuan1226/MAS-2021_compas_fab_intro
|
55e82fa70aea0c4b6cf5072505f531ea7967f7f3
|
[
"MIT"
] | 228
|
2019-07-08T07:55:30.000Z
|
2022-03-25T16:39:17.000Z
|
examples/022_robot_from_github.py
|
weichengyuan1226/MAS-2021_compas_fab_intro
|
55e82fa70aea0c4b6cf5072505f531ea7967f7f3
|
[
"MIT"
] | 18
|
2019-08-04T16:42:37.000Z
|
2022-01-12T18:36:06.000Z
|
import compas
from compas.robots import GithubPackageMeshLoader
from compas.robots import RobotModel
# Set high precision to import meshes defined in meters
compas.PRECISION = '12f'
# Select Github repository, package and branch where the model is stored
repository = 'ros-industrial/abb'
package = 'abb_irb6600_support'
branch = 'kinetic-devel'
github = GithubPackageMeshLoader(repository, package, branch)
urdf = github.load_urdf('irb6640.urdf')
# Create robot model from URDF
model = RobotModel.from_urdf_file(urdf)
# Also load geometry
model.load_geometry(github)
print(model)
| 25.521739
| 72
| 0.800681
|
a1320dc343ecd487fcd5a8118a2ae86ec99572d8
| 2,112
|
py
|
Python
|
diofant/tests/series/test_residues.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | 1
|
2021-08-22T09:34:15.000Z
|
2021-08-22T09:34:15.000Z
|
diofant/tests/series/test_residues.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/tests/series/test_residues.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from diofant import (Function, I, Rational, Symbol, exp, factorial, log, pi,
residue, root, sin)
from diofant.abc import a, s, x, z
__all__ = ()
def test_basic1():
assert residue(1/x, x, 0) == 1
assert residue(-2/x, x, 0) == -2
assert residue(81/x, x, 0) == 81
assert residue(1/x**2, x, 0) == 0
assert residue(0, x, 0) == 0
assert residue(5, x, 0) == 0
assert residue(x, x, 0) == 0
assert residue(x**2, x, 0) == 0
def test_basic2():
assert residue(1/x, x, 1) == 0
assert residue(-2/x, x, 1) == 0
assert residue(81/x, x, -1) == 0
assert residue(1/x**2, x, 1) == 0
assert residue(0, x, 1) == 0
assert residue(5, x, 1) == 0
assert residue(x, x, 1) == 0
assert residue(x**2, x, 5) == 0
def test_f():
f = Function("f")
assert residue(f(x)/x**5, x, 0) == f(x).diff(x, 4).subs({x: 0})/24
def test_functions():
assert residue(1/sin(x), x, 0) == 1
assert residue(2/sin(x), x, 0) == 2
assert residue(1/sin(x)**2, x, 0) == 0
assert residue(1/sin(x)**5, x, 0) == Rational(3, 8)
def test_expressions():
assert residue(1/(x + 1), x, 0) == 0
assert residue(1/(x + 1), x, -1) == 1
assert residue(1/(x**2 + 1), x, -1) == 0
assert residue(1/(x**2 + 1), x, I) == -I/2
assert residue(1/(x**2 + 1), x, -I) == I/2
assert residue(1/(x**4 + 1), x, 0) == 0
assert residue(1/(x**4 + 1), x, exp(I*pi/4)) == -root(-1, 4)/4
assert residue(1/(x**2 + a**2)**2, x, a*I) == -I/4/a**3
@pytest.mark.xfail
def test_expressions_failing():
n = Symbol('n', integer=True, positive=True)
assert residue(exp(z)/(z - pi*I/4*a)**n, z, I*pi*a) == \
exp(I*pi*a/4)/factorial(n - 1)
def test_NotImplemented():
pytest.raises(NotImplementedError, lambda: residue(exp(1/z), z, 0))
def test_bug():
assert residue(2**(z)*(s + z)*(1 - s - z)/z**2, z, 0) == \
1 + s*log(2) - s**2*log(2) - 2*s
def test_sympyissue_5654():
assert residue(1/(x**2 + a**2)**2, x, a*I) == -I/(4*a**3)
def test_sympyissue_6499():
assert residue(1/(exp(z) - 1), z, 0) == 1
| 27.076923
| 76
| 0.534564
|
0b7dfde95d8ed998fc099b417bb073ca0e14f5b2
| 997
|
py
|
Python
|
benchmark/tests/test_18.py
|
trxw/qutip
|
b923c973edd9a071d86eb849650661549f73585f
|
[
"BSD-3-Clause"
] | 1
|
2015-11-06T06:35:06.000Z
|
2015-11-06T06:35:06.000Z
|
benchmark/tests/test_18.py
|
trxw/qutip
|
b923c973edd9a071d86eb849650661549f73585f
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/tests/test_18.py
|
trxw/qutip
|
b923c973edd9a071d86eb849650661549f73585f
|
[
"BSD-3-Clause"
] | null | null | null |
from qutip import *
from numpy import *
from time import time
def test_18(runs=1):
"""
dissipative trilinear hamiltonian
"""
test_name='trilinear MC_F90 [3375]'
N0=15
N1=15
N2=15
gamma0=0.01
gamma1=0.05
gamma2=0.05
alpha=2
tlist=linspace(0,5,200)
a0=tensor(destroy(N0),qeye(N1),qeye(N2));
a1=tensor(qeye(N0),destroy(N1),qeye(N2));
a2=tensor(qeye(N0),qeye(N1),destroy(N2));
num0=a0.dag()*a0
num1=a1.dag()*a1
num2=a2.dag()*a2
C0=sqrt(2*gamma0)*a0
C1=sqrt(2*gamma1)*a1
C2=sqrt(2*gamma2)*a2
H=1j*(a0*a1.dag()*a2.dag()-a0.dag()*a1*a2)
psi0=tensor([coherent(N0,alpha),basis(N1,0),basis(N2,0)])
opts=Odeoptions(gui=False)
tot_elapsed = 0
for n in range(runs):
tic=time()
mcsolve_f90(H, psi0, tlist, [C0,C1,C2],[num0,num1,num2],options=opts)
toc=time()
tot_elapsed += toc - tic
return [test_name], [tot_elapsed / runs]
if __name__=='__main__':
test_18()
| 23.186047
| 77
| 0.601805
|
e4cb2e216f3027a3103fb2376b186a7f9c2f4df5
| 3,068
|
py
|
Python
|
improvement_over_1m/efficiency_exp_its_the_end.py
|
dalakada/TwiCSv2
|
40672a99a201f6e2aab9dd085e1f4a29e8253f3b
|
[
"MIT"
] | 2
|
2019-04-01T00:54:39.000Z
|
2021-06-22T18:02:47.000Z
|
improvement_over_1m/efficiency_exp_its_the_end.py
|
dalakada/TwiCSv2
|
40672a99a201f6e2aab9dd085e1f4a29e8253f3b
|
[
"MIT"
] | null | null | null |
improvement_over_1m/efficiency_exp_its_the_end.py
|
dalakada/TwiCSv2
|
40672a99a201f6e2aab9dd085e1f4a29e8253f3b
|
[
"MIT"
] | 2
|
2018-06-20T14:50:03.000Z
|
2020-08-27T01:55:34.000Z
|
#import SatadishaModule as phase1
import SatadishaModule_final_trie as phase1
import phase2_Trie as phase2
import datetime
from threading import Thread
import random
import math
from queue import Queue
import pandas as pd
import warnings
import numpy as np
import time
import trie as trie
import pickle
import matplotlib.pyplot as plt
import copy
import SVM as svm
import matplotlib.ticker as ticker
import scipy.optimize
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.style
def p1_f(x,p1):
return p1[x]
def p2_f(x,p2):
return p2[x]
warnings.filterwarnings("ignore")
thread_processed=0
stream_count=0
queue = Queue(1000)
#time_in=datetime.datetime.now()
#time_out=datetime.datetime.now()
fieldnames=['candidate','freq','length','cap','start_of_sen','abbrv','all_cap','is_csl','title','has_no','date','is_apostrp','has_inter_punct','ends_verb','ends_adverb','change_in_cap','topic_ind','entry_time','entry_batch','@mention']
global total_time
total_time=0
Phase1= phase1.SatadishaModule()
Phase2 = phase2.EntityResolver()
tweets=pd.read_csv("new_set4.csv",sep =',')
tweets=tweets[:50000:]
# tweets = tweets.sample(frac=1).reset_index(drop=True)
# annotated_tweets=pd.read_csv("political_annotated.csv",sep =',')
# tweets=tweets[:1000:]
print('Tweets are in memory...')
batch_size=250000
print(len(tweets))
length=len(tweets)
# Z_scores=[-1.0,-0.9,-0.8,-0.7,-0.6,-0.5,-0.4,-0.3,-0.2,-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
#
Phase1= phase1.SatadishaModule()
Phase2 = phase2.EntityResolver()
execution_time_list=[]
accuracy_list=[]
tp_count=[]
eviction_parameter_recorder=[]
whole_level=[]
val=math.ceil(length/batch_size)
for i in range(val):
print(i)
print("anani siki2m")
# val =3
my_classifier= svm.SVM1('training.csv')
# the last one is without eviction, which is why one more was added.
# see the notebook notes for the mapping.
eviction_parameter=1
eviction_parameter_recorder.append(eviction_parameter)
Phase1= phase1.SatadishaModule()
Phase2 = phase2.EntityResolver()
total_time=0
execution_time_list=[]
tweets_been_processed_list=[]
tweets_been_processed=0
total_mentions_discovered=[]
level_holder=[]
# annotated_tweet_evenly_partitioned_list=np.array_split(annotated_tweets, val)
# for g, tweet_batch in tweets.groupby(np.arange(length) //batch_size):
tuple_of= Phase1.extract(tweets,0)
tweet_base=tuple_of[0]
candidate_base=tuple_of[1]
phase2stopwordList=tuple_of[4]
elapsedTime= tuple_of[3] - tuple_of[2]
total_time+=elapsedTime
print(elapsedTime,total_time)
print (0,' ', 'Produced')
print("**********************************************************")
tweets_been_processed=tweets_been_processed+len(tweet_base)
tweets_been_processed_list.append(tweets_been_processed)
time_in,time_out,total_mentions=Phase2.executor(tweet_base,candidate_base,phase2stopwordList,-0.5,eviction_parameter,my_classifier)
elapsedTime= time_out-time_in
total_time+=elapsedTime
execution_time_list.append(total_time)
total_mentions_discovered.append(total_mentions)
converted=Phase2.convertedd_ones()
print(converted)
| 25.566667
| 235
| 0.771186
|
e0ae48767c961007f511a77e018b364c76f5b7ba
| 8,218
|
py
|
Python
|
pyzo/codeeditor/style.py
|
l33tlinuxh4x0r/pyzo
|
1e5e1cea6345dc8f78ab93321a7f9a01f8101be0
|
[
"BSD-2-Clause"
] | 235
|
2016-03-05T17:12:12.000Z
|
2022-03-22T06:35:45.000Z
|
pyzo/codeeditor/style.py
|
l33tlinuxh4x0r/pyzo
|
1e5e1cea6345dc8f78ab93321a7f9a01f8101be0
|
[
"BSD-2-Clause"
] | 423
|
2016-02-15T20:23:46.000Z
|
2022-03-26T16:36:37.000Z
|
pyzo/codeeditor/style.py
|
l33tlinuxh4x0r/pyzo
|
1e5e1cea6345dc8f78ab93321a7f9a01f8101be0
|
[
"BSD-2-Clause"
] | 115
|
2016-04-01T14:31:33.000Z
|
2022-03-17T10:59:45.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013, the codeeditor development team
#
# Pyzo is distributed under the terms of the 2-Clause BSD License.
# The full license can be found in 'license.txt'.
""" Modyule style
Provides basic functionaliy for styling.
Styling is done using a dictionary of StyleFormat instances. Each
such instance represents a certain element being styled (e.g. keywords,
line numbers, indentation guides).
All possible style elements are represented using StyleElementDescription
instances. These have a name, description and default format, which
makes it easy to build a UI to allow the user to change the syle.
"""
from .qt import QtGui, QtCore
Qt = QtCore.Qt
class StyleElementDescription:
"""StyleElementDescription(name, defaultFormat, description)
Describes a style element by its name, default format, and description.
A style description is a simple placeholder for something
that can be styled.
"""
def __init__(self, name, description, defaultFormat):
self._name = name
self._description = description
self._defaultFormat = StyleFormat(defaultFormat)
def __repr__(self):
return '<"%s": "%s">' % (self.name, self.defaultFormat)
@property
def name(self):
return self._name
@property
def key(self):
return self._name.replace(" ", "").lower()
@property
def description(self):
return self._description
@property
def defaultFormat(self):
return self._defaultFormat
class StyleFormat:
"""StyleFormat(format='')
Represents the style format for a specific style element.
A "style" is a dictionary that maps names (of style elements)
to StyleFormat instances.
The given format can be a string or another StyleFormat instance.
Style formats can be combined using their update() method.
A style format consists of multiple parts, where each "part" consists
of a key and a value. The keys can be anything, depending
on what kind of thing is being styled. The value can be obtained using
the index operator (e.g. styleFormat['fore'])
For a few special keys, properties are defined that return the Qt object
corresponding to the value. These values are also buffered to enable
fast access. These keys are:
* fore: (QColor) the foreground color
* back: (QColor) the background color
* bold: (bool) whether the text should be bold
* italic: (bool) whether the text should be in italic
* underline: (int) whether an underline should be used (and which one)
* linestyle: (int) what line style to use (e.g. for indent guides)
* textCharFormat: (QTextCharFormat) for the syntax styles
The format neglects spaces and case. Parts are separated by commas
or semicolons. If only a key is given it's value is interpreted
as 'yes'. If only a color is given, its key is interpreted as 'fore'
and back. Colors should be given using the '#' hex formatting.
An example format string: 'fore:#334, bold, underline:dotLine'
By calling str(styleFormatInstance) the string representation of the
format can be obtained. By iterating over the instance, a series
of key-value pairs is obtained.
"""
def __init__(self, format=""):
self._parts = {}
self.update(format)
def _resetProperties(self):
self._fore = None
self._back = None
self._bold = None
self._italic = None
self._underline = None
self._linestyle = None
self._textCharFormat = None
def __str__(self):
"""Get a (cleaned up) string representation of this style format."""
parts = []
for key in self._parts:
parts.append("%s:%s" % (key, self._parts[key]))
return ", ".join(parts)
def __repr__(self):
return '<StyleFormat "%s">' % str(self)
def __getitem__(self, key):
try:
return self._parts[key]
except KeyError:
raise KeyError("Invalid part key " + key + " for style format.")
def __iter__(self):
"""Yields a series of tuples (key, val)."""
parts = []
for key in self._parts:
parts.append((key, self._parts[key]))
return parts.__iter__()
def update(self, format):
"""update(format)
Update this style format with the given format.
"""
# Reset buffered values
self._resetProperties()
# Make a string, so we update the format with the given one
if isinstance(format, StyleFormat):
format = str(format)
# Split on ',' and ';', ignore spaces
styleParts = [p for p in format.replace("=", ":").replace(";", ",").split(",")]
for stylePart in styleParts:
# Make sure it consists of identifier and value pair
# e.g. fore:#xxx, bold:yes, underline:no
if ":" not in stylePart:
if stylePart.startswith("#"):
stylePart = "foreandback:" + stylePart
else:
stylePart += ":yes"
# Get key and value, strip them and make them lowercase
key, _, val = [i.strip().lower() for i in stylePart.partition(":")]
# Store in parts
if key == "foreandback":
self._parts["fore"] = val
self._parts["back"] = val
elif key:
self._parts[key] = val
## Properties
def _getValueSafe(self, key):
try:
return self._parts[key]
except KeyError:
return "no"
@property
def fore(self):
if self._fore is None:
self._fore = QtGui.QColor(self._parts["fore"])
return self._fore
@property
def back(self):
if self._back is None:
self._back = QtGui.QColor(self._parts["back"])
return self._back
@property
def bold(self):
if self._bold is None:
if self._getValueSafe("bold") in ["yes", "true"]:
self._bold = True
else:
self._bold = False
return self._bold
@property
def italic(self):
if self._italic is None:
if self._getValueSafe("italic") in ["yes", "true"]:
self._italic = True
else:
self._italic = False
return self._italic
@property
def underline(self):
if self._underline is None:
val = self._getValueSafe("underline")
if val in ["yes", "true"]:
self._underline = QtGui.QTextCharFormat.SingleUnderline
elif val in ["dotted", "dots", "dotline"]:
self._underline = QtGui.QTextCharFormat.DotLine
elif val in ["wave"]:
self._underline = QtGui.QTextCharFormat.WaveUnderline
else:
self._underline = QtGui.QTextCharFormat.NoUnderline
return self._underline
@property
def linestyle(self):
if self._linestyle is None:
val = self._getValueSafe("linestyle")
if val in ["yes", "true"]:
self._linestyle = Qt.SolidLine
elif val in ["dotted", "dot", "dots", "dotline"]:
self._linestyle = Qt.DotLine
elif val in ["dashed", "dash", "dashes", "dashline"]:
self._linestyle = Qt.DashLine
else:
self._linestyle = Qt.SolidLine # default to solid
return self._linestyle
@property
def textCharFormat(self):
if self._textCharFormat is None:
self._textCharFormat = QtGui.QTextCharFormat()
self._textCharFormat.setForeground(self.fore)
try: # not all styles have a back property
self._textCharFormat.setBackground(self.back)
except Exception:
pass
self._textCharFormat.setUnderlineStyle(self.underline)
if self.bold:
self._textCharFormat.setFontWeight(QtGui.QFont.Bold)
if self.italic:
self._textCharFormat.setFontItalic(True)
return self._textCharFormat
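# Illustrative usage sketch, added for clarity; not part of the original module.
# It only exercises the string-parsing side of StyleFormat, so no Qt application
# is required (the Qt-backed properties such as fore/back/textCharFormat are not touched).
if __name__ == "__main__":
    fmt = StyleFormat("fore:#334, bold, underline:dotLine")
    fmt.update("italic:yes")          # merge extra parts into the format
    print(str(fmt))                   # cleaned-up string form of the style
    for key, val in fmt:              # iterate over (key, value) pairs
        print(key, "->", val)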
| 32.482213
| 87
| 0.607934
|
691348f4469a848ad7efb8542fb5c3695a43b27f
| 1,729
|
py
|
Python
|
lib/mpvariable.py
|
chuanglaipv/realtime_object_detection_xavier
|
694c91db953a2e18440fcdcf5f38df90ba063bfc
|
[
"MIT"
] | 102
|
2018-04-11T09:32:16.000Z
|
2021-11-04T16:52:09.000Z
|
lib/mpvariable.py
|
chuanglaipv/realtime_object_detection_xavier
|
694c91db953a2e18440fcdcf5f38df90ba063bfc
|
[
"MIT"
] | 24
|
2018-08-08T11:42:27.000Z
|
2021-01-15T16:09:34.000Z
|
lib/mpvariable.py
|
chuanglaipv/realtime_object_detection_xavier
|
694c91db953a2e18440fcdcf5f38df90ba063bfc
|
[
"MIT"
] | 39
|
2018-05-23T08:34:00.000Z
|
2020-04-07T12:01:57.000Z
|
import multiprocessing
import ctypes
class MPVariable():
"""
    SHARED VARIABLES IN MULTIPROCESSING
"""
running = multiprocessing.Value(ctypes.c_bool,True)
frame_counter = multiprocessing.Value(ctypes.c_int,0)
fps = multiprocessing.Value(ctypes.c_float,0.0)
fps_frames = multiprocessing.Value(ctypes.c_int,0)
fps_seconds = multiprocessing.Value(ctypes.c_float,0.0) # FPS ave in 5sec (fps_interval)
fps_snapshot = multiprocessing.Value(ctypes.c_float,0.0) # FPS ave in 0.2sec
cap_proc_time = multiprocessing.Value(ctypes.c_float,0.0)
worker_proc_time = multiprocessing.Value(ctypes.c_float,0.0)
gpu_proc_time = multiprocessing.Value(ctypes.c_float,0.0)
cpu_proc_time = multiprocessing.Value(ctypes.c_float,0.0)
vis_proc_time = multiprocessing.Value(ctypes.c_float,0.0)
lost_proc_time = multiprocessing.Value(ctypes.c_float,0.0)
total_proc_time = multiprocessing.Value(ctypes.c_float,0.0)
first_complete_time = multiprocessing.Value(ctypes.c_float,0.0)
sleep_interval = multiprocessing.Value(ctypes.c_float,0.005)
vis_frame_counter = multiprocessing.Value(ctypes.c_int,0)
vis_fps = multiprocessing.Value(ctypes.c_float,0.0)
vis_fps_frames = multiprocessing.Value(ctypes.c_int,0)
vis_fps_seconds = multiprocessing.Value(ctypes.c_float,0.0) # FPS ave in 5sec (fps_interval)
send_proc_time = multiprocessing.Value(ctypes.c_float,0.0)
vis_drop_frames = multiprocessing.Value(ctypes.c_int,0)
vis_skip_rate = multiprocessing.Value(ctypes.c_float,0.0)
"""
MULTI-PROCESSING PIPE
"""
vis_in_con, det_out_con = multiprocessing.Pipe(duplex=False)
    def __del__(self):
        # Close both ends of the shared pipe on teardown.
        self.det_out_con.close()
        self.vis_in_con.close()
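# Illustrative usage sketch, added for clarity; not part of the original module.
# It shows the intended read/write pattern for the class-level shared values from
# a cooperating process; `example_worker` is a hypothetical function name.
def example_worker():
    while MPVariable.running.value:
        MPVariable.frame_counter.value += 1
        if MPVariable.frame_counter.value >= 100:
            MPVariable.running.value = False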
| 43.225
| 96
| 0.748988
|
7cea8f3abfc8c5518d16914018b2328570c1ef47
| 5,110
|
py
|
Python
|
api-dev/venv/lib/python3.6/site-packages/ethpm/_utils/deployments.py
|
twaddle-dev/CoVid-19
|
37b1ca12d7ca1a066ab1c0d4a02d9b038d44e62f
|
[
"Apache-2.0"
] | 1
|
2021-05-15T12:00:27.000Z
|
2021-05-15T12:00:27.000Z
|
api-dev/venv/lib/python3.6/site-packages/ethpm/_utils/deployments.py
|
twaddle-dev/CoVid-19
|
37b1ca12d7ca1a066ab1c0d4a02d9b038d44e62f
|
[
"Apache-2.0"
] | null | null | null |
api-dev/venv/lib/python3.6/site-packages/ethpm/_utils/deployments.py
|
twaddle-dev/CoVid-19
|
37b1ca12d7ca1a066ab1c0d4a02d9b038d44e62f
|
[
"Apache-2.0"
] | null | null | null |
from typing import (
Any,
Dict,
Generator,
List,
Tuple,
)
from eth_utils import (
is_same_address,
to_bytes,
to_tuple,
)
from eth_utils.toolz import (
get_in,
)
from hexbytes import (
HexBytes,
)
from ethpm.exceptions import (
BytecodeLinkingError,
EthPMValidationError,
)
from web3 import Web3
def get_linked_deployments(deployments: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns all deployments found in a chain URI's deployment data that contain link dependencies.
"""
linked_deployments = {
dep: data
for dep, data in deployments.items()
if get_in(("runtime_bytecode", "link_dependencies"), data)
}
for deployment, data in linked_deployments.items():
if any(
link_dep["value"] == deployment
for link_dep in data["runtime_bytecode"]["link_dependencies"]
):
raise BytecodeLinkingError(
f"Link dependency found in {deployment} deployment that references its "
"own contract instance, which is disallowed"
)
return linked_deployments
def validate_linked_references(
link_deps: Tuple[Tuple[int, bytes], ...], bytecode: HexBytes
) -> None:
"""
Validates that normalized linked_references (offset, expected_bytes)
match the corresponding bytecode.
"""
offsets, values = zip(*link_deps)
for idx, offset in enumerate(offsets):
value = values[idx]
# https://github.com/python/mypy/issues/4975
offset_value = int(offset)
dep_length = len(value)
end_of_bytes = offset_value + dep_length
# Ignore b/c whitespace around ':' conflict b/w black & flake8
actual_bytes = bytecode[offset_value:end_of_bytes] # noqa: E203
if actual_bytes != values[idx]:
raise EthPMValidationError(
"Error validating linked reference. "
f"Offset: {offset} "
f"Value: {values[idx]} "
f"Bytecode: {bytecode} ."
)
@to_tuple
def normalize_linked_references(
data: List[Dict[str, Any]]
) -> Generator[Tuple[int, str, str], None, None]:
"""
Return a tuple of information representing all insertions of a linked reference.
(offset, type, value)
"""
for deployment in data:
for offset in deployment["offsets"]:
yield offset, deployment["type"], deployment["value"]
def validate_deployments_tx_receipt(
deployments: Dict[str, Any], w3: Web3, allow_missing_data: bool = False
) -> None:
"""
Validate that address and block hash found in deployment data match what is found on-chain.
:allow_missing_data: by default, enforces validation of address and blockHash.
"""
# todo: provide hook to lazily look up tx receipt via binary search if missing data
for name, data in deployments.items():
if "transaction" in data:
tx_hash = data["transaction"]
tx_receipt = w3.eth.getTransactionReceipt(tx_hash)
# tx_address will be None if contract created via contract factory
tx_address = tx_receipt["contractAddress"]
if tx_address is None and allow_missing_data is False:
raise EthPMValidationError(
"No contract address found in tx receipt. Unable to verify "
"address found in tx receipt matches address in manifest's deployment data. "
"If this validation is not necessary, please enable `allow_missing_data` arg. "
)
if tx_address is not None and not is_same_address(
tx_address, data["address"]
):
raise EthPMValidationError(
f"Error validating tx_receipt for {name} deployment. "
f"Address found in manifest's deployment data: {data['address']} "
f"Does not match address found on tx_receipt: {tx_address}."
)
if "block" in data:
if tx_receipt["blockHash"] != to_bytes(hexstr=data["block"]):
raise EthPMValidationError(
f"Error validating tx_receipt for {name} deployment. "
f"Block found in manifest's deployment data: {data['block']} does not "
f"Does not match block found on tx_receipt: {tx_receipt['blockHash']}."
)
elif allow_missing_data is False:
raise EthPMValidationError(
"No block hash found in deployment data. "
"Unable to verify block hash on tx receipt. "
"If this validation is not necessary, please enable `allow_missing_data` arg."
)
elif allow_missing_data is False:
raise EthPMValidationError(
"No transaction hash found in deployment data. "
"Unable to validate tx_receipt. "
"If this validation is not necessary, please enable `allow_missing_data` arg."
)
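# Illustrative usage sketch, added for clarity; not part of the original module.
# The link-reference data below is hypothetical and only illustrates the shape
# consumed by normalize_linked_references.
if __name__ == "__main__":
    example_link_refs = [
        {"offsets": [100, 200], "type": "reference", "value": "SafeMathLib"},
    ]
    # Expected: ((100, 'reference', 'SafeMathLib'), (200, 'reference', 'SafeMathLib'))
    print(normalize_linked_references(example_link_refs))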
| 37.29927
| 99
| 0.609002
|
7bc430ae43af9e038362d1e563a67ef9530d7b0d
| 1,291
|
py
|
Python
|
client/paddleflow/common/exception/paddleflow_sdk_exception.py
|
qiaoshuangshuang/PaddleFlow
|
15155a553587f18b09d4edfc604cb64a1fbe01a7
|
[
"Apache-2.0"
] | 23
|
2021-12-31T02:36:13.000Z
|
2022-03-29T07:38:43.000Z
|
client/paddleflow/common/exception/paddleflow_sdk_exception.py
|
qiaoshuangshuang/PaddleFlow
|
15155a553587f18b09d4edfc604cb64a1fbe01a7
|
[
"Apache-2.0"
] | 10
|
2022-01-05T08:47:37.000Z
|
2022-03-29T11:59:43.000Z
|
client/paddleflow/common/exception/paddleflow_sdk_exception.py
|
qiaoshuangshuang/PaddleFlow
|
15155a553587f18b09d4edfc604cb64a1fbe01a7
|
[
"Apache-2.0"
] | 19
|
2021-12-30T14:37:24.000Z
|
2022-03-23T03:30:32.000Z
|
"""
Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding:utf8 -*-
import sys
class PaddleFlowSDKException(Exception):
"""paddleflowapi sdk 异常类"""
def __init__(self, code=None, message=None, requestId=None):
self.code = code
self.message = message
self.requestId = requestId
def __str__(self):
s = "[PaddleFlowSDKException] code:%s message:%s requestId:%s" % (
self.code, self.message, self.requestId)
return s
def get_code(self):
"""get code"""
return self.code
def get_message(self):
"""get message"""
return self.message
def get_request_id(self):
"""get request_id"""
return self.requestId
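# Illustrative usage sketch, added for clarity; not part of the original module.
# The code, message and requestId values below are hypothetical.
if __name__ == "__main__":
    try:
        raise PaddleFlowSDKException(code="InvalidArgument",
                                     message="pipeline name is required",
                                     requestId="req-123")
    except PaddleFlowSDKException as e:
        print(str(e))        # [PaddleFlowSDKException] code:... message:... requestId:...
        print(e.get_code())  # InvalidArgument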
| 29.340909
| 74
| 0.687064
|
f93c32b9c9f39627ec16f71bb3ca94f9a868b7f3
| 3,041
|
py
|
Python
|
venv/Lib/site-packages/metapensiero/pj/js_ast/expressions.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
venv/Lib/site-packages/metapensiero/pj/js_ast/expressions.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
venv/Lib/site-packages/metapensiero/pj/js_ast/expressions.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# :Project: metapensiero.pj -- expressions
# :Created: gio 08 feb 2018 02:46:38 CET
# :Author: Alberto Berti <alberto@metapensiero.it>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2018 Alberto Berti
#
import re
from .base import JSNode
from .operators import JSLeftSideUnaryOp
from ..processor.util import delimited, delimited_multi_line
from .util import _check_keywords
class JSExpression(JSNode):
def emit(self, expr):
yield self.part('(', expr, ')')
class JSAssignmentExpression(JSNode):
def emit(self, left, right):
yield self.part(left, ' = ', right)
class JSIfExp(JSNode):
def emit(self, test, body, orelse):
yield self.part('(', test, ' ? ', body, ' : ', orelse, ')')
class JSCall(JSNode):
operator = ''
def emit(self, func, args, kwargs=None, operator=None):
operator = operator or self.operator
kwargs = kwargs or []
arr = [operator, func, '(']
fargs = args.copy()
if kwargs:
fargs.append(kwargs)
delimited(', ', fargs, dest=arr)
arr.append(')')
yield self.part(*arr)
class JSNewCall(JSCall):
operator = 'new '
class JSAttribute(JSNode):
def emit(self, obj, s):
assert re.search(r'^[a-zA-Z$_][a-zA-Z$_0-9]*$', s)
_check_keywords(self, s)
yield self.part(obj, '.', s, name=True)
class JSSubscript(JSNode):
def emit(self, obj, key):
yield self.part(self.part(obj, name=True), '[',
self.part(key, name=True), ']')
class JSKeySubscript(JSNode):
def emit(self, key):
yield self.part('[', self.part(key), ']')
class JSBinOp(JSNode):
def emit(self, left, op, right):
yield self.part('(', left, ' ', op, ' ', right, ')')
class JSMultipleArgsOp(JSNode):
def emit(self, binop, conj, *args):
assert len(args) > 1
parts = []
for ix, arg in enumerate(args):
if isinstance(binop, (tuple, list)):
op = binop[ix]
else:
op = binop
if ix > 0:
parts += [' ', conj, ' ']
parts += ['(', arg[0], ' ', op, ' ', arg[1], ')']
yield self.part('(', *parts, ')')
class JSUnaryOp(JSNode):
def emit(self, op, right):
assert isinstance(op, JSLeftSideUnaryOp)
yield self.part('(', op, ' ', right, ')')
class JSName(JSNode):
def emit(self, name):
_check_keywords(self, name)
yield self.part(name, name=True)
class JSTaggedTemplate(JSNode):
def emit(self, value, func):
text = list(delimited_multi_line(self, value, '`'))
func = list(func.serialize())
yield self.part(*func, *text)
class JSTemplateLiteral(JSNode):
def emit(self, value):
yield from delimited_multi_line(self, value, '`')
class JSSuper(JSNode):
def emit(self):
yield self.part('super')
class JSThis(JSNode):
def emit(self):
yield self.part('this')
| 24.134921
| 67
| 0.575469
|
1eec338d37ad13ab8ee997842984844664473e20
| 5,698
|
py
|
Python
|
Deep Learning nano degree/sagemaker-deployment/train.py
|
farabi1038/Online-Course-Project
|
9471a6ed616a08cbacbf8db402cedbe465fe261f
|
[
"MIT"
] | 2
|
2020-08-07T07:59:53.000Z
|
2021-05-09T18:11:06.000Z
|
Deep Learning nano degree/sagemaker-deployment/train.py
|
farabi1038/Online-Course-Project
|
9471a6ed616a08cbacbf8db402cedbe465fe261f
|
[
"MIT"
] | null | null | null |
Deep Learning nano degree/sagemaker-deployment/train.py
|
farabi1038/Online-Course-Project
|
9471a6ed616a08cbacbf8db402cedbe465fe261f
|
[
"MIT"
] | null | null | null |
import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
# Load the stored model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# Load the saved word_dict.
word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
with open(word_dict_path, 'rb') as f:
model.word_dict = pickle.load(f)
model.to(device).eval()
print("Done loading model.")
return model
def _get_train_data_loader(batch_size, training_dir):
print("Get train data loader.")
train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)
train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()
train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()
train_ds = torch.utils.data.TensorDataset(train_X, train_y)
return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)
def train(model, train_loader, epochs, optimizer, loss_fn, device):
for epoch in range(1, epochs + 1):
model.train()
total_loss = 0
for batch in train_loader:
batch_X, batch_y = batch
batch_X = batch_X.to(device)
batch_y = batch_y.to(device)
# TODO: Complete this train method to train the model provided.
optimizer.zero_grad()
val = model.forward(batch_X)
loss_single = loss_fn(val, batch_y)
loss_single.backward()
optimizer.step()
            total_loss += loss_single.data.item()
        print("Epoch: {}, BCELoss: {}".format(epoch, total_loss / len(train_loader)))
if __name__ == '__main__':
# All of the model parameters and training parameters are sent as arguments when the script
# is executed. Here we set up an argument parser to easily access the parameters.
parser = argparse.ArgumentParser()
# Training Parameters
parser.add_argument('--batch-size', type=int, default=512, metavar='N',
help='input batch size for training (default: 512)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# Model Parameters
parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',
help='size of the word embeddings (default: 32)')
parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',
help='size of the hidden dimension (default: 100)')
parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',
help='size of the vocabulary (default: 5000)')
# SageMaker Parameters
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device {}.".format(device))
torch.manual_seed(args.seed)
# Load the training data.
train_loader = _get_train_data_loader(args.batch_size, args.data_dir)
# Build the model.
model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)
with open(os.path.join(args.data_dir, "word_dict.pkl"), "rb") as f:
model.word_dict = pickle.load(f)
print("Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.".format(
args.embedding_dim, args.hidden_dim, args.vocab_size
))
# Train the model.
optimizer = optim.Adam(model.parameters())
loss_fn = torch.nn.BCELoss()
train(model, train_loader, args.epochs, optimizer, loss_fn, device)
# Save the parameters used to construct the model
model_info_path = os.path.join(args.model_dir, 'model_info.pth')
with open(model_info_path, 'wb') as f:
model_info = {
'embedding_dim': args.embedding_dim,
'hidden_dim': args.hidden_dim,
'vocab_size': args.vocab_size,
}
torch.save(model_info, f)
# Save the word_dict
word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')
with open(word_dict_path, 'wb') as f:
pickle.dump(model.word_dict, f)
# Save the model parameters
model_path = os.path.join(args.model_dir, 'model.pth')
with open(model_path, 'wb') as f:
torch.save(model.cpu().state_dict(), f)
| 37.486842
| 107
| 0.660407
|
d84fb8cd889d52fc36c2d4abd94bac62e69f2a85
| 7,003
|
py
|
Python
|
Models/regressionTemplateTF/model.py
|
UTS-AnimalLogicAcademy/nuke-ML-server
|
3bec5e9efc1f3101e7506401eb57e7b8c955f84c
|
[
"Apache-2.0"
] | 123
|
2019-05-14T19:50:42.000Z
|
2022-03-21T11:32:30.000Z
|
Models/regressionTemplateTF/model.py
|
UTS-AnimalLogicAcademy/nuke-ML-server
|
3bec5e9efc1f3101e7506401eb57e7b8c955f84c
|
[
"Apache-2.0"
] | 30
|
2019-05-23T18:48:29.000Z
|
2021-06-26T01:17:13.000Z
|
Models/regressionTemplateTF/model.py
|
UTS-AnimalLogicAcademy/nuke-ML-server
|
3bec5e9efc1f3101e7506401eb57e7b8c955f84c
|
[
"Apache-2.0"
] | 34
|
2019-05-14T17:43:26.000Z
|
2021-11-10T23:53:02.000Z
|
# Copyright (c) 2020 Foundry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import print_function
import sys
import os
import time
import scipy.misc
import numpy as np
import cv2
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # For TF 2.x compatibility
from models.baseModel import BaseModel
from models.common.model_builder import baseline_model
from models.common.util import print_, get_ckpt_list, linear_to_srgb, srgb_to_linear
import message_pb2
class Model(BaseModel):
"""Load your trained model and do inference in Nuke"""
def __init__(self):
super(Model, self).__init__()
self.name = 'Regression Template TF'
self.n_levels = 3
self.scale = 0.5
dir_path = os.path.dirname(os.path.realpath(__file__))
self.checkpoints_dir = os.path.join(dir_path, 'checkpoints')
self.patch_size = 50
self.output_param_number = 1
# Initialise checkpoint name to the latest checkpoint
ckpt_names = get_ckpt_list(self.checkpoints_dir)
if not ckpt_names: # empty list
self.checkpoint_name = ''
else:
latest_ckpt = tf.compat.v1.train.latest_checkpoint(self.checkpoints_dir)
if latest_ckpt is not None:
self.checkpoint_name = latest_ckpt.split('/')[-1]
else:
self.checkpoint_name = ckpt_names[-1]
self.prev_ckpt_name = self.checkpoint_name
# Silence TF log when creating tf.Session()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Define options
self.gamma_to_predict = 1.0
self.predict = False
self.options = ('checkpoint_name', 'gamma_to_predict',)
self.buttons = ('predict',)
# Define inputs/outputs
self.inputs = {'input': 3}
self.outputs = {'output': 3}
def load(self, model):
# Check if empty or invalid checkpoint name
if self.checkpoint_name=='':
ckpt_names = get_ckpt_list(self.checkpoints_dir)
if not ckpt_names:
raise ValueError("No checkpoints found in {}".format(self.checkpoints_dir))
else:
raise ValueError("Empty checkpoint name, try an available checkpoint in {} (ex: {})"
.format(self.checkpoints_dir, ckpt_names[-1]))
print_("Loading trained model checkpoint...\n", 'm')
# Load from given checkpoint file name
self.saver.restore(self.sess, os.path.join(self.checkpoints_dir, self.checkpoint_name))
print_("...Checkpoint {} loaded\n".format(self.checkpoint_name), 'm')
def inference(self, image_list):
"""Do an inference on the model with a set of inputs.
# Arguments:
image_list: The input image list
Return the result of the inference.
"""
image = image_list[0]
image = linear_to_srgb(image).copy()
if not hasattr(self, 'sess'):
# Initialise tensorflow graph
tf.compat.v1.reset_default_graph()
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth=True
self.sess=tf.compat.v1.Session(config=config)
# Input is stacked histograms of original and gamma-graded images.
input_shape = [1, 2, 100]
# Initialise input placeholder size
self.input = tf.compat.v1.placeholder(tf.float32, shape=input_shape)
self.model = baseline_model(
input_shape=input_shape[1:],
output_param_number=self.output_param_number)
self.infer_op = self.model(self.input)
# Load latest model checkpoint
self.saver = tf.compat.v1.train.Saver()
self.load(self.model)
self.prev_ckpt_name = self.checkpoint_name
# If checkpoint name has changed, load new checkpoint
if self.prev_ckpt_name != self.checkpoint_name or self.checkpoint_name == '':
self.load(self.model)
# If checkpoint correctly loaded, update previous checkpoint name
self.prev_ckpt_name = self.checkpoint_name
# Preprocess image same way we preprocessed it for training
# Here for gamma correction compute histograms
def histogram(x, value_range=[0.0, 1.0], nbins=100):
"""Return histogram of tensor x"""
h, w, c = x.shape
hist = tf.histogram_fixed_width(x, value_range, nbins=nbins)
hist = tf.divide(hist, h * w * c)
return hist
with tf.compat.v1.Session() as sess:
# Convert to grayscale
img_gray = tf.image.rgb_to_grayscale(image)
img_gray = tf.image.resize(img_gray, [self.patch_size, self.patch_size])
# Apply gamma correction
img_gray_grade = tf.math.pow(img_gray, self.gamma_to_predict)
img_grade = tf.math.pow(image, self.gamma_to_predict)
# Compute histograms
img_hist = histogram(img_gray)
img_grade_hist = histogram(img_gray_grade)
hists_op = tf.stack([img_hist, img_grade_hist], axis=0)
hists, img_grade = sess.run([hists_op, img_grade])
res_img = srgb_to_linear(img_grade)
hists_batch = np.expand_dims(hists, 0)
start = time.time()
# Run model inference
inference = self.sess.run(self.infer_op, feed_dict={self.input: hists_batch})
duration = time.time() - start
print('Inference duration: {:4.3f}s'.format(duration))
res = inference[-1]
print("Predicted gamma: {}".format(res))
# If predict button is pressed in Nuke
if self.predict:
script_msg = message_pb2.FieldValuePairAttrib()
script_msg.name = "PythonScript"
# Create a Python script message to run in Nuke
python_script = self.nuke_script(res)
script_msg_val = script_msg.values.add()
script_msg_str = script_msg_val.string_attributes.add()
script_msg_str.values.extend([python_script])
return [res_img, script_msg]
return [res_img]
def nuke_script(self, res):
"""Return the Python script function to create a pop up window in Nuke."""
popup_msg = "Predicted gamma: {}".format(res)
script = "nuke.message('{}')\n".format(popup_msg)
return script
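# Illustrative sketch, added for clarity; not part of the original module. It
# restates the normalised-histogram preprocessing used above with plain NumPy:
# 100 bins over [0, 1], divided by the number of values so the bins sum to 1.
def example_normalised_histogram(img, nbins=100):
    hist, _ = np.histogram(img, bins=nbins, range=(0.0, 1.0))
    return hist.astype(np.float32) / img.size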
| 41.194118
| 100
| 0.630872
|