| hexsha (string, 40) | size (int64, 4-1.02M) | ext (8 classes) | lang (1 class) | max_stars_repo_path (string, 4-209) | max_stars_repo_name (string, 5-121) | max_stars_repo_head_hexsha (string, 40) | max_stars_repo_licenses (list, 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 4-209) | max_issues_repo_name (string, 5-121) | max_issues_repo_head_hexsha (string, 40) | max_issues_repo_licenses (list, 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 4-209) | max_forks_repo_name (string, 5-121) | max_forks_repo_head_hexsha (string, 40) | max_forks_repo_licenses (list, 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 4-1.02M) | avg_line_length (float64, 1.07-66.1k) | max_line_length (int64, 4-266k) | alphanum_fraction (float64, 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
61d7fa6f2eaaf84d4581ee8514d08b4d4a714e47 | 1,099 | py | Python | text_to_speech/configs.py | PyText2Speech/PyText2Speech | 485e30569c7c410527dfdec86ab5c391dfb04c96 | ["MIT"] | 6 | 2016-06-13T02:46:43.000Z | 2017-02-02T17:18:39.000Z | text_to_speech/configs.py | PyText2Speech/PyText2Speech | 485e30569c7c410527dfdec86ab5c391dfb04c96 | ["MIT"] | 11 | 2016-06-11T02:34:00.000Z | 2016-06-11T11:56:26.000Z | text_to_speech/configs.py | PyText2Speech/PyText2Speech | 485e30569c7c410527dfdec86ab5c391dfb04c96 | ["MIT"] | 7 | 2016-06-11T03:20:52.000Z | 2017-02-02T09:20:09.000Z |
# -*- encoding=utf8 -*-
import json
import os

# Per-user credentials file for the supported TTS back ends.
file = os.path.expanduser('~/pytext2speech.json')
ck_key = ['BAIDU', 'ITRI', 'WATSON']
try:
    with open(file, 'r') as fp:
        server = json.load(fp)
    for k, v in server.items():
        if k in ck_key:
            if v['name'] == '' or v['pwd'] == '':
                raise ValueError(str(k), u'JSON setting is empty, '
                                 'name = ' + str(v['name']) + ', pwd = ' +
                                 str(v['pwd']))
except ValueError as e:
    # The credentials file exists but an entry is incomplete.
    raise Exception(e)
except Exception as e:
    # The credentials file is missing or unreadable: write a default template.
    server = {
        'BAIDU': {'name': '', 'pwd': ''},
        'GOOGLE': {'name': '', 'pwd': ''},
        'ITRI': {'name': '', 'pwd': ''},
        'WATSON': {'name': '', 'pwd': ''}
    }
    with open(file, 'w') as fp:
        json.dump(server, fp)
    msg = "pytext2speech.json does not exist at %s. " \
          "A default JSON file has been created; please fill in the " \
          "credentials." % os.path.expanduser('~/')
    raise Exception(msg, e)
| 33.30303 | 72 | 0.458599 |
d4964c1a319455f31e124b7d2f6bcb10f620b306 | 1,286 | py | Python | Tests.py | WeazelDev/ML_Project | 8963f15712bd0aa4fbafa96e6dafd0a59d616a53 | ["MIT"] | null | null | null | Tests.py | WeazelDev/ML_Project | 8963f15712bd0aa4fbafa96e6dafd0a59d616a53 | ["MIT"] | null | null | null | Tests.py | WeazelDev/ML_Project | 8963f15712bd0aa4fbafa96e6dafd0a59d616a53 | ["MIT"] | null | null | null |
import csv
import numpy as np
import matplotlib.pyplot as plt

anime_data = []
with open("Data/AnimeList.csv", "r", encoding="utf8") as csv_data:
    csv_reader = csv.reader(csv_data, delimiter=',')
    firstLine = True
    for row in csv_reader:
        if firstLine:
            firstLine = False
        else:
            anime_data.append(row)

anime_meta = [[int(a[0]), float(a[15]), int(a[16]), int(a[19])] for a in anime_data]
anime_members_ranked = sorted(anime_meta, key=lambda m: m[3], reverse=True)
anime_scoredby_ranked = sorted(anime_meta, key=lambda m: m[2], reverse=True)

scoredby = [m[2] for m in anime_meta]
print(max(scoredby), min(scoredby), np.mean(np.array(scoredby)), np.median(np.array(scoredby)))

# plt.subplot(1, 2, 1)
# plt.plot([m[3] for m in anime_members_ranked])
# plt.ylabel('people watching')
# plt.xlabel('ranked items')
# plt.title("Pareto's curve of items watchings")
# plt.grid(True)
# plt.subplot(1, 2, 2)
plt.plot([m[2] for m in anime_scoredby_ranked])
plt.ylabel('nratings')
plt.xlabel('ranked items')
plt.title("Pareto's curve of items ratings")
plt.grid(True)
plt.show()
# plt.scatter([m[3] for m in anime_meta], [m[2] for m in anime_meta])
# plt.ylabel('ratings')
# plt.xlabel('people watching')
# plt.title('Items ratings vs. items watching')
# plt.grid(True)
# plt.show()
| 25.72 | 95 | 0.702955 |
6684a4321934ad8e78608fbb3cf81173e7091f03 | 3,872 | py | Python | tests/smoke/test_international.py | mayank-sfdc/directory-tests | 6e978bc1a27c19389e99e454143122aa27e47b85 | ["MIT"] | 4 | 2017-06-02T09:09:10.000Z | 2018-01-25T19:06:12.000Z | tests/smoke/test_international.py | mayank-sfdc/directory-tests | 6e978bc1a27c19389e99e454143122aa27e47b85 | ["MIT"] | 53 | 2016-10-27T22:31:03.000Z | 2022-03-07T11:18:25.000Z | tests/smoke/test_international.py | mayank-sfdc/directory-tests | 6e978bc1a27c19389e99e454143122aa27e47b85 | ["MIT"] | 3 | 2017-11-22T11:42:40.000Z | 2022-02-21T01:20:04.000Z |
# -*- coding: utf-8 -*-
import pytest
from rest_framework.status import HTTP_200_OK
import allure
from directory_tests_shared import URLs
from tests.smoke.cms_api_helpers import get_and_assert
pytestmark = [allure.suite("International site"), allure.feature("International site")]
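# Smoke checks: each page URL below must respond with HTTP 200 (redirects allowed) when fetched with basic auth.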
@pytest.mark.dev
@pytest.mark.parametrize(
"url",
[
URLs.INTERNATIONAL_REGIONS_MIDLANDS.absolute,
URLs.INTERNATIONAL_REGIONS_NORTH_ENGLAND.absolute,
URLs.INTERNATIONAL_REGIONS_NORTHERN_IRELAND.absolute,
URLs.INTERNATIONAL_REGIONS_SOUTH_ENGLAND.absolute,
URLs.INTERNATIONAL_REGIONS_WALES.absolute,
],
)
def test_region_pages(url, basic_auth):
    get_and_assert(
        url=url, status_code=HTTP_200_OK, auth=basic_auth, allow_redirects=True
    )
@pytest.mark.dev
@pytest.mark.parametrize(
"url",
[
URLs.INTERNATIONAL_INDUSTRY_AEROSPACE.absolute,
URLs.INTERNATIONAL_INDUSTRY_AUTOMOTIVE.absolute,
URLs.INTERNATIONAL_INDUSTRY_CREATIVE_INDUSTRIES.absolute,
URLs.INTERNATIONAL_INDUSTRY_EDUCATION.absolute,
URLs.INTERNATIONAL_INDUSTRY_ENERGY.absolute,
URLs.INTERNATIONAL_INDUSTRY_ENGINEERING_AND_MANUFACTURING.absolute,
URLs.INTERNATIONAL_INDUSTRY_FINANCIAL_SERVICES.absolute,
URLs.INTERNATIONAL_INDUSTRY_HEALTH_AND_LIFE_SCIENCES.absolute,
URLs.INTERNATIONAL_INDUSTRY_LEGAL_SERVICES.absolute,
URLs.INTERNATIONAL_INDUSTRY_REAL_ESTATE.absolute,
URLs.INTERNATIONAL_INDUSTRY_SPACE.absolute,
URLs.INTERNATIONAL_INDUSTRY_TECHNOLOGY.absolute,
],
)
def test_industry_pages_dev(url, basic_auth):
    get_and_assert(
        url=url, status_code=HTTP_200_OK, auth=basic_auth, allow_redirects=True
    )
@pytest.mark.stage
@pytest.mark.parametrize(
"url",
[
URLs.INTERNATIONAL_INDUSTRY_CREATIVE_INDUSTRIES.absolute,
URLs.INTERNATIONAL_INDUSTRY_ENERGY.absolute,
URLs.INTERNATIONAL_INDUSTRY_ENGINEERING_AND_MANUFACTURING.absolute,
URLs.INTERNATIONAL_INDUSTRY_FINANCIAL_AND_PROFESSIONAL_SERVICES.absolute,
URLs.INTERNATIONAL_INDUSTRY_FINANCIAL_SERVICES.absolute,
URLs.INTERNATIONAL_INDUSTRY_HEALTH_AND_LIFE_SCIENCES.absolute,
URLs.INTERNATIONAL_INDUSTRY_LEGAL_SERVICES.absolute,
URLs.INTERNATIONAL_INDUSTRY_REAL_ESTATE.absolute,
URLs.INTERNATIONAL_INDUSTRY_TECHNOLOGY.absolute,
],
)
def test_industry_pages_stage(url, basic_auth):
    get_and_assert(
        url=url, status_code=HTTP_200_OK, auth=basic_auth, allow_redirects=True
    )
@pytest.mark.uat
@pytest.mark.parametrize(
"url",
[
URLs.INTERNATIONAL_INDUSTRY_AEROSPACE.absolute,
URLs.INTERNATIONAL_INDUSTRY_AGRICULTURAL_TECHNOLOGY.absolute,
URLs.INTERNATIONAL_INDUSTRY_AUTOMOTIVE.absolute,
URLs.INTERNATIONAL_INDUSTRY_CREATIVE_INDUSTRIES.absolute,
URLs.INTERNATIONAL_INDUSTRY_CYBER_SECURITY.absolute,
URLs.INTERNATIONAL_INDUSTRY_EDUCATION.absolute,
URLs.INTERNATIONAL_INDUSTRY_ENGINEERING_AND_MANUFACTURING.absolute,
URLs.INTERNATIONAL_INDUSTRY_FINANCIAL_SERVICES.absolute,
URLs.INTERNATIONAL_INDUSTRY_FOOD_AND_DRINK.absolute,
URLs.INTERNATIONAL_INDUSTRY_HEALTH_AND_LIFE_SCIENCES.absolute,
URLs.INTERNATIONAL_INDUSTRY_LEGAL_SERVICES.absolute,
URLs.INTERNATIONAL_INDUSTRY_MARITIME.absolute,
URLs.INTERNATIONAL_INDUSTRY_NUCLEAR_ENERGY.absolute,
URLs.INTERNATIONAL_INDUSTRY_OIL_AND_GAS.absolute,
URLs.INTERNATIONAL_INDUSTRY_RETAIL.absolute,
URLs.INTERNATIONAL_INDUSTRY_SPACE.absolute,
URLs.INTERNATIONAL_INDUSTRY_SPORTS_ECONOMY.absolute,
URLs.INTERNATIONAL_INDUSTRY_TECHNOLOGY.absolute,
],
)
def test_industry_pages_uat(url, basic_auth):
    get_and_assert(
        url=url, status_code=HTTP_200_OK, auth=basic_auth, allow_redirects=True
    )
| 37.960784 | 87 | 0.775052 |
821eb222d15ef209f697df9d68f31ee999b55772 | 7,223 | py | Python | projects/landmarks_regression/train.py | ricklentz/deep-object-reid | bf4d30d78e4a34847496d0efb50d98541f5274f9 | ["MIT"] | null | null | null | projects/landmarks_regression/train.py | ricklentz/deep-object-reid | bf4d30d78e4a34847496d0efb50d98541f5274f9 | ["MIT"] | null | null | null | projects/landmarks_regression/train.py | ricklentz/deep-object-reid | bf4d30d78e4a34847496d0efb50d98541f5274f9 | ["MIT"] | null | null | null |
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import datetime
import os.path as osp
import glog as log
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from datasets import NDG, CelebA, VGGFace2
from evaluate import evaluate
from landnet import LandmarksNet
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from utils import landmarks_augmentation
from utils.alignment_losses import AlignmentLoss
from utils.utils import load_model_state, save_model_cpu
def train(args):
"""Launches training of landmark regression model"""
if args.dataset == 'vgg':
drops_schedule = [1, 6, 9, 13]
dataset = VGGFace2(args.train, args.t_list, args.t_land, landmarks_training=True)
    elif args.dataset == 'celeb':
drops_schedule = [10, 20]
dataset = CelebA(args.train, args.t_land)
else:
drops_schedule = [90, 140, 200]
dataset = NDG(args.train, args.t_land)
if dataset.have_landmarks:
log.info('Use alignment for the train data')
dataset.transform = transforms.Compose([landmarks_augmentation.Rescale((56, 56)),
landmarks_augmentation.Blur(k=3, p=.2),
landmarks_augmentation.HorizontalFlip(p=.5),
landmarks_augmentation.RandomRotate(50),
landmarks_augmentation.RandomScale(.8, .9, p=.4),
landmarks_augmentation.RandomCrop(48),
landmarks_augmentation.ToTensor(switch_rb=True)])
else:
log.info('Error: training dataset has no landmarks data')
exit()
train_loader = DataLoader(dataset, batch_size=args.train_batch_size, num_workers=4, shuffle=True)
writer = SummaryWriter('./logs_landm/{:%Y_%m_%d_%H_%M}_'.format(datetime.datetime.now()) + args.snap_prefix)
model = LandmarksNet()
set_dropout_fn = model.set_dropout_ratio
if args.snap_to_resume is not None:
log.info('Resuming snapshot ' + args.snap_to_resume + ' ...')
model = load_model_state(model, args.snap_to_resume, args.device, eval_state=False)
model = torch.nn.DataParallel(model, device_ids=[args.device])
else:
model = torch.nn.DataParallel(model, device_ids=[args.device])
model.cuda()
model.train()
cudnn.enabled = True
cudnn.benchmark = True
log.info('Face landmarks model:')
log.info(model)
criterion = AlignmentLoss('wing')
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, drops_schedule)
log.info('Epoch length: %d' % len(train_loader))
for epoch_num in range(args.epoch_total_num):
log.info('Epoch: %d' % epoch_num)
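        # After the first five epochs, disable dropout by setting the dropout ratio to zero.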
if epoch_num > 5:
set_dropout_fn(0.)
for i, data in enumerate(train_loader, 0):
iteration = epoch_num * len(train_loader) + i
if iteration % args.val_step == 0 and iteration > 0:
snapshot_name = osp.join(args.snap_folder,
args.snap_prefix + '_{0}.pt'.format(iteration))
log.info('Saving Snapshot: ' + snapshot_name)
save_model_cpu(model, optimizer, snapshot_name, epoch_num)
model.eval()
log.info('Evaluating Snapshot: ' + snapshot_name)
avg_err, per_point_avg_err, failures_rate = evaluate(train_loader, model)
weights = per_point_avg_err / np.sum(per_point_avg_err)
criterion.set_weights(weights)
log.info(str(weights))
log.info('Avg train error: {}'.format(avg_err))
log.info('Train failure rate: {}'.format(failures_rate))
writer.add_scalar('Quality/Avg_error', avg_err, iteration)
writer.add_scalar('Quality/Failure_rate', failures_rate, iteration)
writer.add_scalar('Epoch', epoch_num, iteration)
model.train()
data, gt_landmarks = data['img'].cuda(), data['landmarks'].cuda()
predicted_landmarks = model(data)
optimizer.zero_grad()
loss = criterion(predicted_landmarks, gt_landmarks)
loss.backward()
optimizer.step()
if i % 10 == 0:
log.info('Iteration %d, Loss: %.4f' % (iteration, loss))
log.info('Learning rate: %f' % scheduler.get_lr()[0])
writer.add_scalar('Loss/train_loss', loss.item(), iteration)
writer.add_scalar('Learning_rate', scheduler.get_lr()[0], iteration)
scheduler.step()
def main():
"""Creates a command line parser"""
parser = argparse.ArgumentParser(description='Training Landmarks detector in PyTorch')
parser.add_argument('--train_data_root', dest='train', required=True, type=str, help='Path to train data.')
parser.add_argument('--train_list', dest='t_list', required=False, type=str, help='Path to train data image list.')
parser.add_argument('--train_landmarks', default='', dest='t_land', required=False, type=str,
help='Path to landmarks for the train images.')
parser.add_argument('--train_batch_size', type=int, default=170, help='Train batch size.')
parser.add_argument('--epoch_total_num', type=int, default=30, help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.4, help='Learning rate.')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--val_step', type=int, default=2000, help='Evaluate model each val_step during each epoch.')
parser.add_argument('--weight_decay', type=float, default=0.0001, help='Weight decay.')
parser.add_argument('--device', '-d', default=0, type=int)
parser.add_argument('--snap_folder', type=str, default='./snapshots/', help='Folder to save snapshots.')
parser.add_argument('--snap_prefix', type=str, default='LandmarksNet', help='Prefix for snapshots.')
parser.add_argument('--snap_to_resume', type=str, default=None, help='Snapshot to resume.')
parser.add_argument('--dataset', choices=['vgg', 'celeb', 'ngd'], type=str, default='vgg', help='Dataset.')
arguments = parser.parse_args()
with torch.cuda.device(arguments.device):
train(arguments)
if __name__ == '__main__':
main()
| 47.20915 | 119 | 0.648484 |
4968b859fcea225ce0816f01e6bb9e22b11d9027 | 1,415 | py | Python | utils/github_service.py | sujpac/github-read-service | b4d691df91482867d4921c0df033a61040273fef | ["MIT"] | null | null | null | utils/github_service.py | sujpac/github-read-service | b4d691df91482867d4921c0df033a61040273fef | ["MIT"] | null | null | null | utils/github_service.py | sujpac/github-read-service | b4d691df91482867d4921c0df033a61040273fef | ["MIT"] | null | null | null |
import os, requests, logging

from github import Github

from . import constants

logger = logging.getLogger(__name__)


def is_github_available():
    """Checks if the Github service is available"""
    response = proxy_request('zen')
    return response and response.status_code == 200


def get_org(org_name):
    """Gets org from Github"""
    token = os.getenv(constants.API_KEY_NAME, '...')
    ghub = Github(token)
    try:
        return ghub.get_organization(org_name)
    except BaseException as err:
        logger.error(f"Unexpected {err=}, {type(err)=}")
        return None


def get_repos(org_name, org=None):
    """Gets all repos of a given org from Github"""
    if not org:
        org = get_org(org_name)
    if not org:
        return None
    try:
        repos = org.get_repos()
    except BaseException as err:
        logger.error(f"Unexpected {err=}, {type(err)=}")
        return None
    return [repo for repo in repos]


def proxy_request(resource):
    """Proxies a GET request to the Github API"""
    token = os.getenv(constants.API_KEY_NAME, '...')
    query_url = f'https://api.github.com/{resource}'
    params = {"state": "open"}
    headers = {'Authorization': f'token {token}'}
    try:
        return requests.get(query_url, headers=headers, params=params)
    except BaseException as err:
        logger.error(f"Unexpected {err=}, {type(err)=}")
        return None
| 27.211538 | 70 | 0.640989 |
49cb8f34b2297b36dca9082d4db72f0858e6a0ce | 652 | py | Python | xendit/models/creditcard/charge/credit_card_charge_installment.py | adyaksaw/xendit-python | 47b05f2a6582104a274dc12a172c6421de86febc | ["MIT"] | 10 | 2020-10-31T23:34:34.000Z | 2022-03-08T19:08:55.000Z | xendit/models/creditcard/charge/credit_card_charge_installment.py | adyaksaw/xendit-python | 47b05f2a6582104a274dc12a172c6421de86febc | ["MIT"] | 22 | 2020-07-30T14:25:07.000Z | 2022-03-31T03:55:46.000Z | xendit/models/creditcard/charge/credit_card_charge_installment.py | adyaksaw/xendit-python | 47b05f2a6582104a274dc12a172c6421de86febc | ["MIT"] | 11 | 2020-07-28T08:09:40.000Z | 2022-03-18T00:14:02.000Z |
from xendit.models._base_model import BaseModel
from xendit.models._base_query import BaseQuery


class CreditCardChargeInstallment(BaseModel):
    """Installment class of Charge (API Reference: Credit Card)

    Optional Attributes:
      - count (int)
      - interval (str)
    """

    # Optional
    count: int
    interval: str

    class Query(BaseQuery):
        """Installment class of Charge Query (API Reference: Credit Card)
        Used for create_authorization and create_charge

        Optional Attributes:
          - count (int)
          - interval (str)
        """

        # Optional
        count: int
        interval: str
| 21.733333 | 73 | 0.631902 |
d36b6ce376e9883135c1c6733bc2690fa6c2e671 | 1,048 | py | Python | bottom-up-attention/tools/read_tsv.py | daveredrum/meshed-memory-transformer | 6dfbc2ba241b7c1c8deac6114d66542190a77619 | ["BSD-3-Clause"] | 1,311 | 2017-08-14T20:57:36.000Z | 2022-03-31T03:54:41.000Z | bottom-up-attention/tools/read_tsv.py | daveredrum/meshed-memory-transformer | 6dfbc2ba241b7c1c8deac6114d66542190a77619 | ["BSD-3-Clause"] | 109 | 2017-08-23T14:50:07.000Z | 2022-02-28T17:14:56.000Z | bottom-up-attention/tools/read_tsv.py | daveredrum/meshed-memory-transformer | 6dfbc2ba241b7c1c8deac6114d66542190a77619 | ["BSD-3-Clause"] | 391 | 2017-08-19T16:36:59.000Z | 2022-03-24T08:37:37.000Z |
#!/usr/bin/env python

import base64
import numpy as np
import csv
import sys
import zlib
import time
import mmap

csv.field_size_limit(sys.maxsize)

FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
infile = '/data/coco/tsv/trainval/karpathy_val_resnet101_faster_rcnn_genome.tsv'

if __name__ == '__main__':
    # Verify we can read a tsv
    # (Python 2 script: uses the print statement and base64.decodestring)
    in_data = {}
    with open(infile, "r+b") as tsv_in_file:
        reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
        for item in reader:
            item['image_id'] = int(item['image_id'])
            item['image_h'] = int(item['image_h'])
            item['image_w'] = int(item['image_w'])
            item['num_boxes'] = int(item['num_boxes'])
            for field in ['boxes', 'features']:
                item[field] = np.frombuffer(base64.decodestring(item[field]),
                                            dtype=np.float32).reshape((item['num_boxes'], -1))
            in_data[item['image_id']] = item
            break
    print in_data
| 27.578947 | 85 | 0.612595 |
c535070baa0a55557480ac9f630260f31fe0cad1 | 11,262 | py | Python | intersight/model/search_tag_item_list.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | ["Apache-2.0"] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/search_tag_item_list.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | ["Apache-2.0"] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/search_tag_item_list.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | ["Apache-2.0"] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z |
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.mo_base_response import MoBaseResponse
from intersight.model.search_tag_item import SearchTagItem
from intersight.model.search_tag_item_list_all_of import SearchTagItemListAllOf
globals()['MoBaseResponse'] = MoBaseResponse
globals()['SearchTagItem'] = SearchTagItem
globals()['SearchTagItemListAllOf'] = SearchTagItemListAllOf
class SearchTagItemList(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'object_type': (str,), # noqa: E501
'count': (int,), # noqa: E501
'results': ([SearchTagItem], none_type,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'object_type': val}
attribute_map = {
'object_type': 'ObjectType', # noqa: E501
'count': 'Count', # noqa: E501
'results': 'Results', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, object_type, *args, **kwargs): # noqa: E501
"""SearchTagItemList - a model defined in OpenAPI
Args:
object_type (str): A discriminator value to disambiguate the schema of a HTTP GET response body.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            count (int): The total number of 'search.TagItem' resources matching the request, across all pages. The 'Count' attribute is included when the HTTP GET request includes the '$inlinecount' parameter. [optional] # noqa: E501
            results ([SearchTagItem], none_type): The array of 'search.TagItem' resources matching the request. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
MoBaseResponse,
SearchTagItemListAllOf,
],
'oneOf': [
],
}
| 47.121339 | 1,678 | 0.635145 |
b42e24f358d688db7682fe6948643b220f0f2dbd | 3,467 | py | Python | Python/sir_example_main.py | Wasim5620/SIRmodel | bbca1431673dc5450f290db1235eb73e92e74979 | ["MIT"] | 26 | 2018-08-08T20:40:21.000Z | 2022-01-13T19:46:40.000Z | Python/sir_example_main.py | Wasim5620/SIRmodel | bbca1431673dc5450f290db1235eb73e92e74979 | ["MIT"] | 24 | 2020-03-25T19:35:43.000Z | 2022-02-10T11:46:50.000Z | Python/sir_example_main.py | Wasim5620/SIRmodel | bbca1431673dc5450f290db1235eb73e92e74979 | ["MIT"] | 9 | 2017-07-22T04:23:15.000Z | 2021-03-19T09:42:35.000Z |
# SIR model example for python 2.7
# Marisa Eisenberg (marisae@umich.edu)
# Yu-Han Kao (kaoyh@umich.edu) -7-9-17
#### Import all the packages ####
import scipy.optimize as optimize
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import sir_ode
import sir_cost
import minifim
import proflike
from scipy.integrate import odeint as ode
#### Load Data ####
times = [0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77, 84, 91, 98]
data = [97, 271, 860, 1995, 4419, 6549, 6321, 4763, 2571, 1385, 615, 302, 159, 72, 34]
#shortened version for seeing how truncated data affects the estimation
#times = times[0:7]
#data = data[0:7]
#### Set initial parameter values and initial states ####
params = [0.4, 0.25, 80000.0]#make sure all the params and inition states are float
paramnames = ['beta', 'gamma', 'k']
ini = sir_ode.x0fcn(params,data)
print ini
#### Simulate and plot the model ####
res = ode(sir_ode.model, ini, times, args=(params,))
print res
sim_measure = sir_ode.yfcn(res, params)
print sim_measure
plt.plot(times, sim_measure, 'b-', linewidth=3, label='Model simulation')
plt.plot(times, data, 'k-o', linewidth=2, label='Data')
plt.xlabel('Time')
plt.ylabel('Individuals')
plt.legend()
plt.show()
#### Parameter estimation ####
optimizer = optimize.minimize(sir_cost.NLL, params, args=(data, times), method='Nelder-Mead')
paramests = np.abs(optimizer.x)
iniests = sir_ode.x0fcn(paramests, data)
#### Re-simulate and plot the model with the final parameter estimates ####
xest = ode(sir_ode.model, iniests, times, args=(paramests,))
est_measure = sir_ode.yfcn(xest, paramests)
plt.plot(times, est_measure, 'b-', linewidth=3, label='Model simulation')
plt.plot(times, data, 'k-o', linewidth=2, label='Data')
plt.xlabel('Time')
plt.ylabel('Individuals')
plt.legend()
plt.show()
#### Calculate the simplified Fisher Information Matrix (FIM) ####
FIM = minifim.minifisher(times, params, data, delta = 0.001)
print np.linalg.matrix_rank(FIM) #calculate rank of FIM
print FIM
#### Generate profile likelihoods and confidence bounds ####
threshold = stats.chi2.ppf(0.95,len(paramests))/2.0 + optimizer.fun
perrange = 0.25 #percent range for profile to run across
profiles={}
for i in range(len(paramests)):
profiles[paramnames[i]] = proflike.proflike(paramests, i, sir_cost.NLL, times, data, perrange=perrange)
plt.figure()
plt.scatter(paramests[i], optimizer.fun, marker='*',label='True value', color='k',s=150, facecolors='w', edgecolors='k')
plt.plot(profiles[paramnames[i]]['profparam'], profiles[paramnames[i]]['fcnvals'], 'k-', linewidth=2, label='Profile likelihood')
plt.axhline(y=threshold, ls='--',linewidth=1.0, label='Threshold', color='k')
plt.xlabel(paramnames[i])
plt.ylabel('Negative log likelihood')
plt.legend(scatterpoints = 1)
paramnames_fit = [ n for n in paramnames if n not in [paramnames[i]]]
paramests_fit = [v for v in paramests if v not in [paramests[i]]]
print paramnames_fit
print paramests_fit
#plot parameter relationships
#for j in range(profiles[paramnames[i]]['fitparam'].shape[1]):
# plt.figure()
# plt.plot(profiles[paramnames[i]]['profparam'],profiles[paramnames[i]]['fitparam'][:,j],'k-', linewidth=2, label=paramnames_fit[j])
# plt.scatter(paramests[i], paramests_fit[j], marker='*',label='True value', color='k',s=150, facecolors='w', edgecolors='k')
# plt.xlabel(paramnames[i])
# plt.ylabel(paramnames_fit[j])
# plt.legend(scatterpoints = 1)
print profiles
plt.show()
| 36.114583 | 133 | 0.715027 |
84575c6d1af97c6e2bf0b21489a7343ce851c1fb | 3,800 | py | Python | scripts/brute_force.py | zhongguotu/ecs289c-precimonious | 2c33f99eef616de9ffa5edc4c6e37ff09760ce6b | ["BSD-3-Clause"] | 22 | 2015-08-13T12:57:50.000Z | 2021-12-29T13:02:07.000Z | scripts/brute_force.py | huiguoo/precimonious | 5bc4c7fb522859cddb58743c9a14fab69b3ed8f6 | ["BSD-3-Clause"] | 2 | 2018-05-19T15:09:24.000Z | 2021-11-23T00:54:21.000Z | scripts/brute_force.py | huiguoo/precimonious | 5bc4c7fb522859cddb58743c9a14fab69b3ed8f6 | ["BSD-3-Clause"] | 13 | 2015-10-16T11:06:37.000Z | 2022-03-28T07:54:26.000Z |
#!/usr/bin/env python
import json
import os
import types
import sys
import transform
import utilities
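# Brute-force search over precision configurations: enumerate every combination of
# candidate types from the search config, apply each one to the bitcode file via
# transform.transform(), and log whether the resulting configuration is valid.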
def print_config(config, configFile):
f = open(configFile, 'w+')
f.write("{\n")
changeList = config["config"]
for change in changeList:
f.write("\t\"" + change.keys()[0] + "\": {\n")
changeValue = change.values()[0]
for valueInfo in changeValue.keys():
f.write("\t\t\"" + valueInfo + "\": \"" + changeValue[valueInfo] + "\",\n")
f.write("\t},\n")
f.write("}\n")
def print_diff(changeConfig, originalConfig, diffFile):
f = open(diffFile, 'w+')
originalList = originalConfig["config"]
changeList = changeConfig["config"]
count = 0
while count < len(originalList):
change = changeList[count]
origin = originalList[count]
newType = change.values()[0]["type"]
originType = origin.values()[0]["type"]
if newType != originType:
changeType = change.keys()[0]
if changeType == "localVar":
f.write("localVar: " + change.values()[0]["name"] + " " + originType + " -> " + newType)
elif changeType == "op":
f.write("op: " + change.values()[0]["id"] + originType + " -> " + newType)
count += 1
def run_config(config, search, bitcodefile, originalConfig, limit):
print_config(config, "config_temp.json")
result = transform.transform(bitcodefile, "config_temp.json")
if result == 1:
print "check VALID_config_" + str(search) + ".json for a valid config file"
print_config(config, "VALID_config_" + bitcodefile + "_" + str(search) + ".json")
print_diff(config, originalConfig, "diff_" + str(search) + ".cov")
utilities.log_config(config, "VALID", "log.bf", search)
elif result == 0:
print "\tINVALID CONFIG"
print_config(config, "INVALID_config_" + bitcodefile + "_" + str(search) + ".json")
utilities.log_config(config, "INVALID", "log.bf", search)
elif result == -1:
print "\tFAIL TYPE 1"
print_config(config, "FAIL1_config_" + bitcodefile + "_" + str(search) + ".json")
utilities.log_config(config, "FAIL1", "log.bf", search)
elif result == -2:
print "\tFAIL TYPE 2"
print_config(config, "FAIL2_config_" + bitcodefile + "_" + str(search) + ".json")
utilities.log_config(config, "FAIL2", "log.bf", search)
elif result == -3:
print "\tFAIL TYPE 3"
print_config(config, "FAIL3_config_" + bitcodefile + "_" + str(search) + ".json")
utilities.log_config(config, "FAIL3", "log.bf", search)
search += 1
if search > limit and limit != -1:
sys.exit(0)
return search
def search_config(changeList, changeTypeList, config, search, bitcodefile, originalConfig, limit):
if len(changeList) == 1:
for changeType in changeTypeList[0]:
changeList[0]["type"] = changeType
search = run_config(config, search, bitcodefile, originalConfig, limit)
else:
change = changeList.pop()
changeTypes = changeTypeList.pop()
for changeType in changeTypes:
change["type"] = changeType
search = search_config(changeList, changeTypeList, config, search, bitcodefile, originalConfig, limit)
changeList.append(change)
changeTypeList.append(changeTypes)
return search
def main():
bitcodefile = sys.argv[1]
configSearch = sys.argv[2]
original = sys.argv[3]
limit = -1
if len(sys.argv) >= 5:
limit = int(sys.argv[4])
# remove config file
try:
os.remove("log.bf")
except OSError:
pass
config = json.loads(open(configSearch, 'r').read())
originalConfig = json.loads(open(original, 'r').read())
allChange = config["config"]
allChangeList = []
allTypeList = []
for change in allChange:
typeList = change.values()[0]["type"]
if isinstance(typeList, list):
allTypeList.append(typeList)
allChangeList.append(change.values()[0])
print "Searching for valid config ..."
search_config(allChangeList, allTypeList, config, 0, bitcodefile, originalConfig, limit)
if __name__ == "__main__":
main()
| 31.932773 | 105 | 0.682105 |
77fc4fe50b58ff1166eac74ef7167357d00deb48 | 203 | py | Python | python/hail/typecheck2/__init__.py | maccum/hail | e9e8a40bb4f0c2337e5088c26186a4da4948bed2 | ["MIT"] | 1 | 2020-03-09T21:25:00.000Z | 2020-03-09T21:25:00.000Z | python/hail/typecheck2/__init__.py | maccum/hail | e9e8a40bb4f0c2337e5088c26186a4da4948bed2 | ["MIT"] | null | null | null | python/hail/typecheck2/__init__.py | maccum/hail | e9e8a40bb4f0c2337e5088c26186a4da4948bed2 | ["MIT"] | null | null | null |
from .check import typecheck, register_conversion
__all__ = ['typecheck',
'register_conversion']
__doc__ = """
Heavily inspired by typeguard:
https://github.com/agronholm/typeguard/
"""
| 22.555556 | 49 | 0.704433 |
feb65d3c32f29b8eb5ba9868ac4744efa769cc8b | 854 | py | Python | paramak/parametric_shapes/extruded_straight_shape.py | moatazharb/paramak | 785c3ed7304e22eac7d58bb1bdc6515fbd20b9a8 | ["MIT"] | 1 | 2021-12-14T15:53:46.000Z | 2021-12-14T15:53:46.000Z | paramak/parametric_shapes/extruded_straight_shape.py | bam241/paramak | 785c3ed7304e22eac7d58bb1bdc6515fbd20b9a8 | ["MIT"] | null | null | null | paramak/parametric_shapes/extruded_straight_shape.py | bam241/paramak | 785c3ed7304e22eac7d58bb1bdc6515fbd20b9a8 | ["MIT"] | null | null | null |
from typing import Optional

from paramak import ExtrudeMixedShape


class ExtrudeStraightShape(ExtrudeMixedShape):
    """Extrudes a 3d CadQuery solid from points connected with straight lines.

    Args:
        distance: the extrusion distance to use (cm units if used for
            neutronics)
        stp_filename: Defaults to "ExtrudeStraightShape.stp".
        stl_filename: Defaults to "ExtrudeStraightShape.stl".
    """

    def __init__(
        self,
        distance: float,
        stp_filename: Optional[str] = "ExtrudeStraightShape.stp",
        stl_filename: Optional[str] = "ExtrudeStraightShape.stl",
        **kwargs
    ):
        super().__init__(
            distance=distance,
            stp_filename=stp_filename,
            stl_filename=stl_filename,
            connection_type="straight",
            **kwargs
        )
| 26.6875 | 78 | 0.638173 |
8cd7b7ec3fa6309048ff8a0ce11f1fa040e63778 | 68,062 | py | Python | ims_legacy.py | katrinleinweber/miniBibServer | 9750e1e0773c7d6e57c0f71c096967879dfa631d | ["MIT"] | null | null | null | ims_legacy.py | katrinleinweber/miniBibServer | 9750e1e0773c7d6e57c0f71c096967879dfa631d | ["MIT"] | null | null | null | ims_legacy.py | katrinleinweber/miniBibServer | 9750e1e0773c7d6e57c0f71c096967879dfa631d | ["MIT"] | 1 | 2018-11-03T08:06:43.000Z | 2018-11-03T08:06:43.000Z |
import json
import glob
import time
import datetime
time_stamp = unicode( datetime.datetime.now().isoformat().split('.')[0], 'utf-8')
import random
import text_json
from pprint import pprint
## set some global variables for convenience of use
global_vars = {}
global_vars['published_files_directory'] = './published_files/'
# Save these here so we don't have to pass them around
global_vars['link_ls'] = [{'anchor': u'Faculty Research Lecture Committee, University of California, Berkeley',
'category_ls': [u'committee'],
'href': u'http://academic-senate.berkeley.edu/committees/frl'},
{'anchor': u'Faculty Research Lecturer, University of California, Berkeley',
'category_ls': [u'lecture'],
'href': u'http://www.urel.berkeley.edu/faculty/history.html'},
{'anchor': u'The Royal Netherlands Academy of Arts and Sciences',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/Royal_Netherlands_Academy_of_Arts_and_Sciences'},
{'anchor': u'American Association for Advancement of Science',
'category_ls': [u'society'],
'href': u'http://www.aaas.org/'},
{'anchor': u'President, Institute of Mathematical Statistics',
'category_ls': [u'president_list'],
'href': u'http://imstat.org/officials/past_officials.html'},
{'anchor': u'Commander of the Order of the British Empire',
'category_ls': [],
'href': u'http://en.wikipedia.org/wiki/Order_of_the_British_Empire'},
{'anchor': u'Royal Danish Academy of Sciences and Letters',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/Royal_Danish_Academy_of_Sciences_and_Letters'},
{'anchor': u'Academy of the Social Sciences in Australia',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/Academy_of_the_Social_Sciences_in_Australia'},
{'anchor': u'American Mathematical Society, Fellows List',
'category_ls': [u'fellow_ls'],
'href': u'http://www.ams.org/profession/fellows-list'},
{'anchor': u'National Medal of Technology and Innovation',
'category_ls': [],
'href': u'http://en.wikipedia.org/wiki/National_Medal_of_Technology_and_Innovation'},
{'anchor': u'Norbert Wiener Prize in Applied Mathematics',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/Norbert_Wiener_Prize_in_Applied_Mathematics'},
{'anchor': u'Israel Academy of Sciences and Humanities',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/Israel_Academy_of_Sciences_and_Humanities'},
{'anchor': u'Norwegian Academy of Science and Letters',
'category_ls': [u'academy'],
'href': u'https://en.wikipedia.org/wiki/Norwegian_Academy_of_Science_and_Letters'},
{'anchor': u'The Institute of Statistical Mathematics',
'category_ls': [],
'href': u'http://www.ism.ac.jp/index_e.html'},
{'anchor': u'American Academy of Arts and Sciences',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/American_Academy_of_Arts_and_Sciences'},
{'anchor': u'American Mathematical Society, Fellow',
'category_ls': [u'fellow'],
'href': u'http://www.ams.org/profession/ams-fellows/ams-fellows'},
{'anchor': u'Institute of Mathematical Statistics',
'category_ls': [u'society'],
'href': u'http://imstat.org/'},
{'anchor': u'Officer of the Order of Australia',
'category_ls': [u'honor'],
'href': u'http://en.wikipedia.org/wiki/Order_of_Australia'},
{'anchor': u'Royal Spanish Academy of Sciences',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/Spanish_Royal_Academy_of_Sciences'},
{'anchor': u'Nobel Prize in Economic Sciences',
'category_ls': [u'prize'],
'href': u'http://www.nobelprize.org/nobel_prizes/economics/laureates/'},
{'anchor': u'Indian National Science Academy',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/Indian_National_Science_Academy'},
{'anchor': u'National Academy of Engineering',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/National_Academy_of_Engineering'},
{'anchor': u'The Royal Society of Literature',
'category_ls': [u'society'],
'href': u'https://en.wikipedia.org/wiki/Royal_Society_of_Literature'},
{'anchor': u'Third World Academy of Sciences',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/TWAS'},
{'anchor': u'Accademia Nazionale dei Lincei',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/Accademia_dei_Lincei'},
{'anchor': u'American Philosophical Society',
'category_ls': [u'society'],
'href': u'http://en.wikipedia.org/wiki/American_Philosophical_Society'},
{'anchor': u'Officer of the Order of Canada',
'category_ls': [u'honor'],
'href': u'https://en.wikipedia.org/wiki/Order_of_Canada'},
{'anchor': u'The Royal Society of Edinburgh',
'category_ls': [u'society'],
'href': u'http://en.wikipedia.org/wiki/Royal_Society_of_Edinburgh'},
{'anchor': u'American Mathematical Society',
'category_ls': [u'society'],
'href': u'http://www.ams.org/'},
{'anchor': u'Australian Academy of Science',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/Australian_Academy_of_Science'},
{'anchor': u'John von Neumann Theory Prize',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/John_von_Neumann_Theory_Prize'},
{'anchor': u'Presidential Medal of Freedom',
'category_ls': [u'award'],
'href': u'http://en.wikipedia.org/wiki/Presidential_Medal_of_Freedom'},
{'anchor': u'National Academy of Sciences',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/National_Academy_of_Sciences'},
{'anchor': u'The George B. Dantzig Prize',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/George_B._Dantzig_Prize#George_B._Dantzig_Prize'},
{'anchor': u'The Royal Society of Canada',
'category_ls': [u'society'],
'href': u'http://en.wikipedia.org/wiki/Royal_Society_of_Canada'},
{'anchor': u'Académie des sciences',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/French_Academy_of_Sciences'},
{'anchor': u'Indian Academy of Sciences',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/Indian_Academy_of_Sciences'},
{'anchor': u'John von Neumann Lecturer',
'category_ls': [u'lecture'],
'href': u'http://www.siam.org/prizes/sponsored/vonneumann.php'},
{'anchor': u'National Medal of Science',
'category_ls': [u'award'],
'href': u'http://en.wikipedia.org/wiki/National_Medal_of_Science'},
{'anchor': u'Royal Statistical Society',
'category_ls': [u'society'],
'href': u'https://www.rss.org.uk/'},
{'anchor': u'Elizabeth L. Scott Award',
'category_ls': [u'award'],
'href': u'http://nisla05.niss.org/copss/PastAwardsScott.pdf'},
{'anchor': u'George W. Snedecor Award',
'category_ls': [u'award'],
'href': u'http://nisla05.niss.org/copss/PastAwardsSnedecor.pdf'},
{'anchor': u"COPSS Presidents' Award",
'category_ls': [u'award'],
'href': u'http://en.wikipedia.org/wiki/COPSS_Presidents%27_Award'},
{'anchor': u'Académie des sciences',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/French_Academy_of_Sciences'},
{'anchor': u'Harry C. Carver Medal',
'category_ls': [u'award'],
'href': u'http://imstat.org/awards/awards_IMS_recipients.htm'},
{'anchor': u'Institute of Medicine',
'category_ls': [u'society'],
'href': u'https://en.wikipedia.org/wiki/Institute_of_Medicine'},
{'anchor': u'R. A. Fisher Lecturer',
'category_ls': [u'award'],
'href': u'http://nisla05.niss.org/copss/PastAwardsFisher.pdf'},
{'anchor': u'Samuel S. Wilks Award',
'category_ls': [u'award'],
'href': u'http://en.wikipedia.org/wiki/Wilks_Memorial_Award'},
{'anchor': u'Wolf Prize in Physics',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/Wolf_Prize_in_Physics'},
{'anchor': u'Rollo Davidson Prize',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/Rollo_Davidson_Prize'},
{'anchor': u'Guy Medal in Bronze',
'category_ls': [u'award'],
'href': u'http://en.wikipedia.org/wiki/Guy_Medal#Bronze_Medallists'},
{'anchor': u'Guy Medal in Silver',
'category_ls': [u'award'],
'href': u'http://en.wikipedia.org/wiki/Guy_Medal#Silver_Medallists'},
{'anchor': u'IMS Lecture Awards',
'category_ls': [u'award'],
'href': u'http://www.imstat.org/awards/lectures.htm'},
{'anchor': u'F. N. David Award',
'category_ls': [u'award'],
'href': u'http://nisla05.niss.org/copss/PastAwardsFNDavid.pdf'},
{'anchor': u'Guy Medal in Gold',
'category_ls': [u'award'],
'href': u'http://en.wikipedia.org/wiki/Guy_Medal#Gold_Medallists'},
{'anchor': u'The Royal Society',
'category_ls': [u'society'],
'href': u'http://en.wikipedia.org/wiki/Royal_Society'},
{'anchor': u'Loève Prize',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/Lo%C3%A8ve_Prize'},
{'anchor': u'MacArthur Fellow',
'category_ls': [u'fellow'],
'href': u'http://en.wikipedia.org/wiki/MacArthur_Fellows_Program'},
{'anchor': u'Medal of Freedom',
'category_ls': [u'award'],
'href': u'http://en.wikipedia.org/wiki/Medal_of_Freedom'},
{'anchor': u'British Academy',
'category_ls': [u'academy'],
'href': u'http://en.wikipedia.org/wiki/British_Academy'},
{'anchor': u'Chauvenet Prize',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/Chauvenet_Prize'},
{'anchor': u'Medal for Merit',
'category_ls': [u'award'],
'href': u'http://en.wikipedia.org/wiki/Medal_for_Merit'},
{'anchor': u'Neyman Lecturer',
'category_ls': [u'lecture'],
'href': u'http://imstat.org/awards/lectures_winners.htm'},
{'anchor': u'Birkhoff Prize',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/George_David_Birkhoff_Prize'},
{'anchor': u'Rietz Lecturer',
'category_ls': [u'lecture'],
'href': u'http://www.imstat.org/awards/lectures_winners.htm#RietzLectures'},
{'anchor': u'IMS President',
'category_ls': [],
'href': u'http://en.wikipedia.org/wiki/List_of_presidents_of_the_Institute_of_Mathematical_Statistics'},
{'anchor': u'Wald Lecturer',
'category_ls': [u'lecture'],
'href': u'http://www.imstat.org/awards/lectures_winners.htm'},
{'anchor': u'Copley Medal',
'category_ls': [u'award'],
'href': u'http://en.wikipedia.org/wiki/Copley_Medal'},
{'anchor': u'Israel Prize',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/Israel_Prize'},
{'anchor': u'Steele Prize',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/Leroy_P._Steele_Prize'},
{'anchor': u'Kyoto Prize',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/Kyoto_Prize'},
{'anchor': u'Abel Prize',
'category_ls': [u'prize'],
'href': u'http://en.wikipedia.org/wiki/Abel_Prize'},
{'anchor': u'Doob Prize',
'category_ls': [u'prize'],
'href': u'http://www.ams.org/profession/prizes-awards/ams-prizes/doob-prize'},
{'anchor': u'IMS Fellow',
'category_ls': [u'fellow'],
'href': u'http://www.imstat.org/awards/honored_fellows.htm'},
{'anchor': u'Knighted',
'category_ls': [],
'href': u'http://en.wikipedia.org/wiki/Orders,_decorations,_and_medals_of_the_United_Kingdom'}]
### tool for breaking long lines nicely
txt = ''' :citation For developing new statistical methods for diffusion and other stochastic processes, for identifying and opening new fields of applications of statistics including nonparametric inference and testing to the analysis and pricing of financial instruments, and for his conscientious professional service'''
def break_lines(txt,nchars):
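    # Greedily wrap txt into lines of roughly nchars characters; returns the wrapped lines as a list.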
words = txt.split()
broken_lines = []
thisline = ''
while 1:
thisline += words[0] + ' '
if len(thisline) > nchars:
thisline = ' '.join(thisline.split()[0:-1])
broken_lines += [ thisline ]
thisline = words[0] + ' '
else:
pass
words = words[1:]
if not words:
broken_lines += [ thisline ]
break
return broken_lines
def indent(txt):
words = txt.split()
if len(words) > 0:
word = words[0]
pre,post = txt.split(word,1)
return pre
else:
return ''
def normalize_indent(txt):
if not indent(txt): return txt
else:
words = txt.split()
return '\t' + ' '.join(txt.split())
def break_line_txt(txt,nchars):
lines = break_lines(txt,nchars)
if len(lines) == 0: return ''
else:
lines = [ indent(txt) + lines[0]] + [ '\t' + line for line in lines[1:]]
lines = [ normalize_indent(line) for line in lines]
return '\n'.join(lines)
def break_txt(txt, nchars):
p = ''
for line in txt.split('\n'):
if len(line) > nchars:
if line.strip()[0] == ':':
p += '\n' + break_line_txt(line,nchars)
else: p += '\n' + normalize_indent(line)
else: p += '\n' + normalize_indent(line)
p += '\n'
return p
### tools for handling years
digits = '0123456789'
def get_year2(line,digs):
y12 = ''
y34 = ''
rest = ''
yinsts = line.split(digs)
if len(yinsts) > 0:
for yinst in yinsts[1:]:
try:
y34 = yinst.split()[0]
for x in (',.<)'): y34 = y34.split(x)[0]
if len(y34) == 2 and y34[0] in digits and y34[1] in digits:
return digs + y34
else: pass
except: pass
return ''
def get_years2(line,digs):
""" returns a list of 4 digit strings that are credibly years beginning with 2 digits digs """
y12 = ''
y34 = ''
rest = ''
years = []
yinsts = line.split(digs)
if len(yinsts) > 0:
for yinst in yinsts[1:]:
try:
y34 = yinst.split()[0]
for x in (',.<)'): y34 = y34.split(x)[0]
if len(y34) == 2 and y34[0] in digits and y34[1] in digits:
years += [digs + y34]
except: pass
return years
def get_year_from_str(line):
y = get_year2(line,'19')
if not y: y = get_year2(line,'20')
try:
if int(y) > 2014: y = str(2014)
except: y = ''
return y
def get_years_from_str(line):
return get_years2(line,'19') + get_years2(line,'20')
def get_year_from_record(r):
""" make best effort to extract a year string from a record, returns '' if none found """
y = r.get('year','')
if not y:
y = get_year_from_str( r.get('howpublished','') )
if not y: ## this works for arxiv records #arxiv:published": "2005-03-03T08:34:51Z"
y = r.get('arxiv:published','').split('-')[0]
return y
def get_year(x):
if type(x) in [type(u'txt'),type('txt')]:
return get_year_from_str(x)
elif type(x) == type({}):
return get_year_from_record(x)
else: return ''
def year_examples():
line = 'Electron. Trans. Numer. Anal. (ETNA) 36 (2009), 27-38. ps '
line2 = 'Electron. Trans. Numer. Anal. (ETNA) 36 (2009), 27-38. ps  1984 1963)'
line = '(<y>2006</y>). <rel>In </rel><inc>booktitle:encyclopedia-of-statistical-sciences</inc>'
print get_year(line)
#print get_years_from_str(line2)
def year_cmp(a,b):
try: return cmp( a.get('year','3000'),b.get('year','3000'))
except:
print 'year_cmp failed for '
print a
print b
return 0
month_ls = 'January February March April May June July August September October November December'.split()
month = {}
for i in range(12): month[ str( i + 1) ] = month_ls[i]
for i in range(12): month[ '0' + str( i + 1) ] = month_ls[i]
def parse_date(dt):
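    # Parse an ISO-style 'YYYY-MM-DD' (or 'YYYY-MM' / 'YYYY') string into a dict
    # with 'year', month name, and 'day' entries where available.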
d = {}
ss = dt.split('-')
if len(ss) == 0:
pass
if len(ss) >= 1:
d['year'] = ss[0].strip()
if len(ss) >= 2:
d['month'] = month[ss[1]].strip()
if len(ss) >= 3:
day = ss[2].strip()
if day and day[0] == '0':
day = day[1]
d['day'] = day.strip()
return d
def show_date(dt):
d = parse_date(dt)
if d.has_key('day'): return d['day'] + ' ' + d['month'] + ' ' + d['year']
elif d.has_key('month'): return d['month'] + ' ' + d['year']
elif d.has_key('year'): return d['year']
else: return ''
bio_cat_plurals = '''
Biography Biographies
Symposium Symposia
Archive Archives
Art_Exhibition Art_Exhibitions
Collected_Works Collected_Works
Selected_Works Selected_Works
DVD DVDs
Endowment Endowments
Festschrift Festschriften
Memoir Memoirs
Oral_History Oral_History
Death_Notice Death_Notices
Obituary Obituaries
In_Memoriam Memorials
'''
bio_cat_plural = {}
bio_cat_order = []
lines = bio_cat_plurals.split('\n')
for line in lines:
try:
k,v = line.split()
bio_cat_plural[k] = v
bio_cat_order += [k]
except: pass
def make_tag_map():
tag_map_lines = '''
<author> <span class="bib-author">
<cit> <span class="bib-citation">
<au> <span class="bib-author">
<ed> <span class="bib-editor">
<year> <span class="bib-year">
<rel> <span class="bib-relation">
<y> <span class="bib-year">
<journal> <span class="bib-journal">
<j> <span class="bib-journal">
<title> <span class="bib-title">
<t> <span class="bib-title">
<bt> <span class="bib-title">
<booktitle> <span class="bib-booktitle">
<webcollection> <span class="bib-booktitle">
<v> <span class="bib-jvolume">
<vol> <span class="bib-jvolume">
<volume> <span class="bib-jvolume">
<howpublished> <span class="bib-howpublished">
<howpub> <span class="bib-howpublished">
<pages> <span class="bib-pages">
<pp> <span class="bib-pages">
<series> <span class="bib-series">
<publisher> <span class="bib-publisher">
<address> <span class="bib-address">
'''
tag_map = {}
lines = tag_map_lines.split('\n')
for line in lines:
try:
k,v = line.strip().split('>',1)
k = k.strip()
v = v.strip()
tag_map[k+ '>'] = v
tag_map['<' + short_tag[k[1:]] + '>'] = v
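            # NOTE: short_tag is not defined in this module, so the line above raises
            # NameError and is swallowed by the except below; only the long-form tags are mapped.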
except:
pass
for tag in tag_map.keys():
tag_map[tag.replace('<','</')] = '</span>'
return tag_map
tag_map = make_tag_map()
def make_link_ls():
this_dir = './'
link_ls = []
infile = open(this_dir + 'organizations.txt')
inlines = infile.readlines()
for line in inlines:
line = unicode(line,'utf-8')
try:
k,v = line.split(None,1)
if k[0:4] == 'http':
link = {}
link['href'] = k
try: v = json.loads('"' + v + '"')
except: pass
link['anchor'] = ' '.join(v.split())
link_ls += [link]
else: pass # print line
except: pass # print line
return link_ls
def make_link_dict():
d = {}
link_ls = make_link_ls()
for link in link_ls:
d[ link['anchor'] ] = link['href']
return d
link_dict = make_link_dict()
def randtag():
return str(random.randint(100,1000000) )
def lines2link_ls(lines):
link_ls = []
for line in lines.split('\n'):
line = line.strip()
if not line[0:4] == 'http': pass ## print line
else:
words = line.split()
link = {}
link['href'] = words[0]
anchor = ' '.join(words[1:])
if anchor: rel = words[1]
else: rel = ''
if rel in 'homepage pubspage bookspage profilepage'.split():
link['rel'] = rel
link['anchor'] = ' '.join(words[2:])
elif anchor:
link['anchor'] = anchor
else: link['anchor'] = ''
link_ls += [link]
return link_ls
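# Illustrative (added for clarity): the line 'http://example.org homepage John Doe' yields
#   {'href': 'http://example.org', 'rel': 'homepage', 'anchor': 'John Doe'},
# while 'http://example.org John Doe' (no recognised rel keyword) yields
#   {'href': 'http://example.org', 'anchor': 'John Doe'}.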
def item_cmp(x,y):
return - cmp( len(x['anchor']), len(y['anchor']) )
def links_txt2ls(links_txt):
inlines = links_txt.split('\n')
lines = []
dls = []
for line in inlines:
words = line.split()
if words:
d = {}
urls = [ w for w in words if w[0:4] == 'http']
if not len(urls) == 1:
print 'Error with ', str(len(urls)) , 'urls in: ' ,line
continue
text_words = [ w for w in words if not w[0:4] == 'http']
text_words = [ w for w in text_words if not w[0] == '[']
text_words = [ w for w in text_words if not w[-1] == ']']
key_words = [ w for w in words if w[0] == '[' and w[-1] == ']']
text = ' '.join(text_words)
d['anchor'] = text
d['href'] = urls[0]
d['category_ls'] = [w[1:-1] for w in key_words]
dls += [d]
dls.sort(item_cmp)
return dls
def item2link(d):
return '<a href="' + d['href'] + '">' + d['anchor'] + '</a>'
def item2txt(d):
return d['anchor'] + ' ' + ' '.join(d.get('category_ls',[])) + ' ' + d['href'] + '\n'
def add_links(txt, link_ls):
for ii in range( len(link_ls) ):
item = link_ls[ii]
txt = txt.replace( item['anchor'], 'XX' + str(ii) + 'XX' )
for ii in range( len(link_ls) ):
item = link_ls[ii]
txt = txt.replace('XX' + str(ii) + 'XX' , item2link(item))
return txt
def list_links(txt, link_ls):
out_txt = '::links\n'
for ii in range( len(link_ls) ):
item = link_ls[ii]
txt = txt.replace( item['anchor'], 'XX' + str(ii) + 'XX' )
for ii in range( len(link_ls) ):
item = link_ls[ii]
if txt.find('XX' + str(ii) + 'XX') >= 0:
out_txt += item2txt(item)
return out_txt
#links = links_txt2ls()
#for link in links:
# print json.dumps(link,indent=4)
#print add_links(txt, links)
#print list_links(txt, links)
def read_ims_legacy(f='ims_legacy'):
data = {}
dls = []
infile = open(f + '.txt')
instring = infile.read()
##instring = instring.replace(':description',' :citation') ## to be deprecated soon
instring = unicode(instring,'utf-8')
meta = {}
metatxt = instring.split('::')[1]
meta['record_txt'] = '::' + metatxt.strip() + '\n\n'
meta['bibtype'],rest= metatxt.split(None,1)
meta['id'],rest= rest.split(None,1)
rest = '\n' + rest.strip()
secs = rest.split('\n:')[1:]
for sec in secs:
k,v = sec.split(None,1)
meta[k] = v
data['metadata'] = meta
for x in instring.split('::person')[1:]:
x = x.split('\n::')[0]
d = {}
d['record_txt'] = '::person ' + x.strip() + '\n'
lines = d['record_txt'].split('\n')
lines = [line for line in lines if not line.find('Email') >= 0 ]
pubrecord = '\n'.join(lines) + '\n'
d['public_record_txt'] = break_txt(pubrecord,80)
secs = x.split('\n:')
toplines = secs[0].strip()
d['id'], toplines = toplines.split(None,1)
try: name,toplines = toplines.split('\n',1)
except:
name = toplines
toplines = ''
d['complete_name'] = name
#d['link_lines'] = toplines
d['link_ls'] = lines2link_ls(toplines)
for sec in secs[1:]:
words = sec.split()
key = words[0]
if len(words) > 1:
val = sec.split(None,1)[1] ## keep newlines
else: val = ''
if d.has_key(key): d[key] += [val]
else: d[key] = [val]
d['Honor'] = [ read_honor(x) for x in d.get('Honor',[]) ]
d['Degree'] = [ read_degree(x) for x in d.get('Degree',[]) ]
d['Education'] = [ read_education(x) for x in d.get('Education',[]) ]
d['Service'] = [ read_service(x) for x in d.get('Service',[]) ]
d['Position'] = [ read_position(x) for x in d.get('Position',[]) ]
d['Member'] = [ read_member(x) for x in d.get('Member',[]) ]
d['Image'] = [ read_image(x) for x in d.get('Image',[]) ]
d['Homepage'] = [ read_homepage(x) for x in d.get('Homepage',[]) ]
for k in bio_cat_order:
#d['Biography'] = [ read_bio(x) for x in d.get('Biography',[]) ]
d[k] = [ read_bio(x) for x in d.get(k,[]) ]
dls += [d]
links_txt = instring.split('::links',1)[1].split('\n::')[0]
data['link_ls'] = links_txt2ls(links_txt)
data['records'] = dls
books = instring.split('::book')[1:]
book_dict = {}
for b in books:
b = 'book' + b
d = read_book(b)
del d['top_line']
book_dict[ d['id'] ] = d
data['books'] = book_dict
links_txt = instring.split('::links',1)[1].split('\n::')[0]
data['link_ls'] = links_txt2ls(links_txt)
return data
def publish_ims_legacy(f='ims_legacy'):
""" create public version of data by deletion of all email addresses """
out_dir = global_vars['published_files_directory']
data = {}
dls = []
infile = open(f + '.txt')
instring = infile.read()
##instring = instring.replace(':description',' :citation') ## to be deprecated soon
instring = unicode(instring,'utf-8')
instring = instring.replace('$time_stamp$', time_stamp)
lines = instring.split('\n')
lines = [line for line in lines if not line.find('Email') >= 0 ]
txt = '\n'.join(lines) + '\n'
out_fname = out_dir + 'imslegacy_data'
#outfile = open('/accounts/projects/jpopen/apache/htdocs/tmp/imslegacy/imslegacy_data.txt','w')
outfile = open(out_fname + '.txt','w')
outfile.write(txt.encode('utf-8'))
outfile.close()
print ' wrote to ' + out_fname + '.txt'
##print ' published at http://bibserver.berkeley.edu/tmp/imslegacy/imslegacy_data.txt'
#data = read_ims_legacy('/accounts/projects/jpopen/apache/htdocs/tmp/imslegacy/imslegacy_data')
data = read_ims_legacy(out_fname)
#outfile = open('/accounts/projects/jpopen/apache/htdocs/tmp/imslegacy/imslegacy_data.json','w')
outfile = open(out_fname + '.json','w')
outfile.write(json.dumps(data,indent=4).encode('utf-8'))
outfile.close()
print ' wrote to ' + out_fname + '.json'
#print ' wrote to /accounts/projects/jpopen/apache/htdocs/tmp/imslegacy/imslegacy_data.json'
#print ' published at http://bibserver.berkeley.edu/tmp/imslegacy/imslegacy_data.json'
def write_ims_legacy(data,fname):
meta = data['metadata']
p = ''
p += '::' + meta['bibtype'] + ' ' + meta['id'] + '\n'
p += ':title ' + meta['title'] + '\n'
for k in meta.keys():
if not k in 'bibtype id title'.split():
if not k[-3:] == '_ls':
meta[k] = [ meta[k]]
for v in meta[k]:
                p += ':' + k + ' ' + v + '\n'
p += '\n'
for d in data['records']:
p += person_record2txt(d)
outfile = open(fname + '.txt','w')
outfile.write(p.encode('utf-8'))
outfile.close()
def id2url(k,v):
if k == 'zb-author-id': h = 'http://zbmath.org/authors/?q=ai:' + v
elif k == 'mr-author-id': h = 'http://www.ams.org/mathscinet/search/author.html?mrauthid=' + v
elif k == 'mg-author-id': h = 'http://genealogy.math.ndsu.nodak.edu/id.php?id=' + v
elif k == 'mas-author-id': h = 'http://academic.research.microsoft.com/Author/' + v
else: h = k + ':' + v
return h
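# For example (illustrative): id2url('mr-author-id', '12345')
#   -> 'http://www.ams.org/mathscinet/search/author.html?mrauthid=12345'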
def ids_dict(d):
iii = {}
print d
for link in d['link_ls']:
try:
iii['zb-author-id'] = link['href'].split('http://zbmath.org/authors/?q=ai:',1)[1]
continue
except: pass
try:
iii['mr-author-id'] = link['href'].split('http://www.ams.org/mathscinet/search/author.html?mrauthid=',1)[1]
continue
except: pass
try:
iii['mg-author-id'] = link['href'].split('http://genealogy.math.ndsu.nodak.edu/id.php?id=',1)[1]
continue
except: pass
return iii
def make_xml_person(d):
iii = ids_dict(d)
d.update(iii)
infile = open('template.xml')
instring = infile.read()
xml = unicode(instring,'utf-8')
passd = {}
passd['id'] = d['id']
for k in 'display_name complete_name last_name DOB DOD mr-author-id zb-author-id mg-author-id'.split():
v = d.get(k,'')
if type(v) == type([]):
try: passd[k] = v[0]
except: passd[k] = ''
else: passd[k] = v
#for k in 'DOD DOB'.split():
# passd[k] = clean_date(passd[k])
name = passd.get('display_name', '').strip()
if not name:
name = passd.get('complete_name','')
if not name:
print 'missing name for '
print json.dumps(d,indent=4)
passd['last_first_name'] = name
try: passd['first_first_name'] = first_first(name)
except:
print 'Unable to make first_first for :' ,name
passd['last_name'] = name.split(',')[0]
passd['html'] = d['covertext_html']
for k in passd.keys():
xml = xml.replace('$' + k + '$',passd[k])
return xml
def make_xml(dls):
out_dir = global_vars['published_files_directory']
xml_wrapper = '''<?xml version="1.0" encoding="UTF-8"?>
<ims-authors-data>
<!-- Generated by BibServer System $time_stamp$ -->
$xml_data$
</ims-authors-data>
'''
xml = ''
for d in dls:
xml += make_xml_person(d)
xml_wrapper = xml_wrapper.replace('$time_stamp$',time_stamp)
xml_wrapper = xml_wrapper.replace('$xml_data$',xml)
#outfile = open('/accounts/projects/jpopen//apache/htdocs/tmp/celebratio_ims_legacy.xml','w')
#outfile = open('/accounts/projects/jpopen/apache/htdocs/tmp/imslegacy/celebratio.xml','w')
outfname = out_dir + 'celebratio.xml'
outfile = open(outfname, 'w')
outfile.write(xml_wrapper.encode('utf-8'))
outfile.close()
print 'wrote to ' + outfname
###print 'wrote to http://bibserver.berkeley.edu/tmp/imslegacy/celebratio.xml'
def display_function(display_style,r):
if display_style == 'json_display':
h = '<td><pre>' + json.dumps(r,indent=4) + '</pre></td>'
elif display_style == 'one_line':
h = '<td>' + r['complete_name'] + '</td><td>'
h += make_dates_html(r)
h += '</td>'
elif display_style == 'images':
h = '<td>' + r['complete_name'] + '</td><td>'
h += top_links_html(r)
h += make_images_html(r)
elif display_style == 'html':
h = '<td>'
d = add_html(r)
h += d['html']
h += '</td>'
else:
h = '<td> display style "' + display_style +'" not available for this source</td>'
return h
def display_data(passd):
meta = passd
q = passd['q'].strip()
try: pageNumber=int(passd['pageNumber'])
except: pageNumber =1
try: itemsPerPage=int(passd['itemsPerPage'])
except: itemsPerPage=20
data = read_ims_legacy()
records = data['records']
for w in q.split():
records = [ r for r in records if str(r).find(w) >= 0]
## should fold as much as possible of passd into meta
meta.update( data['metadata'] ) ## so passd gets overwritten. Hopefully no issues there
meta['pageNumber'] = pageNumber
totalResults = len(records)
meta['totalResults'] = totalResults
format_options_txt = '''
two_lines Two Lines
one_line One Line
images Images
json_display JSON Display
html HTML
'''
'''
json JSON Export
'''
## these are keys to the dictionary of display_functions returned by the display_function module
format_ls = []
for line in format_options_txt.split('\n'):
if line.strip():
k,v = line.split(None,1)
format_ls += [ (k,v)]
meta['formatOption_ls'] = format_ls
meta['itemsPerPageOption_ls'] = '1 2 5 10 20 50 100 200 500 1000 2000'.split()
if (pageNumber - 1) * itemsPerPage > meta['totalResults']:
pageNumber = 1
startIndex = (pageNumber - 1)* itemsPerPage
endIndex = startIndex + itemsPerPage
data['records'] = records[startIndex:endIndex] ## or get this set of records by a source-specific function
meta['pageNumber'] = pageNumber
meta['itemsPerPage'] = itemsPerPage
meta['totalPages'] = meta['totalResults']/ itemsPerPage
if meta['totalResults'] % itemsPerPage >0: meta['totalPages'] += 1
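    # e.g. 45 results at 20 per page: 45/20 = 2 full pages plus a remainder, so totalPages = 3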
data['metadata'] = meta
return data
table_template = '''
<!-- begin segm content -->
<div>
<!-- text content -->
<div class="j">
<table>
$table_rows$
</table>
<!-- end text content -->
</div>
<!-- end segm content -->
</div>
<!-- end segm container -->
<!-- end segm container ===================== -->
'''
dl_template = '''
<!-- begin segm content -->
<div>
<!-- text content -->
<div class="j">
<dl class="cvlike">
$rows$
</dl>
<!-- end text content -->
</div>
<!-- end segm content -->
</div>
<!-- end segm container -->
<!-- end segm container ===================== -->
'''
#<dt>1930</dt>
#<dd>Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Donec id elit non mi porta gravida at eget metus.</dd>
#<dt>1930–1967</dt>
#<dd>Praesent commodo cursus magna, vel scelerisque nisl consectetur et. Vivamus sagittis lacus vel augue laoreet rutrum faucibus dolor auctor. Maecenas sed diam eget risus varius blandit sit amet non >
def make_honor_rows(honor):
d = honor
#print d
link_honor = add_links(d['text'],global_vars['link_ls'])
rows = '<dt>' + d['year'] + '</dt><dd>' + link_honor + '</dd>\n'
if d.get('citation',''):
rows += '<dt></dt><dd>"' + d['citation'] + '."</dd>\n'
return rows
def make_row(d):
text = d.get('text','')
#print text
text = add_links(text, global_vars['link_ls'])
return '<dt>' + d.get('year','').decode("utf-8") + '</dt><dd>' + text + '</dd>\n'
def make_images_html(d):
name = d['complete_name']
html = ''
if d.get('Image',[]):
link = d.get('Image',[])[0] ## use only the first one
href = link.get('href','')
src = link.get('src','')
if href and src:
html += '<a href="' + href + '"> <img class="profilephoto" src="' + src + '" height="100px" alt="Profile - ' + name + '"></a>'
return html
return ''
def make_biblio_rows(b):
return '<dt></dt><dd>' + link2html(b)+ '</dd>\n'
def make_bio_rows(b):
#pprint(b)
title = b.get('title','').strip()
if title == 'Untitled': title = ''
author = b.get('author','').strip()
if author == 'Anonymous': author = ''
howpub = b.get('howpublished','')
year = b.get('year','')
if year == 'year?': year = 'Undated'
ref = ''
if title:
if not title[-1] in '?.!': title += '.'
ref += '<t>' + title + '</t> '
if author: ref += '<au>' + author + '</au>. '
ref += howpub
book_link_ls = []
books = global_vars['books']
# Books: now a list not a dictionary
'''
for bkey in books.keys():
if ref.find(bkey) >= 0:
ref = ref.replace(bkey,books[bkey]['ref'])
book_link_ls = books[bkey].get('link_ls',[])
'''
for t in tag_map.keys():
ref = ref.replace(t,tag_map[t])
ref += ' '
for link in b.get('link_ls',[]) + book_link_ls:
link = enhance_link(link)
ref += link2html(link)
return '<dt>' + year + '</dt><dd>' + ref + '</dd>\n'
def honor_cmp(h,k):
if h['text'] == k['text']:
return cmp( h['year'], k['year'] )
else:
return cmp( h['text'], k['text'] )
def link2html(link):
href = link.get('href','').strip()
if not href: return ''
anchor = link['anchor']
link['title'] = link.get('title',link.get('href',''))
link['target'] = link.get('target','_blank')
return '<a class="bracketed" href="' + link['href'] + '" title="' + link['title'] + '" target="' + link['target'] + '">' + anchor + '</a>'
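# Illustrative output (added for clarity): link2html({'href': 'http://example.org', 'anchor': 'Example'})
#   -> '<a class="bracketed" href="http://example.org" title="http://example.org" target="_blank">Example</a>'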
def link_honor(h):
link = {}
href = h.get('href','')
if href:
link['href'] = href
        link['anchor'] = h['text']
return link2html(link)
else: return h['text']
#elif h['text'] in org_url.keys():
# link['href'] = org_url[h['text']]
# link['anchor'] = h['text']
# return link2html(link)
#else:
# try:
# pre,post = h['text'].split(',',1)
# if pre in org_url.keys():
# link['href'] = org_url[pre]
# link['anchor'] = pre
# return link2html(link) + ',' + post
# except: pass
#return h['text']
def linkify(text):
text1 = ' '.join(text.split())
if text1 in org_url.keys():
linkz = {}
linkz['href'] = org_url[text1]
linkz['anchor'] = text
return link2html(linkz)
else: return text
def link_position(p):
ll = linkify(p)
if ll != p: return ll
else:
llinks = [ linkify(ph) for ph in p.split(',') ]
return ', '.join(llinks)
def read_section(section_txt):
#1963 Ph.D. University of California, Berkeley
# http://genealogy.math.ndsu.nodak.edu/id.php?id=34373
# :thesis_title On Optimal Stopping
    print section_txt
txt = '\n'.join( [ line.strip() for line in section_txt.split('\n') ])
d = {}
subsecs = txt.split('\n:')
toplines = subsecs[0]
try: top_line,toplines = toplines.split('\n',1)
except:
top_line = toplines
toplines = ''
d['top_line'] = top_line
#d['link_lines'] = toplines
d['link_ls'] = lines2link_ls(toplines)
for subsec in subsecs[1:]:
words = subsec.split()
key = words[0]
if len(words) > 1:
val = subsec.split(None,1)[1] ## keep newlines
else: val = ''
d[key] = val.strip() ## last value rules, like BibTeX
#if d.has_key(key): d[key] += [val]
#else: d[key] = [val]
return d
def read_bibitem(section_txt):
txt = '\n'.join( [ line.strip() for line in section_txt.split('\n') ])
d = {}
subsecs = txt.split('\n:')
toplines = subsecs[0]
try: top_line,toplines = toplines.split('\n',1)
except:
top_line = toplines
toplines = ''
d['top_line'] = top_line
d['bibtype'],d['id'] = d['top_line'].split()
try:
d['ref'],linklines = toplines.split('\n',1)
d['link_ls'] = lines2link_ls(linklines)
except:
d['ref'] = toplines
d['link_ls'] = []
for subsec in subsecs[1:]:
words = subsec.split()
key = words[0]
if len(words) > 1:
val = subsec.split(None,1)[1] ## keep newlines
else: val = ''
d[key] = val.strip() ## last value rules, like BibTeX
#if d.has_key(key): d[key] += [val]
#else: d[key] = [val]
return d
def read_book(section_txt):
txt = '\n'.join( [ line.strip() for line in section_txt.split('\n') ])
d = {}
subsecs = txt.split('\n:')
toplines = subsecs[0]
try: top_line,toplines = toplines.split('\n',1)
except:
top_line = toplines
toplines = ''
d['top_line'] = top_line
d['bibtype'],d['id'] = d['top_line'].split()
top_line_ls = toplines.split('\n')
d['title'] = top_line_ls[0]
d['creator'] = top_line_ls[1] ## au or editor. with (ed if editor)
d['howpublished'] = top_line_ls[2]
d['year'] = top_line_ls[3]
    linklines = '\n'.join(top_line_ls[4:])
d['link_ls'] = lines2link_ls(linklines)
for subsec in subsecs[1:]:
words = subsec.split()
key = words[0]
if len(words) > 1:
val = subsec.split(None,1)[1] ## keep newlines
else: val = ''
d[key] = val.strip() ## last value rules, like BibTeX
#if d.has_key(key): d[key] += [val]
#else: d[key] = [val]
d['ref'] = '<t>' + d['title'] + '</t>. '
if d.has_key('edition'): d['ref'] += '(' + d['edition'] + ' edition). '
d['ref'] += d['creator'] + '. '
d['ref'] += d['howpublished'] + '. '
return d
def read_4line_bibitem(section_txt):
txt = '\n'.join( [ line.strip() for line in section_txt.split('\n') ])
d = {}
subsecs = txt.split('\n:')
toplines = subsecs[0]
try: top_line,toplines = toplines.split('\n',1)
except:
top_line = toplines
toplines = ''
d['top_line'] = top_line
try:
d['bibtype'],d['id'] = d['top_line'].split()
except:
print 'Error parsing: ' + d['top_line']
toplines = toplines.split('\n')
d['title'] = toplines[0]
try:
d['author'] = toplines[1]
d['howpublished'] = toplines[2]
d['year'] = toplines[3]
except:
print 'Error parsing:'
print toplines
linklines = '\n'.join(toplines[3:])
d['link_ls'] = lines2link_ls(linklines)
for subsec in subsecs[1:]:
words = subsec.split()
key = words[0]
if len(words) > 1:
val = subsec.split(None,1)[1] ## keep newlines
else: val = ''
d[key] = val.strip() ## last value rules, like BibTeX
#if d.has_key(key): d[key] += [val]
#else: d[key] = [val]
return d
def read_degree(degree_txt):
#1963 Ph.D. University of California, Berkeley
# http://genealogy.math.ndsu.nodak.edu/id.php?id=34373
# :thesis_title On Optimal Stopping
d = read_section(degree_txt)
d['category'] = 'degree'
try: d['year'],rest = d['top_line'].split(None,1)
except:
d['year'] = d['top_line']
rest = ''
try: d['type'],rest = rest.split(None,1)
except:
d['type'] = rest
rest = ''
d['school'] = rest
del d['top_line']
return d
def read_generic(txt):
d = read_section(txt)
try:
d['year'],rest = d['top_line'].split(None,1)
d['year'] = d['year'].replace('_',' ')
except:
d['year'] = ''
rest = d['top_line']
d['text'] = rest
del d['top_line']
return d
def read_education(txt):
d = read_generic(txt)
d['category'] = 'education'
return d
def read_service(txt):
d = read_generic(txt)
d['category'] = 'service'
return d
def read_member(txt):
d = read_section(txt)
d['category'] = 'member'
d['text'] = d['top_line']
del d['top_line']
return d
def read_image(txt):
d = read_section(txt)
del d['link_ls'] ## all bare urls expected in the top_line
d['category'] = 'image'
words = d['top_line'].split()
hrefs = [ w for w in words if w[0:4] == 'http']
textwords = [ w for w in words if not w[0:4] == 'http']
if hrefs:
d['src'] = hrefs[0]
if len(hrefs) == 1: ## href and src are identical
d['href'] = hrefs[0]
elif len(hrefs) > 1:
d['href'] = hrefs[1]
## ignore if more than 2 urls
d['text'] = ' '.join(textwords)
del d['top_line']
return d
def read_homepage(txt):
d = read_section(txt)
del d['link_ls'] ## all bare urls expected in the top_line
d['category'] = 'homepage'
d['rel'] = 'homepage'
words = d['top_line'].split()
hrefs = [ w for w in words if w[0:4] == 'http']
textwords = [ w for w in words if not w[0:4] == 'http']
if hrefs:
if len(hrefs) >= 1: ## href
d['href'] = hrefs[0]
## ignore if more than 2 urls
d['anchor'] = ' '.join(textwords)
del d['top_line']
#print json.dumps(d)
return d
def read_honor(txt):
d = read_generic(txt)
d['category'] = 'honor'
return d
def read_position(txt):
d = read_generic(txt)
d['category'] = 'position'
return d
def read_bio(txt):
#d = read_bibitem(txt)
d = read_4line_bibitem(txt)
return d
def enhance_link(link):
rel = link.get('rel','')
if link['href'].find('mathscinet') >= 0:
link['anchor'] = 'MathSciNet'
link['rel'] = 'biblio'
link['status'] = 'top'
elif link['href'].find('academic.research.microsoft.com') >= 0:
if not link.get('anchor','').find('Microsoft Academic:') >= 0:
link['anchor'] = 'Microsoft Academic: ' + link.get('anchor','')
link['rel'] = 'biblio'
elif link['href'].find('scholar.google') >= 0:
link['anchor'] = 'Google Scholar'
link['rel'] = 'biblio'
link['status'] = 'top'
elif link['href'].find('celebratio') >= 0:
link['anchor'] = 'Celebratio'
link['rel'] = 'biblio'
link['status'] = 'top'
elif link['href'].find('zbmath') >= 0:
link['anchor'] = 'zbMATH'
link['rel'] = 'biblio'
link['status'] = 'top'
elif rel.find('pubspage') >= 0:
link['anchor'] = 'Publication List'
link['rel'] = 'biblio'
elif rel.find('homepage') >= 0:
link['anchor'] = 'Homepage'
link['rel'] = 'homepage'
link['status'] = 'top'
elif rel.find('bookspage') >= 0:
link['anchor'] = 'Books List'
link['rel'] = 'biblio'
elif link['href'].find('genealogy.math') >= 0:
link['anchor'] = 'Math. Genealogy'
link['rel'] = 'educ'
elif link['href'].find('wikipedia') >= 0:
link['anchor'] = 'Wikipedia'
link['rel'] = 'bio'
link['status'] = 'top'
else:
link['rel'] = 'misc'
anchor = link.get('anchor','').strip()
if not anchor:
link['anchor'] = link['href'].split('/')[2]
return link
#def read_bio(person):
# bios = []
# for link in person.get('link_ls',[]):
# rel = link.get('rel','')
# if link['href'].find('wikipedia') >= 0:
# link['anchor'] = 'Wikipedia'
# bios += [link]
# return bios
def honors_html(person):
honors = person['Honor']
#print honors
h = ''
if len(honors) == 1: heading = '<h4>Honor</h4>\n'
else: heading = '<h4>Honors</h4>\n'
rows = ''
honors.sort(year_cmp)
for h in honors:
rows += make_honor_rows(h)
return heading + dl_template.replace('$rows$',rows)
def career_html(person):
positions = person['Position']
if not positions: return ''
heading = '<h4>Career</h4>\n'
positions.sort(year_cmp)
rows = ''
for h in positions:
rows += make_row(h)
return heading + dl_template.replace('$rows$',rows)
def service_html(person):
positions = person['Service']
#print positions
if not positions: return ''
heading = '<h4>Professional Service</h4>\n'
positions.sort(year_cmp)
rows = ''
for h in positions:
#print h.get('text','')
rows += make_row(h)
return heading + dl_template.replace('$rows$',rows)
def member_html(person):
person.get('Member',[])
positions = person['Member']
if not positions: return ''
heading = '<h4>Membership</h4>\n'
rows = ''
for h in positions:
rows += make_row(h)
return heading + dl_template.replace('$rows$',rows)
def educ_html(person):
educ = person.get('Education',[]) + person.get('Degree',[])
if not educ: return ''
#educ.sort(year_cmp)
h = ''
heading = '<h4>Education</h4>\n'
rows = ''
for d in educ:
if d['category'] == 'degree':
# pprint (d)
d['text'] = d.get('type','') + ', ' + d.get('school','') + ' ' ## could hyperlink here
for link in d.get('link_ls',[]):
link = enhance_link(link)
d['text'] += "(" + link2html(link) + ")"
rows += make_row(d)
if d.get('thesis_title','').strip():
e = {}
e['year'] = ''
#e['text'] = '<em>Thesis: </em>' +
e['text'] = '"' + d['thesis_title'] + '"'
rows += make_row(e)
else:
rows += make_row(d)
return heading + dl_template.replace('$rows$',rows)
def biblio_html(person):
heading = '<h4>Bibliography</h4>\n'
rows = ''
for b in person.get('link_ls',[]):
bb = enhance_link(b)
if bb.get('rel','') == 'biblio':
rows += make_biblio_rows(bb)
return heading + dl_template.replace('$rows$',rows)
def source_data_html(person):
templ = '''<h4 class="alt js-collapse js-collapseoff">Source Data</h4>
<div class="collapsed-region">
<pre>$text$</pre>
</div>'''
h = templ.replace('$text$',person['public_record_txt'])
return h
def bio_html(person):
p = ''
for c in bio_cat_order[0:]:
bios = person.get(c,[])
if not bios: continue
elif len(bios) == 1:
heading = '<h4>' + c.replace('_',' ') + '</h4>\n'
elif len(bios) > 1:
heading = '<h4>' + bio_cat_plural[c].replace('_',' ') + '</h4>\n'
rows = ''
bios.sort(year_cmp)
for b in bios:
## Let's check what's going on with wilks__samuel_s where one of the bios comes back as None
if b:
# print "BIOROW:"
# pprint (b)
rows += make_bio_rows(b)
p += heading + dl_template.replace('$rows$',rows)
return p
def make_mg_data(data):
p = ''
mg2ims = {}
ims2mg = {}
ims2mg_txt = {}
urls = []
for d in data['records']:
for link in d['link_ls']:
if link['href'].find('genealogy') >= 0:
mg2ims[ link['href'] ] = d['id']
urls += [link['href']]
mgrecords = math_genealogy_api.get_items_by_id(urls)
for i in range(len(urls))[0:]:
u = urls[i]
r = mgrecords[i]
ims2mg[ mg2ims[u] ] = r
items = []
for k in ims2mg.keys():
r = ims2mg[ k ]
p = ':: ' + k + '\n'
mgurl = ims2mg[k]['id'].replace('math_genealogy_author:','http://genealogy.math.ndsu.nodak.edu/id.php?id=')
p += ':Degree ' + r.get('degree_year','?year?') + ' ' + r.get('degree_type','?type?') + ' ' + r.get('degree_school','?school?') + '\n\t' + mgurl + '\n'
p += '\t:thesis_title ' + ' '.join( r.get('thesis_title', '').split() ) + '\n'
ims2mg_txt[k] = p
#items += [p]
#items.sort()
#p = '\n'.join(items) + '\n'
#outfile = open('degree_data.txt','w')
#outfile.write(p.encode('utf-8'))
#outfile.close()
return ims2mg,ims2mg_txt
def year_test(txt):
if len(txt) != 4: return False
try:
txt = str(int(txt))
if txt[0:2] in '18 19 20'.split(): return True
else: return False
except: return False
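# Illustrative: year_test('1987') -> True, year_test('2101') -> False, year_test('87') -> False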
def add_mg_data():
data = read_ims_legacy('ims_legacy_0')
ims2mg,ims2mg_txt = make_mg_data(data)
p = data['metadata']['record_txt']
p += '\n'
for d in data['records']:
r = d['record_txt']
r = r.replace(':Honor \n',':Honor ') ## to be deprecated soon
r = r.replace(':Honor\n',':Honor ') ## to be deprecated soon
r = r.replace(':Honor \n',':Honor ') ## to be deprecated soon
p += r
degree_text = ims2mg_txt.get(d['id'],'')
try: degree_text = degree_text.split(':Degree',1)[1].strip()
except: degree_text = ''
words = degree_text.split()
if words:
y = words[0]
if not year_test(y):
if not y == '?year?':
degree_text = '?year? ' + degree_text
degree_text = ':Degree ' + degree_text
p += degree_text
p += '\n\n'
p = p.encode('utf-8')
outfile = open('xxims_legacy.txt','w')
outfile.write(p)
outfile.close()
#print p
def add_microsoft_data():
data = read_ims_legacy('ims_legacy_14')
    import microsoft_academic_api
    microsoft_data = microsoft_academic_api.read_dict()
p = data['metadata']['record_txt']
for d in data['records']:
addtxt = ''
for link in d['link_ls']:
if link['href'] in microsoft_data.keys():
md = microsoft_data[link['href']]
src_url = md.get('microsoft_photo_url','')
ph_url = md.get('photo_url','')
home_url = md.get('homepage_url','')
if home_url:
addtxt += ':Homepage ' + home_url + '\n'
if src_url:
addtxt += ':Image ' + src_url + ' '
if ph_url:
addtxt += ph_url + '\n'
pre,post = d['record_txt'].split('\n:Honor',1)
p += '\n' + pre + '\n' + addtxt + ':Honor' + post
outfile = open('ims_legacy.txt','w')
outfile.write(p.encode('utf-8'))
outfile.close()
def add_bio_data():
data = read_ims_legacy('ims_legacy_3')
bios = open('items/biographies.txt')
t = bios.read()
misc = open('items/misc.txt')
t += misc.read()
obits = open('items/obituaries.txt')
t += obits.read()
#t = unicode(t,'utf-8')
t = '\n' + t.strip()
items = t.split('\n::')[1:]
bios = {}
for item in items[1:]:
lines = item.split('\n')
lines = [ json.loads('"' + line + '"') for line in lines ]
lines = ['\t' + line for line in lines ]
try:
cat, ii = lines[-2].split()
#bios[ii] = bios.get(ii,'') + ':Biography ' + '\n'.join(lines[0:-2]) + '\n'
bios[ii] = bios.get(ii,'') + ':' + cat.split('_of')[0].replace('@','') + ' ' + '\n'.join(lines[0:-2]) + '\n'
except: print lines
#for ii in bios.keys():
# print bios[ii].encode('utf-8')
p = data['metadata']['record_txt']
p += '\n'
for d in data['records']:
r = d['record_txt']
p += r
bio_txt = bios.get(d['id'],'')
p += bio_txt
p += '\n\n'
p = p.encode('utf-8')
outfile = open('ims_legacy.txt','w')
outfile.write(p)
outfile.close()
#print p
def getf(d,k):
dd = d.get(k,[])
if len(dd) > 0: return dd[0]
else: return ''
def make_basic_bio_html(d):
html = ''
# Let's store the IMS ids as *comments* in the HTML, since I don't think the end user really wants to see them
if d.has_key('ims_id'):
html += '\n<!--<p><b>IMS Id:</b> '+ d['ims_id'] + ' </p>-->'
if d.has_key('alt_ims_id'):
html += '\n<!--<p><b>Alt. IMS Id:</b> '+ d['alt_ims_id'] + ' </p>-->'
if d.has_key('alt_name'):
html += '\n<p><b>Also known as:</b> '+ d['alt_name'] + ' </p>'
if d.has_key('birth_place'):
html += '\n<p><b>Place of birth:</b> '+ d['birth_place'] + ' </p>'
return html
def make_dates_html(d):
templ = '<p class="lifespan">DOB – DOD</p>'
if d.has_key('DOB'):
templ = templ.replace('DOB', show_date( d['DOB'] ) )
else:
templ = templ.replace('DOB', '?' )
if d.has_key('DOD'):
templ = templ.replace('DOD', show_date( d['DOD'] ) )
else:
if d.has_key('DOB'):
templ = templ.replace('DOD', '--' )
else:
templ = templ.replace('DOD', '?' )
return templ
def make_dates_plain_html(d):
templ = 'DOB - DOD<br>'
if d.has_key('DOB') and d.has_key('DOD'):
for k in 'DOB DOD'.split(): templ = templ.replace(k, show_date( getf(d,k) ) )
return templ
else: return ''
def top_links_html(d):
html = ''
for link in d.get('link_ls',[]) + d.get('Homepage',[]):
link = enhance_link(link)
if link.get('status','') == 'top':
html += link2html(link) + "<br>"
return html
# I've added a fallback mechanism in case there is no comma.
def first_first(name):
# print "NAME: " + name
ret = ''
try:
last,first = name.split(',',1)
ret = first.strip() + ' ' + last.strip()
except:
ret = name
return ret
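# Illustrative: first_first('Blackwell, David') -> 'David Blackwell';
# a name without a comma is returned unchanged.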
def add_html(d):
# pprint (d)
    heading = '<h2>' + first_first(d['complete_name']) + '</h2>'
    d['heading_html'] = heading if isinstance(heading, unicode) else unicode(heading, 'utf-8')
d['basic_bio_html'] = make_basic_bio_html(d)
d['photo_html'] = make_images_html(d)
d['dates_html'] = make_dates_html(d)
d['dates_plain_html'] = make_dates_plain_html(d)
d['top_links_html'] = top_links_html(d)
# print "Name: "+d['heading_html']
content = ''
content += educ_html(d)
content += career_html(d)
content += honors_html(d)
content += service_html(d)
content += member_html(d)
content += bio_html(d)
# content += biblio_html(d)
#content += source_data_html(d)
d['body_html'] = content
d['html'] = d['heading_html'] + d['photo_html'] + d['basic_bio_html'] + d['dates_html'] + d['top_links_html'] + d['body_html']
return d
def make_all():
#infile = open('celebratio_cv.html')
#infile = open('/accounts/projects/jpopen/apache/htdocs/tmp/imslegacy/celebratio_cv_template.html')
out_dir = global_vars['published_files_directory']
infile = open('./celebratio_cv_template.html')
html = infile.read()
html = unicode(html,'utf-8')
#Xh2topX Xh3topX XbodytitleX XtestdisplayX XasideX XcontentX
data = text_json.read_latex("blackwell_new")
#data = read_ims_legacy('ims_legacy')
#print data['link_ls']
#print data['records']
print 'Read ' + str(len(data['records'])) + ' records from ims_legacy.txt'
global_vars['link_ls'] = data['link_ls']
global_vars['books'] = data['books']
#print global_vars['link_ls']
#for d in data['records']:
#print len(d)
#SHOULD BE A DICT
records = [ add_html(d) for d in data['records'] ]
tot = len(records)
print 'Making xml for ' + str(tot) + ' records'
#make_xml(records)
content = ''
#finish = False
for d in records:
#content = ''
name = d['complete_name']
content += d['html']
content += '<hr><br><br>'
filename = name + '.html'
f = open(filename, 'w')
f.write(content.encode('utf-8'))
f.close()
print "made new file: " + filename
html = html.replace('$covertext$',content)
html = html.replace('$title$','IMS Scientific Legacy')
#tmpfname = 'tmp/imslegacy/html/test_images.html'
#tmpfname = 'tmp/imslegacy/celebratio.html'
tmpfname = out_dir + 'celebratio_three.html'
outfile = open(tmpfname , 'w')
outfile.write(html.encode('utf-8'))
outfile.close()
#print 'wrote to http://bibserver.berkeley.edu/' + tmpfname
    print 'wrote ' + tmpfname
def enhance_bio(d):
ref = d['ref']
ref = ref.replace('author>','au>')
ref = ref.replace('year>','y>')
ref = ref.replace('journal>','j>')
ref = ref.replace('title>','t>')
ref = ref.replace('bt>','t>')
try:
ayt, howpublished = ref.split('</t>',1)
howpublished = ' '.join(howpublished.split())
if howpublished[0] == '.':
howpublished = howpublished[1:].strip()
d['howpublished'] = howpublished
except:
d['howpublished'] = ref
try: title = ref.split('<t>',1)[1].split('</t>')[0].strip()
except: title = ''
if title: d['title'] = title
else: d['title'] = 'Untitled'
try: author = ref.split('<au>',1)[1].split('</au>')[0].strip()
except: author = ''
if author: d['author'] = author
else: d['author'] = 'Anonymous'
try: year = ref.split('<y>',1)[1].split('</y>')[0].strip()
except: year = ''
if not year:
try: year = ref.split('(',1)[1].split(')')[0]
except: year = ''
if not year: year = get_year_from_str(ref)
if year: d['year'] = year
else: d['year'] = 'year?'
return d
def parse(text,category):
#data = read_ims_legacy('ims_legacy_6')
bios = text.split(':' + category)
p = bios[0]
for b in bios[1:]:
try:
b,rest = b.split('\n:',1)
rest = '\n:' + rest
except: rest = ''
d = read_bibitem(b)
d = enhance_bio(d)
t = ''
t += ':' + category + ' ' + d['top_line'] + '\n'
t += '\t' + ' '.join(d.get('title','Untitled').split()) + '\n'
t += '\t' + ' '.join(d.get('author','Anonymous').split()) + '\n'
t += '\t' + ' '.join(d.get('howpublished','howpublished?').split()) + '\n'
t += '\t' + ' '.join(d.get('year','year?').split()) + '\n'
for link in d.get('link_ls',[]):
t += '\t' + link['href'] + ' ' + link.get('anchor','') + '\n'
t+= '\t:ref ' + d['ref'] + '\n'
p += t
p += rest
return p
def parse_all():
categories = '''
Biography
Archive
Art_Exhibition
Collected_Works
DVD
Endowment
Festschrift
Memoir
Oral_History
Selected_Works
Symposium
Death_Notice
In_Memoriam
Obituary
'''.split()
infile = open('ims_legacy_7.txt')
txt = infile.read()
txt = unicode(txt, 'utf-8')
for c in categories[0:]:
txt = parse(txt,c)
txt = txt.encode('utf-8')
outfile = open('xxims_legacy.txt','w')
outfile.write(txt)
outfile.close()
def missing_emails():
data = read_ims_legacy()
for d in data['records'][0:]:
if not d.get('Deceased',[]):
if not d.get('Email',[]):
print d['record_txt'].encode('utf-8')
def make_one(filename):
html = ""
data = text_json.read_latex(filename)
global_vars['books'] = data['books']
records = [ add_html(d) for d in data['records'] ]
tot = len(records)
# print 'Making xml for ' + str(tot) + ' records'
for d in records:
content = ''
name = d['complete_name']
content += d['html']
content += '<hr><br><br>'
html = content
# print html
return html
if __name__ == '__main__':
make_all()
publish_ims_legacy()
#missing_emails()
| 37.833241
| 325
| 0.511534
|
fce77e9fb7a177f20a71fb337c8eb9e006e192f9
| 60
|
py
|
Python
|
udemy/python-video-workbook/my_progress/060.py
|
djrgit/coursework
|
2a91da9b76cb1acbd12f3d8049f15d2e71f475a1
|
[
"MIT"
] | null | null | null |
udemy/python-video-workbook/my_progress/060.py
|
djrgit/coursework
|
2a91da9b76cb1acbd12f3d8049f15d2e71f475a1
|
[
"MIT"
] | null | null | null |
udemy/python-video-workbook/my_progress/060.py
|
djrgit/coursework
|
2a91da9b76cb1acbd12f3d8049f15d2e71f475a1
|
[
"MIT"
] | 3
|
2018-08-13T23:14:22.000Z
|
2019-01-11T22:50:07.000Z
|
# Exercise 60 - Infinite Printer
while True:
print('Hello')
| 20
| 32
| 0.733333
|
f5e9588d5b8f7deb4bea91935ca7cd4a8a3a9b2a
| 19,929
|
py
|
Python
|
tests/Hydro/RayleighTaylor/RT-const-rho.py
|
as1m0n/spheral
|
4d72822f56aca76d70724c543d389d15ff6ca48e
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 19
|
2020-10-21T01:49:17.000Z
|
2022-03-15T12:29:17.000Z
|
tests/Hydro/RayleighTaylor/RT-const-rho.py
|
markguozhiming/spheral
|
bbb982102e61edb8a1d00cf780bfa571835e1b61
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 41
|
2020-09-28T23:14:27.000Z
|
2022-03-28T17:01:33.000Z
|
tests/Hydro/RayleighTaylor/RT-const-rho.py
|
markguozhiming/spheral
|
bbb982102e61edb8a1d00cf780bfa571835e1b61
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 5
|
2020-11-03T16:14:26.000Z
|
2022-01-03T19:07:24.000Z
|
#ATS:test(SELF, "--CRKSPH=True --nx1=128 --nx2=128 --ny1=256 --ny2=512 --cfl=0.25 --Cl=1.0 --Cq=1.0 --clearDirectories=False --filter=0 --nPerh=1.51 --serialDump=True", label="RT CRK, nPerh=1.5", np=16)
#ATS:test(SELF, "--CRKSPH=True --nx1=128 --nx2=128 --ny1=256 --ny2=512 --cfl=0.25 --Cl=1.0 --Cq=1.0 --clearDirectories=False --filter=0 --nPerh=2.01 --serialDump=True", label="RT CRK, nPerh=2.0", np=16)
#ATS:test(SELF, "--CRKSPH=False --nx1=128 --nx2=128 --ny1=256 --ny2=512 --cfl=0.25 --Cl=1.0 --Cq=1.0 --clearDirectories=False --filter=0 --nPerh=1.51 --serialDump=True", label="RT Spheral, nPerh=1.5", np=16)
#ATS:test(SELF, "--CRKSPH=False --nx1=128 --nx2=128 --ny1=256 --ny2=512 --cfl=0.25 --Cl=0.0 --Cq=0.0 --clearDirectories=False --filter=0 --nPerh=1.51 --serialDump=True", label="RT Spheral-NoQ, nPerh=1.5", np=16)
#ATS:test(SELF, "--CRKSPH=False --nx1=128 --nx2=128 --ny1=256 --ny2=512 --cfl=0.25 --Cl=0.0 --Cq=0.0 --clearDirectories=False --filter=0 --nPerh=1.51 --serialDump=True --compatibleEnergy=False", label="RT TSPH-NoQ, nPerh=1.5", np=16)
#-------------------------------------------------------------------------------
# This is the basic Rayleigh-Taylor Problem
#-------------------------------------------------------------------------------
import shutil
from math import *
from Spheral2d import *
from SpheralTestUtilities import *
from SpheralGnuPlotUtilities import *
from findLastRestart import *
from GenerateNodeDistribution2d import *
from CompositeNodeDistribution import *
from CentroidalVoronoiRelaxation import *
import mpi
import DistributeNodes
title("Rayleigh-Taylor test problem in 2D")
class ExponentialDensity:
def __init__(self,
y1,
rho0,
alpha):
self.y1 = y1
self.rho0 = rho0
self.alpha = alpha
return
def __call__(self, r):
return self.rho0*exp(self.alpha*(r.y - self.y1))
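# Note (added for clarity): the profile returns rho0 at y = y1 and varies as exp(alpha*(y - y1))
# away from the interface; this constant-density variant of the test does not use it below.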
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(nx1 = 50,
ny1 = 100,
nx2 = 50,
ny2 = 200,
reso = 1, # optional scale modifier for the resolution in all directions
rho0 = 1.0,
eps0 = 1.0,
x0 = 0.0,
x1 = 1.0,
y0 = 0.0,
y1 = 2.0, # position of the interface
y2 = 6.0,
P0 = 1.0, # pressure at top of simulation (y2)
freq = 1.0,
alpha = 0.01, # amplitude of displacement
beta = 5.0, # speed at which displacement decays away from midline
S = 2.0, # density jump at surface
g0 = -2.0, # gravitational acceleration
gamma = 5.0/3.0,
mu = 1.0,
nPerh = 1.51,
SVPH = False,
CRKSPH = False,
ASPH = False,
SPH = True, # This just chooses the H algorithm -- you can use this with CRKSPH for instance.
filter = 0.0, # CRKSPH filtering
Qconstructor = MonaghanGingoldViscosity,
#Qconstructor = TensorMonaghanGingoldViscosity,
linearConsistent = False,
fcentroidal = 0.0,
fcellPressure = 0.0,
boolReduceViscosity = False,
nh = 5.0,
aMin = 0.1,
aMax = 2.0,
Qhmult = 1.0,
Cl = 1.0,
Cq = 1.0,
linearInExpansion = False,
Qlimiter = False,
balsaraCorrection = False,
epsilon2 = 1e-2,
hmin = 0.0001,
hmax = 0.5,
hminratio = 0.1,
cfl = 0.5,
useVelocityMagnitudeForDt = False,
XSPH = False,
epsilonTensile = 0.0,
nTensile = 8,
IntegratorConstructor = CheapSynchronousRK2Integrator,
goalTime = 5.0,
steps = None,
vizCycle = None,
vizTime = 0.01,
dt = 0.0001,
dtMin = 1.0e-8,
dtMax = 0.1,
dtGrowth = 2.0,
maxSteps = None,
statsStep = 10,
smoothIters = 0,
HUpdate = IdealH,
domainIndependent = False,
rigorousBoundaries = False,
dtverbose = False,
densityUpdate = RigorousSumDensity, # VolumeScaledDensity,
compatibleEnergy = True, # <--- Important! rigorousBoundaries does not work with the compatibleEnergy algorithm currently.
gradhCorrection = False,
useVoronoiOutput = False,
clearDirectories = False,
restoreCycle = None,
restartStep = 100,
redistributeStep = 500,
checkRestart = False,
dataDir = "dumps-Rayleigh-Taylor-2d-constRho",
outputFile = "None",
comparisonFile = "None",
serialDump = False, #whether to dump a serial ascii file at the end for viz
bArtificialConduction = False,
arCondAlpha = 0.5,
)
nx1 = nx1*reso
nx2 = nx2*reso
ny1 = ny1*reso
ny2 = ny2*reso
#-------------------------------------------------------------------------------
# Computing and printing the growth rate
#-------------------------------------------------------------------------------
atwood = (S-1.0)/(S+1.0)
zdot = sqrt(freq*atwood*abs(g0))
print "\n\n\nzdot = exp({0:3.3e}*t) <-<-<-<-<-<-<-<-<-<------\n\n\n".format(zdot)
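# With the defaults (S = 2.0, freq = 1.0, g0 = -2.0): atwood = 1/3 and zdot = sqrt(2/3) ~= 0.816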
# Decide on our hydro algorithm.
if SVPH:
if ASPH:
HydroConstructor = ASVPHFacetedHydro
else:
HydroConstructor = SVPHFacetedHydro
elif CRKSPH:
if ASPH:
HydroConstructor = ACRKSPHHydro
else:
HydroConstructor = CRKSPHHydro
else:
if ASPH:
HydroConstructor = ASPHHydro
else:
HydroConstructor = SPHHydro
dataDir = os.path.join(dataDir,
"S=%g" % (S),
"CRKSPH=%s" % CRKSPH,
str(HydroConstructor).split("'")[1].split(".")[-1],
"densityUpdate=%s" % (densityUpdate),
"XSPH=%s" % XSPH,
"filter=%s" % filter,
"%s-Cl=%g-Cq=%g" % (str(Qconstructor).split("'")[1].split(".")[-1], Cl, Cq),
"%ix%i" % (nx1, ny1 + ny2),
"nPerh=%g-Qhmult=%g" % (nPerh, Qhmult))
restartDir = os.path.join(dataDir, "restarts")
vizDir = os.path.join(dataDir, "visit")
restartBaseName = os.path.join(restartDir, "Rayleigh-Taylor-2d")
vizBaseName = "Rayleigh-Taylor-2d"
#-------------------------------------------------------------------------------
# CRKSPH Switches to ensure consistency
#-------------------------------------------------------------------------------
if CRKSPH:
Qconstructor = CRKSPHMonaghanGingoldViscosity
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
import os, sys
if mpi.rank == 0:
if clearDirectories and os.path.exists(dataDir):
shutil.rmtree(dataDir)
if not os.path.exists(restartDir):
os.makedirs(restartDir)
if not os.path.exists(vizDir):
os.makedirs(vizDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# If we're restarting, find the set of most recent restart files.
#-------------------------------------------------------------------------------
if restoreCycle is None:
restoreCycle = findLastRestart(restartBaseName)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
WT = TableKernel(BSplineKernel(), 1000)
WTPi = TableKernel(BSplineKernel(), 1000, Qhmult)
output("WT")
output("WTPi")
kernelExtent = WT.kernelExtent
#-------------------------------------------------------------------------------
# Make the NodeList.
#-------------------------------------------------------------------------------
nodes1 = makeFluidNodeList("High density gas", eos,
hmin = hmin,
hmax = hmax,
hminratio = hminratio,
nPerh = nPerh)
nodes2 = makeFluidNodeList("Low density gas", eos,
hmin = hmin,
hmax = hmax,
hminratio = hminratio,
nPerh = nPerh)
nodeSet = [nodes1, nodes2]
for nodes in nodeSet:
output("nodes.name")
output("nodes.hmin")
output("nodes.hmax")
output("nodes.hminratio")
output("nodes.nodesPerSmoothingScale")
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
if restoreCycle is None:
generator1 = GenerateNodeDistribution2d(nx1, ny1,
rho = rho0/S,
distributionType = "lattice",
xmin = (x0,y0),
xmax = (x1,y1),
nNodePerh = nPerh,
SPH = SPH)
generator2 = GenerateNodeDistribution2d(nx2, ny2,
rho = rho0,
distributionType = "lattice",
xmin = (x0,y1),
xmax = (x1,y2),
nNodePerh = nPerh,
SPH = SPH)
if mpi.procs > 1:
from VoronoiDistributeNodes import distributeNodes2d
else:
from DistributeNodes import distributeNodes2d
distributeNodes2d((nodes1, generator1),
(nodes2, generator2))
# A helpful method for setting y displacement.
def dy(ri):
thpt = alpha*cos(2.0*pi*ri.x*freq)
return thpt*exp(-beta*abs(ri.y-y1))
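# dy() seeds the instability: a cosine perturbation of amplitude alpha along x, damped by
# exp(-beta*|y - y1|) so the displacement decays away from the material interface at y1.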
# Finish initial conditions.
eps1 = nodes1.specificThermalEnergy()
eps2 = nodes2.specificThermalEnergy()
pos1 = nodes1.positions()
pos2 = nodes2.positions()
rho1 = rho0/S
rho2 = rho0
P01 = P0 + g0*(y1-y2)*(rho2-rho1)
P02 = P0
for i in xrange(nodes1.numInternalNodes):
y = pos1[i].y
eps1[i] = (P01+g0*rho1*(y-y2))/((gamma-1.0)*rho1)
for i in xrange(nodes2.numInternalNodes):
y = pos2[i].y
eps2[i] = (P02+g0*rho2*(y-y2))/((gamma-1.0)*rho2)
#nodes1.specificThermalEnergy(ScalarField("tmp", nodes1, eps0))
#nodes2.specificThermalEnergy(ScalarField("tmp", nodes2, eps0/S))
for nodes in (nodes1,nodes2):
pos = nodes.positions()
vel = nodes.velocity()
for i in xrange(nodes.numInternalNodes):
pos[i].y += dy(pos[i])
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
for nodes in nodeSet:
db.appendNodeList(nodes)
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq, linearInExpansion)
q.epsilon2 = epsilon2
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
output("q")
output("q.Cl")
output("q.Cq")
output("q.epsilon2")
output("q.limiter")
output("q.balsaraShearCorrection")
output("q.linearInExpansion")
output("q.quadraticInExpansion")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
if SVPH:
hydro = HydroConstructor(W = WT,
Q = q,
cfl = cfl,
useVelocityMagnitudeForDt = useVelocityMagnitudeForDt,
compatibleEnergyEvolution = compatibleEnergy,
densityUpdate = densityUpdate,
XSVPH = XSPH,
linearConsistent = linearConsistent,
generateVoid = False,
HUpdate = HUpdate,
fcentroidal = fcentroidal,
fcellPressure = fcellPressure,
xmin = Vector(-2.0, -2.0),
xmax = Vector(3.0, 3.0))
# xmin = Vector(x0 - 0.5*(x2 - x0), y0 - 0.5*(y2 - y0)),
# xmax = Vector(x2 + 0.5*(x2 - x0), y2 + 0.5*(y2 - y0)))
elif CRKSPH:
hydro = HydroConstructor(W = WT,
WPi = WTPi,
Q = q,
filter = filter,
cfl = cfl,
useVelocityMagnitudeForDt = useVelocityMagnitudeForDt,
compatibleEnergyEvolution = compatibleEnergy,
XSPH = XSPH,
densityUpdate = densityUpdate,
HUpdate = HUpdate)
else:
hydro = HydroConstructor(W = WT,
WPi = WTPi,
Q = q,
cfl = cfl,
useVelocityMagnitudeForDt = useVelocityMagnitudeForDt,
compatibleEnergyEvolution = compatibleEnergy,
gradhCorrection = gradhCorrection,
XSPH = XSPH,
densityUpdate = densityUpdate,
HUpdate = HUpdate,
epsTensile = epsilonTensile,
nTensile = nTensile)
output("hydro")
output("hydro.kernel()")
output("hydro.PiKernel()")
output("hydro.cfl")
output("hydro.compatibleEnergyEvolution")
output("hydro.densityUpdate")
output("hydro.HEvolution")
packages = [hydro]
#-------------------------------------------------------------------------------
# Construct the MMRV physics object.
#-------------------------------------------------------------------------------
if boolReduceViscosity:
evolveReducingViscosityMultiplier = MorrisMonaghanReducingViscosity(q,nh,aMin,aMax)
packages.append(evolveReducingViscosityMultiplier)
#-------------------------------------------------------------------------------
# Construct the Artificial Conduction physics object.
#-------------------------------------------------------------------------------
if bArtificialConduction:
#q.reducingViscosityCorrection = True
ArtyCond = ArtificialConduction(WT,arCondAlpha)
packages.append(ArtyCond)
#-------------------------------------------------------------------------------
# Construct the gravitational acceleration object.
#-------------------------------------------------------------------------------
gravity1 = ConstantAcceleration2d(Vector2d(0.0, g0),
nodes1)
gravity2 = ConstantAcceleration2d(Vector2d(0.0, g0),
nodes2)
packages.append(gravity1)
packages.append(gravity2)
#-------------------------------------------------------------------------------
# Create boundary conditions.
#-------------------------------------------------------------------------------
xp1 = Plane(Vector(x0, y0), Vector( 1.0, 0.0))
xp2 = Plane(Vector(x1, y0), Vector(-1.0, 0.0))
yp1 = Plane(Vector(x0, y0), Vector(0.0, 1.0))
yp2 = Plane(Vector(x0, y2), Vector(0.0, -1.0))
xbc = PeriodicBoundary(xp1, xp2)
#ybc = PeriodicBoundary(yp1, yp2)
ybc1 = ReflectingBoundary(yp1)
ybc2 = ReflectingBoundary(yp2)
bcSet = [xbc, ybc1, ybc2]
#bcSet = [xbc,ybc1]
for bc in bcSet:
for p in packages:
p.appendBoundary(bc)
#-------------------------------------------------------------------------------
# Construct a time integrator, and add the physics packages.
#-------------------------------------------------------------------------------
integrator = IntegratorConstructor(db)
for p in packages:
integrator.appendPhysicsPackage(p)
integrator.lastDt = dt
integrator.dtMin = dtMin
integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
integrator.domainDecompositionIndependent = domainIndependent
integrator.verbose = dtverbose
integrator.rigorousBoundaries = rigorousBoundaries
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
output("integrator.dtGrowth")
output("integrator.domainDecompositionIndependent")
output("integrator.rigorousBoundaries")
output("integrator.verbose")
#-------------------------------------------------------------------------------
# Make the problem controller.
#-------------------------------------------------------------------------------
if useVoronoiOutput:
import SpheralVoronoiSiloDump
vizMethod = SpheralVoronoiSiloDump.dumpPhysicsState
else:
import SpheralPointmeshSiloDump
vizMethod = SpheralPointmeshSiloDump.dumpPhysicsState
control = SpheralController(integrator, WT,
initializeDerivatives = True,
statsStep = statsStep,
restartStep = restartStep,
restartBaseName = restartBaseName,
restoreCycle = restoreCycle,
redistributeStep = redistributeStep,
vizMethod = vizMethod,
vizBaseName = vizBaseName,
vizDir = vizDir,
vizStep = vizCycle,
vizTime = vizTime,
SPH = SPH)
output("control")
#-------------------------------------------------------------------------------
# Advance to the end time.
#-------------------------------------------------------------------------------
if not steps is None:
control.step(steps)
else:
control.advance(goalTime, maxSteps)
control.updateViz(control.totalSteps, integrator.currentTime, 0.0)
control.dropRestartFile()
if serialDump:
procs = mpi.procs
rank = mpi.rank
serialData = []
i,j = 0,0
for i in xrange(procs):
for nodeL in nodeSet:
if rank == i:
for j in xrange(nodeL.numInternalNodes):
serialData.append([nodeL.positions()[j],3.0/(nodeL.Hfield()[j].Trace()),nodeL.mass()[j],nodeL.massDensity()[j],nodeL.specificThermalEnergy()[j]])
serialData = mpi.reduce(serialData,mpi.SUM)
if rank == 0:
f = open(dataDir + "/serialDump.ascii",'w')
for i in xrange(len(serialData)):
f.write("{0} {1} {2} {3} {4} {5} {6} {7}\n".format(i,serialData[i][0][0],serialData[i][0][1],0.0,serialData[i][1],serialData[i][2],serialData[i][3],serialData[i][4]))
f.close()
| 39.778443
| 234
| 0.461639
|
4746981218592920a10201e52490c67bf9063e83
| 22,685
|
py
|
Python
|
Code/study_code.py
|
mit-ll/DephyBootInterface
|
0eb31d2c723f1567b63777163ad45c9b06fb5c43
|
[
"BSD-3-Clause"
] | null | null | null |
Code/study_code.py
|
mit-ll/DephyBootInterface
|
0eb31d2c723f1567b63777163ad45c9b06fb5c43
|
[
"BSD-3-Clause"
] | null | null | null |
Code/study_code.py
|
mit-ll/DephyBootInterface
|
0eb31d2c723f1567b63777163ad45c9b06fb5c43
|
[
"BSD-3-Clause"
] | null | null | null |
# DISTRIBUTION STATEMENT A. Approved for public release. Distribution is unlimited.
#
# This material is based upon work supported by the Under Secretary of Defense for Research and Engineering under Air Force Contract No. FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the Under Secretary of Defense for Research and Engineering.
#
# © 2020 Massachusetts Institute of Technology.
#
# The software/firmware is provided to you on an As-Is basis
#
# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than as specifically authorized by the U.S. Government may violate any copyrights that exist in this work.
#
# P. Stegall 2020
# !!!
# Make sure to change PORT to match the value used in tcp_server_gui.py (or whatever program is controlling the system), and set HOST to the IP address of the computer that is handling the controls.
# !!!
import os, sys
thisdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(thisdir)
from pyFlexsea import *
from pyFlexsea_def import *
from fxUtil import *
from exo_defs import *
from sync_led_def import *
import time
from tcp_handler_def import *
from study_one import *
from study_zero import *
HOST = <IP_ADDRESS_OF_THE_HOST> # example keep the quotes : '192.168.1.1'
PORT = <PORT_THE_HOST_IS_LISTENING_TO> # example: 67876
STUDY_IDX = 0
TRIAL_IDX = 1
MSG_IDX = 2
USER_MASS_IDX = MSG_IDX + 1
PROGRAM_EXIT_MSG = 255
READY_FOR_TRIAL_MSG = 254
STOP_CURRENT_TRIAL_MSG = 254
CHECK_IN_MSG = 253
SYNC_FINISHED_END_MSG = 252
TRIAL_EXIT_MSG = 251
TRIAL_BEGIN_MSG = 250
SYNC_FINISHED_BEGINNING_MSG = 249
TREADMILL_STOP_MSG = 252
ZEROING_CURRENT = 1000 # mA
def zero_boots (leftExo,rightExo) :
# TODO: Change this so it is speed controlled till a current limit is hit.
# the gains need to be defined before this is called
leftExo.set_controller(CTRL_CURRENT)
rightExo.set_controller(CTRL_CURRENT)
leftExo.set_exo_current(ZEROING_CURRENT)
rightExo.set_exo_current(ZEROING_CURRENT)
time.sleep(1) # wait a second
# not sure if this will update the original encoder offsets or if this is a new instance of the boots.
leftExo.zero_encoders()
rightExo.zero_encoders()
print("zeroBoots : leftExo.ankle_ticks_offset = " + str(leftExo.ankle_ticks_offset) )
print("zeroBoots : rightExo.ankle_ticks_offset = " + str(rightExo.ankle_ticks_offset) )
leftExo.set_exo_current(0)
rightExo.set_exo_current(0)
leftExo.set_controller(CTRL_NONE)
rightExo.set_controller(CTRL_NONE)
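# Expected call order (sketch based on main() below): define_current_gains() must be called on
# both boots before zero_boots(), since the zeroing routine drives the ankles in current control.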
def zero_boot (exo) :
# the gains need to be defined before this is called
exo.set_controller(CTRL_CURRENT)
exo.set_exo_current(ZEROING_CURRENT)
    time.sleep(2) # wait two seconds
# not sure if this will update the original encoder offsets or if this is a new instance of the boots.
exo.zero_encoders()
print("zeroBoots : exo.ankle_ticks_offset = " + str(exo.ankle_ticks_offset) )
exo.set_exo_current(0)
exo.set_controller(CTRL_NONE)
def startStopCapture(sync_light, leftExo, rightExo) :
print('starting sync pattern')
# low for 1 s capture both ends
sync_light.set_state(0)
leftExo.read_data()
rightExo.read_data()
time.sleep(sync_light.period)
leftExo.read_data()
rightExo.read_data()
time.sleep(.001)
# high for 2 s capture both ends
sync_light.set_state(1)
leftExo.read_data()
rightExo.read_data()
time.sleep(2 * sync_light.period)
leftExo.read_data()
rightExo.read_data()
time.sleep(.001)
# low for 1 s capture both ends
sync_light.set_state(0)
leftExo.read_data()
rightExo.read_data()
time.sleep(sync_light.period)
leftExo.read_data()
rightExo.read_data()
def trial_simple_start (sync_light, leftExo, rightExo, com, study_num, trial_num):
# restart trial timer
# start new recording file
leftExo.data_file = leftExo.log_init()
rightExo.data_file = rightExo.log_init()
startStopCapture(sync_light, leftExo, rightExo)
com.send_recv(send_msg = [study_num, trial_num, SYNC_FINISHED_BEGINNING_MSG])
time.sleep(.010) # pause for a bit to let messages pass
com.send_recv(send_msg = [study_num, trial_num, TRIAL_BEGIN_MSG])
com.send_msg = [study_num, trial_num, CHECK_IN_MSG]
def trial_simple_end (sync_light, leftExo, rightExo, com, study_num, trial_num):
com.send_recv(send_msg = [study_num, trial_num, TRIAL_EXIT_MSG]);
com.send_msg = [study_num, trial_num, CHECK_IN_MSG]
    # wait for the treadmill to stop; if a program exit message is received, go straight to exit
while com.recv_msg[MSG_IDX] != TREADMILL_STOP_MSG and (com.recv_msg[MSG_IDX] != PROGRAM_EXIT_MSG):
com.heartbeat()
sync_light.check()
startStopCapture(sync_light, leftExo, rightExo)
com.send_recv(send_msg = [study_num, trial_num, SYNC_FINISHED_END_MSG]);
sync_light.set_state(1)
com.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next
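# Assumed trial handshake, pieced together from the two helpers above:
#   start: open new log files -> LED sync pattern -> SYNC_FINISHED_BEGINNING_MSG
#          -> TRIAL_BEGIN_MSG -> CHECK_IN_MSG used as the heartbeat payload
#   end:   TRIAL_EXIT_MSG -> wait for TREADMILL_STOP_MSG (or PROGRAM_EXIT_MSG)
#          -> LED sync pattern -> SYNC_FINISHED_END_MSG -> READY_FOR_TRIAL_MSG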
def main():
# socket_host = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# socket_send_msg = bytearray([0, 254])
# socket_recv_msg = None
coms = TcpHandler(port = PORT, host = HOST)
# coms.send_recv()
sync_led = SyncLed(period = 2)
scriptPath = os.path.dirname(os.path.abspath(__file__))
fpath = scriptPath + '/com.txt'
ports, baudRate = loadPortsFromFile(fpath)
print('Loaded ports: ' + str(ports))
print('Baud Rate: ' + str(baudRate))
# must turn on left boot then right boot. Otherwise this will break.
# TODO: find a better solution for this but currently we don't have access to boot specific IDs.
idx = 0
print('creating left boot')
    left_boot = ExoBoot (LEFT, ports[0], int(baudRate), idx, shouldExoLog = False, shouldLog = False, frequency = 1000) # recent change: the first line of com.txt is now the baud rate
idx +=1
right_boot = ExoBoot (RIGHT, ports[1], int(baudRate), idx, shouldExoLog = False, shouldLog = False, frequency = 1000)
if False : # ( (not left_boot.id) or (not right_boot.id) ):
print("At least one boot is missing")
else :
# TODO: Add in gains for current and position
left_boot.define_current_gains(100,32)
right_boot.define_current_gains(100,32)
left_boot.define_position_gains(10,0)
right_boot.define_position_gains(10,0)
zero_boots(left_boot, right_boot);
left_boot.should_log = True
right_boot.should_log = True
#zero_boot(left_boot);
#print("main : left_boot.ankle_ticks_offset = " + str(left_boot.ankle_ticks_offset ) )
left_boot.set_controller(CTRL_CURRENT)
right_boot.set_controller(CTRL_CURRENT)
# model = TorquePredictionModelTFLite(offline_testing=False)
# torque_pred_pipeline = TorquePredictionPipeline(model)
start_time = 0
#startStopCapture(sync_led, left_boot, right_boot)
# left_boot.init_collins_profile(mass = 100, ramp_start_percent_gait = 0, onset_percent_gait = 27.1, peak_percent_gait = 52.4, stop_percent_gait = 62.7, onset_torque = 2, normalized_peak_torque = .25)
# right_boot.init_collins_profile(mass = 100, ramp_start_percent_gait = 0, onset_percent_gait = 27.1, peak_percent_gait = 52.4, stop_percent_gait = 62.7, onset_torque = 2, normalized_peak_torque = .25)
# left_boot.tactor_trigger_percent = 25
# right_boot.tactor_trigger_percent = 25
coms.send_recv(send_msg = [0, 0, READY_FOR_TRIAL_MSG])
sync_led.set_state(1)
user_mass = 0
try:
while coms.recv_msg[MSG_IDX] != PROGRAM_EXIT_MSG :
# put main code loop here it will stop with a CTRL+c
                # while ((time.monotonic()-start_time) <= (1/left_boot.frequency)) : # if you run this loop faster than the Dephy frequency the queue will fill up and it will take a long time to shut down and to have the data come in.
# a=0
# start_time = time.monotonic()
coms.heartbeat()
if coms.recv_msg[USER_MASS_IDX] != user_mass :
user_mass = coms.recv_msg[USER_MASS_IDX]
# Study 0 ------------------------------------------------------------------------------------
if coms.recv_msg[STUDY_IDX] == 0 :
coms.send_msg[STUDY_IDX] = 0
if coms.recv_msg[TRIAL_IDX] == 0 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
try :
if user_mass != -1 :
s0 = STUDY0( user_mass, left_boot, right_boot)
# coms.send_msg[MSG_IDX] = TRIAL_EXIT_MSG
coms.send_recv(send_msg = [0, 0, TRIAL_EXIT_MSG]);
coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next heartbeat
time.sleep(3) # this is just to give me a chance to change values with packetsender
else :
print('Study 0 initialization called but user mass not set')
except IndexError :
print('Study 0 initialization index error')
elif coms.recv_msg[TRIAL_IDX] == 1 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
print('Trial 1')
trial_simple_start (sync_led, left_boot, right_boot, coms, 0, 1)
trial_running = s0.trial1(restart_trial = True)
while ( trial_running and (coms.recv_msg[MSG_IDX] != STOP_CURRENT_TRIAL_MSG) and (coms.recv_msg[MSG_IDX] != PROGRAM_EXIT_MSG)) :
#print('S0T1 : in Loop')
time.sleep(1/(2*left_boot.frequency))
sync_led.check()
coms.heartbeat()
trial_running = s0.trial1()
trial_simple_end (sync_led, left_boot, right_boot, coms, 0, 1)
elif coms.recv_msg[TRIAL_IDX] == 2 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
sync_led.set_state(not sync_led.get_state() )
# coms.send_msg[MSG_IDX] = 251
coms.send_recv(send_msg = [0, 0, TRIAL_EXIT_MSG]);
coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next heartbeat
elif coms.recv_msg[TRIAL_IDX] == 3 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
zero_boots(left_boot,right_boot)
# coms.send_msg[MSG_IDX] = 251
coms.send_recv(send_msg = [0, 0, TRIAL_EXIT_MSG]);
coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next heartbeat
elif coms.recv_msg[TRIAL_IDX] == 4 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
zero_boot(left_boot)
# coms.send_msg[MSG_IDX] = 251
coms.send_recv(send_msg = [0, 0, TRIAL_EXIT_MSG]);
coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next heartbeat
elif coms.recv_msg[TRIAL_IDX] == 5 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
zero_boot(right_boot)
# coms.send_msg[MSG_IDX] = 251
coms.send_recv(send_msg = [0, 0, TRIAL_EXIT_MSG]);
coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next heartbeat
elif coms.recv_msg[TRIAL_IDX] == 6 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
left_boot.set_exo_current(0)
right_boot.set_exo_current(0)
left_boot.set_controller(CTRL_NONE)
right_boot.set_controller(CTRL_NONE)
# coms.send_msg[MSG_IDX] = 251
coms.send_recv(send_msg = [0, 0, TRIAL_EXIT_MSG]);
coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next heartbeat
elif coms.recv_msg[TRIAL_IDX] == 7 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
left_boot.set_exo_current(0)
left_boot.set_controller(CTRL_NONE)
# coms.send_msg[MSG_IDX] = 251
coms.send_recv(send_msg = [0, 0, TRIAL_EXIT_MSG]);
coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next heartbeat
elif coms.recv_msg[TRIAL_IDX] == 8 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
right_boot.set_exo_current(0)
right_boot.set_controller(CTRL_NONE)
# coms.send_msg[MSG_IDX] = 251
coms.send_recv(send_msg = [0, 0, TRIAL_EXIT_MSG]);
coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next heartbeat
# Study 1 ------------------------------------------------------------------------------------
elif coms.recv_msg[STUDY_IDX] == 1 :
coms.send_msg[STUDY_IDX] = 1
if coms.recv_msg[TRIAL_IDX] == 0 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
try :
if user_mass != -1 :
s1 = STUDY1( user_mass, left_boot, right_boot)
                                print('Study 1 initialized')
# coms.send_msg[MSG_IDX] = 251
coms.send_recv(send_msg = [1, 0, TRIAL_EXIT_MSG]);
coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next heartbeat
time.sleep(3) # this is just to give me a chance to change values with packetsender
else :
print('Study 1 initialization called but user mass not set')
except IndexError :
print('Study 1 initialization index error')
elif coms.recv_msg[TRIAL_IDX] == 1 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
# restart trial timer
# start new recording file
# left_boot.log_init()
# right_boot.log_init()
# startStopCapture(sync_led, left_boot, right_boot)
# coms.send_recv(send_msg = [1,1, SYNC_FINISHED_BEGINNING_MSG])
# time.sleep(.010) # pause for a bit to let messages pass
# coms.send_recv(send_msg = [1,1, TRIAL_BEGIN_MSG])
# coms.send_msg = [1, 1, CHECK_IN_MSG]
print('Trial 1')
trial_simple_start (sync_led, left_boot, right_boot, coms, 1, 1)
trial_running = s1.trial1(restart_trial = True)
while ( trial_running and (coms.recv_msg[MSG_IDX] != STOP_CURRENT_TRIAL_MSG) and (coms.recv_msg[MSG_IDX] != PROGRAM_EXIT_MSG)) :
#print('S1T1 : in Loop')
time.sleep(1/(2*left_boot.frequency))
sync_led.check()
coms.heartbeat()
trial_running = s1.trial1()
trial_simple_end (sync_led, left_boot, right_boot, coms, 1, 1)
# coms.send_recv(send_msg = [1, 1, TRIAL_EXIT_MSG]);
# coms.send_msg = [1, 1, CHECK_IN_MSG]
                        # # wait for the treadmill to stop; if a program exit message is received, go straight to exit
# while coms.recv_msg[TRIAL_IDX] != TREADMILL_STOP_MSG and (coms.recv_msg[TRIAL_IDX] != PROGRAM_EXIT_MSG):
# coms.heartbeat()
# startStopCapture(sync_led, left_boot, right_boot)
# coms.send_recv(send_msg = [1, 1, SYNC_FINISHED_END_MSG]);
# sync_led.set_state(1)
# coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next
elif coms.recv_msg[TRIAL_IDX] == 2 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
print('Trial 2')
# restart trial timer
# start new recording file
# left_boot.data_file = left_boot.log_init()
# right_boot.data_file = right_boot.log_init()
# startStopCapture(sync_led, left_boot, right_boot)
# coms.send_recv(send_msg = [1,2, SYNC_FINISHED_BEGINNING_MSG])
# time.sleep(.010) # pause for a bit to let messages pass
# coms.send_recv(send_msg = [1,2, TRIAL_BEGIN_MSG])
# coms.send_msg = [1, 1, CHECK_IN_MSG]
trial_simple_start (sync_led, left_boot, right_boot, coms, 1, 2)
trial_running = s1.trial2(restart_trial = True)
while ( trial_running and (coms.recv_msg[MSG_IDX] != STOP_CURRENT_TRIAL_MSG) and (coms.recv_msg[MSG_IDX] != PROGRAM_EXIT_MSG)) :
#print('S1T2 : in Loop')
time.sleep(1/(2*left_boot.frequency))
sync_led.check()
coms.heartbeat()
trial_running = s1.trial2()
trial_simple_end (sync_led, left_boot, right_boot, coms, 1, 2)
# coms.send_recv(send_msg = [1, 2, TRIAL_EXIT_MSG]);
# coms.send_msg = [1, 1, CHECK_IN_MSG]
                        # # wait for the treadmill to stop; if a program exit message is received, go straight to exit
# while coms.recv_msg[TRIAL_IDX] != TREADMILL_STOP_MSG and (coms.recv_msg[TRIAL_IDX] != PROGRAM_EXIT_MSG):
# coms.heartbeat()
# startStopCapture(sync_led, left_boot, right_boot)
# coms.send_recv(send_msg = [1, 2, SYNC_FINISHED_END_MSG]);
# sync_led.set_state(1)
# coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next
elif coms.recv_msg[TRIAL_IDX] == 3 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
print('Trial 3')
# restart trial timer
# start new recording file
# left_boot.data_file = left_boot.log_init()
# right_boot.data_file = right_boot.log_init()
# startStopCapture(sync_led, left_boot, right_boot)
# coms.send_recv(send_msg = [1,3, SYNC_FINISHED_BEGINNING_MSG])
# time.sleep(.010) # pause for a bit to let messages pass
# coms.send_recv(send_msg = [1,3, TRIAL_BEGIN_MSG])
# coms.send_msg = [1, 1, CHECK_IN_MSG]
trial_simple_start (sync_led, left_boot, right_boot, coms, 1, 3)
trial_running = s1.trial3(restart_trial = True)
while ( trial_running and (coms.recv_msg[MSG_IDX] != STOP_CURRENT_TRIAL_MSG) and (coms.recv_msg[MSG_IDX] != PROGRAM_EXIT_MSG)) :
#print('S1T3 : in Loop')
time.sleep(1/(2*left_boot.frequency))
sync_led.check()
coms.heartbeat()
trial_running = s1.trial3()
trial_simple_end (sync_led, left_boot, right_boot, coms, 1, 3)
# coms.send_recv(send_msg = [1, 3, TRIAL_EXIT_MSG]);
# coms.send_msg = [1, 1, CHECK_IN_MSG]
                        # # wait for the treadmill to stop; if a program exit message is received, go straight to exit
# while coms.recv_msg[TRIAL_IDX] != TREADMILL_STOP_MSG and (coms.recv_msg[TRIAL_IDX] != PROGRAM_EXIT_MSG):
# coms.heartbeat()
# startStopCapture(sync_led, left_boot, right_boot)
# coms.send_recv(send_msg = [1, 3, SYNC_FINISHED_END_MSG]);
# sync_led.set_state(1)
# coms.send_msg = [0, 0, READY_FOR_TRIAL_MSG]; # this just preps for the next
elif coms.recv_msg[TRIAL_IDX] == 4 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
print('Trial 4')
# restart trial timer
# start new recording file
# left_boot.data_file = left_boot.log_init()
# right_boot.data_file = right_boot.log_init()
# startStopCapture(sync_led, left_boot, right_boot)
# coms.send_recv(send_msg = [1,2, SYNC_FINISHED_BEGINNING_MSG])
# time.sleep(.010) # pause for a bit to let messages pass
# coms.send_recv(send_msg = [1,2, TRIAL_BEGIN_MSG])
# coms.send_msg = [1, 1, CHECK_IN_MSG]
trial_simple_start (sync_led, left_boot, right_boot, coms, 1, 4)
trial_running = s1.trial4(restart_trial = True)
while ( trial_running and (coms.recv_msg[MSG_IDX] != STOP_CURRENT_TRIAL_MSG) and (coms.recv_msg[MSG_IDX] != PROGRAM_EXIT_MSG)) :
#print('S1T2 : in Loop')
time.sleep(1/(2*left_boot.frequency))
sync_led.check()
coms.heartbeat()
trial_running = s1.trial4()
trial_simple_end (sync_led, left_boot, right_boot, coms, 1, 4)
elif coms.recv_msg[TRIAL_IDX] == 5 and coms.recv_msg[MSG_IDX] == TRIAL_BEGIN_MSG:
print('Trial 5')
# restart trial timer
# start new recording file
# left_boot.data_file = left_boot.log_init()
# right_boot.data_file = right_boot.log_init()
# startStopCapture(sync_led, left_boot, right_boot)
# coms.send_recv(send_msg = [1,2, SYNC_FINISHED_BEGINNING_MSG])
# time.sleep(.010) # pause for a bit to let messages pass
# coms.send_recv(send_msg = [1,2, TRIAL_BEGIN_MSG])
# coms.send_msg = [1, 1, CHECK_IN_MSG]
trial_simple_start (sync_led, left_boot, right_boot, coms, 1, 5)
trial_running = s1.trial5(restart_trial = True)
while ( trial_running and (coms.recv_msg[MSG_IDX] != STOP_CURRENT_TRIAL_MSG) and (coms.recv_msg[MSG_IDX] != PROGRAM_EXIT_MSG)) :
#print('S1T2 : in Loop')
time.sleep(1/(2*left_boot.frequency))
sync_led.check()
coms.heartbeat()
trial_running = s1.trial5()
trial_simple_end (sync_led, left_boot, right_boot, coms, 1, 5)
# Study 2 ------------------------------------------------------------------------------------
left_boot.set_exo_current(0)
right_boot.set_exo_current(0)
time.sleep(.03)
left_boot.set_controller(CTRL_NONE)
right_boot.set_controller(CTRL_NONE)
time.sleep(1/(2*left_boot.frequency))
#print(str(left_boot.data_current[left_boot.idx_time]) + "\t" + str(time.monotonic() * 1000))
###################################################
# if len(left_boot.data_que) == 50 :
# pred_torque_sequence = torque_pred_pipeline.run(left_boot.data_que, right_boot.data_que)
# print("torque estimate : " + str(pred_torque_sequence[0]))
#clearTerminal()
#printData(left_boot.labels_current, left_boot.data_current)
#printData(right_boot.labels_current, right_boot.data_current)
# commanding 1 Nm = 1000 Nmm , set exo current takes mA and ankle_torque_to_current outputs A.
# left_boot.set_exo_current(left_boot.ankle_torque_to_current(pred_torque_sequence[0]*50*1000)*1000) # pred torque is currently 0-1 shaping it to 50000 Nmm
#right_boot.set_exo_current(right_boot.ankle_torque_to_current(1000)*1000)
###################################################
# commanding 1 Nm = 1000 Nmm , set exo current takes mA and ankle_torque_to_current outputs A.
#
# left_boot.set_exo_current(left_boot.ankle_torque_to_current(0)*1000)
# right_boot.set_exo_current(right_boot.ankle_torque_to_current(0)*1000)
#left_boot.set_exo_current(2500*sync_led.state)
#right_boot.set_exo_current(2500*sync_led.state)
#left_boot.set_exo_current(2500)
#right_boot.set_exo_current(2500)
except KeyboardInterrupt:
print("KeyboardInterrupt has been caught.")
# except IndexError:
# print('Index out of range')
# sync_led.set_state(0)
left_boot.set_exo_current(0)
right_boot.set_exo_current(0)
time.sleep(.03)
left_boot.set_controller(CTRL_NONE)
right_boot.set_controller(CTRL_NONE)
# startStopCapture(sync_led, left_boot, right_boot)
# sync_led.set_state(0)
# left_boot.read_data()
# right_boot.read_data()
# time.sleep(1)
# left_boot.read_data()
# right_boot.read_data()
# time.sleep(.001)
# sync_led.set_state(1)
# left_boot.read_data()
# right_boot.read_data()
# time.sleep(2)
# left_boot.read_data()
# right_boot.read_data()
# time.sleep(.001)
# sync_led.set_state(0)
# left_boot.read_data()
# right_boot.read_data()
# time.sleep(1)
# left_boot.read_data()
# right_boot.read_data()
del left_boot
del right_boot
cleanupPlanStack()
main()
| 36.471061
| 394
| 0.681331
|
d4585d953560b9f9f303b889851ef25fefd0559b
| 7,137
|
py
|
Python
|
get_pulse.py
|
krahd/bw
|
9c7ff5ed2a16ff5e750ead5ee576c56420b41312
|
[
"Apache-2.0"
] | 7
|
2018-10-12T15:12:50.000Z
|
2021-11-21T02:52:43.000Z
|
get_pulse.py
|
krahd/bw
|
9c7ff5ed2a16ff5e750ead5ee576c56420b41312
|
[
"Apache-2.0"
] | null | null | null |
get_pulse.py
|
krahd/bw
|
9c7ff5ed2a16ff5e750ead5ee576c56420b41312
|
[
"Apache-2.0"
] | 3
|
2019-06-17T02:31:12.000Z
|
2021-09-16T10:30:41.000Z
|
from lib.device import Camera
from lib.processors_noopenmdao import findFaceGetPulse
from lib.interface import plotXY, imshow, waitKey, destroyWindow
from cv2 import moveWindow
import argparse
import numpy as np
import datetime
#TODO: work on serial port comms, if anyone asks for it
#from serial import Serial  # imported lazily in getPulseApp.__init__ when --serial is used
import socket
import sys
class getPulseApp(object):
"""
Python application that finds a face in a webcam stream, then isolates the
forehead.
Then the average green-light intensity in the forehead region is gathered
over time, and the detected person's pulse is estimated.
"""
def __init__(self, args):
# Imaging device - must be a connected camera (not an ip camera or mjpeg
# stream)
serial = args.serial
baud = args.baud
self.send_serial = False
self.send_udp = False
if serial:
self.send_serial = True
if not baud:
baud = 9600
else:
baud = int(baud)
            from serial import Serial  # lazy import: pyserial is only needed when --serial is given
            self.serial = Serial(port=serial, baudrate=baud)
udp = args.udp
if udp:
self.send_udp = True
if ":" not in udp:
ip = udp
port = 5005
else:
ip, port = udp.split(":")
port = int(port)
self.udp = (ip, port)
self.sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
self.cameras = []
self.selected_cam = 0
for i in range(3):
camera = Camera(camera=i) # first camera by default
if camera.valid or not len(self.cameras):
self.cameras.append(camera)
else:
break
self.w, self.h = 0, 0
self.pressed = 0
        # Containerized analysis of received image frames (an openMDAO assembly)
# is defined next.
# This assembly is designed to handle all image & signal analysis,
# such as face detection, forehead isolation, time series collection,
# heart-beat detection, etc.
# Basically, everything that isn't communication
# to the camera device or part of the GUI
self.processor = findFaceGetPulse()
# Init parameters for the cardiac data plot
self.bpm_plot = False
self.plot_title = "Data display - raw signal (top) and PSD (bottom)"
# Maps keystrokes to specified methods
#(A GUI window must have focus for these to work)
self.key_controls = {"s": self.toggle_search,
"d": self.toggle_display_plot,
"c": self.toggle_cam,
"f": self.write_csv}
def toggle_cam(self):
if len(self.cameras) > 1:
self.processor.find_faces = True
self.bpm_plot = False
destroyWindow(self.plot_title)
self.selected_cam += 1
self.selected_cam = self.selected_cam % len(self.cameras)
def write_csv(self):
"""
Writes current data to a csv file
"""
fn = "Webcam-pulse" + str(datetime.datetime.now())
fn = fn.replace(":", "_").replace(".", "_")
data = np.vstack((self.processor.times, self.processor.samples)).T
np.savetxt(fn + ".csv", data, delimiter=',')
print("Writing csv")
def toggle_search(self):
"""
Toggles a motion lock on the processor's face detection component.
Locking the forehead location in place significantly improves
        data quality, once a forehead has been successfully isolated.
"""
#state = self.processor.find_faces.toggle()
state = self.processor.find_faces_toggle()
print("face detection lock =", not state)
def toggle_display_plot(self):
"""
Toggles the data display.
"""
if self.bpm_plot:
print("bpm plot disabled")
self.bpm_plot = False
destroyWindow(self.plot_title)
else:
print("bpm plot enabled")
if self.processor.find_faces:
self.toggle_search()
self.bpm_plot = True
self.make_bpm_plot()
moveWindow(self.plot_title, self.w, 0)
def make_bpm_plot(self):
"""
Creates and/or updates the data display
"""
plotXY([[self.processor.times,
self.processor.samples],
[self.processor.freqs,
self.processor.fft]],
labels=[False, True],
showmax=[False, "bpm"],
label_ndigits=[0, 0],
showmax_digits=[0, 1],
skip=[3, 3],
name=self.plot_title,
bg=self.processor.slices[0])
def key_handler(self):
"""
Handle keystrokes, as set at the bottom of __init__()
A plotting or camera frame window must have focus for keypresses to be
detected.
"""
self.pressed = waitKey(10) & 255 # wait for keypress for 10 ms
if self.pressed == 27: # exit program on 'esc'
print("Exiting")
for cam in self.cameras:
cam.cam.release()
if self.send_serial:
self.serial.close()
sys.exit()
for key in self.key_controls.keys():
if chr(self.pressed) == key:
self.key_controls[key]()
def main_loop(self):
"""
Single iteration of the application's main loop.
"""
# Get current image frame from the camera
frame = self.cameras[self.selected_cam].get_frame()
self.h, self.w, _c = frame.shape
# display unaltered frame
# imshow("Original",frame)
# set current image frame to the processor's input
self.processor.frame_in = frame
# process the image frame to perform all needed analysis
self.processor.run(self.selected_cam)
# collect the output frame for display
output_frame = self.processor.frame_out
# show the processed/annotated output frame
imshow("Processed", output_frame)
# create and/or update the raw data display if needed
if self.bpm_plot:
self.make_bpm_plot()
if self.send_serial:
            self.serial.write((str(self.processor.bpm) + "\r\n").encode())  # pyserial expects bytes on Python 3
if self.send_udp:
            self.sock.sendto(str(self.processor.bpm).encode(), self.udp)  # socket.sendto requires bytes on Python 3
# handle any key presses
self.key_handler()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Webcam pulse detector.')
parser.add_argument('--serial', default=None,
help='serial port destination for bpm data')
parser.add_argument('--baud', default=None,
help='Baud rate for serial transmission')
parser.add_argument('--udp', default=None,
help='udp address:port destination for bpm data')
args = parser.parse_args()
App = getPulseApp(args)
while True:
App.main_loop()
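# Example invocation (hypothetical values):
#   python get_pulse.py --udp 127.0.0.1:5005
# streams the estimated bpm to a UDP listener on the local machine; run the
# script with no flags to use only the on-screen display.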
| 33.507042
| 80
| 0.574611
|
4d785140e0a0f8e4931d932c0f24af1d1a159386
| 108
|
py
|
Python
|
goldclip/__init__.py
|
bakerwm/goldclip
|
a1ed8b63d51ed05f165a15cce66a6ace2c3976a9
|
[
"MIT"
] | null | null | null |
goldclip/__init__.py
|
bakerwm/goldclip
|
a1ed8b63d51ed05f165a15cce66a6ace2c3976a9
|
[
"MIT"
] | null | null | null |
goldclip/__init__.py
|
bakerwm/goldclip
|
a1ed8b63d51ed05f165a15cce66a6ace2c3976a9
|
[
"MIT"
] | null | null | null |
from .helper import *
from .goldcliplib import *
from .bin import bed_annotation
from .bin import bed_fixer
| 21.6
| 31
| 0.796296
|
fbbc3b1889c80353708758931bc1a4d0578347a4
| 578
|
py
|
Python
|
astr-119-hw-2/data_types.py
|
cnojiri/astr-119
|
e4fae90e75276c1800fda5bb559f988767c0d870
|
[
"MIT"
] | 1
|
2021-12-07T07:41:33.000Z
|
2021-12-07T07:41:33.000Z
|
astr-119-hw-2/data_types.py
|
cnojiri/astr-119
|
e4fae90e75276c1800fda5bb559f988767c0d870
|
[
"MIT"
] | 6
|
2021-09-29T22:13:31.000Z
|
2021-11-24T16:00:50.000Z
|
astr-119-hw-2/data_types.py
|
cnojiri/astr-119
|
e4fae90e75276c1800fda5bb559f988767c0d870
|
[
"MIT"
] | null | null | null |
import numpy as np #import numpy library
#integers
i = 10 #integer
print(type(i)) #print out the data type of i
a_i = np.zeros(i,dtype=int) #declare an array of ints
print(type(a_i)) #will return ndarray
print(type(a_i[0])) #will return int64
#floats
x = 119.0 #floating point number
print(type(x)) #print out the data type of x
y = 1.19e2 #float 119 in scientific notation
print(type(y)) #print out the data type of y
z = np.zeros(i,dtype=float) #declare array of floats
print(type(z)) #will return ndarray
print(type(z[0])) #will return float64
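# The same pattern extends to other dtypes; a small hypothetical addition:
# c = np.zeros(i, dtype=complex) # declare array of complex zeros
# print(type(c[0])) # expected to print <class 'numpy.complex128'>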
| 25.130435
| 53
| 0.693772
|
da8e0439feff3c29c1cceee30b5fb138e4cdfdec
| 105,902
|
py
|
Python
|
lib/sqlalchemy/orm/strategies.py
|
sdspikes/sqlalchemy
|
cf6f342150791ace82c6a099e8ea2923138bd61e
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/strategies.py
|
sdspikes/sqlalchemy
|
cf6f342150791ace82c6a099e8ea2923138bd61e
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/strategies.py
|
sdspikes/sqlalchemy
|
cf6f342150791ace82c6a099e8ea2923138bd61e
|
[
"MIT"
] | null | null | null |
# orm/strategies.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from __future__ import absolute_import
import collections
import itertools
from . import attributes
from . import exc as orm_exc
from . import interfaces
from . import loading
from . import path_registry
from . import properties
from . import query
from . import relationships
from . import unitofwork
from . import util as orm_util
from .base import _DEFER_FOR_STATE
from .base import _RAISE_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .context import _column_descriptions
from .context import ORMCompileState
from .context import QueryContext
from .interfaces import LoaderStrategy
from .interfaces import StrategizedProperty
from .session import _state_session
from .state import InstanceState
from .util import _none_set
from .util import aliased
from .. import event
from .. import exc as sa_exc
from .. import inspect
from .. import log
from .. import sql
from .. import util
from ..sql import util as sql_util
from ..sql import visitors
from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
def _register_attribute(
prop,
mapper,
useobject,
compare_function=None,
typecallable=None,
callable_=None,
proxy_property=None,
active_history=False,
impl_class=None,
**kw
):
listen_hooks = []
uselist = useobject and prop.uselist
if useobject and prop.single_parent:
listen_hooks.append(single_parent_validator)
if prop.key in prop.parent.validators:
fn, opts = prop.parent.validators[prop.key]
listen_hooks.append(
lambda desc, prop: orm_util._validator_events(
desc, prop.key, fn, **opts
)
)
if useobject:
listen_hooks.append(unitofwork.track_cascade_events)
# need to assemble backref listeners
# after the singleparentvalidator, mapper validator
if useobject:
backref = prop.back_populates
if backref and prop._effective_sync_backref:
listen_hooks.append(
lambda desc, prop: attributes.backref_listeners(
desc, backref, uselist
)
)
# a single MapperProperty is shared down a class inheritance
# hierarchy, so we set up attribute instrumentation and backref event
# for each mapper down the hierarchy.
# typically, "mapper" is the same as prop.parent, due to the way
# the configure_mappers() process runs, however this is not strongly
# enforced, and in the case of a second configure_mappers() run the
# mapper here might not be prop.parent; also, a subclass mapper may
# be called here before a superclass mapper. That is, can't depend
# on mappers not already being set up so we have to check each one.
for m in mapper.self_and_descendants:
if prop is m._props.get(
prop.key
) and not m.class_manager._attr_has_impl(prop.key):
desc = attributes.register_attribute_impl(
m.class_,
prop.key,
parent_token=prop,
uselist=uselist,
compare_function=compare_function,
useobject=useobject,
trackparent=useobject
and (
prop.single_parent
or prop.direction is interfaces.ONETOMANY
),
typecallable=typecallable,
callable_=callable_,
active_history=active_history,
impl_class=impl_class,
send_modified_events=not useobject or not prop.viewonly,
doc=prop.doc,
**kw
)
for hook in listen_hooks:
hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
"""Represent a non-instrumented MapperProperty.
The polymorphic_on argument of mapper() often results in this,
if the argument is against the with_polymorphic selectable.
"""
__slots__ = ("columns",)
def __init__(self, parent, strategy_key):
super(UninstrumentedColumnLoader, self).__init__(parent, strategy_key)
self.columns = self.parent_property.columns
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
**kwargs
):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
pass
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
__slots__ = "columns", "is_composite"
def __init__(self, parent, strategy_key):
super(ColumnLoader, self).__init__(parent, strategy_key)
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, "composite_class")
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
check_for_adapt=False,
**kwargs
):
for c in self.columns:
if adapter:
if check_for_adapt:
c = adapter.adapt_check_present(c)
if c is None:
return
else:
c = adapter.columns[c]
column_collection.append(c)
fetch = self.columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def init_class_attribute(self, mapper):
self.is_class_level = True
coltype = self.columns[0].type
# TODO: check all columns ? check for foreign key as well?
active_history = (
self.parent_property.active_history
or self.columns[0].primary_key
or (
mapper.version_id_col is not None
and mapper._columntoproperty.get(mapper.version_id_col, None)
is self.parent_property
)
)
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=coltype.compare_values,
active_history=active_history,
)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
# look through list of columns represented here
# to see which, if any, is present in the row.
for col in self.columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
@log.class_logger
@properties.ColumnProperty.strategy_for(query_expression=True)
class ExpressionColumnLoader(ColumnLoader):
def __init__(self, parent, strategy_key):
super(ExpressionColumnLoader, self).__init__(parent, strategy_key)
null = sql.null()
self._have_default_expression = any(
not c.compare(null) for c in self.parent_property.columns
)
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kwargs
):
columns = None
if loadopt and "expression" in loadopt.local_opts:
columns = [loadopt.local_opts["expression"]]
elif self._have_default_expression:
columns = self.parent_property.columns
if columns is None:
return
for c in columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
fetch = columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
# look through list of columns represented here
# to see which, if any, is present in the row.
if loadopt and "expression" in loadopt.local_opts:
columns = [loadopt.local_opts["expression"]]
for col in columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
accepts_scalar_loader=False,
)
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
@properties.ColumnProperty.strategy_for(
deferred=True, instrument=True, raiseload=True
)
@properties.ColumnProperty.strategy_for(do_nothing=True)
class DeferredColumnLoader(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
__slots__ = "columns", "group", "raiseload"
def __init__(self, parent, strategy_key):
super(DeferredColumnLoader, self).__init__(parent, strategy_key)
if hasattr(self.parent_property, "composite_class"):
raise NotImplementedError(
"Deferred loading for composite " "types not implemented yet"
)
self.raiseload = self.strategy_opts.get("raiseload", False)
self.columns = self.parent_property.columns
self.group = self.parent_property.group
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
# for a DeferredColumnLoader, this method is only used during a
# "row processor only" query; see test_deferred.py ->
# tests with "rowproc_only" in their name. As of the 1.0 series,
# loading._instance_processor doesn't use a "row processing" function
# to populate columns, instead it uses data in the "populators"
# dictionary. Normally, the DeferredColumnLoader.setup_query()
# sets up that data in the "memoized_populators" dictionary
# and "create_row_processor()" here is never invoked.
if not self.is_class_level:
if self.raiseload:
set_deferred_for_local_state = (
self.parent_property._raise_column_loader
)
else:
set_deferred_for_local_state = (
self.parent_property._deferred_column_loader
)
populators["new"].append((self.key, set_deferred_for_local_state))
else:
populators["expire"].append((self.key, False))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
callable_=self._load_for_state,
load_on_unexpire=False,
)
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
only_load_props=None,
**kw
):
if (
(
loadopt
and "undefer_pks" in loadopt.local_opts
and set(self.columns).intersection(
self.parent._should_undefer_in_wildcard
)
)
or (
loadopt
and self.group
and loadopt.local_opts.get(
"undefer_group_%s" % self.group, False
)
)
or (only_load_props and self.key in only_load_props)
):
self.parent_property._get_strategy(
(("deferred", False), ("instrument", True))
).setup_query(
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kw
)
elif self.is_class_level:
memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED
elif not self.raiseload:
memoized_populators[self.parent_property] = _DEFER_FOR_STATE
else:
memoized_populators[self.parent_property] = _RAISE_FOR_STATE
def _load_for_state(self, state, passive):
if not state.key:
return attributes.ATTR_EMPTY
if not passive & attributes.SQL_OK:
return attributes.PASSIVE_NO_RESULT
localparent = state.manager.mapper
if self.group:
toload = [
p.key
for p in localparent.iterate_properties
if isinstance(p, StrategizedProperty)
and isinstance(p.strategy, DeferredColumnLoader)
and p.group == self.group
]
else:
toload = [self.key]
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
session = _state_session(state)
if session is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"deferred load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
if self.raiseload:
self._invoke_raise_load(state, passive, "raise")
if (
loading.load_on_ident(
session,
sql.select(localparent).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
state.key,
only_load_props=group,
refresh_state=state,
)
is None
):
raise orm_exc.ObjectDeletedError(state)
return attributes.ATTR_WAS_SET
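    # User-facing illustration (hedged): when several columns are mapped with
    # deferred(..., group="photos"), touching any one of them funnels through
    # _load_for_state above and the whole "photos" group is fetched with a
    # single SELECT via load_on_ident, rather than one query per column.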
def _invoke_raise_load(self, state, passive, lazy):
raise sa_exc.InvalidRequestError(
"'%s' is not available due to raiseload=True" % (self,)
)
class LoadDeferredColumns(object):
"""serializable loader object used by DeferredColumnLoader"""
def __init__(self, key, raiseload=False):
self.key = key
self.raiseload = raiseload
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
localparent = state.manager.mapper
prop = localparent._props[key]
if self.raiseload:
strategy_key = (
("deferred", True),
("instrument", True),
("raiseload", True),
)
else:
strategy_key = (("deferred", True), ("instrument", True))
strategy = prop._get_strategy(strategy_key)
return strategy._load_for_state(state, passive)
class AbstractRelationshipLoader(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
__slots__ = "mapper", "target", "uselist", "entity"
def __init__(self, parent, strategy_key):
super(AbstractRelationshipLoader, self).__init__(parent, strategy_key)
self.mapper = self.parent_property.mapper
self.entity = self.parent_property.entity
self.target = self.parent_property.target
self.uselist = self.parent_property.uselist
def _size_alert(self, lru_cache):
util.warn("LRU cache size alert for loader strategy: %s" % self)
@log.class_logger
@relationships.RelationshipProperty.strategy_for(do_nothing=True)
class DoNothingLoader(LoaderStrategy):
"""Relationship loader that makes no change to the object's state.
Compared to NoLoader, this loader does not initialize the
collection/attribute to empty/none; the usual default LazyLoader will
take effect.
"""
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy="noload")
@relationships.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=None".
"""
__slots__ = ()
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=True,
typecallable=self.parent_property.collection_class,
)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
def invoke_no_load(state, dict_, row):
if self.uselist:
attributes.init_state_collection(state, dict_, self.key)
else:
dict_[self.key] = None
populators["new"].append((self.key, invoke_no_load))
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy=True)
@relationships.RelationshipProperty.strategy_for(lazy="select")
@relationships.RelationshipProperty.strategy_for(lazy="raise")
@relationships.RelationshipProperty.strategy_for(lazy="raise_on_sql")
@relationships.RelationshipProperty.strategy_for(lazy="baked_select")
class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=True", that is loads when first accessed.
"""
__slots__ = (
"_lazywhere",
"_rev_lazywhere",
"_lazyload_reverse_option",
"_order_by",
"use_get",
"is_aliased_class",
"_bind_to_col",
"_equated_columns",
"_rev_bind_to_col",
"_rev_equated_columns",
"_simple_lazy_clause",
"_raise_always",
"_raise_on_sql",
"_query_cache",
)
def __init__(self, parent, strategy_key):
super(LazyLoader, self).__init__(parent, strategy_key)
self._raise_always = self.strategy_opts["lazy"] == "raise"
self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql"
self.is_aliased_class = inspect(self.entity).is_aliased_class
join_condition = self.parent_property._join_condition
(
self._lazywhere,
self._bind_to_col,
self._equated_columns,
) = join_condition.create_lazy_clause()
(
self._rev_lazywhere,
self._rev_bind_to_col,
self._rev_equated_columns,
) = join_condition.create_lazy_clause(reverse_direction=True)
if self.parent_property.order_by:
self._order_by = [
sql_util._deep_annotate(elem, {"_orm_adapt": True})
for elem in util.to_list(self.parent_property.order_by)
]
else:
self._order_by = None
self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
# determine if our "lazywhere" clause is the same as the mapper's
# get() clause. then we can just use mapper.get()
#
# TODO: the "not self.uselist" can be taken out entirely; a m2o
# load that populates for a list (very unusual, but is possible with
# the API) can still set for "None" and the attribute system will
# populate as an empty list.
self.use_get = (
not self.is_aliased_class
and not self.uselist
and self.entity._get_clause[0].compare(
self._lazywhere,
use_proxies=True,
compare_keys=False,
equivalents=self.mapper._equivalent_columns,
)
)
if self.use_get:
for col in list(self._equated_columns):
if col in self.mapper._equivalent_columns:
for c in self.mapper._equivalent_columns[col]:
self._equated_columns[c] = self._equated_columns[col]
self.logger.info(
"%s will use Session.get() to " "optimize instance loads", self
)
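        # Illustration of the use_get fast path (hedged): for a typical
        # many-to-one relationship, the lazy clause reduces to comparing the
        # target's primary key to a value taken from the loading row's foreign
        # key, which matches the mapper's get() clause, so the related object
        # can often be returned straight from the identity map without SQL.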
def init_class_attribute(self, mapper):
self.is_class_level = True
active_history = (
self.parent_property.active_history
or self.parent_property.direction is not interfaces.MANYTOONE
or not self.use_get
)
# MANYTOONE currently only needs the
# "old" value for delete-orphan
# cascades. the required _SingleParentValidator
# will enable active_history
# in that case. otherwise we don't need the
# "old" value during backref operations.
_register_attribute(
self.parent_property,
mapper,
useobject=True,
callable_=self._load_for_state,
typecallable=self.parent_property.collection_class,
active_history=active_history,
)
def _memoized_attr__simple_lazy_clause(self):
lazywhere = sql_util._deep_annotate(
self._lazywhere, {"_orm_adapt": True}
)
criterion, bind_to_col = (lazywhere, self._bind_to_col)
params = []
def visit_bindparam(bindparam):
bindparam.unique = False
visitors.traverse(criterion, {}, {"bindparam": visit_bindparam})
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
params.append(
(
bindparam.key,
bind_to_col[bindparam._identifying_key],
None,
)
)
elif bindparam.callable is None:
params.append((bindparam.key, None, bindparam.value))
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
return criterion, params
def _generate_lazy_clause(self, state, passive):
criterion, param_keys = self._simple_lazy_clause
if state is None:
return sql_util.adapt_criterion_to_null(
criterion, [key for key, ident, value in param_keys]
)
mapper = self.parent_property.parent
o = state.obj() # strong ref
dict_ = attributes.instance_dict(o)
if passive & attributes.INIT_OK:
passive ^= attributes.INIT_OK
params = {}
for key, ident, value in param_keys:
if ident is not None:
if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
value = mapper._get_committed_state_attr_by_column(
state, dict_, ident, passive
)
else:
value = mapper._get_state_attr_by_column(
state, dict_, ident, passive
)
params[key] = value
return criterion, params
def _invoke_raise_load(self, state, passive, lazy):
raise sa_exc.InvalidRequestError(
"'%s' is not available due to lazy='%s'" % (self, lazy)
)
def _load_for_state(self, state, passive, loadopt=None, extra_criteria=()):
if not state.key and (
(
not self.parent_property.load_on_pending
and not state._load_pending
)
or not state.session_id
):
return attributes.ATTR_EMPTY
pending = not state.key
primary_key_identity = None
use_get = self.use_get and (not loadopt or not loadopt._extra_criteria)
if (not passive & attributes.SQL_OK and not use_get) or (
not passive & attributes.NON_PERSISTENT_OK and pending
):
return attributes.PASSIVE_NO_RESULT
if (
# we were given lazy="raise"
self._raise_always
# the no_raise history-related flag was not passed
and not passive & attributes.NO_RAISE
and (
# if we are use_get and related_object_ok is disabled,
# which means we are at most looking in the identity map
# for history purposes or otherwise returning
# PASSIVE_NO_RESULT, don't raise. This is also a
# history-related flag
not use_get
or passive & attributes.RELATED_OBJECT_OK
)
):
self._invoke_raise_load(state, passive, "raise")
session = _state_session(state)
if not session:
if passive & attributes.NO_RAISE:
return attributes.PASSIVE_NO_RESULT
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"lazy load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
# if we have a simple primary key load, check the
# identity map without generating a Query at all
if use_get:
primary_key_identity = self._get_ident_for_use_get(
session, state, passive
)
if attributes.PASSIVE_NO_RESULT in primary_key_identity:
return attributes.PASSIVE_NO_RESULT
elif attributes.NEVER_SET in primary_key_identity:
return attributes.NEVER_SET
if _none_set.issuperset(primary_key_identity):
return None
if self.key in state.dict:
return attributes.ATTR_WAS_SET
# look for this identity in the identity map. Delegate to the
# Query class in use, as it may have special rules for how it
# does this, including how it decides what the correct
# identity_token would be for this identity.
instance = session._identity_lookup(
self.entity,
primary_key_identity,
passive=passive,
lazy_loaded_from=state,
)
if instance is not None:
if instance is attributes.PASSIVE_CLASS_MISMATCH:
return None
else:
return instance
elif (
not passive & attributes.SQL_OK
or not passive & attributes.RELATED_OBJECT_OK
):
return attributes.PASSIVE_NO_RESULT
return self._emit_lazyload(
session,
state,
primary_key_identity,
passive,
loadopt,
extra_criteria,
)
def _get_ident_for_use_get(self, session, state, passive):
instance_mapper = state.manager.mapper
if passive & attributes.LOAD_AGAINST_COMMITTED:
get_attr = instance_mapper._get_committed_state_attr_by_column
else:
get_attr = instance_mapper._get_state_attr_by_column
dict_ = state.dict
return [
get_attr(state, dict_, self._equated_columns[pk], passive=passive)
for pk in self.mapper.primary_key
]
def _memoized_attr__query_cache(self):
# cache is per lazy loader; stores not only cached SQL but also
# sqlalchemy.sql.lambdas.AnalyzedCode and
# sqlalchemy.sql.lambdas.AnalyzedFunction objects which are generated
# from the StatementLambda used.
return util.LRUCache(30, size_alert=self._size_alert)
@util.preload_module("sqlalchemy.orm.strategy_options")
def _emit_lazyload(
self,
session,
state,
primary_key_identity,
passive,
loadopt,
extra_criteria,
):
strategy_options = util.preloaded.orm_strategy_options
stmt = sql.lambda_stmt(
lambda: sql.select(self.entity)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
._set_compile_options(ORMCompileState.default_compile_options),
global_track_bound_values=False,
lambda_cache=self._query_cache,
track_on=(self,),
)
if not self.parent_property.bake_queries:
stmt = stmt.spoil()
load_options = QueryContext.default_load_options
load_options += {
"_invoke_all_eagers": False,
"_lazy_loaded_from": state,
}
if self.parent_property.secondary is not None:
stmt = stmt.add_criteria(
lambda stmt: stmt.select_from(
self.mapper, self.parent_property.secondary
),
track_on=[self.parent_property],
)
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
stmt += lambda stmt: stmt.execution_options(autoflush=False)
use_get = self.use_get
if state.load_options or (loadopt and loadopt._extra_criteria):
effective_path = state.load_path[self.parent_property]
opts = list(state.load_options)
if loadopt and loadopt._extra_criteria:
use_get = False
opts += (
orm_util.LoaderCriteriaOption(self.entity, extra_criteria),
)
stmt += lambda stmt: stmt.options(*opts)
else:
# this path is used if there are not already any options
# in the query, but an event may want to add them
effective_path = state.mapper._path_registry[self.parent_property]
stmt += lambda stmt: stmt._update_compile_options(
{"_current_path": effective_path}
)
if use_get:
if self._raise_on_sql:
self._invoke_raise_load(state, passive, "raise_on_sql")
return loading.load_on_pk_identity(
session,
stmt,
primary_key_identity,
load_options=load_options,
execution_options={"compiled_cache": self._query_cache},
)
if self._order_by:
stmt = stmt.add_criteria(
lambda stmt: stmt.order_by(*self._order_by), track_on=[self]
)
def _lazyload_reverse(compile_context):
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if (
rev.direction is interfaces.MANYTOONE
and rev._use_get
and not isinstance(rev.strategy, LazyLoader)
):
strategy_options.Load.for_existing_path(
compile_context.compile_options._current_path[
rev.parent
]
).lazyload(rev).process_compile_state(compile_context)
stmt = stmt.add_criteria(
lambda stmt: stmt._add_context_option(
_lazyload_reverse, self.parent_property
),
track_on=[self],
)
lazy_clause, params = self._generate_lazy_clause(state, passive)
execution_options = {
"_sa_orm_load_options": load_options,
}
if not self.parent_property.bake_queries:
execution_options["compiled_cache"] = None
if self.key in state.dict:
return attributes.ATTR_WAS_SET
if pending:
if util.has_intersection(orm_util._none_set, params.values()):
return None
elif util.has_intersection(orm_util._never_set, params.values()):
return None
if self._raise_on_sql:
self._invoke_raise_load(state, passive, "raise_on_sql")
stmt = stmt.add_criteria(
lambda stmt: stmt.where(lazy_clause), enable_tracking=False
)
result = session.execute(
stmt, params, execution_options=execution_options
)
result = result.unique().scalars().all()
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property
)
return result[0]
else:
return None
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
key = self.key
if not self.is_class_level or (loadopt and loadopt._extra_criteria):
# we are not the primary manager for this attribute
# on this class - set up a
# per-instance lazyloader, which will override the
# class-level behavior.
# this currently only happens when using a
# "lazyload" option on a "no load"
# attribute - "eager" attributes always have a
# class-level lazyloader installed.
set_lazy_callable = (
InstanceState._instance_level_callable_processor
)(
mapper.class_manager,
LoadLazyAttribute(
key,
self,
loadopt,
loadopt._generate_extra_criteria(context)
if loadopt._extra_criteria
else None,
),
key,
)
populators["new"].append((self.key, set_lazy_callable))
elif context.populate_existing or mapper.always_refresh:
def reset_for_lazy_callable(state, dict_, row):
# we are the primary manager for this attribute on
# this class - reset its
# per-instance attribute state, so that the class-level
# lazy loader is
# executed when next referenced on this instance.
# this is needed in
# populate_existing() types of scenarios to reset
# any existing state.
state._reset(dict_, key)
populators["new"].append((self.key, reset_for_lazy_callable))
class LoadLazyAttribute(object):
"""semi-serializable loader object used by LazyLoader
Historically, this object would be carried along with instances that
needed to run lazyloaders, so it had to be serializable to support
cached instances.
this is no longer a general requirement, and the case where this object
is used is exactly the case where we can't really serialize easily,
which is when extra criteria in the loader option is present.
We can't reliably serialize that as it refers to mapped entities and
AliasedClass objects that are local to the current process, which would
need to be matched up on deserialize e.g. the sqlalchemy.ext.serializer
approach.
"""
def __init__(self, key, initiating_strategy, loadopt, extra_criteria):
self.key = key
self.strategy_key = initiating_strategy.strategy_key
self.loadopt = loadopt
self.extra_criteria = extra_criteria
def __getstate__(self):
if self.extra_criteria is not None:
util.warn(
"Can't reliably serialize a lazyload() option that "
"contains additional criteria; please use eager loading "
"for this case"
)
return {
"key": self.key,
"strategy_key": self.strategy_key,
"loadopt": self.loadopt,
"extra_criteria": (),
}
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
instance_mapper = state.manager.mapper
prop = instance_mapper._props[key]
strategy = prop._strategies[self.strategy_key]
return strategy._load_for_state(
state,
passive,
loadopt=self.loadopt,
extra_criteria=self.extra_criteria,
)
class PostLoader(AbstractRelationshipLoader):
"""A relationship loader that emits a second SELECT statement."""
def _check_recursive_postload(self, context, path, join_depth=None):
effective_path = (
context.compile_state.current_path or orm_util.PathRegistry.root
) + path
if loading.PostLoad.path_exists(
context, effective_path, self.parent_property
):
return True
path_w_prop = path[self.parent_property]
effective_path_w_prop = effective_path[self.parent_property]
if not path_w_prop.contains(context.attributes, "loader"):
if join_depth:
if effective_path_w_prop.length / 2 > join_depth:
return True
elif effective_path_w_prop.contains_mapper(self.mapper):
return True
return False
def _immediateload_create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
return self.parent_property._get_strategy(
(("lazy", "immediate"),)
).create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
@relationships.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(PostLoader):
__slots__ = ()
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
def load_immediate(state, dict_, row):
state.get_impl(self.key).get(state, dict_)
if self._check_recursive_postload(context, path):
return
populators["delayed"].append((self.key, load_immediate))
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(PostLoader):
__slots__ = ("join_depth",)
def __init__(self, parent, strategy_key):
super(SubqueryLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def _get_leftmost(self, subq_path, current_compile_state, is_root):
given_subq_path = subq_path
subq_path = subq_path.path
subq_mapper = orm_util._class_to_mapper(subq_path[0])
# determine attributes of the leftmost mapper
if (
self.parent.isa(subq_mapper)
and self.parent_property is subq_path[1]
):
leftmost_mapper, leftmost_prop = self.parent, self.parent_property
else:
leftmost_mapper, leftmost_prop = subq_mapper, subq_path[1]
if is_root:
# the subq_path is also coming from cached state, so when we start
# building up this path, it has to also be converted to be in terms
# of the current state. this is for the specific case of the entity
# is an AliasedClass against a subquery that's not otherwise going
# to adapt
new_subq_path = current_compile_state._entities[
0
].entity_zero._path_registry[leftmost_prop]
additional = len(subq_path) - len(new_subq_path)
if additional:
new_subq_path += path_registry.PathRegistry.coerce(
subq_path[-additional:]
)
else:
new_subq_path = given_subq_path
leftmost_cols = leftmost_prop.local_columns
leftmost_attr = [
getattr(
new_subq_path.path[0].entity,
leftmost_mapper._columntoproperty[c].key,
)
for c in leftmost_cols
]
return leftmost_mapper, leftmost_attr, leftmost_prop, new_subq_path
def _generate_from_original_query(
self,
orig_compile_state,
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
orig_entity,
):
# reformat the original query
# to look only for significant columns
q = orig_query._clone().correlate(None)
# LEGACY: make a Query back from the select() !!
# This suits at least two legacy cases:
# 1. applications which expect before_compile() to be called
# below when we run .subquery() on this query (Keystone)
# 2. applications which are doing subqueryload with complex
# from_self() queries, as query.subquery() / .statement
# has to do the full compile context for multiply-nested
# from_self() (Neutron) - see test_subqload_from_self
# for demo.
q2 = query.Query.__new__(query.Query)
q2.__dict__.update(q.__dict__)
q = q2
# set the query's "FROM" list explicitly to what the
# FROM list would be in any case, as we will be limiting
# the columns in the SELECT list which may no longer include
# all entities mentioned in things like WHERE, JOIN, etc.
if not q._from_obj:
q._enable_assertions = False
q.select_from.non_generative(
q,
*{
ent["entity"]
for ent in _column_descriptions(
orig_query, compile_state=orig_compile_state
)
if ent["entity"] is not None
}
)
# select from the identity columns of the outer (specifically, these
# are the 'local_cols' of the property). This will remove other
# columns from the query that might suggest the right entity which is
# why we do set select_from above. The attributes we have are
# coerced and adapted using the original query's adapter, which is
# needed only for the case of adapting a subclass column to
# that of a polymorphic selectable, e.g. we have
# Engineer.primary_language and the entity is Person. All other
# adaptations, e.g. from_self, select_entity_from(), will occur
# within the new query when it compiles, as the compile_state we are
# using here is only a partial one. If the subqueryload is from a
# with_polymorphic() or other aliased() object, left_attr will already
# be the correct attributes so no adaptation is needed.
target_cols = orig_compile_state._adapt_col_list(
[
sql.coercions.expect(sql.roles.ColumnsClauseRole, o)
for o in leftmost_attr
],
orig_compile_state._get_current_adapter(),
)
q._raw_columns = target_cols
distinct_target_key = leftmost_relationship.distinct_target_key
if distinct_target_key is True:
q._distinct = True
elif distinct_target_key is None:
# if target_cols refer to a non-primary key or only
# part of a composite primary key, set the q as distinct
for t in set(c.table for c in target_cols):
if not set(target_cols).issuperset(t.primary_key):
q._distinct = True
break
# don't need ORDER BY if no limit/offset
if not q._has_row_limiting_clause:
q._order_by_clauses = ()
if q._distinct is True and q._order_by_clauses:
# the logic to automatically add the order by columns to the query
# when distinct is True is deprecated in the query
to_add = sql_util.expand_column_list_from_order_by(
target_cols, q._order_by_clauses
)
if to_add:
q._set_entities(target_cols + to_add)
# the original query now becomes a subquery
# which we'll join onto.
# LEGACY: as "q" is a Query, the before_compile() event is invoked
# here.
embed_q = q.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).subquery()
left_alias = orm_util.AliasedClass(
leftmost_mapper, embed_q, use_mapper_path=True
)
return left_alias
def _prep_for_joins(self, left_alias, subq_path):
# figure out what's being joined. a.k.a. the fun part
to_join = []
pairs = list(subq_path.pairs())
for i, (mapper, prop) in enumerate(pairs):
if i > 0:
# look at the previous mapper in the chain -
# if it is as or more specific than this prop's
# mapper, use that instead.
# note we have an assumption here that
# the non-first element is always going to be a mapper,
# not an AliasedClass
prev_mapper = pairs[i - 1][1].mapper
to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
else:
to_append = mapper
to_join.append((to_append, prop.key))
# determine the immediate parent class we are joining from,
# which needs to be aliased.
if len(to_join) < 2:
# in the case of a one level eager load, this is the
# leftmost "left_alias".
parent_alias = left_alias
else:
info = inspect(to_join[-1][0])
if info.is_aliased_class:
parent_alias = info.entity
else:
# alias a plain mapper as we may be
# joining multiple times
parent_alias = orm_util.AliasedClass(
info.entity, use_mapper_path=True
)
local_cols = self.parent_property.local_columns
local_attr = [
getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
return to_join, local_attr, parent_alias
def _apply_joins(
self, q, to_join, left_alias, parent_alias, effective_entity
):
ltj = len(to_join)
if ltj == 1:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(effective_entity)
]
elif ltj == 2:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(parent_alias),
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
),
]
elif ltj > 2:
middle = [
(
orm_util.AliasedClass(item[0])
if not inspect(item[0]).is_aliased_class
else item[0].entity,
item[1],
)
for item in to_join[1:-1]
]
inner = []
while middle:
item = middle.pop(0)
attr = getattr(item[0], item[1])
if middle:
attr = attr.of_type(middle[0][0])
else:
attr = attr.of_type(parent_alias)
inner.append(attr)
to_join = (
[getattr(left_alias, to_join[0][1]).of_type(inner[0].parent)]
+ inner
+ [
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
)
]
)
for attr in to_join:
q = q.join(attr)
return q
def _setup_options(
self,
context,
q,
subq_path,
rewritten_path,
orig_query,
effective_entity,
loadopt,
):
opts = orig_query._with_options
if loadopt and loadopt._extra_criteria:
opts += (
orm_util.LoaderCriteriaOption(
self.entity,
loadopt._generate_extra_criteria(context),
),
)
# propagate loader options etc. to the new query.
# these will fire relative to subq_path.
q = q._with_current_path(rewritten_path)
q = q.options(*opts)
return q
def _setup_outermost_orderby(self, q):
if self.parent_property.order_by:
def _setup_outermost_orderby(compile_context):
compile_context.eager_order_by += tuple(
util.to_list(self.parent_property.order_by)
)
q = q._add_context_option(
_setup_outermost_orderby, self.parent_property
)
return q
class _SubqCollections(object):
"""Given a :class:`_query.Query` used to emit the "subquery load",
provide a load interface that executes the query at the
first moment a value is needed.
"""
__slots__ = (
"session",
"execution_options",
"load_options",
"params",
"subq",
"_data",
)
def __init__(self, context, subq):
            # avoid creating a cycle by not storing the context itself,
            # even though that would be preferable
self.session = context.session
self.execution_options = context.execution_options
self.load_options = context.load_options
self.params = context.params or {}
self.subq = subq
self._data = None
def get(self, key, default):
if self._data is None:
self._load()
return self._data.get(key, default)
def _load(self):
self._data = collections.defaultdict(list)
q = self.subq
assert q.session is None
if "compiled_cache" in self.execution_options:
q = q.execution_options(
compiled_cache=self.execution_options["compiled_cache"]
)
q = q.with_session(self.session)
if self.load_options._populate_existing:
q = q.populate_existing()
# to work with baked query, the parameters may have been
# updated since this query was created, so take these into account
rows = list(q.params(self.params))
for k, v in itertools.groupby(rows, lambda x: x[1:]):
self._data[k].extend(vv[0] for vv in v)
def loader(self, state, dict_, row):
if self._data is None:
self._load()
def _setup_query_from_rowproc(
self,
context,
path,
entity,
loadopt,
adapter,
):
compile_state = context.compile_state
if (
not compile_state.compile_options._enable_eagerloads
or compile_state.compile_options._for_refresh_state
):
return
context.loaders_require_buffering = True
path = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = with_poly_entity
else:
effective_entity = self.entity
subq_path, rewritten_path = context.query._execution_options.get(
("subquery_paths", None),
(orm_util.PathRegistry.root, orm_util.PathRegistry.root),
)
is_root = subq_path is orm_util.PathRegistry.root
subq_path = subq_path + path
rewritten_path = rewritten_path + path
# if not via query option, check for
# a cycle
if not path.contains(compile_state.attributes, "loader"):
if self.join_depth:
if (
(
compile_state.current_path.length
if compile_state.current_path
else 0
)
+ path.length
) / 2 > self.join_depth:
return
elif subq_path.contains_mapper(self.mapper):
return
# use the current query being invoked, not the compile state
# one. this is so that we get the current parameters. however,
# it means we can't use the existing compile state, we have to make
# a new one. other approaches include possibly using the
# compiled query but swapping the params, seems only marginally
# less time spent but more complicated
orig_query = context.query._execution_options.get(
("orig_query", SubqueryLoader), context.query
)
# make a new compile_state for the query that's probably cached, but
# we're sort of undoing a bit of that caching :(
compile_state_cls = ORMCompileState._get_plugin_class_for_plugin(
orig_query, "orm"
)
if orig_query._is_lambda_element:
if context.load_options._lazy_loaded_from is None:
util.warn(
'subqueryloader for "%s" must invoke lambda callable '
"at %r in "
"order to produce a new query, decreasing the efficiency "
"of caching for this statement. Consider using "
"selectinload() for more effective full-lambda caching"
% (self, orig_query)
)
orig_query = orig_query._resolved
# this is the more "quick" version, however it's not clear how
# much of this we need. in particular I can't get a test to
# fail if the "set_base_alias" is missing and not sure why that is.
orig_compile_state = compile_state_cls._create_entities_collection(
orig_query, legacy=False
)
(
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
rewritten_path,
) = self._get_leftmost(rewritten_path, orig_compile_state, is_root)
# generate a new Query from the original, then
# produce a subquery from it.
left_alias = self._generate_from_original_query(
orig_compile_state,
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
entity,
)
# generate another Query that will join the
# left alias to the target relationships.
# basically doing a longhand
# "from_self()". (from_self() itself not quite industrial
# strength enough for all contingencies...but very close)
q = query.Query(effective_entity)
q._execution_options = q._execution_options.union(
{
("orig_query", SubqueryLoader): orig_query,
("subquery_paths", None): (subq_path, rewritten_path),
}
)
q = q._set_enable_single_crit(False)
to_join, local_attr, parent_alias = self._prep_for_joins(
left_alias, subq_path
)
q = q.add_columns(*local_attr)
q = self._apply_joins(
q, to_join, left_alias, parent_alias, effective_entity
)
q = self._setup_options(
context,
q,
subq_path,
rewritten_path,
orig_query,
effective_entity,
loadopt,
)
q = self._setup_outermost_orderby(q)
return q
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
if context.refresh_state:
return self._immediateload_create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
        # the subqueryloader does a similar check in setup_query(), unlike the
        # other post loaders; we keep this check here as well for consistency
elif self._check_recursive_postload(context, path, self.join_depth):
return
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
        # a little dance here as the "path" is still something that only
        # semi-tracks the exact series of things we are loading, still not
        # telling us about with_polymorphic() and stuff like that when it's at
        # the root. The initial MapperEntity is more accurate for this case.
if len(path) == 1:
if not orm_util._entity_isa(query_entity.entity_zero, self.parent):
return
elif not orm_util._entity_isa(path[-1], self.parent):
return
subq = self._setup_query_from_rowproc(
context,
path,
path[-1],
loadopt,
adapter,
)
if subq is None:
return
assert subq.session is None
path = path[self.parent_property]
local_cols = self.parent_property.local_columns
# cache the loaded collections in the context
# so that inheriting mappers don't re-load when they
# call upon create_row_processor again
collections = path.get(context.attributes, "collections")
if collections is None:
collections = self._SubqCollections(context, subq)
path.set(context.attributes, "collections", collections)
if adapter:
local_cols = [adapter.columns[c] for c in local_cols]
if self.uselist:
self._create_collection_loader(
context, result, collections, local_cols, populators
)
else:
self._create_scalar_loader(
context, result, collections, local_cols, populators
)
def _create_collection_loader(
self, context, result, collections, local_cols, populators
):
tuple_getter = result._tuple_getter(local_cols)
def load_collection_from_subq(state, dict_, row):
collection = collections.get(tuple_getter(row), ())
state.get_impl(self.key).set_committed_value(
state, dict_, collection
)
def load_collection_from_subq_existing_row(state, dict_, row):
if self.key not in dict_:
load_collection_from_subq(state, dict_, row)
populators["new"].append((self.key, load_collection_from_subq))
populators["existing"].append(
(self.key, load_collection_from_subq_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
def _create_scalar_loader(
self, context, result, collections, local_cols, populators
):
tuple_getter = result._tuple_getter(local_cols)
def load_scalar_from_subq(state, dict_, row):
collection = collections.get(tuple_getter(row), (None,))
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' " % self
)
scalar = collection[0]
state.get_impl(self.key).set_committed_value(state, dict_, scalar)
def load_scalar_from_subq_existing_row(state, dict_, row):
if self.key not in dict_:
load_scalar_from_subq(state, dict_, row)
populators["new"].append((self.key, load_scalar_from_subq))
populators["existing"].append(
(self.key, load_scalar_from_subq_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy="joined")
@relationships.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
using joined eager loading.
"""
__slots__ = "join_depth", "_aliased_class_pool"
def __init__(self, parent, strategy_key):
super(JoinedLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
self._aliased_class_pool = []
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
chained_from_outerjoin=False,
**kwargs
):
"""Add a left outer join to the statement that's being constructed."""
if not compile_state.compile_options._enable_eagerloads:
return
elif self.uselist:
compile_state.multi_row_eager_loaders = True
path = path[self.parent_property]
with_polymorphic = None
user_defined_adapter = (
self._init_user_defined_eager_proc(
loadopt, compile_state, compile_state.attributes
)
if loadopt
else False
)
if user_defined_adapter is not False:
(
clauses,
adapter,
add_to_collection,
) = self._setup_query_on_user_defined_adapter(
compile_state,
query_entity,
path,
adapter,
user_defined_adapter,
)
else:
# if not via query option, check for
# a cycle
if not path.contains(compile_state.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif path.contains_mapper(self.mapper):
return
(
clauses,
adapter,
add_to_collection,
chained_from_outerjoin,
) = self._generate_row_adapter(
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
)
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
with_polymorphic = inspect(
with_poly_entity
).with_polymorphic_mappers
else:
with_polymorphic = None
path = path[self.entity]
loading._setup_entity_query(
compile_state,
self.mapper,
query_entity,
path,
clauses,
add_to_collection,
with_polymorphic=with_polymorphic,
parentmapper=self.mapper,
chained_from_outerjoin=chained_from_outerjoin,
)
if with_poly_entity is not None and None in set(
compile_state.secondary_columns
):
raise sa_exc.InvalidRequestError(
"Detected unaliased columns when generating joined "
"load. Make sure to use aliased=True or flat=True "
"when using joined loading with with_polymorphic()."
)
def _init_user_defined_eager_proc(
self, loadopt, compile_state, target_attributes
):
# check if the opt applies at all
if "eager_from_alias" not in loadopt.local_opts:
# nope
return False
path = loadopt.path.parent
# the option applies. check if the "user_defined_eager_row_processor"
# has been built up.
adapter = path.get(
compile_state.attributes, "user_defined_eager_row_processor", False
)
if adapter is not False:
# just return it
return adapter
# otherwise figure it out.
alias = loadopt.local_opts["eager_from_alias"]
root_mapper, prop = path[-2:]
if alias is not None:
if isinstance(alias, str):
alias = prop.target.alias(alias)
adapter = sql_util.ColumnAdapter(
alias, equivalents=prop.mapper._equivalent_columns
)
else:
if path.contains(
compile_state.attributes, "path_with_polymorphic"
):
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic"
)
adapter = orm_util.ORMAdapter(
with_poly_entity,
equivalents=prop.mapper._equivalent_columns,
)
else:
adapter = compile_state._polymorphic_adapters.get(
prop.mapper, None
)
path.set(
target_attributes,
"user_defined_eager_row_processor",
adapter,
)
return adapter
def _setup_query_on_user_defined_adapter(
self, context, entity, path, adapter, user_defined_adapter
):
# apply some more wrapping to the "user defined adapter"
# if we are setting up the query for SQL render.
adapter = entity._get_entity_clauses(context)
if adapter and user_defined_adapter:
user_defined_adapter = user_defined_adapter.wrap(adapter)
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
elif adapter:
user_defined_adapter = adapter
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
add_to_collection = context.primary_columns
return user_defined_adapter, adapter, add_to_collection
def _gen_pooled_aliased_class(self, context):
# keep a local pool of AliasedClass objects that get re-used.
# we need one unique AliasedClass per query per appearance of our
# entity in the query.
if inspect(self.entity).is_aliased_class:
alt_selectable = inspect(self.entity).selectable
else:
alt_selectable = None
key = ("joinedloader_ac", self)
if key not in context.attributes:
context.attributes[key] = idx = 0
else:
context.attributes[key] = idx = context.attributes[key] + 1
if idx >= len(self._aliased_class_pool):
to_adapt = orm_util.AliasedClass(
self.mapper,
alias=alt_selectable._anonymous_fromclause(flat=True)
if alt_selectable is not None
else None,
flat=True,
use_mapper_path=True,
)
# load up the .columns collection on the Alias() before
# the object becomes shared among threads. this prevents
# races for column identities.
inspect(to_adapt).selectable.c
self._aliased_class_pool.append(to_adapt)
return self._aliased_class_pool[idx]
def _generate_row_adapter(
self,
compile_state,
entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
):
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic", None
)
if with_poly_entity:
to_adapt = with_poly_entity
else:
to_adapt = self._gen_pooled_aliased_class(compile_state)
clauses = inspect(to_adapt)._memo(
("joinedloader_ormadapter", self),
orm_util.ORMAdapter,
to_adapt,
equivalents=self.mapper._equivalent_columns,
adapt_required=True,
allow_label_resolve=False,
anonymize_labels=True,
)
assert clauses.aliased_class is not None
innerjoin = (
loadopt.local_opts.get("innerjoin", self.parent_property.innerjoin)
if loadopt is not None
else self.parent_property.innerjoin
)
if not innerjoin:
# if this is an outer join, all non-nested eager joins from
# this path must also be outer joins
chained_from_outerjoin = True
compile_state.create_eager_joins.append(
(
self._create_eager_join,
entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
loadopt._extra_criteria if loadopt else (),
)
)
add_to_collection = compile_state.secondary_columns
path.set(compile_state.attributes, "eager_row_processor", clauses)
return clauses, adapter, add_to_collection, chained_from_outerjoin
def _create_eager_join(
self,
compile_state,
query_entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
extra_criteria,
):
if parentmapper is None:
localparent = query_entity.mapper
else:
localparent = parentmapper
# whether or not the Query will wrap the selectable in a subquery,
# and then attach eager load joins to that (i.e., in the case of
# LIMIT/OFFSET etc.)
should_nest_selectable = (
compile_state.multi_row_eager_loaders
and compile_state._should_nest_selectable
)
query_entity_key = None
if (
query_entity not in compile_state.eager_joins
and not should_nest_selectable
and compile_state.from_clauses
):
indexes = sql_util.find_left_clause_that_matches_given(
compile_state.from_clauses, query_entity.selectable
)
if len(indexes) > 1:
# for the eager load case, I can't reproduce this right
# now. For query.join() I can.
raise sa_exc.InvalidRequestError(
"Can't identify which query entity in which to joined "
"eager load from. Please use an exact match when "
"specifying the join path."
)
if indexes:
clause = compile_state.from_clauses[indexes[0]]
# join to an existing FROM clause on the query.
# key it to its list index in the eager_joins dict.
# Query._compile_context will adapt as needed and
# append to the FROM clause of the select().
query_entity_key, default_towrap = indexes[0], clause
if query_entity_key is None:
query_entity_key, default_towrap = (
query_entity,
query_entity.selectable,
)
towrap = compile_state.eager_joins.setdefault(
query_entity_key, default_towrap
)
if adapter:
if getattr(adapter, "aliased_class", None):
# joining from an adapted entity. The adapted entity
# might be a "with_polymorphic", so resolve that to our
# specific mapper's entity before looking for our attribute
# name on it.
efm = inspect(adapter.aliased_class)._entity_for_mapper(
localparent
if localparent.isa(self.parent)
else self.parent
)
# look for our attribute on the adapted entity, else fall back
# to our straight property
onclause = getattr(efm.entity, self.key, self.parent_property)
else:
onclause = getattr(
orm_util.AliasedClass(
self.parent, adapter.selectable, use_mapper_path=True
),
self.key,
self.parent_property,
)
else:
onclause = self.parent_property
assert clauses.aliased_class is not None
attach_on_outside = (
not chained_from_outerjoin
or not innerjoin
or innerjoin == "unnested"
or query_entity.entity_zero.represents_outer_join
)
extra_join_criteria = extra_criteria
additional_entity_criteria = compile_state.global_attributes.get(
("additional_entity_criteria", self.mapper), ()
)
if additional_entity_criteria:
extra_join_criteria += tuple(
ae._resolve_where_criteria(self.mapper)
for ae in additional_entity_criteria
if ae.propagate_to_loaders
)
if attach_on_outside:
# this is the "classic" eager join case.
eagerjoin = orm_util._ORMJoin(
towrap,
clauses.aliased_class,
onclause,
isouter=not innerjoin
or query_entity.entity_zero.represents_outer_join
or (chained_from_outerjoin and isinstance(towrap, sql.Join)),
_left_memo=self.parent,
_right_memo=self.mapper,
_extra_criteria=extra_join_criteria,
)
else:
# all other cases are innerjoin=='nested' approach
eagerjoin = self._splice_nested_inner_join(
path, towrap, clauses, onclause, extra_join_criteria
)
compile_state.eager_joins[query_entity_key] = eagerjoin
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = query_entity.selectable
if not parentmapper:
# for parentclause that is the non-eager end of the join,
# ensure all the parent cols in the primaryjoin are actually
# in the
# columns clause (i.e. are not deferred), so that aliasing applied
# by the Query propagates those columns outward.
# This has the effect
# of "undefering" those columns.
for col in sql_util._find_columns(
self.parent_property.primaryjoin
):
if localparent.persist_selectable.c.contains_column(col):
if adapter:
col = adapter.columns[col]
compile_state.primary_columns.append(col)
if self.parent_property.order_by:
compile_state.eager_order_by += tuple(
(eagerjoin._target_adapter.copy_and_process)(
util.to_list(self.parent_property.order_by)
)
)
def _splice_nested_inner_join(
self, path, join_obj, clauses, onclause, extra_criteria, splicing=False
):
if splicing is False:
# first call is always handed a join object
# from the outside
assert isinstance(join_obj, orm_util._ORMJoin)
elif isinstance(join_obj, sql.selectable.FromGrouping):
return self._splice_nested_inner_join(
path,
join_obj.element,
clauses,
onclause,
extra_criteria,
splicing,
)
elif not isinstance(join_obj, orm_util._ORMJoin):
if path[-2] is splicing:
return orm_util._ORMJoin(
join_obj,
clauses.aliased_class,
onclause,
isouter=False,
_left_memo=splicing,
_right_memo=path[-1].mapper,
_extra_criteria=extra_criteria,
)
else:
# only here if splicing == True
return None
target_join = self._splice_nested_inner_join(
path,
join_obj.right,
clauses,
onclause,
extra_criteria,
join_obj._right_memo,
)
if target_join is None:
right_splice = False
target_join = self._splice_nested_inner_join(
path,
join_obj.left,
clauses,
onclause,
extra_criteria,
join_obj._left_memo,
)
if target_join is None:
# should only return None when recursively called,
# e.g. splicing==True
assert (
splicing is not False
), "assertion failed attempting to produce joined eager loads"
return None
else:
right_splice = True
if right_splice:
# for a right splice, attempt to flatten out
# a JOIN b JOIN c JOIN .. to avoid needless
# parenthesis nesting
if not join_obj.isouter and not target_join.isouter:
eagerjoin = join_obj._splice_into_center(target_join)
else:
eagerjoin = orm_util._ORMJoin(
join_obj.left,
target_join,
join_obj.onclause,
isouter=join_obj.isouter,
_left_memo=join_obj._left_memo,
)
else:
eagerjoin = orm_util._ORMJoin(
target_join,
join_obj.right,
join_obj.onclause,
isouter=join_obj.isouter,
_right_memo=join_obj._right_memo,
)
eagerjoin._target_adapter = target_join._target_adapter
return eagerjoin
def _create_eager_adapter(self, context, result, adapter, path, loadopt):
compile_state = context.compile_state
user_defined_adapter = (
self._init_user_defined_eager_proc(
loadopt, compile_state, context.attributes
)
if loadopt
else False
)
if user_defined_adapter is not False:
decorator = user_defined_adapter
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if compile_state.compound_eager_adapter and decorator:
decorator = decorator.wrap(
compile_state.compound_eager_adapter
)
elif compile_state.compound_eager_adapter:
decorator = compile_state.compound_eager_adapter
else:
decorator = path.get(
compile_state.attributes, "eager_row_processor"
)
if decorator is None:
return False
if self.mapper._result_has_identity_key(result, decorator):
return decorator
else:
# no identity key - don't return a row
# processor, will cause a degrade to lazy
return False
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
if self.uselist:
context.loaders_require_uniquing = True
our_path = path[self.parent_property]
eager_adapter = self._create_eager_adapter(
context, result, adapter, our_path, loadopt
)
if eager_adapter is not False:
key = self.key
_instance = loading._instance_processor(
query_entity,
self.mapper,
context,
result,
our_path[self.entity],
eager_adapter,
)
if not self.uselist:
self._create_scalar_loader(context, key, _instance, populators)
else:
self._create_collection_loader(
context, key, _instance, populators
)
else:
self.parent_property._get_strategy(
(("lazy", "select"),)
).create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
def _create_collection_loader(self, context, key, _instance, populators):
def load_collection_from_joined_new_row(state, dict_, row):
# note this must unconditionally clear out any existing collection.
# an existing collection would be present only in the case of
# populate_existing().
collection = attributes.init_state_collection(state, dict_, key)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_existing_row(state, dict_, row):
if (state, key) in context.attributes:
result_list = context.attributes[(state, key)]
else:
# appender_key can be absent from context.attributes
# with isnew=False when self-referential eager loading
# is used; the same instance may be present in two
# distinct sets of result columns
collection = attributes.init_state_collection(
state, dict_, key
)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append(
(self.key, load_collection_from_joined_new_row)
)
populators["existing"].append(
(self.key, load_collection_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_collection_from_joined_exec)
)
def _create_scalar_loader(self, context, key, _instance, populators):
def load_scalar_from_joined_new_row(state, dict_, row):
# set a scalar object instance directly on the parent
# object, bypassing InstrumentedAttribute event handlers.
dict_[key] = _instance(row)
def load_scalar_from_joined_existing_row(state, dict_, row):
# call _instance on the row, even though the object has
# been created, so that we further descend into properties
existing = _instance(row)
# conflicting value already loaded, this shouldn't happen
if key in dict_:
if existing is not dict_[key]:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self
)
else:
# this case is when one row has multiple loads of the
# same entity (e.g. via aliasing), one has an attribute
# that the other doesn't.
dict_[key] = existing
def load_scalar_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append((self.key, load_scalar_from_joined_new_row))
populators["existing"].append(
(self.key, load_scalar_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_scalar_from_joined_exec)
)
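# Illustrative sketch (editor's addition, not part of the original source):
# joined eager loading as implemented by the JoinedLoader strategy above.
# The User model and session are assumptions standing in for any mapped
# relationship.
def _example_joinedload_usage(session, User):
    from sqlalchemy.orm import joinedload
    # A single SELECT is emitted with a LEFT OUTER JOIN (or an INNER JOIN
    # when innerjoin=True) against an anonymously aliased related table;
    # related rows are folded into each User's .addresses collection.
    return (
        session.query(User)
        .options(joinedload(User.addresses, innerjoin=True))
        .all()
    )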
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy="selectin")
class SelectInLoader(PostLoader, util.MemoizedSlots):
__slots__ = (
"join_depth",
"omit_join",
"_parent_alias",
"_query_info",
"_fallback_query_info",
"_query_cache",
)
query_info = collections.namedtuple(
"queryinfo",
[
"load_only_child",
"load_with_join",
"in_expr",
"pk_cols",
"zero_idx",
"child_lookup_cols",
],
)
_chunksize = 500
def __init__(self, parent, strategy_key):
super(SelectInLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
is_m2o = self.parent_property.direction is interfaces.MANYTOONE
if self.parent_property.omit_join is not None:
self.omit_join = self.parent_property.omit_join
else:
lazyloader = self.parent_property._get_strategy(
(("lazy", "select"),)
)
if is_m2o:
self.omit_join = lazyloader.use_get
else:
self.omit_join = self.parent._get_clause[0].compare(
lazyloader._rev_lazywhere,
use_proxies=True,
compare_keys=False,
equivalents=self.parent._equivalent_columns,
)
if self.omit_join:
if is_m2o:
self._query_info = self._init_for_omit_join_m2o()
self._fallback_query_info = self._init_for_join()
else:
self._query_info = self._init_for_omit_join()
else:
self._query_info = self._init_for_join()
def _init_for_omit_join(self):
pk_to_fk = dict(
self.parent_property._join_condition.local_remote_pairs
)
pk_to_fk.update(
(equiv, pk_to_fk[k])
for k in list(pk_to_fk)
for equiv in self.parent._equivalent_columns.get(k, ())
)
pk_cols = fk_cols = [
pk_to_fk[col] for col in self.parent.primary_key if col in pk_to_fk
]
if len(fk_cols) > 1:
in_expr = sql.tuple_(*fk_cols)
zero_idx = False
else:
in_expr = fk_cols[0]
zero_idx = True
return self.query_info(False, False, in_expr, pk_cols, zero_idx, None)
def _init_for_omit_join_m2o(self):
pk_cols = self.mapper.primary_key
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
lazyloader = self.parent_property._get_strategy((("lazy", "select"),))
lookup_cols = [lazyloader._equated_columns[pk] for pk in pk_cols]
return self.query_info(
True, False, in_expr, pk_cols, zero_idx, lookup_cols
)
def _init_for_join(self):
self._parent_alias = aliased(self.parent.class_)
pa_insp = inspect(self._parent_alias)
pk_cols = [
pa_insp._adapt_element(col) for col in self.parent.primary_key
]
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
return self.query_info(False, True, in_expr, pk_cols, zero_idx, None)
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def _memoized_attr__query_cache(self):
return util.LRUCache(30, size_alert=self._size_alert)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
if context.refresh_state:
return self._immediateload_create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
elif self._check_recursive_postload(context, path, self.join_depth):
return
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
        # a little dance here as the "path" is still something that only
        # semi-tracks the exact series of things we are loading, still not
        # telling us about with_polymorphic() and stuff like that when it's at
        # the root. The initial MapperEntity is more accurate for this case.
if len(path) == 1:
if not orm_util._entity_isa(query_entity.entity_zero, self.parent):
return
elif not orm_util._entity_isa(path[-1], self.parent):
return
selectin_path = (
context.compile_state.current_path or orm_util.PathRegistry.root
) + path
path_w_prop = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path_w_prop.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = inspect(with_poly_entity)
else:
effective_entity = self.entity
loading.PostLoad.callable_for_path(
context,
selectin_path,
self.parent,
self.parent_property,
self._load_for_path,
effective_entity,
loadopt,
)
def _load_for_path(
self, context, path, states, load_only, effective_entity, loadopt
):
if load_only and self.key not in load_only:
return
query_info = self._query_info
if query_info.load_only_child:
our_states = collections.defaultdict(list)
none_states = []
mapper = self.parent
for state, overwrite in states:
state_dict = state.dict
related_ident = tuple(
mapper._get_state_attr_by_column(
state,
state_dict,
lk,
passive=attributes.PASSIVE_NO_FETCH,
)
for lk in query_info.child_lookup_cols
)
# if the loaded parent objects do not have the foreign key
# to the related item loaded, then degrade into the joined
# version of selectinload
if attributes.PASSIVE_NO_RESULT in related_ident:
query_info = self._fallback_query_info
break
# organize states into lists keyed to particular foreign
# key values.
if None not in related_ident:
our_states[related_ident].append(
(state, state_dict, overwrite)
)
else:
# For FK values that have None, add them to a
# separate collection that will be populated separately
none_states.append((state, state_dict, overwrite))
# note the above conditional may have changed query_info
if not query_info.load_only_child:
our_states = [
(state.key[1], state, state.dict, overwrite)
for state, overwrite in states
]
pk_cols = query_info.pk_cols
in_expr = query_info.in_expr
if not query_info.load_with_join:
# in "omit join" mode, the primary key column and the
# "in" expression are in terms of the related entity. So
# if the related entity is polymorphic or otherwise aliased,
# we need to adapt our "pk_cols" and "in_expr" to that
# entity. in non-"omit join" mode, these are against the
# parent entity and do not need adaption.
if effective_entity.is_aliased_class:
pk_cols = [
effective_entity._adapt_element(col) for col in pk_cols
]
in_expr = effective_entity._adapt_element(in_expr)
q = sql.lambda_stmt(
lambda: sql.select(
orm_util.Bundle("pk", *pk_cols), effective_entity
)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
._set_compile_options(ORMCompileState.default_compile_options)
._set_propagate_attrs(
{
"compile_state_plugin": "orm",
"plugin_subject": effective_entity,
}
),
lambda_cache=self._query_cache,
global_track_bound_values=False,
track_on=(self, effective_entity) + tuple(pk_cols),
)
if not self.parent_property.bake_queries:
q = q.spoil()
if not query_info.load_with_join:
# the Bundle we have in the "omit_join" case is against raw, non
# annotated columns, so to ensure the Query knows its primary
# entity, we add it explicitly. If we made the Bundle against
# annotated columns, we hit a performance issue in this specific
# case, which is detailed in issue #4347.
q = q.add_criteria(lambda q: q.select_from(effective_entity))
else:
# in the non-omit_join case, the Bundle is against the annotated/
# mapped column of the parent entity, but the #4347 issue does not
# occur in this case.
q = q.add_criteria(
lambda q: q.select_from(self._parent_alias).join(
getattr(
self._parent_alias, self.parent_property.key
).of_type(effective_entity)
),
track_on=[self],
)
q = q.add_criteria(
lambda q: q.filter(in_expr.in_(sql.bindparam("primary_keys")))
)
# a test which exercises what these comments talk about is
# test_selectin_relations.py -> test_twolevel_selectin_w_polymorphic
#
# effective_entity above is given to us in terms of the cached
# statement, namely this one:
orig_query = context.compile_state.select_statement
# the actual statement that was requested is this one:
# context_query = context.query
#
# that's not the cached one, however. So while it is of the identical
# structure, if it has entities like AliasedInsp, which we get from
# aliased() or with_polymorphic(), the AliasedInsp will likely be a
# different object identity each time, and will not match up
# hashing-wise to the corresponding AliasedInsp that's in the
# cached query, meaning it won't match on paths and loader lookups
# and loaders like this one will be skipped if it is used in options.
#
# Now we want to transfer loader options from the parent query to the
# "selectinload" query we're about to run. Which query do we transfer
# the options from? We use the cached query, because the options in
# that query will be in terms of the effective entity we were just
# handed.
#
# But now the selectinload query we are running is *also*
# cached. What if it's cached and running from some previous iteration
# of that AliasedInsp? Well in that case it will also use the previous
# iteration of the loader options. If the query expires and
# gets generated again, it will be handed the current effective_entity
# and the current _with_options, again in terms of whatever
# compile_state.select_statement happens to be right now, so the
# query will still be internally consistent and loader callables
# will be correctly invoked.
effective_path = path[self.parent_property]
options = orig_query._with_options
if loadopt and loadopt._extra_criteria:
options += (
orm_util.LoaderCriteriaOption(
effective_entity,
loadopt._generate_extra_criteria(context),
),
)
q = q.add_criteria(
lambda q: q.options(*options)._update_compile_options(
{"_current_path": effective_path}
)
)
if context.populate_existing:
q = q.add_criteria(
lambda q: q.execution_options(populate_existing=True)
)
if self.parent_property.order_by:
if not query_info.load_with_join:
eager_order_by = self.parent_property.order_by
if effective_entity.is_aliased_class:
eager_order_by = [
effective_entity._adapt_element(elem)
for elem in eager_order_by
]
q = q.add_criteria(lambda q: q.order_by(*eager_order_by))
else:
def _setup_outermost_orderby(compile_context):
compile_context.eager_order_by += tuple(
util.to_list(self.parent_property.order_by)
)
q = q.add_criteria(
lambda q: q._add_context_option(
_setup_outermost_orderby, self.parent_property
),
track_on=[self],
)
if query_info.load_only_child:
self._load_via_child(
our_states, none_states, query_info, q, context
)
else:
self._load_via_parent(our_states, query_info, q, context)
def _load_via_child(self, our_states, none_states, query_info, q, context):
uselist = self.uselist
# this sort is really for the benefit of the unit tests
our_keys = sorted(our_states)
while our_keys:
chunk = our_keys[0 : self._chunksize]
our_keys = our_keys[self._chunksize :]
data = {
k: v
for k, v in context.session.execute(
q,
params={
"primary_keys": [
key[0] if query_info.zero_idx else key
for key in chunk
]
},
).unique()
}
for key in chunk:
# for a real foreign key and no concurrent changes to the
# DB while running this method, "key" is always present in
# data. However, for primaryjoins without real foreign keys
# a non-None primaryjoin condition may still refer to no
# related object.
related_obj = data.get(key, None)
for state, dict_, overwrite in our_states[key]:
if not overwrite and self.key in dict_:
continue
state.get_impl(self.key).set_committed_value(
state,
dict_,
related_obj if not uselist else [related_obj],
)
# populate none states with empty value / collection
for state, dict_, overwrite in none_states:
if not overwrite and self.key in dict_:
continue
# note it's OK if this is a uselist=True attribute, the empty
# collection will be populated
state.get_impl(self.key).set_committed_value(state, dict_, None)
def _load_via_parent(self, our_states, query_info, q, context):
uselist = self.uselist
_empty_result = () if uselist else None
while our_states:
chunk = our_states[0 : self._chunksize]
our_states = our_states[self._chunksize :]
primary_keys = [
key[0] if query_info.zero_idx else key
for key, state, state_dict, overwrite in chunk
]
data = collections.defaultdict(list)
for k, v in itertools.groupby(
context.session.execute(
q, params={"primary_keys": primary_keys}
).unique(),
lambda x: x[0],
):
data[k].extend(vv[1] for vv in v)
for key, state, state_dict, overwrite in chunk:
if not overwrite and self.key in state_dict:
continue
collection = data.get(key, _empty_result)
if not uselist and collection:
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded "
"attribute '%s' " % self
)
state.get_impl(self.key).set_committed_value(
state, state_dict, collection[0]
)
else:
# note that empty tuple set on uselist=False sets the
# value to None
state.get_impl(self.key).set_committed_value(
state, state_dict, collection
)
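# Illustrative sketch (editor's addition, not part of the original source):
# "SELECT IN" eager loading as implemented by the SelectInLoader strategy
# above. The User model and session are assumptions standing in for any
# mapped relationship.
def _example_selectinload_usage(session, User):
    from sqlalchemy.orm import selectinload
    # One SELECT is emitted for the User rows; the related rows are then
    # loaded in chunks of up to 500 parent identities (_chunksize above)
    # using an IN clause, avoiding the JOIN/subquery of the other strategies.
    return (
        session.query(User)
        .options(selectinload(User.addresses))
        .all()
    )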
def single_parent_validator(desc, prop):
def _do_check(state, value, oldvalue, initiator):
if value is not None and initiator.key == prop.key:
hasparent = initiator.hasparent(attributes.instance_state(value))
if hasparent and oldvalue is not value:
raise sa_exc.InvalidRequestError(
"Instance %s is already associated with an instance "
"of %s via its %s attribute, and is only allowed a "
"single parent."
% (orm_util.instance_str(value), state.class_, prop),
code="bbf1",
)
return value
def append(state, value, initiator):
return _do_check(state, value, None, initiator)
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
event.listen(
desc, "append", append, raw=True, retval=True, active_history=True
)
event.listen(desc, "set", set_, raw=True, retval=True, active_history=True)
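# Illustrative sketch (editor's addition, not part of the original source):
# the validator above is installed when a relationship is configured with
# single_parent=True; re-parenting a child that is still attached to another
# parent raises InvalidRequestError (error code "bbf1"). The models and the
# in-memory SQLite engine below are assumptions for demonstration only.
def _example_single_parent_validation():
    from sqlalchemy import Column, ForeignKey, Integer, create_engine
    from sqlalchemy.orm import Session, declarative_base, relationship
    Base = declarative_base()
    class Child(Base):
        __tablename__ = "child"
        id = Column(Integer, primary_key=True)
    class Parent(Base):
        __tablename__ = "parent"
        id = Column(Integer, primary_key=True)
        child_id = Column(Integer, ForeignKey("child.id"))
        child = relationship(
            Child, single_parent=True, cascade="all, delete-orphan"
        )
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        child = Child()
        session.add(Parent(child=child))
        session.flush()
        # The same child may not be attached to a second parent; the "set"
        # event installed by single_parent_validator raises here.
        Parent(child=child)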
| 34.250323 | 79 | 0.574182 |
| 4f933af75485425665901deb2a04eb76721d5702 | 1,602 | py | Python | qtpyvcp/widgets/hal_widgets/hal_led.py | Lcvette/qtpyvcp | 4143a4a4e1f557f7d0c8998c886b4a254f0be60b | ["BSD-3-Clause-LBNL", "MIT"] | 71 | 2018-12-13T20:31:18.000Z | 2022-03-26T08:44:22.000Z | qtpyvcp/widgets/hal_widgets/hal_led.py | Lcvette/qtpyvcp | 4143a4a4e1f557f7d0c8998c886b4a254f0be60b | ["BSD-3-Clause-LBNL", "MIT"] | 78 | 2019-01-10T18:16:33.000Z | 2022-03-18T19:30:49.000Z | qtpyvcp/widgets/hal_widgets/hal_led.py | Lcvette/qtpyvcp | 4143a4a4e1f557f7d0c8998c886b4a254f0be60b | ["BSD-3-Clause-LBNL", "MIT"] | 38 | 2018-10-10T19:02:26.000Z | 2022-01-30T04:38:14.000Z |
from qtpyvcp import hal
from qtpyvcp.widgets import HALWidget
from qtpyvcp.widgets.base_widgets.led_widget import LEDWidget
# Setup logging
from qtpyvcp.utilities.logger import getLogger
log = getLogger(__name__)
class HalLedIndicator(LEDWidget, HALWidget):
"""HAL LED
    LED for indicating the state of `bit` HAL pins.
.. table:: Generated HAL Pins
========================= ========= =========
HAL Pin Name Type Direction
========================= ========= =========
qtpyvcp.led.on bit in
qtpyvcp.led.flash bit in
        qtpyvcp.led.flash-rate    u32       in
========================= ========= =========
"""
def __init__(self, parent=None):
super(HalLedIndicator, self).__init__(parent)
        self._on_pin = None
        self._flash_pin = None
        self._flash_rate_pin = None
def initialize(self):
comp = hal.getComponent()
obj_name = self.getPinBaseName()
# add led.on HAL pin
self._on_pin = comp.addPin(obj_name + ".on", "bit", "in")
# self._on_pin.value = self.isO()
self._on_pin.valueChanged.connect(lambda state: self.setState(state))
# add led.flash HAL pin
self._flash_pin = comp.addPin(obj_name + ".flash", "bit", "in")
self._flash_pin.valueChanged.connect(lambda flash: self.setFlashing(flash))
# add led.flash-rate HAL pin
self._flash_rate_pin = comp.addPin(obj_name + ".flash-rate", "u32", "in")
self._flash_rate_pin.valueChanged.connect(lambda rate: self.setFlashRate(rate))
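# Illustrative sketch (editor's addition, not part of the original file):
# once initialize() has run, the generated pins can be wired up from a HAL
# file or a halcmd session. The "qtpyvcp.led" prefix and the halui source
# pins below are assumptions for demonstration only; the real prefix comes
# from getPinBaseName().
#
#   net machine-on   halui.machine.is-on       =>  qtpyvcp.led.on
#   net estop-flash  halui.estop.is-activated  =>  qtpyvcp.led.flash
#   setp qtpyvcp.led.flash-rate 250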
| 33.375 | 87 | 0.576155 |
| aec7ad809a033c343809030391214458db5ccb19 | 2,205 | py | Python | lg_common/scripts/readiness.py | FuriousJulius/lg_ros_nodes | 15a84c5022ab2f5b038d11a5589cd4a34010b1d6 | ["Apache-2.0"] | null | null | null | lg_common/scripts/readiness.py | FuriousJulius/lg_ros_nodes | 15a84c5022ab2f5b038d11a5589cd4a34010b1d6 | ["Apache-2.0"] | null | null | null | lg_common/scripts/readiness.py | FuriousJulius/lg_ros_nodes | 15a84c5022ab2f5b038d11a5589cd4a34010b1d6 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
import rospy
from lg_msg_defs.msg import AdhocBrowsers
from lg_common import ReadinessNode
from lg_common import ReadinessHandbrake
from std_msgs.msg import String
from lg_msg_defs.srv import NodeReady
from lg_msg_defs.msg import Ready
from lg_common.helpers import handle_initial_state
from interactivespaces_msgs.msg import GenericMessage
from lg_common.helpers import run_with_influx_exception_handler
NODE_NAME = 'readiness_node'
def main():
rospy.init_node(NODE_NAME)
common_topic_name = '/browser_service/browsers'
readiness_topic_name = '/director/ready'
window_instances_topic_name = '/director/window/ready'
readiness_timeout = rospy.get_param("/readiness/timeout", 10)
readiness_publisher = rospy.Publisher(readiness_topic_name,
Ready,
queue_size=20)
timeout_publisher = rospy.Publisher('/director/window/error',
String,
queue_size=10)
readiness_node = ReadinessNode(
readiness_publisher=readiness_publisher,
timeout_publisher=timeout_publisher
)
readiness_handbrake = ReadinessHandbrake(
callback=readiness_node.try_to_become_ready,
timeout=readiness_timeout,
)
handle_initial_state(readiness_node.save_uscs_message)
handle_initial_state(readiness_handbrake.handle_uscs_message)
rospy.Subscriber('/director/scene',
GenericMessage,
readiness_node.save_uscs_message)
rospy.Subscriber('/director/scene',
GenericMessage,
readiness_handbrake.handle_uscs_message)
rospy.Subscriber(window_instances_topic_name,
String,
readiness_node.handle_readiness)
rospy.Subscriber(common_topic_name,
AdhocBrowsers,
readiness_node.aggregate_browser_instances)
rospy.Service('/readiness_node/ready', NodeReady, readiness_node.node_ready)
rospy.spin()
if __name__ == "__main__":
run_with_influx_exception_handler(main, NODE_NAME)
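# Illustrative sketch (editor's addition, not part of the original file):
# manually marking a window as ready while the node is running. It assumes a
# rospy node has already been initialised in the calling process, and that
# the /director/window/ready topic carries the window instance id as a plain
# string (an assumption made for demonstration only).
def _example_publish_window_ready(window_instance_id):
    import rospy
    from std_msgs.msg import String
    pub = rospy.Publisher('/director/window/ready', String, queue_size=1)
    rospy.sleep(1.0)  # give the publisher time to register with the master
    pub.publish(String(data=window_instance_id))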
| 31.5 | 80 | 0.679819 |
| 530399364dd0ca6ca50d55aff541a653b7db9186 | 434 | py | Python | Library/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.py | Jeverett3000/codeapp | fa1c6f91e7b22e22628773282a308bcd8db0920f | ["MIT"] | 603 | 2020-12-23T13:49:32.000Z | 2022-03-31T23:38:03.000Z | Library/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.py | Jeverett3000/codeapp | fa1c6f91e7b22e22628773282a308bcd8db0920f | ["MIT"] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | Library/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.py | Jeverett3000/codeapp | fa1c6f91e7b22e22628773282a308bcd8db0920f | ["MIT"] | 35 | 2021-03-26T03:12:04.000Z | 2022-03-23T10:15:10.000Z |
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, importlib.util
__file__ = pkg_resources.resource_filename(__name__, '_operand_flag_tests.cpython-39-darwin.so')
__loader__ = None; del __bootstrap__, __loader__
spec = importlib.util.spec_from_file_location(__name__,__file__)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
__bootstrap__()
| 43.4 | 100 | 0.781106 |
| 574589942adc23a25b55e7fec58e90053b9cab4d | 1,516 | py | Python | randomize_files.py | aidan-fitz/SolarTracer | 31cc77ca974640be277d00c6ca23d82292f178c1 | ["Apache-2.0"] | 1 | 2021-11-08T14:37:10.000Z | 2021-11-08T14:37:10.000Z | randomize_files.py | aidan-fitz/SolarTracer | 31cc77ca974640be277d00c6ca23d82292f178c1 | ["Apache-2.0"] | null | null | null | randomize_files.py | aidan-fitz/SolarTracer | 31cc77ca974640be277d00c6ca23d82292f178c1 | ["Apache-2.0"] | null | null | null |
import argparse
from itertools import product
import os
import shutil
import numpy as np
'''Randomly assign files to training, validation, or test subsets.'''
rng = np.random.default_rng()
def parse_args():
parser = argparse.ArgumentParser(description='Randomly assign files to training, validation, or test subsets')
parser.add_argument('directory', metavar='path/to/directory', default='outputs/images')
return parser.parse_args()
def create_subsets(root_dir):
for subset, cls in product({'train', 'val', 'eval'}, {'0', '1'}):
dir = os.path.join(root_dir, subset, cls)
print(f'Create directory {dir}')
os.makedirs(dir, exist_ok=True, mode=0o775)
def copy_to_random_subset(file, root_dir, cls):
subset = rng.choice(['train', 'val', 'eval'], p=[0.7, 0.2, 0.1])
dest = os.path.join(root_dir, subset, cls)
print(f'Copy file {file} to {dest} (subset {subset})')
shutil.copy(file, dest)
def randomize_class(root_dir, cls):
path = os.path.join(root_dir, cls)
with os.scandir(path) as it:
for entry in it:
if not entry.name.startswith('.') and entry.is_file():
copy_to_random_subset(entry.path, root_dir, cls)
def main():
args = parse_args()
root_dir = args.directory
# Create train/val/test directory structure
create_subsets(root_dir)
# Iterate through positive and negative classes
randomize_class(root_dir, '0')
randomize_class(root_dir, '1')
if __name__ == '__main__':
main()
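# Illustrative sketch (editor's addition, not part of the original file):
# the script is invoked as "python randomize_files.py outputs/images", where
# outputs/images/0 and outputs/images/1 hold the two classes. The helper
# below is an editor-added example, not used by the script; it shows the
# split proportions produced by the same rng.choice() call.
def _example_expected_split_sizes(n_files=1000):
    sample = np.random.default_rng(0).choice(
        ['train', 'val', 'eval'], size=n_files, p=[0.7, 0.2, 0.1]
    )
    # Roughly 70% / 20% / 10% of files land in train / val / eval.
    return {name: int((sample == name).sum()) for name in ('train', 'val', 'eval')}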
| 32.255319 | 114 | 0.677441 |
| 649bcccef7a0301a43da998abc8cf10d7c51d606 | 2,092 | py | Python | azure/upload.py | tcsong456/news_category_classificaiton | 8431d9d9f5aff207e918661b6e69342a54129be0 | ["MIT"] | null | null | null | azure/upload.py | tcsong456/news_category_classificaiton | 8431d9d9f5aff207e918661b6e69342a54129be0 | ["MIT"] | null | null | null | azure/upload.py | tcsong456/news_category_classificaiton | 8431d9d9f5aff207e918661b6e69342a54129be0 | ["MIT"] | null | null | null |
from azureml.core import Workspace
from env_variables import ENV
from azureml.core.authentication import ServicePrincipalAuthentication
import json
import pandas as pd
from azure_utils import use_or_create_datastore
import argparse
def parserargs():
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('--corpus_train',type=str,
help='the path to train corpus')
arg('--corpus_eval',type=str,
help='the path to eval corpus')
arg('--vocab',type=str,
help='the path to vocab.pkl')
args = parser.parse_args()
return args
def dataframe(datapath:str,name:str):
corpus = []
with open(datapath,'r') as f:
for line in f:
_line = line.split('\t')
label,text = _line[0],' '.join(_line[1:]).strip()
corpus.append([label,text])
corpus = pd.DataFrame(corpus,columns=['label','text'])
corpus.to_csv(name,index=False)
def main():
args = parserargs()
env = ENV
with open('config.json','r') as f:
config = json.load(f)
auth = ServicePrincipalAuthentication(tenant_id=config['tenant_id'],
service_principal_id=config['service_principal_id'],
service_principal_password=config['service_principal_password'])
ws = Workspace.get(name=env.workspace,
resource_group=env.resource_group,
subscription_id=env.subscription_id,
auth=auth)
datastore = use_or_create_datastore(ws=ws,
datastore_name=env.datastore_name,
use_default=False)
dataframe(args.corpus_train,'corpus_train.csv')
dataframe(args.corpus_eval,'corpus_eval.csv')
for input in ['corpus_train.csv','corpus_eval.csv',args.vocab]:
datastore.upload_files([input],
target_path='corpus',
overwrite=True)
if __name__ == '__main__':
main()
#%%
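# Illustrative sketch (editor's addition, not part of the original file):
# the corpus files consumed by dataframe() are expected to contain one
# "<label>\t<text>" record per line. The sample lines and file names below
# are assumptions for demonstration only.
def _example_dataframe_conversion():
    with open('corpus_sample.txt', 'w') as f:
        f.write('sports\tThe home team won the final match\n')
        f.write('tech\tA new phone was announced today\n')
    # Produces corpus_sample.csv with the columns: label,text
    dataframe('corpus_sample.txt', 'corpus_sample.csv')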
| 35.457627 | 106 | 0.590344 |
| 7b13d379f4e3d550845c60ab3fc0ea995ff638f7 | 14,005 | py | Python | src/pathpicker/parse.py | houbie/PathPicker | 48276541453803a47e85a30037df05f313ef37c7 | ["MIT"] | 5,167 | 2015-05-07T17:04:03.000Z | 2022-03-24T18:09:58.000Z | src/pathpicker/parse.py | code-reaper08/PathPicker | 86aff624299e96456c647b5f68dd813aa0caf2f8 | ["MIT"] | 319 | 2015-05-07T17:38:54.000Z | 2022-03-21T10:50:15.000Z | src/pathpicker/parse.py | code-reaper08/PathPicker | 86aff624299e96456c647b5f68dd813aa0caf2f8 | ["MIT"] | 374 | 2015-05-07T17:34:53.000Z | 2022-03-21T08:42:51.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import subprocess
from functools import partial
from typing import Callable, List, Match, NamedTuple, NewType, Optional, Pattern, Tuple
from pathpicker import logger
from pathpicker.repos import REPOS
MatchResult = NewType("MatchResult", Tuple[str, int, Match])
MASTER_REGEX = re.compile(
r"(/?([a-z.A-Z0-9\-_]+/)+[@a-zA-Z0-9\-_+.]+\.[a-zA-Z0-9]{1,10})[:-]?(\d+)?"
)
MASTER_REGEX_MORE_EXTENSIONS = re.compile(
r"(/?([a-z.A-Z0-9\-_]+/)+[@a-zA-Z0-9\-_+.]+\.[a-zA-Z0-9-~]{1,30})[:-]?(\d+)?"
)
HOMEDIR_REGEX = re.compile(
r"(~/([a-z.A-Z0-9\-_]+/)+[@a-zA-Z0-9\-_+.]+\.[a-zA-Z0-9]{1,10})[:-]?(\d+)?"
)
OTHER_BGS_RESULT_REGEX = re.compile(
r"(/?([a-z.A-Z0-9\-_]+/)+[a-zA-Z0-9_.]{3,})[:-]?(\d+)"
)
ENTIRE_TRIMMED_LINE_IF_NOT_WHITESPACE = re.compile(r"(\S.*\S|\S)")
JUST_FILE_WITH_NUMBER = re.compile(
r"([@%+a-z.A-Z0-9\-_]+\.[a-zA-Z]{1,10})[:-](\d+)(\s|$|:)+"
)
JUST_FILE = re.compile(r"([@%+a-z.A-Z0-9\-_]+\.[a-zA-Z]{1,10})(\s|$|:)+")
JUST_EMACS_TEMP_FILE = re.compile(r"([@%+a-z.A-Z0-9\-_]+\.[a-zA-Z]{1,10}~)(\s|$|:)+")
JUST_VIM_TEMP_FILE = re.compile(r"(#[@%+a-z.A-Z0-9\-_]+\.[a-zA-Z]{1,10}#)(\s|$|:)+")
# start with a normal char for ls -l
JUST_FILE_WITH_SPACES = re.compile(
r"([a-zA-Z][@+a-z. A-Z0-9\-_]+\.[a-zA-Z]{1,10})(\s|$|:)+"
)
FILE_NO_PERIODS = re.compile(
(
r"("
        # Recognize files starting with a dot followed by at least 3 characters
r"((/?([a-z.A-Z0-9\-_]+/))?\.[a-zA-Z0-9\-_]{3,}[a-zA-Z0-9\-_/]*)"
# or
r"|"
# Recognize files containing at least one slash
r"([a-z.A-Z0-9\-_/]+/[a-zA-Z0-9\-_]+)"
# or
r"|"
# Recognize files starting with capital letter and ending in "file".
# eg. Makefile
r"([A-Z][a-zA-Z]{2,}file)"
# end trying to capture
r")"
# Regardless of the above case, here's how the file name should terminate
r"(\s|$|:)+"
)
)
MASTER_REGEX_WITH_SPACES_AND_WEIRD_FILES = re.compile(
(
# begin the capture
r"("
# capture some pre-dir stuff like / and ./
r"(?:"
r"\.?/"
r")?" # thats optional
# now we look at directories. The 'character class ' allowed before the '/'
# is either a real character or a character and a space. This allows
# multiple spaces in a directory as long as each space is followed by
# a normal character, but it does not allow multiple continguous spaces
# which would otherwise gobble up too much whitespace.
#
# Thus, these directories will match:
# /something foo/
# / a b c d e/
# /normal/
#
# but these will not:
# /two spaces here/
# /ending in a space /
r"(([a-z.A-Z0-9\-_]|\s[a-zA-Z0-9\-_])+/)+"
        # Recognize files starting with a dot followed by at least 3 characters
r"((/?([a-z.A-Z0-9\-_]+/))?\.[a-zA-Z0-9\-_]{3,}[a-zA-Z0-9\-_/]*)"
# or
r"|"
# Recognize files containing at least one slash
r"([a-z.A-Z0-9\-_/]+/[a-zA-Z0-9\-_]+)"
# or
r"|"
# Recognize files starting with capital letter and ending in "file".
# eg. Makefile
r"([A-Z][a-zA-Z]{2,}file)"
r")"
)
)
MASTER_REGEX_WITH_SPACES = re.compile(
(
# begin the capture
r"("
# capture some pre-dir stuff like / and ./
r"(?:"
r"\.?/"
r")?" # thats optional
# now we look at directories. The 'character class ' allowed before the '/'
# is either a real character or a character and a space. This allows
# multiple spaces in a directory as long as each space is followed by
# a normal character, but it does not allow multiple contiguous spaces
# which would otherwise gobble up too much whitespace.
#
# Thus, these directories will match:
# /something foo/
# / a b c d e/
# /normal/
#
# but these will not:
# /two spaces here/
# /ending in a space /
r"(([a-z.A-Z0-9\-_]|\s[a-zA-Z0-9\-_])+/)+"
# we do similar for the filename part. the 'character class' is
# char or char with space following, with some added tokens like @
# for retina files.
r"([(),%@a-zA-Z0-9\-_+.]|\s[,()@%a-zA-Z0-9\-_+.])+"
# extensions don't allow spaces
r"\.[a-zA-Z0-9-]{1,30}"
# end capture
")"
# optionally capture the line number
r"[:-]?(\d+)?"
)
)
class RegexConfig(NamedTuple):
name: str
regex: Pattern
preferred_regex: Optional[Pattern] = None
num_index: int = 2
no_num: bool = False
only_with_file_inspection: bool = False
with_all_lines_matched: bool = False
REGEX_WATERFALL: List[RegexConfig] = [
# Homedirs need a separate regex.
RegexConfig("HOMEDIR_REGEX", HOMEDIR_REGEX),
# The master regex matches tbgs results with
# line numbers, so we prefer that and test it first.
RegexConfig(
"MASTER_REGEX",
MASTER_REGEX,
# one real quick check -- did we find a better match
# earlier in the regex?
preferred_regex=OTHER_BGS_RESULT_REGEX,
),
# If something clearly looks like an *bgs result but
# just has a weird filename (like all caps with no extension)
# then we can match that as well. Note that we require
# the line number match since otherwise this would be too lax
# of a regex.
RegexConfig("OTHER_BGS_RESULT_REGEX", OTHER_BGS_RESULT_REGEX),
RegexConfig(
"MASTER_REGEX_MORE_EXTENSIONS",
MASTER_REGEX_MORE_EXTENSIONS,
only_with_file_inspection=True,
),
# We would overmatch on wayyyyy too many things if we
# allowed spaces everywhere, but with filesystem validation
# and the final fallback we can include them.
RegexConfig(
"MASTER_REGEX_WITH_SPACES",
MASTER_REGEX_WITH_SPACES,
num_index=4,
only_with_file_inspection=True,
),
RegexConfig(
"MASTER_REGEX_WITH_SPACES_AND_WEIRD_FILES",
MASTER_REGEX_WITH_SPACES_AND_WEIRD_FILES,
num_index=4,
only_with_file_inspection=True,
),
# An Emacs and Vim backup/temporary/save file of the form: #example.txt#
RegexConfig(
"JUST_VIM_TEMP_FILE",
JUST_VIM_TEMP_FILE,
no_num=True,
),
# An Emacs backup/temporary/save file with a tilde at the end: example.txt~
RegexConfig(
"JUST_EMACS_TEMP_FILE",
JUST_EMACS_TEMP_FILE,
no_num=True,
),
# File (without directory) and a number. Ex:
# $ grep -n my_pattern A.txt B.txt
# A.txt:100 my_pattern
RegexConfig(
"JUST_FILE_WITH_NUMBER",
JUST_FILE_WITH_NUMBER,
num_index=1,
),
# OK, maybe it's just a normal file (with a dot),
# so let's test for that if the above fails
RegexConfig(
"JUST_FILE",
JUST_FILE,
no_num=True,
),
# OK, if that's not there, try to do filesystem validation
# for just files with spaces
RegexConfig(
"JUST_FILE_WITH_SPACES",
JUST_FILE_WITH_SPACES,
no_num=True,
only_with_file_inspection=True,
),
# OK, finally it might be a file with no periods. We test
# this last since it's more restrictive, because we don't
# want to match things like cx('foo/root'). Hence
# we require some minimum number of slashes and minimum
# file name length
RegexConfig(
"FILE_NO_PERIODS",
FILE_NO_PERIODS,
no_num=True,
),
RegexConfig(
"ENTIRE_TRIMMED_LINE_IF_NOT_WHITESPACE",
ENTIRE_TRIMMED_LINE_IF_NOT_WHITESPACE,
no_num=True,
with_all_lines_matched=True,
),
]
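# Editor's illustration (not part of the original module): the waterfall above
# is tried top to bottom, so a grep/tbgs-style line such as
#   "./src/main.py:42: some matching text"
# is claimed by MASTER_REGEX (path "./src/main.py", line number 42) before the
# looser fallbacks further down get a chance.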
# Attempts to resolve the root directory of the
# repository in which path resides (i.e. the current directory).
# both git and hg have commands for this, so let's just use those.
def get_repo_path() -> str:
proc = subprocess.Popen(
["git rev-parse --show-toplevel"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True,
)
stdout, stderr = proc.communicate()
# If there was no error return the output
if not stderr:
logger.add_event("using_git")
return stdout
proc = subprocess.Popen(
["hg root"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True,
)
stdout, stderr = proc.communicate()
# If there was no error return the output
if not stderr:
logger.add_event("using_hg")
return stdout
# Not a git or hg repo, go with current dir as a default
logger.add_event("used_outside_repo")
return "./"
PREPEND_PATH = f"{get_repo_path().strip()}/"
# returns a filename and (optional) line number
# if it matches
def match_line(
line: str, validate_file_exists: bool = False, all_input: bool = False
) -> Optional[MatchResult]:
if not validate_file_exists:
results = match_line_impl(line, with_all_lines_matched=all_input)
return results[0] if results else None
results = match_line_impl(
line, with_file_inspection=True, with_all_lines_matched=all_input
)
if not results:
return None
# ok now we are going to check if this result is an actual
# file...
for result in results:
(file_path, _, _) = result
if (
os.path.isfile(prepend_dir(file_path, with_file_inspection=True))
or file_path[0:4] == ".../"
):
return result
return None
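# Editor's illustration (not part of the original module): with the default
# arguments the matcher returns a (path, line number, match object) tuple, e.g.
#   match_line("src/parse.py:42 some text")
#   -> ("src/parse.py", 42, <re.Match object ...>)
# and returns None when nothing in the line looks like a path.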
def match_line_impl(
line: str, with_file_inspection: bool = False, with_all_lines_matched: bool = False
) -> List[MatchResult]:
# ok new behavior -- we will actually collect **ALL** results
# of the regexes since filesystem validation might filter some
# of the earlier ones out (particularly those with hyphens)
results = []
for regex_config in REGEX_WATERFALL:
regex = regex_config.regex
if regex_config.with_all_lines_matched != with_all_lines_matched:
continue
if regex_config.only_with_file_inspection and not with_file_inspection:
continue
matches = regex.search(line)
if not matches:
continue
# mypy needs some help here to resolve types correctly
unpack_matches_num_index_var: Callable = partial(
unpack_matches, num_index=regex_config.num_index
)
unpack_matches_no_num_var: Callable = unpack_matches_no_num
unpack_func: Callable = (
unpack_matches_no_num_var
if regex_config.no_num
else unpack_matches_num_index_var
)
if not regex_config.preferred_regex:
results.append(unpack_func(matches))
continue
# check the preferred_regex
preferred_regex = regex_config.preferred_regex
other_matches = preferred_regex.search(line)
if not other_matches:
results.append(unpack_func(matches))
continue
if other_matches.start() < matches.start():
# we found a better result earlier, so return that
results.append(unpack_func(other_matches))
continue
results.append(unpack_func(matches))
# return every result we collected (an empty list if nothing matched)
return results
def prepend_dir(file: str, with_file_inspection: bool = False) -> str:
if not file or len(file) < 2:
return file
if file[0] == "/":
return file
if file[0:4] == ".../":
# these are the gross git abbreviated paths, so
# return early since we cant do anything here
return file
if file[0:2] == "~/":
# need to absolute it
return os.path.expanduser(file)
# if it starts with relative dirs (grep), then that's the easiest
# because abspath will resolve this
if file[0:2] == "./" or file[0:3] == "../":
return file
# some peeps do forcedir and expand the path beforehand,
# so lets check for that case here
first = file.split(os.sep)[0]
if first == "home" and not os.environ.get("FPP_DISABLE_PREPENDING_HOME_WITH_SLASH"):
# already absolute, easy
return "/" + file
if first in REPOS + (os.environ.get("FPP_REPOS") or "").split(","):
return os.path.expanduser("~/" + file)
if "/" not in file:
# assume current dir like ./
return "./" + file
# git show and diff has a/stuff and b/stuff, so handle that. git
# status never does this so we don't need to worry about relative dirs
if file[0:2] == "a/" or file[0:2] == "b/":
return PREPEND_PATH + file[2:]
split_up = file.split("/")
if split_up[0] == "www":
return PREPEND_PATH + "/".join(split_up[1:])
if not with_file_inspection:
# hope
return PREPEND_PATH + "/".join(split_up)
# Alright we need to handle the case where git status returns
# relative paths where every other git command returns paths relative
# to the top-level dir. so lets see if PREPEND_PATH is not a file whereas
# relative is...
top_level_path = PREPEND_PATH + "/".join(split_up)
relative_path = "./" + "/".join(split_up)
if not os.path.isfile(top_level_path) and os.path.isfile(relative_path):
return relative_path
return top_level_path
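# Editor's illustration (not part of the original module), assuming the repo
# root resolved to /repo:
#   prepend_dir("a/foo/bar.py") -> "/repo/foo/bar.py"   (git diff's a/ prefix)
#   prepend_dir("~/notes.txt")  -> the expanded home path, e.g. "/home/<user>/notes.txt"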
def unpack_matches_no_num(matches: Match) -> MatchResult:
return MatchResult((matches.groups()[0], 0, matches))
def unpack_matches(matches: Match, num_index: int) -> MatchResult:
groups = matches.groups()
file = groups[0]
num = 0 if groups[num_index] is None else int(groups[num_index])
return MatchResult((file, num, matches))
| 33.424821
| 88
| 0.61221
|
1e3a6efb04637444a4c6d782531c857f7de386f8
| 2,102
|
py
|
Python
|
sdk/python/pulumi_mailgun/__init__.py
|
pulumi/pulumi-mailgun
|
a01d240c5ad79232e626d1ace13fd3c5dc9d3c6c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-09T23:28:56.000Z
|
2020-05-09T23:28:56.000Z
|
sdk/python/pulumi_mailgun/__init__.py
|
pulumi/pulumi-mailgun
|
a01d240c5ad79232e626d1ace13fd3c5dc9d3c6c
|
[
"ECL-2.0",
"Apache-2.0"
] | 26
|
2020-01-26T12:17:45.000Z
|
2022-03-25T19:42:23.000Z
|
sdk/python/pulumi_mailgun/__init__.py
|
pulumi/pulumi-mailgun
|
a01d240c5ad79232e626d1ace13fd3c5dc9d3c6c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .domain import *
from .domain_credential import *
from .get_domain import *
from .provider import *
from .route import *
from ._inputs import *
from . import outputs
# Make subpackages available:
from . import (
config,
)
def _register_module():
import pulumi
from . import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "mailgun:index/domain:Domain":
return Domain(name, pulumi.ResourceOptions(urn=urn))
elif typ == "mailgun:index/domainCredential:DomainCredential":
return DomainCredential(name, pulumi.ResourceOptions(urn=urn))
elif typ == "mailgun:index/route:Route":
return Route(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("mailgun", "index/domain", _module_instance)
pulumi.runtime.register_resource_module("mailgun", "index/domainCredential", _module_instance)
pulumi.runtime.register_resource_module("mailgun", "index/route", _module_instance)
class Package(pulumi.runtime.ResourcePackage):
_version = _utilities.get_semver_version()
def version(self):
return Package._version
def construct_provider(self, name: str, typ: str, urn: str) -> pulumi.ProviderResource:
if typ != "pulumi:providers:mailgun":
raise Exception(f"unknown provider type {typ}")
return Provider(name, pulumi.ResourceOptions(urn=urn))
pulumi.runtime.register_resource_package("mailgun", Package())
_register_module()
| 33.903226
| 98
| 0.677926
|
4f36db592907f27a7d9732a18b2772d64858dff4
| 1,971
|
py
|
Python
|
measmisc/apps/apa.py
|
molkoback/measmisc
|
44a6e77a5146bd751ce0203f6805e423d3866c9d
|
[
"MIT"
] | null | null | null |
measmisc/apps/apa.py
|
molkoback/measmisc
|
44a6e77a5146bd751ce0203f6805e423d3866c9d
|
[
"MIT"
] | null | null | null |
measmisc/apps/apa.py
|
molkoback/measmisc
|
44a6e77a5146bd751ce0203f6805e423d3866c9d
|
[
"MIT"
] | null | null | null |
from measmisc.app import App
from measmisc.database import Database
from measmisc.device import Device
from measmisc.meas import DateTime, Measurement
from measmisc.hw.i2c import create_i2c
from measmisc.hw.fmamsdxx import FMAMSDXX
from adafruit_tca9548a import TCA9548A
import asyncio
import logging
import time
class APADatabase(Database):
def open(self):
super().open()
cmd = "CREATE DATABASE IF NOT EXISTS `{}`;".format(self.database)
self.exec(cmd)
cmd = ""\
"CREATE TABLE IF NOT EXISTS `{}`.`{}` ("\
"ID INT UNSIGNED NOT NULL AUTO_INCREMENT,"\
"DateTime DATETIME NOT NULL,"\
"Weight FLOAT NOT NULL,"\
"Temp FLOAT NOT NULL,"\
"PRIMARY KEY (ID),"\
"INDEX (DateTime)"\
");".format(self.database, self.table)
self.exec(cmd)
class APAException(Exception):
pass
class APA(Device):
def __init__(self, config):
super().__init__()
self.config = config
self._tca = None
async def init(self):
cfg = self.config["i2c"]
i2c = create_i2c(cfg["name"], **cfg["opts"])
self._tca = TCA9548A(i2c)
return True
async def read(self):
F, T, n = 0, 0, 0
for chan in range(3):
if not self._tca[chan].try_lock():
raise APAException("Channel locking failed")
try:
sensor = FMAMSDXX(self._tca[chan])
_, Fi, Ti = sensor.read()
T += Ti
F += Fi
n += 1
except:
pass
self._tca[chan].unlock()
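# Editor's note on the conversion below (interpretation, not from the original
# source): F / n is the mean force in newtons over the sensors that responded,
# * 3 scales it to the three load points, / 9.81 converts newtons to kilograms,
# and * 1000 yields grams.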
return Measurement({
"DateTime": DateTime(),
"Weight": F / n * 3 / 9.81 * 1000,
"Temp": T / n
})
class APAApp(App):
def __init__(self):
super().__init__(name="APA")
def create_database(self):
database = APADatabase(**self.config["sql"])
database.open()
return database
def create_device(self):
device = APA(self.config)
logging.info("I2C {}".format(self.config["i2c"]["name"]))
return device
def on_measure(self, meas):
logging.info("[{}] {:.1f}g, {:.1f}°C".format(meas.data["DateTime"], meas.data["Weight"], meas.data["Temp"]))
def main():
APAApp().start()
| 23.464286
| 110
| 0.658549
|
d1ff75f587709f027c2454f06e312a40b38a13b9
| 2,066
|
py
|
Python
|
antlir/signed_source.py
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 9
|
2019-12-02T20:17:35.000Z
|
2020-06-13T16:34:25.000Z
|
antlir/signed_source.py
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 19
|
2019-11-22T23:30:04.000Z
|
2020-07-16T18:05:48.000Z
|
antlir/signed_source.py
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 4
|
2019-12-04T19:03:28.000Z
|
2020-06-13T16:34:29.000Z
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import re
from .errors import UserError
_N_HEADER = 2048 # NB: The current linter does whole-file replacements
def signed_source_sigil() -> str:
return "<<SignedSource::*O*zOeWoEQle#+L!plEphiEmie@IsG>>" # Value lint uses
def sign_source(src: str) -> str:
"""
`signed_source_sigil()` must occur in the header of `src`.
We'll replace the sigil with the MD5 of the contents of the source.
Lint will error if the MD5 in the header does not match the contents.
This is not a security measure. It is only intended to discourage
people from manually fixing generated files, which is error-prone.
"""
sigil = signed_source_sigil()
try:
idx = src.index(sigil, 0, _N_HEADER)
except ValueError:
raise RuntimeError(
f"First {_N_HEADER} bytes of `src` lack `signed_source_sigil()`: "
+ src[:_N_HEADER]
)
md5hex = hashlib.md5(src.encode()).hexdigest()
return src[:idx] + f"SignedSource<<{md5hex}>>" + src[idx + len(sigil) :]
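# Editor's illustration (not part of the original module):
#   src = "# " + signed_source_sigil() + "\n# generated file\n..."
#   signed = sign_source(src)   # sigil replaced by SignedSource<<md5 of contents>>
#   assert_signed_source(signed, "the example header")   # passes as-is, and
#   raises UserError if the signed text is edited afterwards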
def assert_signed_source(signed_src: str, which: str) -> None:
"""
Raises `UserError` if `signed_src` does not have the right checksum.
`which` must be a human-readable description of how to find the file
being checked.
"""
m = re.search("SignedSource<<[a-f0-9]{32}>>", signed_src[:_N_HEADER])
if not m:
raise UserError(
f"Invalid signed source: {which}. The file's header lacks a "
"SignedSource token, please revert it to trunk, and re-generate "
"as described in the header."
)
if signed_src != sign_source(
signed_src[: m.start()] + signed_source_sigil() + signed_src[m.end() :]
):
raise UserError(
f"Invalid signed source: {which}. The file's header should "
"explain how to re-generate it correctly."
)
| 33.322581
| 80
| 0.649564
|
4939a86226e042c56477c745b5e8ab60167d622c
| 891
|
py
|
Python
|
1dbrownian/fts_test/run.py
|
muhammadhasyim/tps-torch
|
21201458d037649fa66794b993ccfba7d7414028
|
[
"BSD-3-Clause"
] | 3
|
2021-05-05T12:09:45.000Z
|
2021-05-17T18:39:54.000Z
|
1dbrownian/fts_test/run.py
|
muhammadhasyim/tps-torch
|
21201458d037649fa66794b993ccfba7d7414028
|
[
"BSD-3-Clause"
] | null | null | null |
1dbrownian/fts_test/run.py
|
muhammadhasyim/tps-torch
|
21201458d037649fa66794b993ccfba7d7414028
|
[
"BSD-3-Clause"
] | null | null | null |
#Start up torch dist package
import torch
import torch.distributed as dist
dist.init_process_group(backend='mpi')
#Load classes for simulations and controls
from brownian_fts import BrownianParticle, CustomFTSMethod
import numpy as np
#Starting and ending configuration.
start = torch.tensor([[-1.0]])
end = torch.tensor([[1.0]])
def initializer(s):
return (1-s)*start+s*end
alphas = torch.linspace(0.0,1,dist.get_world_size()+2)[1:-1]
bp_simulator = BrownianParticle(dt=2e-3,gamma=1.0, kT = 0.4, initial=initializer(alphas[dist.get_rank()]),prefix='test',save_config=True)
fts_method = CustomFTSMethod(sampler=bp_simulator,initial_config=start,final_config=end,num_nodes=dist.get_world_size()+2,deltatau=0.01,kappa=0.01)
import tqdm
for i in tqdm.tqdm(range(40000)):
#Run the simulation a single time-step
fts_method.run(1)
if i % 10 == 0:
fts_method.dump(True)
| 34.269231
| 147
| 0.751964
|
2a24cf665a72fe6149b152d32340cdf46ff85ff6
| 9,634
|
py
|
Python
|
synthesis/enunu_ground_truth_duration.py
|
stakira/ENUNU
|
3ee981785507ca8a1612d7501b89c47c87c400c3
|
[
"MIT"
] | null | null | null |
synthesis/enunu_ground_truth_duration.py
|
stakira/ENUNU
|
3ee981785507ca8a1612d7501b89c47c87c400c3
|
[
"MIT"
] | null | null | null |
synthesis/enunu_ground_truth_duration.py
|
stakira/ENUNU
|
3ee981785507ca8a1612d7501b89c47c87c400c3
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# coding: utf-8
# Copyright (c) 2020 oatsu
"""
Generate a WAV file from a full label and a timing-corrected mono label.
Which model, though…?
"""
import copy
import shutil
# from copy import deepcopy
from datetime import datetime
from os import chdir, makedirs, startfile
from os.path import basename, dirname, exists, join, splitext
from sys import argv
import utaupy
from omegaconf import DictConfig, OmegaConf
from utaupy.utils import hts2json, ustobj2songobj
try:
from hts2wav import hts2wav
except ModuleNotFoundError:
print('----------------------------------------------------------')
print('初回起動ですね。')
print('PC環境に合わせてPyTorchを自動インストールします。')
print('インストール完了までしばらくお待ちください。')
print('----------------------------------------------------------')
from install_torch import pip_install_torch
pip_install_torch(join('.', 'python-3.8.10-embed-amd64', 'python.exe'))
print('----------------------------------------------------------')
print('インストール成功しました。歌声合成を始めます。')
print('----------------------------------------------------------\n')
from hts2wav import hts2wav # pylint: disable=ungrouped-imports
def get_project_path(utauplugin: utaupy.utauplugin.UtauPlugin):
"""
Get the cache path and the project path.
"""
setting = utauplugin.setting
# path to the UST file
path_ust = setting.get('Project')
# voicebank folder
voice_dir = setting['VoiceDir']
# audio cache folder (the LAB and JSON files are placed here)
cache_dir = setting['CacheDir']
return path_ust, voice_dir, cache_dir
def utauplugin2hts(path_plugin_in, path_table, path_full_out, path_mono_out=None,
strict_sinsy_style=False):
"""
Optimized for UTAU plugin input rather than a plain UST file.
This is a modified version of ust2hts from ust2hts.py,
extended to handle [#PREV] and [#NEXT].
"""
# read the temporary plugin file
plugin = utaupy.utauplugin.load(path_plugin_in)
# read the conversion table
table = utaupy.table.load(path_table, encoding='utf-8')
# check that at least two notes are selected
if len(plugin.notes) < 2:
raise Exception('ENUNU requires at least 2 notes. / ENUNUを使うときは2ノート以上選択してください。')
# turn notes whose lyric is missing or blank into rests
for note in plugin.notes:
if note.lyric.strip(' ') == '':
note.lyric = 'R'
# check whether [#PREV] or [#NEXT] is present
prev_exists = plugin.previous_note is not None
next_exists = plugin.next_note is not None
if prev_exists:
plugin.notes.insert(0, plugin.previous_note)
if next_exists:
plugin.notes.append(plugin.next_note)
# Ust → HTSFullLabel
song = ustobj2songobj(plugin, table)
full_label = utaupy.hts.HTSFullLabel()
full_label.song = song
full_label.fill_contexts_from_songobj()
# adjust the contexts around rests while [#PREV] and [#NEXT] are still present
if prev_exists or next_exists:
full_label = utaupy.hts.adjust_pau_contexts(full_label, strict=strict_sinsy_style)
# remove the lines that belong to the [#PREV] note
if prev_exists:
target_note = full_label[0].note
while full_label[0].note is target_note:
del full_label[0]
# even with PREV removed, everything is still shifted by that note's length,
# so make the first phoneme start at time 0.
# get the offset
offset = full_label[0].start
# shift the start and end times of every phoneme
for oneline in full_label:
oneline.start -= offset
oneline.end -= offset
# remove the lines that belong to the [#NEXT] note
if next_exists:
target_note = full_label[-1].note
while full_label[-1].note is target_note:
del full_label[-1]
# write the file
s = '\n'.join(list(map(str, full_label)))
with open(path_full_out, mode='w', encoding='utf-8') as f:
f.write(s)
if path_mono_out is not None:
full_label.as_mono().write(path_mono_out)
def repair_too_short_phoneme(label, threshold=5) -> None:
"""
Fix phonemes in the LAB file whose duration is too short (e.g. under 5 ms)
by shortening the preceding phoneme.
Only a too-short first phoneme cannot be repaired.
"""
threshold_100ns = threshold * 10000
# nothing to do if there is no short phoneme
if all(phoneme.duration >= threshold_100ns for phoneme in label):
return None
# loop backwards so consecutive short phonemes do not cause problems
if label[0].duration < threshold_100ns:
raise ValueError(f'最初の音素が短いです。修正できません。: {label[0]}')
for i, phoneme in enumerate(reversed(label)):
# if the duration is shorter than the threshold
if phoneme.duration < threshold_100ns:
print('短い音素を修正します。:', phoneme)
# compute the gap to the threshold; shift by this amount
delta_t = threshold_100ns - phoneme.duration
# move the start of this phoneme earlier to lengthen it
phoneme.start -= delta_t
# shift the end time of the preceding phoneme as well
# label[-(i + 1) - 1]
label[-i - 2].end -= delta_t
return None
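# Editor's illustration (not from the original source): with the default 5 ms
# threshold, a 3 ms phoneme is stretched by 2 ms (20000 units of 100 ns); its
# start moves earlier and the end of the preceding phoneme moves earlier by the
# same amount.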
def generate_full_align_lab(path_mono_align_lab_in,
path_full_score_lab_in,
path_full_align_lab_out):
"""
Build a timing-corrected full label
from a timing-corrected mono label
and the full label from before timing correction.
"""
mono_align_lab = utaupy.label.load(path_mono_align_lab_in)
full_score_lab = utaupy.label.load(path_full_score_lab_in)
mono_align_lab.is_valid(threshold=0)
repair_too_short_phoneme(mono_align_lab)
assert len(mono_align_lab) == len(full_score_lab)
full_align_lab = utaupy.label.Label()
# build a Label object with the timing correction applied to the full label
for mono_align_phoneme, full_score_phoneme in zip(mono_align_lab, full_score_lab):
phoneme = utaupy.label.Phoneme()
phoneme.start = mono_align_phoneme.start
phoneme.end = mono_align_phoneme.end
phoneme.symbol = full_score_phoneme.symbol
full_align_lab.append(phoneme)
# write the file
full_align_lab.write(path_full_align_lab_out)
def generate_mono_score_lab(path_full_score_lab_in,
path_mono_align_lab_in,
path_mono_score_lab_out):
"""
Build the score-timed mono label (mono_score)
from the full label from before timing correction
and the timing-corrected mono label.
"""
full_score_lab = utaupy.label.load(path_full_score_lab_in)
mono_align_lab = utaupy.label.load(path_mono_align_lab_in)
# mono_align_lab.is_valid(threshold=0)
# repair_too_short_phoneme(mono_align_lab)
assert len(mono_align_lab) == len(full_score_lab)
mono_score_lab = copy.copy(full_score_lab)
# build a Label object that keeps the score timing but uses the mono contexts
mono_score_lab.contexts = mono_align_lab.contexts
# write the file
mono_score_lab.write(path_mono_score_lab_out)
def get_original_songname(path) -> str:
"""
Extract the songname part from dirname/songname__datetime.ext.
"""
basename_without_ext = splitext(basename(path))[0]
return basename_without_ext.split('__')[0]
def main_as_plugin(path_plugin: str) -> str:
"""
Create an audio file from a UtauPlugin object.
"""
print(f'{datetime.now()} : reading setting in ust')
# read the settings written in UTAU's temporary file
plugin = utaupy.utauplugin.load(path_plugin)
_, voice_dir, _ = get_project_path(plugin)
path_enuconfig = join(voice_dir, 'enuconfig.yaml')
if not exists(path_enuconfig):
raise Exception(
'音源フォルダに enuconfig.yaml が見つかりません。'
'UTAU音源選択でENUNU用モデルを指定してください。'
)
# NOTE: GTS add-on feature here
path_mono_align_lab = input('タイミング補正済みの LABファイル (timing) を指定してください。\n>>> ').strip('"')
path_full_score_lab = input('タイミング補正前の LABファイル (full_score) を指定してください。\n>>> ').strip('"')
print("")
# change the current directory to the voicebank folder
chdir(voice_dir)
# read the config file
print(f'{datetime.now()} : reading enuconfig')
config = DictConfig(OmegaConf.load(path_enuconfig))
# set the input and output paths
str_now = datetime.now().strftime('%Y%m%d%H%M%S')
songname = f'{get_original_songname(path_mono_align_lab)}__{str_now}'
out_dir = join(dirname(path_mono_align_lab), songname)
# create the output folder if it does not exist
makedirs(out_dir, exist_ok=True)
# set the paths of the output files
path_full_align_lab = join(out_dir, f'{songname}_full_align.lab')
path_mono_score_lab = join(out_dir, f'{songname}_mono_score.lab')
# path_ust_out = join(out_dir, f'{songname}.ust')
path_score_json = join(out_dir, f'{songname}_full_score.json')
path_align_json = join(out_dir, f'{songname}_full_align.json')
path_wav = join(out_dir, f'{songname}.wav')
# TODO: write this in English
# NOTE: GTD add-on feature here
# generate the timing-corrected full label
print(f'{datetime.now()} : タイミング補正済みの LABファイル (full_align) を生成します。')
generate_full_align_lab(path_mono_align_lab, path_full_score_lab, path_full_align_lab)
config.ground_truth_duration = True
print(f'{datetime.now()} : converting LAB (full_score) to JSON')
hts2json(path_full_score_lab, path_score_json)
print(f'{datetime.now()} : converting LAB (full_align) to JSON')
hts2json(path_full_align_lab, path_align_json)
print(f'{datetime.now()} : converting LAB (full_align) to WAV')
hts2wav(config, path_full_align_lab, path_wav)
print(f'{datetime.now()} : generating WAV ({path_wav})')
# bonus feature
print(f'{datetime.now()} : full_score をコピーします。')
shutil.copy2(path_full_score_lab, join(out_dir, f'{songname}_full_score.lab'))
print(f'{datetime.now()} : mono_score を生成します。')
generate_mono_score_lab(path_full_score_lab, path_mono_align_lab, path_mono_score_lab)
# on Windows, play the audio
startfile(path_wav)
return path_wav
def main(path: str):
"""
Branch the processing according to the input file.
"""
# logging.basicConfig(level=logging.INFO)
if path.endswith('.tmp'):
main_as_plugin(path)
else:
raise ValueError('Input file must be TMP(plugin).')
if __name__ == '__main__':
print('_____ξ ・ヮ・)ξ < ENUNU v0.2.5 ________')
print('_____ξ ^ω^)ξ < Ground Truth Duration (20211022) ________')
print(f'argv: {argv}')
if len(argv) == 2:
path_utauplugin = argv[1]
elif len(argv) == 1:
path_utauplugin = \
input('Input file path of TMP(plugin)\n>>> ').strip('"')
main(path_utauplugin)
| 33.685315
| 93
| 0.664002
|
e6357570a4a90c70d546a765b70a1437c53a06c5
| 1,732
|
py
|
Python
|
src/utils.py
|
AUVSL/ROS-ANFIS-RL
|
d0df3d9a53f4cc2a49cadeac62be6879aaf699fd
|
[
"BSD-3-Clause"
] | 1
|
2022-01-26T22:23:11.000Z
|
2022-01-26T22:23:11.000Z
|
src/utils.py
|
AUVSL/anfis_rl
|
d0df3d9a53f4cc2a49cadeac62be6879aaf699fd
|
[
"BSD-3-Clause"
] | null | null | null |
src/utils.py
|
AUVSL/anfis_rl
|
d0df3d9a53f4cc2a49cadeac62be6879aaf699fd
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from torch.utils.tensorboard.summary import hparams
def angdiff(th1, th2):
d = th1 - th2
d = np.mod(d + np.pi, 2 * np.pi) - np.pi
return -d
def wraptopi(x):
pi = np.pi
x = x - np.floor(x / (2 * pi)) * 2 * pi
if x >= pi:
return x - 2 * pi
return x
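# Editor's illustration (not part of the original module):
#   wraptopi(3 * np.pi / 2) -> -np.pi / 2   (angles are wrapped into [-pi, pi))
#   angdiff(0.0, np.pi / 2) -> np.pi / 2    (signed angular difference, same wrapping)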
def add_hparams(summary, hparam_dict, metric_dict, hparam_domain_discrete=None, step=0):
if type(hparam_dict) is not dict or type(metric_dict) is not dict:
raise TypeError('hparam_dict and metric_dict should be dictionary.')
exp, ssi, sei = hparams(hparam_dict, metric_dict, hparam_domain_discrete)
summary.file_writer.add_summary(exp, global_step=step)
summary.file_writer.add_summary(ssi, global_step=step)
summary.file_writer.add_summary(sei, global_step=step)
for k, v in metric_dict.items():
summary.add_scalar(k, v, global_step=step)
def markdown_rule_table(anfis):
vardefs = anfis.layer['fuzzify'].varmfs
vardefs_names = list(vardefs.keys())
rules = anfis.layer['rules'].mamdani_ruleset
var_index = rules['variable_rule_index']
mem_index = rules['membership_indices']
out_index = rules['outputs_membership']
out_name = anfis.layer['consequent'].mamdani_defs.names
rules = ['| Rule ID | Membership 1 | Membership 2| Output |',
'| --------| ------------ | ----------- | ------ | ']
for i in range(len(var_index)):
temp = []
for var, mem in zip(var_index[i], mem_index[i]):
name = vardefs_names[var]
temp.append(f"{name} is {list(vardefs[name].mfdefs.keys())[mem]}")
rules.append(f'| {i} | {" | ".join(temp)}| {out_name[out_index[i][0]]} |')
return '\n'.join(rules)
| 30.385965
| 88
| 0.62933
|
45862c5948c7305e821a30a40326b188dd9ade27
| 15,135
|
py
|
Python
|
experiment_scripts/exp4/baseDefsPsychoPy.py
|
mzettersten/word-choice
|
fee5cbed17ebdc4d988258e6384a59cc9ff6c8a7
|
[
"MIT"
] | null | null | null |
experiment_scripts/exp4/baseDefsPsychoPy.py
|
mzettersten/word-choice
|
fee5cbed17ebdc4d988258e6384a59cc9ff6c8a7
|
[
"MIT"
] | null | null | null |
experiment_scripts/exp4/baseDefsPsychoPy.py
|
mzettersten/word-choice
|
fee5cbed17ebdc4d988258e6384a59cc9ff6c8a7
|
[
"MIT"
] | null | null | null |
#from pyglet.media import avbin
import numpy
from psychopy import prefs
try:
import winsound
except ImportError:
print "Warning: winsound not found; will try using pyo/pyaudio"
try:
import pyo
print "Attempting to use pyo for sounds"
prefs.general['audioLib'] = ['pyo']
except:
print 'could not load pyo'
from psychopy import sound,core, visual
#if prefs.general['audioLib'][0] == 'pyo':
# print 'initializing pyo to 48000'
# sound.init(48000,buffer=128)
# print 'Using %s(with %s) for sounds' %(sound.audioLib, sound.audioDriver)
from psychopy import core,logging,event,visual,data,gui,misc
import glob,os,random,sys,gc,time,hashlib,subprocess
from math import *
try:
import pygame
from pygame.locals import *
except ImportError:
print "Warning: pygame not found; will be using pyglet for stim presentation"
#pygame.mixer.pre_init(44100,-16,1, 4096) # pre-initialize to reduce the delay
try:
from scipy import ndimage
except ImportError:
pass
def killDropbox():
try:
error = os.system('taskkill /f /im Dropbox.exe /t')
if not error:
print "Dropbox terminated"
else:
print "Dropbox not running, so can't terminate"
except:
pass
def startDropbox():
#try:
# os.spawnl(os.P_DETACH,'c:/Users/Gary/AppData/Roaming/Dropbox/bin/Dropbox.exe') #this works but only for execs
try:
subprocess.Popen('start /B %USERPROFILE%/desktop/dropboxShortcut.lnk',shell=True) #this works for links. nice!
#subprocess.Popen('start /B c:/Users/Gary/Documents/dropboxShortcut.lnk',shell=True) #this works for links. nice!
print "Dropbox restarted"
except:
print "Could not re-start dropbox"
def getHash(files):
if not isinstance(files,list):
files = [files]
hashes = [(fname, hashlib.md5(file(fname, 'r').read()).hexdigest()) for fname in files]
return hashes
def circularList(lst,seed):
if not isinstance(lst,list):
lst = range(lst)
i = 0
random.seed(seed)
random.shuffle(lst)
while True:
yield lst[i]
if (i+1) % len(lst) ==0:
random.shuffle(lst)
i = (i + 1)%len(lst)
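# Editor's note (illustrative, not from the original source): circularList(4, seed)
# yields an endless stream over 0..3 in a seeded shuffled order, reshuffling the
# list in place each time a full pass completes.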
def getRunTimeVars(varsToGet,order,expVersion):
"""Get run time variables, see http://www.psychopy.org/api/gui.html for explanation"""
infoDlg = gui.DlgFromDict(dictionary=varsToGet, title=expVersion, fixed=[expVersion],order=order)
order.append('dateStr')
order.append('expVersion')
varsToGet['dateStr']= data.getDateStr()
varsToGet['expVersion']= expVersion
if infoDlg.OK:
return varsToGet
else: print 'User Cancelled'
def enterSubjInfo(expName,optionList):
""" Brings up a GUI in which to enter all the subject info."""
def inputsOK(optionList,expInfo):
for curOption in sorted(optionList.items()):
if curOption[1]['options'] != 'any' and expInfo[curOption[1]['name']] not in curOption[1]['options']:
return [False,"The option you entered for " + curOption[1]['name'] + " is not in the allowable list of options: " + str(curOption[1]['options'])]
print "inputsOK passed"
return [True,'']
try:
expInfo = misc.fromFile(expName+'_lastParams.pickle')
except:
expInfo={} #make the kind of dictionary that this gui can understand
for curOption in sorted(optionList.items()):
expInfo[curOption[1]['name']]=curOption[1]['default']
#load the tips
tips={}
for curOption in sorted(optionList.items()):
tips[curOption[1]['name']]=curOption[1]['prompt']
expInfo['dateStr']= data.getDateStr()
expInfo['expName']= expName
dlg = gui.DlgFromDict(expInfo, title=expName, fixed=['dateStr','expName'],order=[optionName[1]['name'] for optionName in sorted(optionList.items())],tip=tips)
if dlg.OK:
misc.toFile(expName+'_lastParams.pickle', expInfo)
[success,error] = inputsOK(optionList,expInfo)
if success:
return [True,expInfo]
else:
return [False,error]
else:
core.quit()
def popupError(text):
errorDlg = gui.Dlg(title="Error", pos=(200,400))
errorDlg.addText('Error: '+text, color='Red')
errorDlg.show()
def setupSubjectVariables():
parser = OptionParser()
parser.add_option("-s", "--subject-id", dest="subjid", help="specify the subject id")
(options, args) = parser.parse_args()
self.subjID = options.subjid
if not self.subjID:
print "You must provide a Subject ID"
parser.print_help()
sys.exit()
def writeHeader(curTrial,headerText,fileName='header'):
try:
if curTrial['trialNum']==1:
if os.path.isfile(fileName+'.txt'): # file already exists
print 'header file exists'
return False
else:
headerFile = open(fileName+'.txt','w')
writeToFile(headerFile,headerText)
return True
except:
return False
#def writeToFile(fileHandle,trial,sync=True):
# """Writes a trial (array of lists) to a fileHandle"""
# line = '\t'.join([str(i) for i in trial]) #TABify
# line += '\n' #add a newline
# fileHandle.write(line)
# if sync:
# fileHandle.flush()
# os.fsync(fileHandle)
def syncFile(fileHandle):
"""syncs file to prevent buffer loss"""
fileHandle.flush()
os.fsync(fileHandle)
def getSubjVariables(allSubjVariables):
def checkInput(value,options,type):
"""Checks input. Uses 'any' as an option to check for any <str> or <int>"""
try:
#if the user typed something and it's an instance of the correct type and it's in the options list...
if value and isinstance(value,type) and ('any' in options or value.upper() in options ):
return True
return False
except:
print "Try again..."
subjVariables = {}
for curNum, varInfo in sorted(allSubjVariables.items()):
curValue=''
while not checkInput(curValue,varInfo['options'],varInfo['type']):
curValue = raw_input(varInfo['prompt'])
if not 'any' in varInfo['options']:
curValue = str(curValue.upper())
subjVariables[varInfo['name']] = curValue
return subjVariables
def loadFiles(directory,extension,fileType,win='',whichFiles='*',stimList=[]):
""" Load all the pics and sounds"""
path = os.getcwd() #set path to current directory
if isinstance(extension,list):
fileList = []
for curExtension in extension:
fileList.extend(glob.glob(os.path.join(path,directory,whichFiles+curExtension)))
else:
fileList = glob.glob(os.path.join(path,directory,whichFiles+extension))
fileMatrix = {} #initialize fileMatrix as a dict because it'll be accessed by picture names, sound names, whatever
for num,curFile in enumerate(fileList):
fullPath = curFile
fullFileName = os.path.basename(fullPath)
stimFile = os.path.splitext(fullFileName)[0]
if fileType=="image":
try:
surface = pygame.image.load(fullPath) #gets height/width of the image
stim = visual.ImageStim(win, image=fullPath,mask=None,interpolate=True)
fileMatrix[stimFile] = ((stim,fullFileName,num,surface.get_width(),surface.get_height(),stimFile))
except: #no pygame, so don't store the image dims
stim = visual.ImageStim(win, image=fullPath,mask=None,interpolate=True)
fileMatrix[stimFile] = ((stim,fullFileName,num,'','',stimFile))
elif fileType=="sound":
soundRef = sound.Sound(fullPath)
fileMatrix[stimFile] = ((soundRef))
elif fileType=="winSound":
soundRef = open(fullPath,"rb").read()
fileMatrix[stimFile] = ((soundRef))
fileMatrix[stimFile+'-path'] = fullPath #this allows asynchronous playing in winSound.
#check
if stimList and set(fileMatrix.keys()).intersection(stimList) != set(stimList):
popupError(str(set(stimList).difference(fileMatrix.keys())) + " does not exist in " + path+'\\'+directory)
return fileMatrix
def loadFilesOld(directory,extension,fileType,win='',whichFiles='*',stimList=[]):
""" Load all the pics and sounds"""
path = os.getcwd() #set path to current directory
if type(extension).__name__=='list':
fileList = []
for curExtension in extension:
fileList.extend(glob.glob(os.path.join(path,directory,whichFiles+curExtension)))
else:
fileList = glob.glob(os.path.join(path,directory,whichFiles+extension))
fileMatrix = {} #initialize fileMatrix as a dict because it'll be accessed by picture names, sound names, whatever
for i in range(len(fileList)):
fullPath = fileList[i]
fullFileName = os.path.basename(fullPath)
stimFile = fullFileName[:len(fullFileName)-4] #chops off the extension
if fileType=="image":
try:
surface = pygame.image.load(fullPath) #this is just to get height/width of the image
#stim = visual.PatchStim(win, tex=fullPath)
stim = visual.SimpleImageStim(win, image=fullPath)
fileMatrix[stimFile] = ((stim,fullFileName,i,surface.get_width(),surface.get_height(),stimFile))
except: #no pygame, so don't store the image dims
stim = visual.SimpleImageStim(win, image=fullPath)
fileMatrix[stimFile] = ((stim,fullFileName,i,'','',stimFile))
elif fileType=="sound":
soundRef = sound.Sound(fullPath)
fileMatrix[stimFile] = ((soundRef))
elif fileType=="winSound":
soundRef = highPitch = open(fullPath,"rb").read()
fileMatrix[stimFile] = ((soundRef))
#check
if stimList and set(fileMatrix.keys()).intersection(stimList) != set(stimList):
#print stimList, fileMatrix.keys(),set(stimList).difference(fileMatrix.keys())
popupError(str(set(stimList).difference(fileMatrix.keys())) + " does not exist in " + path+'\\'+directory)
return fileMatrix
def sortDictValues(someDict,returnWhat='values'):
keys = someDict.keys()
keys.sort()
if returnWhat=='values':
return map(someDict.get, keys)
else:
return keys
def createResp(allSubjVariables,subjVariables,fieldVars,**respVars):
trial = [] #initalize array
for curSubjVar, varInfo in sorted(allSubjVariables.items()):
trial.append(subjVariables[varInfo['name']])
trial.append(subjVariables['expName'])
trial.append(subjVariables['dateStr'])
for curFieldVar in fieldVars:
trial.append(curFieldVar)
for curRespVar in sortDictValues(respVars):
trial.append(str(curRespVar))
return trial
def createRespNew(allSubjVariables,subjVariables,fieldVarNames,fieldVars,**respVars):
"""Creates a key and value list of all the variables passed in from various sources (runtime, trial params, dep. vars."""
def stripUnderscores(keyList):
return [curKey.split('_')[1] for curKey in keyList]
trial = [] #initalize array
header=[]
for curSubjVar, varInfo in sorted(allSubjVariables.items()):
header.append(allSubjVariables[curSubjVar]['name'])
trial.append(subjVariables[varInfo['name']])
for curFieldVar in fieldVars:
trial.append(curFieldVar)
for curRespVar in sortDictValues(respVars):
trial.append(str(curRespVar))
header.extend(fieldVarNames)
header.extend(stripUnderscores(sortDictValues(respVars,'keys')))
return [header,trial]
def importTrials(fileName,method="sequential",seed=random.randint(1,100)):
(stimList,fieldNames) = data.importConditions(fileName,returnFieldNames=True)
trials = data.TrialHandler(stimList,1,method=method,seed=seed) #seed is ignored for sequential; used for 'random'
return (trials,fieldNames)
def initGamepad():
pygame.joystick.init() # init main joystick device system
try:
stick = pygame.joystick.Joystick(0)
stick.init() # init instance
return stick
except pygame.error:
raise SystemExit("---->No joystick/gamepad found. Make sure one is plugged in<--")
def getKeyboardResponse(validResponses,duration=0):
event.clearEvents()
responded = False
done = False
rt = '*'
responseTimer = core.Clock()
while True:
if not responded:
responded = event.getKeys(keyList=validResponses, timeStamped=responseTimer)
if duration>0:
if responseTimer.getTime() > duration:
break
else: #end on response
if responded:
break
if not responded:
return ['*','*']
else:
return responded[0] #only get the first resp
def getKeyboardResponseEndResp(validResponses,duration=0,endOnResponse=True):
event.clearEvents()
responded = False
done = False
rt = '*'
responseTimer = core.Clock()
while True:
if not responded:
responded = event.getKeys(keyList=validResponses, timeStamped=responseTimer)
if duration>0:
if responseTimer.getTime() > duration or responded:
break
else: #end on response
if responded:
break
if not responded:
return ['*','*']
else:
return responded[0] #only get the first resp
def getMouseResponse(mouse,duration=0):
event.clearEvents()
responseTimer = core.Clock()
numButtons=len(event.mouseButtons)
response = [0]*numButtons
timeElapsed = False
mouse.clickReset()
responseTimer.reset()
rt = '*'
while not any(response) and not timeElapsed:
(response,rt) = mouse.getPressed(getTime=True)
if duration>0 and responseTimer.getTime() > duration:
timeElapsed=True
if not any(response): #if there was no response (would only happen if duration is set)
return ('*','*')
else:
nonZeroResponses = filter(lambda x: x>0,rt)
firstResponseButtonIndex = rt.index(min(nonZeroResponses)) #only care about the first (earliest) click
return (firstResponseButtonIndex,rt[firstResponseButtonIndex])
#TO DO: move stick to the last parameter so it's treated as optional - that way we can have a generic response function that either takes or doesn't take a joystick parameter as provided.
def getGamepadResponse(stick,validResponses,duration=0):
"""joystick needs to be initialized (with initGamepad or manually). Only returns the first response. """
def getJoystickResponses(): #returns buttons. If none are pressed, checks the hat.
for n in range(stick.get_numbuttons()):
if stick.get_button(n) : # if this is down
return n
for n in range(stick.get_numhats()):
return stick.get_hat(n)
responded = False
timeElapsed = False
responseTimer = core.Clock()
response="*"
rt = '*'
pygame.event.clear() #clear event cue
responseTimer.reset()
while not responded and not timeElapsed:
for event in pygame.event.get(): # iterate over event stack
if event.type==pygame.JOYBUTTONDOWN or event.type==pygame.JOYHATMOTION:
response = getJoystickResponses()
print 'responded',response
if response in validResponses:
rt = responseTimer.getTime()
responded = True
break
if duration>0 and responseTimer.getTime() > duration:
timeElapsed=True
if not responded:
return ['*','*']
else:
return (response,rt)
def euclidDistance(pointA,pointB):
return sqrt((pointA[0]-pointB[0])**2 + (pointA[1]-pointB[1])**2)
def pressedSomething(validKeys):
for event in pygame.event.get():
if event.type in (QUIT,KEYDOWN,MOUSEBUTTONDOWN):
if event.key in validKeys:
return True
def makeBorder(width=128, height=128, borderColor=-1, xborder=10,yborder=10):
# creates a bitmap with a border
borderColor=-1
array = numpy.zeros([height, width])
array[xborder:-xborder,yborder:-yborder] = 1
if xborder>0:
array[:xborder,:] = array[-xborder:,:] = borderColor
if yborder>0:
array[:,:yborder] = array[:,-yborder:] = borderColor
return array
| 34.164786
| 188
| 0.702346
|
67e557432809b8823f3f3c2366f1887d8a9b4e70
| 1,033
|
py
|
Python
|
example/cities/migrations/0002_auto_20160605_1602.py
|
HOYINWUN/django-map-widgets
|
2dd9bbd6833a781ea1683de9fa63d964c5936371
|
[
"MIT"
] | null | null | null |
example/cities/migrations/0002_auto_20160605_1602.py
|
HOYINWUN/django-map-widgets
|
2dd9bbd6833a781ea1683de9fa63d964c5936371
|
[
"MIT"
] | null | null | null |
example/cities/migrations/0002_auto_20160605_1602.py
|
HOYINWUN/django-map-widgets
|
2dd9bbd6833a781ea1683de9fa63d964c5936371
|
[
"MIT"
] | 1
|
2019-08-05T14:15:55.000Z
|
2019-08-05T14:15:55.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-05 16:02
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cities', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='city',
name='coordinates',
field=django.contrib.gis.db.models.fields.PointField(help_text='To generate the map for your location', srid=4326),
),
migrations.AlterField(
model_name='district',
name='city',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='cities.City'),
),
migrations.AlterField(
model_name='district',
name='location',
field=django.contrib.gis.db.models.fields.PointField(help_text='To generate the map for your location', srid=4326),
),
]
| 31.30303
| 127
| 0.635044
|
4fc9259c185b96bdffc93e801cdd268c303d5cd7
| 542
|
py
|
Python
|
django_example/metrics/middleware.py
|
ChielWH/prometheus_redis_client
|
90852fb0eaf3aee1937a74cad6181c304dc6999a
|
[
"Apache-2.0"
] | 19
|
2019-04-02T11:46:45.000Z
|
2022-03-21T03:38:56.000Z
|
django_example/metrics/middleware.py
|
ChielWH/prometheus_redis_client
|
90852fb0eaf3aee1937a74cad6181c304dc6999a
|
[
"Apache-2.0"
] | 12
|
2019-08-17T05:55:23.000Z
|
2022-02-10T07:37:38.000Z
|
django_example/metrics/middleware.py
|
ChielWH/prometheus_redis_client
|
90852fb0eaf3aee1937a74cad6181c304dc6999a
|
[
"Apache-2.0"
] | 6
|
2019-11-20T15:12:38.000Z
|
2022-03-21T03:36:44.000Z
|
import time
from metrics import general
from django.urls import resolve
class MetricsMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
viewname = resolve(request.path).view_name
general.count_of_requests.labels(viewname=viewname).inc()
start_time = time.time()
try:
return self.get_response(request)
finally:
general.request_latency.labels(viewname=viewname).observe(time.time() - start_time)
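# Editor's note (illustrative wiring, not part of the original file): the
# middleware is enabled by adding its dotted path to Django's MIDDLEWARE
# setting, e.g.
#   MIDDLEWARE = [
#       ...,
#       "metrics.middleware.MetricsMiddleware",
#   ]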
| 30.111111
| 95
| 0.693727
|
defc21381806d478c5e9876793c58f161febaf3f
| 7,767
|
py
|
Python
|
RealFunctions/MorphingFunctions.py
|
Prdcc/manim-Student-Shapers
|
a04072e67995838acb77c1c921db97ee050aa6e0
|
[
"MIT"
] | null | null | null |
RealFunctions/MorphingFunctions.py
|
Prdcc/manim-Student-Shapers
|
a04072e67995838acb77c1c921db97ee050aa6e0
|
[
"MIT"
] | null | null | null |
RealFunctions/MorphingFunctions.py
|
Prdcc/manim-Student-Shapers
|
a04072e67995838acb77c1c921db97ee050aa6e0
|
[
"MIT"
] | null | null | null |
from manimlib.imports import *
import numpy as np
from numpy import sqrt
def sin_graph(x):
return np.sin(x)
def parabola_graph(x):
return x*x
# circle shape should also be here
def exponential_cos_graph(x):
return np.cos(np.exp(-0.1 * x**2 + 2))
def cubed(x):
return x**3
def quartic(x):
return x**4
def one_over_x(x):
return 1/x
def positive_sigmoid(x):
return 1/(1+np.exp(x)) - 0.5
def negative_sigmoid(x):
return 1/(1+np.exp(-x)) - 0.5
def just_a_line(x):
return 0.5*x
def hidden_flat_line(x):
return 0
def three(x):
return 3
def two(x):
return 2
def positive_circle(x):
    return sqrt(1-x**2)
def negative_circle(x):
    return -sqrt(1-x**2)
class PlotSinInv(GraphScene):
CONFIG = {
"x_min" : -1,
"x_max" : 1,
"y_min" : -1.5,
"y_max" : 1.5,
"graph_origin" : ORIGIN ,
"function_color" : BLUE ,
"axes_color" : GREEN,
"center_point" : 0,
"funcs" : [sin_graph,parabola_graph, exponential_cos_graph,just_a_line]#,cubed,quartic]
}
def construct(self):
self.setup_axes(animate=True)
func_graph=self.get_graph(self.sinInv,x_max=(-0.01))
func_graphR=self.get_graph(self.sinInv,x_min=(0.01),color=BLUE)
self.play(ShowCreation(func_graph),ShowCreation(func_graphR),run_time=1.5)
self.wait(4)
def sinInv(self,x):
return np.sin(1/x)
class PlotFunctions(GraphScene):
CONFIG = {
"x_min" : -10,
"x_max" : 10.3,
"y_min" : -1.5,
"y_max" : 1.5,
"graph_origin" : ORIGIN ,
"function_color" : BLUE ,
"axes_color" : GREEN,
"x_labeled_nums" :range(-10,12,2),
"center_point" : 0,
"funcs" : [sin_graph,parabola_graph, exponential_cos_graph,just_a_line]#,cubed,quartic]
}
def construct(self):
self.setup_axes(animate=True)
circle = Circle(color =BLUE)
func_graphs = [self.get_graph(f,self.function_color) for f in self.funcs]
func_graphs.insert(4,circle)
#all_my_shapes = func_graphs
#func_graphs = func_graphs.append(circle)
graph_object = self.get_graph(sin_graph,self.function_color) #change this if you start with something else
hidden_line = self.get_graph(hidden_flat_line,self.function_color)
positive_sigmoid_graph = self.get_graph(positive_sigmoid,self.function_color)
negative_sigmoid_graph = self.get_graph(negative_sigmoid,self.function_color)
positive_one_over_x = self.get_graph(lambda x : 1/x, color = BLUE, x_min = 0.1, x_max = 10)
negative_one_over_x = self.get_graph(lambda x : 1/x, color = BLUE, x_min = -10, x_max = -0.1)
dt=0.2
self.play(ShowCreation(graph_object))
self.wait(dt)
for i in range(1,len(func_graphs)):#this is a bit messed up, try to make graph_object a flat line first
self.play(Transform(graph_object, func_graphs[i]),run_time = 1.5)
self.wait(dt)
self.play(ShowCreation(hidden_line),Transform(graph_object, positive_sigmoid_graph),Transform(hidden_line, negative_sigmoid_graph),run_time =1.5)
self.wait(dt)
self.play(Transform(graph_object,positive_one_over_x),Transform(hidden_line,negative_one_over_x))
self.play(FadeOut(hidden_line, run_time = 1)) #double sigmoid graph
self.play(Transform(graph_object, func_graphs[0], run_time = 1.5)) #make the animation loop
self.wait(6)
#tk you wanted to include the indicator function for the rationals
class DoubleSigmoidNotFunction(GraphScene):
CONFIG = {
"x_min" : -10,
"x_max" : 10.3,
"y_min" : -5,
"y_max" : 5,
"graph_origin" : ORIGIN ,
"function_color" : BLUE ,
"axes_color" : GREEN,
"center_point" : 0,
}
def construct(self):
self.setup_axes(animate=False)
positivePar = lambda x: sqrt(x+10)
negativePar = lambda x: -sqrt(x+10)
negative_sigmoid_graph = self.get_graph(negativePar,self.function_color)
positive_sigmoid_graph = self.get_graph(positivePar,self.function_color)
vert_line_pos = self.get_vertical_line_to_graph(2,positive_sigmoid_graph,color=YELLOW)
vert_line_neg = self.get_vertical_line_to_graph(2,negative_sigmoid_graph,color=YELLOW) #if you want to make it nice, add a ball that'll go to the graph, will visualise the fuckedupedness of multifunctions
self.play(ShowCreation(negative_sigmoid_graph),ShowCreation(positive_sigmoid_graph))
self.wait()
self.play(ShowCreation(vert_line_pos))
self.play(ShowCreation(vert_line_neg))
self.wait(10)
class EmptyFunctionNotFunction(GraphScene):
CONFIG = {
"x_min" : -10,
"x_max" : 10.3,
"y_min" : -1.5,
"y_max" : 1.5,
"graph_origin" : ORIGIN ,
"function_color" : BLUE ,
"axes_color" : GREEN,
"center_point" : 0,
}
def construct(self):
self.setup_axes(animate=False)
hori_line_three = self.get_graph(lambda x : 0.5, color = BLUE, x_min = -10, x_max = 0)
hori_line_two = self.get_graph(lambda x : 1, color = BLUE, x_min = 2, x_max = 10)
vert_line_three = self.get_vertical_line_to_graph(-5,hori_line_three,color=YELLOW)
vert_line_two = self.get_vertical_line_to_graph(4,hori_line_two,color=YELLOW)#if you want to make it nice, add a ball that'll go to the graph, will visualise the fuckedupedness of multifunctions
vert_line_doesnt_work = self.get_vertical_line_to_graph(1,self.get_graph(lambda x: 2), color=YELLOW)
question_marks = TexMobject("???")
label_coord = self.coords_to_point(1,0)
question_marks = question_marks.next_to(label_coord,RIGHT+ UP)
self.play(ShowCreation(hori_line_three),ShowCreation(hori_line_two))
self.wait()
self.play(ShowCreation(vert_line_three),ShowCreation(vert_line_two),ShowCreation(vert_line_doesnt_work),ShowCreation(question_marks),run_time=1)
self.wait(10)
#tk do the circle one and the rescreen showing all three graphs
class CircleNotFunction(GraphScene):
CONFIG = {
"x_min" : -3.5,
"x_max" : 3.5,
"y_min" : -2.5,
"y_max" : 2.5,
"graph_origin" : ORIGIN ,
"function_color" : BLUE ,
"axes_color" : GREEN,
"center_point" : 0,
}
def construct(self):
self.setup_axes(animate=False)
positive_circ_graph = self.get_graph(lambda x : sqrt(1-x**2), color = BLUE, x_min = -1, x_max = 1)
negative_circ_graph = self.get_graph(lambda x : -sqrt(1-x**2), color = BLUE, x_min = -1, x_max = 1)
vert_line_pos_circ = self.get_vertical_line_to_graph(1/sqrt(2),positive_circ_graph,color=YELLOW)
vert_line_neg_circ = self.get_vertical_line_to_graph(1/sqrt(2),negative_circ_graph,color=YELLOW) #if you want to make it nice, add a ball that'll go to the graph, will visualise the fuckedupedness of multifunctions
vert_line_doesnt_work_circ = self.get_vertical_line_to_graph(2,self.get_graph(lambda x: 2), color=YELLOW)
question_marks = TexMobject("???")
label_coord = self.coords_to_point(2,0)
question_marks = question_marks.next_to(label_coord,RIGHT+ UP)
self.play(ShowCreation(negative_circ_graph),ShowCreation(positive_circ_graph))
self.wait()
self.play(ShowCreation(vert_line_pos_circ),ShowCreation(vert_line_neg_circ),ShowCreation(vert_line_doesnt_work_circ),ShowCreation(question_marks))
self.wait(10)
| 38.835
| 222
| 0.647869
|
a31e0ff75a1e85eebe0e7cf32a47a7659acaf671
| 984
|
py
|
Python
|
venv/lib/python3.6/site-packages/tests/test_level1/test_link.py
|
Guillaume-Fernandez/phishfinder
|
b459a30202fd5dfb1340b43c70363705de7cedd9
|
[
"MIT"
] | 2
|
2019-09-26T17:46:18.000Z
|
2022-02-26T03:07:59.000Z
|
venv/lib/python3.6/site-packages/tests/test_level1/test_link.py
|
Guillaume-Fernandez/phishfinder
|
b459a30202fd5dfb1340b43c70363705de7cedd9
|
[
"MIT"
] | 2
|
2018-08-19T17:12:26.000Z
|
2020-09-18T03:51:33.000Z
|
venv/lib/python3.6/site-packages/tests/test_level1/test_link.py
|
Guillaume-Fernandez/phishfinder
|
b459a30202fd5dfb1340b43c70363705de7cedd9
|
[
"MIT"
] | 2
|
2020-09-14T06:19:39.000Z
|
2020-12-29T10:54:43.000Z
|
"""Test link selectors."""
from __future__ import unicode_literals
from .. import util
class TestLink(util.TestCase):
"""Test link selectors."""
MARKUP = """
<div>
<p>Some text <span id="1" class="foo:bar:foobar"> in a paragraph</span>.
<a id="2" class="bar" href="http://google.com">Link</a>
<a id="3">Placeholder text.</a>
</p>
</div>
"""
def test_link(self):
"""Test link (all links are unvisited)."""
self.assert_selector(
self.MARKUP,
":link",
["2"],
flags=util.HTML
)
def test_tag_and_link(self):
"""Test link and tag (all links are unvisited)."""
self.assert_selector(
self.MARKUP,
"a:link",
[],
flags=util.XML
)
class TestLinkQuirks(TestLink):
"""Test link selectors with quirks."""
def setUp(self):
"""Setup."""
self.purge()
self.quirks = True
| 20.93617
| 76
| 0.522358
|
0ecf14589f1fc9d295e3e9ee4c4fd707eb425b28
| 877
|
py
|
Python
|
Morse Potential/Morse_analatic.py
|
aniruddha-seal/Solving-1D-Schrodinger-Eqn
|
c3fece2f7c2b043551e23878655b0f9b8dd8b604
|
[
"MIT"
] | null | null | null |
Morse Potential/Morse_analatic.py
|
aniruddha-seal/Solving-1D-Schrodinger-Eqn
|
c3fece2f7c2b043551e23878655b0f9b8dd8b604
|
[
"MIT"
] | null | null | null |
Morse Potential/Morse_analatic.py
|
aniruddha-seal/Solving-1D-Schrodinger-Eqn
|
c3fece2f7c2b043551e23878655b0f9b8dd8b604
|
[
"MIT"
] | 1
|
2022-02-19T06:05:00.000Z
|
2022-02-19T06:05:00.000Z
|
import numpy as np
from matplotlib import rc
import matplotlib.pyplot as plt
from scipy.constants import h, c
from morse import Morse, FAC
rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 14})
rc('text', usetex=True)
COLOUR1 = (0.6196, 0.0039, 0.2588, 1.0)
# Atom masses and equilibrium bond length for (1H)(35Cl).
mA, mB = 1., 35.
X_re = 1.27455e-10
X_Te = 0
X_we, X_wexe = 2990.945, 52.818595
X = Morse(mA, mB, X_we, X_wexe, X_re, X_Te)
X.make_rgrid()
X.V = X.Vmorse(X.r)
fig, ax = plt.subplots()
X.plot_V(ax, color='k')
X.draw_Elines(range(X.vmax), ax)
X.draw_Elines(X.get_vmax(), ax, linestyles='--', linewidths=1)
X.plot_psi([2, 14], ax, scaling=2, color=COLOUR1)
X.label_levels([2, 14], ax)
ax.set_xlabel(r'$r\;/\mathrm{\AA}$')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('morse-psi.png')
plt.show()
| 25.057143
| 75
| 0.677309
|
0d778af5b017e62126dd2e444cb6fb2f540bdf2f
| 3,675
|
py
|
Python
|
wagtail/contrib/settings/registry.py
|
stevedya/wagtail
|
52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/contrib/settings/registry.py
|
stevedya/wagtail
|
52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/contrib/settings/registry.py
|
stevedya/wagtail
|
52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import apps
from django.contrib.auth.models import Permission
from django.urls import reverse
from django.utils.text import capfirst
from wagtail import hooks
from wagtail.admin.admin_url_finder import (
ModelAdminURLFinder,
register_admin_url_finder,
)
from wagtail.admin.menu import MenuItem
from wagtail.permission_policies import ModelPermissionPolicy
from .permissions import user_can_edit_setting_type
class SettingMenuItem(MenuItem):
def __init__(self, model, icon="cog", classnames="", **kwargs):
# Special-case FontAwesome icons to avoid the breaking changes for those customisations.
if icon.startswith("fa-"):
icon_name = ""
icon_classes = "icon icon-" + icon
if classnames:
classnames += " " + icon_classes
else:
classnames = icon_classes
else:
icon_name = icon
self.model = model
super().__init__(
label=capfirst(model._meta.verbose_name),
url=reverse(
"wagtailsettings:edit",
args=[model._meta.app_label, model._meta.model_name],
),
classnames=classnames,
icon_name=icon_name,
**kwargs,
)
def is_shown(self, request):
return user_can_edit_setting_type(request.user, self.model)
class SettingsAdminURLFinder(ModelAdminURLFinder):
def construct_edit_url(self, instance):
return reverse(
"wagtailsettings:edit",
args=[
self.model._meta.app_label,
self.model._meta.model_name,
instance.site_id,
],
)
class Registry(list):
def register(self, model, **kwargs):
"""
Register a model as a setting, adding it to the wagtail admin menu
"""
# Don't bother registering this if it is already registered
if model in self:
return model
self.append(model)
# Register a new menu item in the settings menu
@hooks.register("register_settings_menu_item")
def menu_hook():
return SettingMenuItem(model, **kwargs)
@hooks.register("register_permissions")
def permissions_hook():
return Permission.objects.filter(
content_type__app_label=model._meta.app_label,
codename="change_{}".format(model._meta.model_name),
)
# Register an admin URL finder
permission_policy = ModelPermissionPolicy(model)
finder_class = type(
"_SettingsAdminURLFinder",
(SettingsAdminURLFinder,),
{"model": model, "permission_policy": permission_policy},
)
register_admin_url_finder(model, finder_class)
return model
def register_decorator(self, model=None, **kwargs):
"""
Register a model as a setting in the Wagtail admin
"""
if model is None:
return lambda model: self.register(model, **kwargs)
return self.register(model, **kwargs)
def get_by_natural_key(self, app_label, model_name):
"""
Get a setting model using its app_label and model_name.
If the app_label.model_name combination is not a valid model, or the
model is not registered as a setting, returns None.
"""
try:
Model = apps.get_model(app_label, model_name)
except LookupError:
return None
if Model not in registry:
return None
return Model
registry = Registry()
register_setting = registry.register_decorator
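# Hypothetical usage sketch (not part of this module): registering a settings model.
# `SocialMediaSettings` and its field are placeholders; `BaseSiteSetting` is assumed to be
# importable from wagtail.contrib.settings.models in the targeted Wagtail version.
#
#   from django.db import models
#   from wagtail.contrib.settings.models import BaseSiteSetting
#   from wagtail.contrib.settings.registry import register_setting
#
#   @register_setting(icon="site")
#   class SocialMediaSettings(BaseSiteSetting):
#       twitter = models.CharField(max_length=255, blank=True)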
| 30.882353
| 96
| 0.620408
|
8385c211f0e4ec450e2301d1cfccb27298411fd9
| 2,766
|
py
|
Python
|
predict.py
|
amezaa/ASLMobilenet
|
231e44907f5afe5a076543909ca094e834007d6a
|
[
"Apache-2.0"
] | null | null | null |
predict.py
|
amezaa/ASLMobilenet
|
231e44907f5afe5a076543909ca094e834007d6a
|
[
"Apache-2.0"
] | null | null | null |
predict.py
|
amezaa/ASLMobilenet
|
231e44907f5afe5a076543909ca094e834007d6a
|
[
"Apache-2.0"
] | null | null | null |
import shutil
import random
import os
import numpy as np
import cv2
import argparse
import sys
import time
from scripts.label_image import *
from vid_frame import *
from mode_accuracy import *
def run(video_path, frame_dir):
segment_vid(video_path, frame_dir)
predicted_word = predict_on_vid(frame_dir, model_file, label_file, input_height, input_width, input_mean, input_std, input_layer, output_layer, graph, input_name, output_name, input_operation, output_operation, sample_size)
return predicted_word
if __name__ == "__main__":
file_name = ""
direct_name = "segmented_frames/"
model_file = "tf_files/retrained_graph.pb"
label_file = "tf_files/retrained_labels.txt"
#Uncomment 'model_file' and 'label_file' below to switch to 64 word model:
#model_file = "tf_files/retrained_graph2.pb"
#label_file = "tf_files/retrained_labels2.txt"
input_height = 224
input_width = 224
input_mean = 128
input_std = 128
input_layer = "input"
output_layer = "final_result"
input_name = "import/" + input_layer
output_name = "import/" + output_layer
graph = load_graph(model_file)
input_operation = graph.get_operation_by_name(input_name)
output_operation = graph.get_operation_by_name(output_name)
sample_size = 20 # number of images to randomly sample from the processed video
parser = argparse.ArgumentParser()
parser.add_argument("--video", help="video to be processed")
parser.add_argument("--directory", help="directory in which processed frames are saved")
parser.add_argument("--graph", help="graph/model to be executed")
parser.add_argument("--labels", help="name of file containing labels")
parser.add_argument("--input_height", type=int, help="input height")
parser.add_argument("--input_width", type=int, help="input width")
parser.add_argument("--input_mean", type=int, help="input mean")
parser.add_argument("--input_std", type=int, help="input std")
parser.add_argument("--input_layer", help="name of input layer")
parser.add_argument("--output_layer", help="name of output layer")
args = parser.parse_args()
if args.graph:
model_file = args.graph
if args.video:
file_name = args.video
if args.directory:
direct_name = args.directory
if args.labels:
label_file = args.labels
if args.input_height:
input_height = args.input_height
if args.input_width:
input_width = args.input_width
if args.input_mean:
input_mean = args.input_mean
if args.input_std:
input_std = args.input_std
if args.input_layer:
input_layer = args.input_layer
if args.output_layer:
output_layer = args.output_layer
predicted_word = run(file_name, direct_name)
#remove segmented frames from directory in case you're
#running multiple videos sequentially
shutil.rmtree(direct_name)
os.mkdir(direct_name)
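# Hypothetical invocation (file names are placeholders, not from the original repo):
#   python predict.py --video sample_sign.mp4 --directory segmented_frames/
# The script splits the video into frames, samples `sample_size` of them, classifies each
# with the retrained MobileNet graph, and aggregates the per-frame predictions into a single word.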
| 29.115789
| 224
| 0.769342
|
cf4fc654465689cf15808e16aa64b5179e114f11
| 383
|
py
|
Python
|
pizza_home/migrations/0002_alter_pizza_image.py
|
jayaemekar/CalPizzaDeliverySystem
|
9e7f1f378b42b410d358c28359a9915e30bb270d
|
[
"MIT"
] | null | null | null |
pizza_home/migrations/0002_alter_pizza_image.py
|
jayaemekar/CalPizzaDeliverySystem
|
9e7f1f378b42b410d358c28359a9915e30bb270d
|
[
"MIT"
] | null | null | null |
pizza_home/migrations/0002_alter_pizza_image.py
|
jayaemekar/CalPizzaDeliverySystem
|
9e7f1f378b42b410d358c28359a9915e30bb270d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-12-12 21:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pizza_home', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='pizza',
name='image',
field=models.ImageField(upload_to='uploads'),
),
]
| 20.157895
| 57
| 0.590078
|
5bcd974fc9b4c8306e01ca5f9a8c770aec90f9fb
| 3,282
|
py
|
Python
|
ophiuchus/experiments/lyapunovgrid.py
|
adrn/ophiuchus
|
fe7e937bf421d506ec252165f044d514f571667b
|
[
"MIT"
] | 1
|
2015-09-25T10:12:52.000Z
|
2015-09-25T10:12:52.000Z
|
ophiuchus/experiments/lyapunovgrid.py
|
adrn/ophiuchus
|
fe7e937bf421d506ec252165f044d514f571667b
|
[
"MIT"
] | null | null | null |
ophiuchus/experiments/lyapunovgrid.py
|
adrn/ophiuchus
|
fe7e937bf421d506ec252165f044d514f571667b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
""" Class for computing Lyapunov exponents for the orbit grid """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
# Third-party
from astropy import log as logger
import numpy as np
import gala.integrate as gi
import gala.dynamics as gd
from gala.units import galactic
# Project
from ..mockstream import ophiuchus_stream
from .core import GridExperiment
from .. import potential as op
__all__ = ['LyapunovGrid']
class LyapunovGrid(GridExperiment):
# failure error codes
error_codes = {
1: "Failed to compute lyapunov exponent",
2: "Unexpected failure."
}
required_kwargs = ['potential_name', 'nperiods', 'nsteps_per_period', 'noffset_orbits']
config_defaults = {
'noffset_orbits': 2, # Number of offset orbits to integrate and average.
'cache_filename': 'lyapunovgrid.npy'
}
cache_dtype = [
('dt','f8'),
('mle_avg','f8'),
('mle_end','f8'),
('success','b1'),
('error_code','i8'), # if not successful, why did it fail? see above
]
@property
def grid(self):
if not hasattr(self, '_grid'):
path = os.path.abspath(os.path.join(self.cache_path, "..", "orbitfit", "w0.npy"))
self._grid = np.load(path)
return self._grid
def run(self, index):
# return dict
result = dict()
nsteps_per_period = self.config.nsteps_per_period
nperiods = self.config.nperiods
noffset = self.config.noffset_orbits
# read potential, initial conditions
potential = op.load_potential(self.config.potential_name)
w0 = self.grid[index]
# I guess this could be determined automatically...but whatever
T = 200. # The orbits have periods ~200 Myr
# timestep and number of steps
dt = T / nsteps_per_period
nsteps = int(nperiods * nsteps_per_period) # 16384 orbital periods
try:
lyap = gd.fast_lyapunov_max(np.ascontiguousarray(w0), potential,
dt=dt, nsteps=nsteps,
noffset_orbits=noffset,
return_orbit=False)
except RuntimeError:
logger.warning("Failed to compute lyapunov exponent")
result['mle_avg'] = np.nan
result['mle_end'] = np.nan
result['success'] = False
result['error_code'] = 1
return result
except KeyboardInterrupt:
raise
except BaseException as e:
logger.warning("Unexpected failure: {}".format(str(e)))
result['mle_avg'] = np.nan
result['mle_end'] = np.nan
result['success'] = False
result['error_code'] = 2
return result
# estimate the FTMLE
lyap = np.mean(lyap, axis=1)
ix = max(1,nsteps_per_period*nperiods//64)
FTMLE = np.mean(lyap[-ix:])
result['dt'] = dt
result['mle_avg'] = FTMLE.decompose(galactic).value
result['mle_end'] = lyap[-1].decompose(galactic).value
result['success'] = True
result['error_code'] = 0
return result
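# Hypothetical post-processing sketch (not part of this experiment class): with `mle_avg`
# stored in galactic units (1/Myr), the corresponding Lyapunov time in Myr is its reciprocal:
#   rec = np.load('lyapunovgrid.npy')
#   t_lyap_myr = 1.0 / rec['mle_avg']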
| 30.110092
| 93
| 0.593845
|
02e46643fbf58bbdf05b84a61877495cc6cb9a39
| 1,873
|
py
|
Python
|
python/buffer_test.py
|
alphaliang/pcammls
|
0ae59a9b3e0d73ea85e6d03b43721f79239be089
|
[
"Unlicense"
] | 6
|
2020-08-07T07:02:20.000Z
|
2021-03-15T10:38:22.000Z
|
python/buffer_test.py
|
alphaliang/pcammls
|
0ae59a9b3e0d73ea85e6d03b43721f79239be089
|
[
"Unlicense"
] | null | null | null |
python/buffer_test.py
|
alphaliang/pcammls
|
0ae59a9b3e0d73ea85e6d03b43721f79239be089
|
[
"Unlicense"
] | 2
|
2020-06-15T09:31:02.000Z
|
2021-05-06T05:31:00.000Z
|
import pcammls as sdk
import numpy as np
def test_array_equal(nparr, carr,sz):
print('data is {}'.format(nparr))
res = []
arr = np.reshape(nparr,(sz))
for idx in range(sz):
res.append(carr[idx])
for idx in range(sz):
if res[idx]!=arr[idx]:
print ('not equal at {}'.format(idx))
def test_numpy_2_c_buffer():
''' convert numpy array to c array buffer '''
print('-'*10)
print('test_numpy_2_c_buffer')
#2d array test
data = [list(range(10))]*2
arr = np.array(data,np.uint16)
carr = sdk.uint16_t_ARRAY.from_nparray(arr)
test_array_equal(arr,carr,20)
#3d numpy array
res=[]
data = [[[1,2]]*5]*2
arr = np.array(data,np.uint8)
carr = sdk.uint8_t_ARRAY.from_nparray(arr)
test_array_equal(arr,carr,20)
print('test done')
def test_c_buffer_2_numpy():
''' convert c array buffer to numpy array '''
print('-'*10)
print('test_c_buffer_2_numpy')
carr = sdk.uint8_t_ARRAY(20)
for x in range(20):
carr[x]=x
nparr = carr.as_nparray2d(2,10)
test_array_equal(nparr,carr,20)
print('test done')
def test_vect_3f():
print('-'*10)
print('test_vect_3f')
data = []
for k in range(10):
data.append([k,k+1,k+2])
arr = np.array(data, float)
carr = sdk.TY_VECT_3F_ARRAY.from_nparray(arr)
arr_out = carr.as_nparray(10)
for k in range(10):
for s in range(3):
if (arr[k][s]!=arr_out[k][s]) : print('{} not equal'.format(k))
for k in range(10):
if (arr[k][0]!=carr[k].x) : print('{} not equal'.format(k))
if (arr[k][1]!=carr[k].y) : print('{} not equal'.format(k))
if (arr[k][2]!=carr[k].z) : print('{} not equal'.format(k))
print('test done')
def main():
test_numpy_2_c_buffer()
test_c_buffer_2_numpy()
test_vect_3f()
main()
| 26.380282
| 76
| 0.590497
|
2864200c86ca44cda955ea9233ec6ba3a00ccab2
| 6,073
|
py
|
Python
|
esphomeyaml/components/sensor/bme680.py
|
sherbang/esphomeyaml
|
eb7db04d08fd8c1e6fd81934c6ab89959b5678ed
|
[
"MIT"
] | 2
|
2020-12-03T17:38:24.000Z
|
2021-03-10T04:11:44.000Z
|
esphomeyaml/components/sensor/bme680.py
|
dreed47/esphomeyaml
|
9890a51c5136c356c2cc7f14a3ab8fe547bbb72a
|
[
"MIT"
] | null | null | null |
esphomeyaml/components/sensor/bme680.py
|
dreed47/esphomeyaml
|
9890a51c5136c356c2cc7f14a3ab8fe547bbb72a
|
[
"MIT"
] | null | null | null |
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml import core
from esphomeyaml.components import sensor
from esphomeyaml.const import CONF_ADDRESS, CONF_GAS_RESISTANCE, CONF_HUMIDITY, CONF_IIR_FILTER, \
CONF_MAKE_ID, CONF_NAME, CONF_OVERSAMPLING, CONF_PRESSURE, CONF_TEMPERATURE, \
CONF_UPDATE_INTERVAL, CONF_HEATER, CONF_DURATION
from esphomeyaml.cpp_generator import variable, add
from esphomeyaml.cpp_helpers import setup_component
from esphomeyaml.cpp_types import Application, App
DEPENDENCIES = ['i2c']
BME680Oversampling = sensor.sensor_ns.enum('BME680Oversampling')
OVERSAMPLING_OPTIONS = {
'NONE': BME680Oversampling.BME680_OVERSAMPLING_NONE,
'1X': BME680Oversampling.BME680_OVERSAMPLING_1X,
'2X': BME680Oversampling.BME680_OVERSAMPLING_2X,
'4X': BME680Oversampling.BME680_OVERSAMPLING_4X,
'8X': BME680Oversampling.BME680_OVERSAMPLING_8X,
'16X': BME680Oversampling.BME680_OVERSAMPLING_16X,
}
BME680IIRFilter = sensor.sensor_ns.enum('BME680IIRFilter')
IIR_FILTER_OPTIONS = {
'OFF': BME680IIRFilter.BME680_IIR_FILTER_OFF,
'1X': BME680IIRFilter.BME680_IIR_FILTER_1X,
'3X': BME680IIRFilter.BME680_IIR_FILTER_3X,
'7X': BME680IIRFilter.BME680_IIR_FILTER_7X,
'15X': BME680IIRFilter.BME680_IIR_FILTER_15X,
'31X': BME680IIRFilter.BME680_IIR_FILTER_31X,
'63X': BME680IIRFilter.BME680_IIR_FILTER_63X,
'127X': BME680IIRFilter.BME680_IIR_FILTER_127X,
}
BME680_OVERSAMPLING_SENSOR_SCHEMA = sensor.SENSOR_SCHEMA.extend({
vol.Optional(CONF_OVERSAMPLING): cv.one_of(*OVERSAMPLING_OPTIONS, upper=True),
})
MakeBME680Sensor = Application.struct('MakeBME680Sensor')
BME680TemperatureSensor = sensor.sensor_ns.class_('BME680TemperatureSensor',
sensor.EmptyPollingParentSensor)
BME680PressureSensor = sensor.sensor_ns.class_('BME680PressureSensor',
sensor.EmptyPollingParentSensor)
BME680HumiditySensor = sensor.sensor_ns.class_('BME680HumiditySensor',
sensor.EmptyPollingParentSensor)
BME680GasResistanceSensor = sensor.sensor_ns.class_('BME680GasResistanceSensor',
sensor.EmptyPollingParentSensor)
PLATFORM_SCHEMA = sensor.PLATFORM_SCHEMA.extend({
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeBME680Sensor),
vol.Optional(CONF_ADDRESS, default=0x76): cv.i2c_address,
vol.Required(CONF_TEMPERATURE): cv.nameable(BME680_OVERSAMPLING_SENSOR_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(BME680TemperatureSensor),
})),
vol.Required(CONF_PRESSURE): cv.nameable(BME680_OVERSAMPLING_SENSOR_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(BME680PressureSensor),
})),
vol.Required(CONF_HUMIDITY): cv.nameable(BME680_OVERSAMPLING_SENSOR_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(BME680HumiditySensor),
})),
vol.Required(CONF_GAS_RESISTANCE): cv.nameable(sensor.SENSOR_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(BME680GasResistanceSensor),
})),
vol.Optional(CONF_IIR_FILTER): cv.one_of(*IIR_FILTER_OPTIONS, upper=True),
vol.Optional(CONF_HEATER): vol.Any(None, vol.All(vol.Schema({
vol.Optional(CONF_TEMPERATURE, default=320): vol.All(vol.Coerce(int), vol.Range(200, 400)),
vol.Optional(CONF_DURATION, default='150ms'): vol.All(
cv.positive_time_period_milliseconds, vol.Range(max=core.TimePeriod(milliseconds=4032)))
}, cv.has_at_least_one_key(CONF_TEMPERATURE, CONF_DURATION)))),
vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,
}).extend(cv.COMPONENT_SCHEMA.schema)
def to_code(config):
rhs = App.make_bme680_sensor(config[CONF_TEMPERATURE][CONF_NAME],
config[CONF_PRESSURE][CONF_NAME],
config[CONF_HUMIDITY][CONF_NAME],
config[CONF_GAS_RESISTANCE][CONF_NAME],
config[CONF_ADDRESS],
config.get(CONF_UPDATE_INTERVAL))
make = variable(config[CONF_MAKE_ID], rhs)
bme680 = make.Pbme680
if CONF_OVERSAMPLING in config[CONF_TEMPERATURE]:
constant = OVERSAMPLING_OPTIONS[config[CONF_TEMPERATURE][CONF_OVERSAMPLING]]
add(bme680.set_temperature_oversampling(constant))
if CONF_OVERSAMPLING in config[CONF_PRESSURE]:
constant = OVERSAMPLING_OPTIONS[config[CONF_PRESSURE][CONF_OVERSAMPLING]]
add(bme680.set_pressure_oversampling(constant))
if CONF_OVERSAMPLING in config[CONF_HUMIDITY]:
constant = OVERSAMPLING_OPTIONS[config[CONF_HUMIDITY][CONF_OVERSAMPLING]]
add(bme680.set_humidity_oversampling(constant))
if CONF_IIR_FILTER in config:
constant = IIR_FILTER_OPTIONS[config[CONF_IIR_FILTER]]
add(bme680.set_iir_filter(constant))
if CONF_HEATER in config:
conf = config[CONF_HEATER]
if not conf:
add(bme680.set_heater(0, 0))
else:
add(bme680.set_heater(conf[CONF_TEMPERATURE], conf[CONF_DURATION]))
sensor.setup_sensor(bme680.Pget_temperature_sensor(), make.Pmqtt_temperature,
config[CONF_TEMPERATURE])
sensor.setup_sensor(bme680.Pget_pressure_sensor(), make.Pmqtt_pressure,
config[CONF_PRESSURE])
sensor.setup_sensor(bme680.Pget_humidity_sensor(), make.Pmqtt_humidity,
config[CONF_HUMIDITY])
sensor.setup_sensor(bme680.Pget_gas_resistance_sensor(), make.Pmqtt_gas_resistance,
config[CONF_GAS_RESISTANCE])
setup_component(bme680, config)
BUILD_FLAGS = '-DUSE_BME680'
def to_hass_config(data, config):
return [sensor.core_to_hass_config(data, config[CONF_TEMPERATURE]),
sensor.core_to_hass_config(data, config[CONF_PRESSURE]),
sensor.core_to_hass_config(data, config[CONF_HUMIDITY]),
sensor.core_to_hass_config(data, config[CONF_GAS_RESISTANCE])]
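# Illustrative esphomeyaml configuration consumed by the schema above (sensor names and the
# I2C address are placeholders):
#
#   sensor:
#     - platform: bme680
#       address: 0x76
#       temperature:
#         name: "BME680 Temperature"
#         oversampling: 16X
#       pressure:
#         name: "BME680 Pressure"
#       humidity:
#         name: "BME680 Humidity"
#       gas_resistance:
#         name: "BME680 Gas Resistance"
#       update_interval: 60s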
| 49.373984
| 100
| 0.721719
|
8913fe66f74564fbe1b9e779e09c5ab9f43d023f
| 134
|
py
|
Python
|
newsfeedsystem/angular/urls.py
|
bakowroc/newsfeed-system
|
6a7363700416e331fcb51c1118a90c0b3d959673
|
[
"MIT"
] | null | null | null |
newsfeedsystem/angular/urls.py
|
bakowroc/newsfeed-system
|
6a7363700416e331fcb51c1118a90c0b3d959673
|
[
"MIT"
] | null | null | null |
newsfeedsystem/angular/urls.py
|
bakowroc/newsfeed-system
|
6a7363700416e331fcb51c1118a90c0b3d959673
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from angular import views
urlpatterns = [
url(r'^', views.HomePageView.as_view(), name ='/')
]
| 16.75
| 54
| 0.686567
|
49b5da7e3608c63d4fffbf06e8e92941c9f3d4cf
| 1,977
|
py
|
Python
|
Examples/python_server/openapi_server/com/h21lab/TS29510_Nnrf_NFDiscovery/handler/preferred_search.py
|
H21lab/5GC_build
|
bbc8bbd9158a2124ca486572906020a91789733e
|
[
"Apache-2.0"
] | 12
|
2020-06-04T06:58:13.000Z
|
2021-12-15T14:26:56.000Z
|
Examples/python_server/openapi_server/com/h21lab/TS29510_Nnrf_NFDiscovery/handler/preferred_search.py
|
H21lab/5GC_build
|
bbc8bbd9158a2124ca486572906020a91789733e
|
[
"Apache-2.0"
] | 3
|
2021-05-27T12:13:58.000Z
|
2021-12-15T05:45:44.000Z
|
Examples/python_server/openapi_server/com/h21lab/TS29510_Nnrf_NFDiscovery/handler/preferred_search.py
|
H21lab/5GC_build
|
bbc8bbd9158a2124ca486572906020a91789733e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.com.h21lab.TS29510_Nnrf_NFDiscovery.handler.base_model_ import Model
from openapi_server import util
class PreferredSearch(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, preferred_tai_match_ind=False): # noqa: E501
"""PreferredSearch - a model defined in OpenAPI
:param preferred_tai_match_ind: The preferred_tai_match_ind of this PreferredSearch. # noqa: E501
:type preferred_tai_match_ind: bool
"""
self.openapi_types = {
'preferred_tai_match_ind': bool
}
self.attribute_map = {
'preferred_tai_match_ind': 'preferredTaiMatchInd'
}
self._preferred_tai_match_ind = preferred_tai_match_ind
@classmethod
def from_dict(cls, dikt) -> 'PreferredSearch':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The PreferredSearch of this PreferredSearch. # noqa: E501
:rtype: PreferredSearch
"""
return util.deserialize_model(dikt, cls)
@property
def preferred_tai_match_ind(self):
"""Gets the preferred_tai_match_ind of this PreferredSearch.
:return: The preferred_tai_match_ind of this PreferredSearch.
:rtype: bool
"""
return self._preferred_tai_match_ind
@preferred_tai_match_ind.setter
def preferred_tai_match_ind(self, preferred_tai_match_ind):
"""Sets the preferred_tai_match_ind of this PreferredSearch.
:param preferred_tai_match_ind: The preferred_tai_match_ind of this PreferredSearch.
:type preferred_tai_match_ind: bool
"""
self._preferred_tai_match_ind = preferred_tai_match_ind
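# Minimal usage sketch (hypothetical, not part of the generated module):
#   ps = PreferredSearch.from_dict({'preferredTaiMatchInd': True})
#   ps.preferred_tai_match_ind  # -> True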
| 30.415385
| 106
| 0.693981
|
61ed8d811683ff309a84dfbadf848a4d35de9f64
| 5,289
|
py
|
Python
|
external/BB2SegNet/refinement_net/datasets/DAVIS/COCO_for_DAVIS.py
|
zhuhu00/MOTSFusion_modify
|
190224a7c3fbded69fedf9645a0ebbf08227fb6d
|
[
"MIT"
] | 154
|
2019-07-25T02:27:39.000Z
|
2022-02-18T19:40:43.000Z
|
external/BB2SegNet/refinement_net/datasets/DAVIS/COCO_for_DAVIS.py
|
zhuhu00/MOTSFusion_modify
|
190224a7c3fbded69fedf9645a0ebbf08227fb6d
|
[
"MIT"
] | 4
|
2019-11-12T00:38:54.000Z
|
2021-08-14T08:40:12.000Z
|
external/BB2SegNet/refinement_net/datasets/DAVIS/COCO_for_DAVIS.py
|
zhuhu00/MOTSFusion_modify
|
190224a7c3fbded69fedf9645a0ebbf08227fb6d
|
[
"MIT"
] | 25
|
2019-09-17T08:49:57.000Z
|
2022-03-21T20:11:57.000Z
|
import tensorflow as tf
from PIL import Image
import numpy as np
from random import shuffle
from external.BB2SegNet.refinement_net.datasets.Loader import register_dataset
from external.BB2SegNet.refinement_net.datasets.Mapillary.MapillaryLike_instance import MapillaryLikeInstanceDataset
from external.BB2SegNet.refinement_net.datasets.util.Util import username
DEFAULT_PATH = "/fastwork/" + username() + "/mywork/data/coco/train2014/"
LIST_PATH_ROOT = "/home/luiten/vision/youtubevos/refinement_net/"
NAME = "COCO_for_davis"
@register_dataset(NAME)
class DAVISLucidDataset(MapillaryLikeInstanceDataset):
def __init__(self, config, subset):
davis_sequence = config.string("model", '')
# data_list_path = LIST_PATH_ROOT + davis_sequence + '/'
self.data_dir = config.string('data_dir',DEFAULT_PATH)
annotation_file = "/fastwork/" + username() + "/mywork/data/coco/annotations/instances_train2014.json"
self.build_filename_to_coco_anns_dict(annotation_file)
super().__init__(config, subset, NAME, self.data_dir, "", 100, cat_ids_to_use=None)
# Two things:
# # 1.) Load a dict to be used later
# # 2.) Load in the annotations live as needed
###################################################################################################
# # 1.)
###################################################################################################
def read_inputfile_lists(self):
imgs_ans = []
for f,anns in self.filename_to_coco_anns.items():
for id,ann in enumerate(anns):
id_ = str(id)
if ann['area'] < self.min_size:
continue
imgs_ans.append((f, id_))
shuffle(imgs_ans)
imgs = [x[0] for x in imgs_ans]
ans = [x[1] for x in imgs_ans]
return imgs, ans
def build_filename_to_coco_anns_dict(self, annotation_file):
import pycocotools.coco as coco
self.coco = coco.COCO(annotation_file)
ann_ids = self.coco.getAnnIds([])
all_anns = self.coco.loadAnns(ann_ids)
imgs = self.coco.loadImgs(self.coco.getImgIds())
self.filename_to_coco_anns = {self.data_dir + img['file_name']: [] for img in imgs}
self.filename_to_img_ids = {self.data_dir + img['file_name']: img['id'] for img in imgs}
# load all annotations for images
for ann in all_anns:
img_id = ann['image_id']
img = self.coco.loadImgs(img_id)
file_name = self.data_dir + img[0]['file_name']
self.filename_to_coco_anns[file_name].append(ann)
# Remove crowd anns
self.filename_to_coco_anns = {f: [ann for ann in anns if not ann["iscrowd"]]
for f, anns in self.filename_to_coco_anns.items()}
# filter out images without annotations
self.filename_to_coco_anns = {f: anns for f, anns in self.filename_to_coco_anns.items() if len(anns) > 0}
###################################################################################################
# # 2.)
###################################################################################################
def segmentation_to_mask(self,polys, height, width):
import pycocotools.mask as cocomask
polys = [p.flatten().tolist() for p in polys]
rles = cocomask.frPyObjects(polys, height, width)
rle = cocomask.merge(rles)
return cocomask.decode(rle)
def load_ann(img,img_filename,annotation_filename):
img_filename = img_filename.decode('utf-8')
anns_for_img = self.filename_to_coco_anns[img_filename.split("/")[-1]]
ann_id = int(annotation_filename.decode('utf-8'))
ann = anns_for_img[ann_id]
img_h, img_w = img.shape[:-1]
if ann['area'] > 1 and isinstance(ann['segmentation'], list):
segs = ann['segmentation']
valid_segs = [np.asarray(p).reshape(-1, 2) for p in segs if len(p) >= 6]
if len(valid_segs) < len(segs):
print("Image {} has invalid polygons!".format(img_filename))
output_ann = np.asarray(self.segmentation_to_mask(valid_segs, img_h, img_w), dtype='uint8')[
..., np.newaxis] # Should be 1s and 0s
else:
output_ann = np.zeros((img_h, img_w, 1), dtype="uint8")
return output_ann
def load_annotation(self, img, img_filename, annotation_filename):
def load_ann(img, img_filename, annotation_filename):
img_filename = img_filename.decode('utf-8')
anns_for_img = self.filename_to_coco_anns[img_filename]
ann_id = int(annotation_filename.decode('utf-8'))
ann = anns_for_img[ann_id]
img_h = img.shape[0]
img_w = img.shape[1]
if ann['area'] > 1 and isinstance(ann['segmentation'], list):
segs = ann['segmentation']
valid_segs = [np.asarray(p).reshape(-1, 2) for p in segs if len(p) >= 6]
if len(valid_segs) < len(segs):
print("Image {} has invalid polygons!".format(img_filename))
output_ann = np.asarray(self.segmentation_to_mask(valid_segs, img_h, img_w), dtype='uint8')[..., np.newaxis] # Should be 1s and 0s
else:
output_ann = np.zeros((img_h, img_w, 1), dtype="uint8")
return output_ann
ann, = tf.py_func(load_ann, [img,img_filename,annotation_filename], [tf.uint8])
# print(ann)
# ann = tf.Print(ann,[ann,])
# ann = ann[0]
ann.set_shape(img.get_shape().as_list()[:-1] + [1])
ann = tf.cast(ann, tf.uint8)
return ann
| 41.97619
| 139
| 0.630176
|
7f681f5ec7a5e274a77b938cce31fd3e025b8077
| 2,474
|
py
|
Python
|
test/test_api_fact_ref.py
|
abcdcamey/stock-data
|
bfdc67e60b7d4de59c66dbb52159574b4e0a5e51
|
[
"MIT"
] | 1
|
2019-04-10T09:07:59.000Z
|
2019-04-10T09:07:59.000Z
|
test/test_api_fact_ref.py
|
abcdcamey/stock-data
|
bfdc67e60b7d4de59c66dbb52159574b4e0a5e51
|
[
"MIT"
] | 2
|
2021-06-01T23:39:34.000Z
|
2021-12-13T19:58:31.000Z
|
test/test_api_fact_ref.py
|
abcdcamey/stock-data
|
bfdc67e60b7d4de59c66dbb52159574b4e0a5e51
|
[
"MIT"
] | null | null | null |
from api.fact.Fact_Market_Ref_Data import Fact_Market_Ref_Data
from api.common.Saver import Saver
fmrd = Fact_Market_Ref_Data()
#data = fmrd.get_fact_moneyflow_hsgt(_trade_date='20190508')
#Saver.save_to_mysql(data,'fact_moneyflow_hsgt')
#data = fmrd.get_fact_hsgt_top10(_trade_date='20190508')
#Saver.save_to_mysql(data,'fact_hsgt_top10')
#data = fmrd.get_fact_ggt_top10(_trade_date='20190508')
#Saver.save_to_mysql(data, 'fact_ggt_top10')
#data = fmrd.get_fact_margin(_trade_date='20190508')
#Saver.save_to_mysql(data, 'fact_margin')
#data = fmrd.get_fact_margin_detail(_trade_date='20190508')
#Saver.save_to_mysql(data, 'fact_margin_detail')
# 100 rows per call; fetch each stock in 3-year windows
#data = fmrd.get_fact_top10_holders(_ts_code='000002.SZ',_start_date='20000101',_end_date='20041231')
#Saver.save_to_mysql(data, 'fact_stock_top10_holders')
# 100 rows per call; fetch each stock in 3-year windows
#data = fmrd.get_fact_top10_floatholders(_ts_code='000002.SZ',_start_date='20000101',_end_date='20041231')
#Saver.save_to_mysql(data, 'fact_stock_top10_floatholders')
#data = fmrd.get_fact_stock_daily_top_list(_trade_date='20190508')
#Saver.save_to_mysql(data, 'fact_stock_daily_top_list')
#data = fmrd.get_fact_stock_daily_top_inst(_trade_date='20190508')
#Saver.save_to_mysql(data, 'fact_stock_daily_top_inst')
#data = fmrd.get_fact_stock_pledge_stat(_ts_code='000002.SZ')
#Saver.save_to_mysql(data, 'fact_stock_pledge_stat')
# Iterate over all stocks
#data = fmrd.get_fact_stock_pledge_detail(_ts_code='000002.SZ')
#Saver.save_to_mysql(data, 'fact_stock_pledge_detail')
#data = fmrd.get_fact_stock_repurchase(_ann_date='20190508')
#Saver.save_to_mysql(data, 'fact_stock_repurchase')
# No need to backfill history; a daily update is enough
#data = fmrd.get_fact_stock_concept()
#Saver.save_to_mysql(data, 'fact_stock_concept')
#data = fmrd.get_fact_stock_concept_detail(_ts_code='600848.SH')
#Saver.save_to_mysql(data, table_name='fact_stock_concept_detail',column_dict= {'id':'concept_code'})
#data = fmrd.get_fact_stock_share_float(_ann_date='20190508')
#Saver.save_to_mysql(data, table_name='fact_stock_share_float')
#data = fmrd.get_fact_stock_block_trade(_trade_date='20190510')
#Saver.save_to_mysql(data, table_name='fact_stock_block_trade')
# Data is currently only available up to 2019-02-22
data = fmrd.get_fact_stock_stk_account(_date='20190222')
Saver.save_to_mysql(data, table_name='fact_stock_stk_account')
# Use _ts_code to backfill history, then add _ann_date for the daily runs
#data = fmrd.get_fact_stock_stk_holdernumber(_ts_code='600848.SH',)
#Saver.save_to_mysql(data, table_name='fact_stock_stk_holdernumber')
| 30.925
| 106
| 0.812854
|
71b9e29e1c8c138703b10a7068b9030a88e4bfd6
| 929
|
py
|
Python
|
question_bank/island-perimeter/island-perimeter.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 9
|
2020-08-12T10:01:00.000Z
|
2022-01-05T04:37:48.000Z
|
question_bank/island-perimeter/island-perimeter.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 1
|
2021-02-16T10:19:31.000Z
|
2021-02-16T10:19:31.000Z
|
question_bank/island-perimeter/island-perimeter.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 4
|
2020-08-12T10:13:31.000Z
|
2021-11-05T01:26:58.000Z
|
# -*- coding: utf-8 -*-
# @Author : LG
"""
Runtime: 152 ms, faster than 68.28% of Python3 submissions
Memory: 13.7 MB, less than 58.86% of Python3 submissions
Approach:
For every pair of adjacent land cells, each cell loses one edge of its perimeter.
See the inline comments for the implementation details.
"""
class Solution:
def islandPerimeter(self, grid: List[List[int]]) -> int:
num = 0 # number of land cells
neighbor = 0 # number of adjacent sides (each adjacency counted from both cells)
m, n = len(grid), len(grid[0]) # grid dimensions
for i in range(m):
for j in range(n):
if grid[i][j] == 1: # found a land cell
num += 1 # count it
if 0 <= i-1 and grid[i-1][j] == 1: # check the four neighbours for adjacent land cells
neighbor += 1 # adjacent land cell found; neighbour count +1
if i+1 < m and grid[i+1][j] == 1:
neighbor += 1
if 0 <= j-1 and grid[i][j-1] == 1:
neighbor += 1
if j+1 < n and grid[i][j+1] == 1:
neighbor += 1
return num*4-neighbor
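# Worked example (the classic LeetCode 463 sample input):
#   grid = [[0,1,0,0],
#           [1,1,1,0],
#           [0,1,0,0],
#           [1,1,0,0]]
# There are num = 7 land cells and neighbor = 12 adjacent sides (each of the 6 adjacent
# pairs is counted from both cells), so the perimeter is 7*4 - 12 = 16.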
| 32.034483
| 70
| 0.420883
|
45d1cb1f05cdf1d0521061a13431ed8a6facc4d0
| 3,027
|
py
|
Python
|
unit_tests/test_utils.py
|
oprinmarius/charm-cinder-ceph
|
0be26a2684737bb0683e00b282b26d9d5af279b4
|
[
"ECL-2.0",
"Apache-2.0"
] | 16
|
2016-04-17T04:00:44.000Z
|
2020-04-03T08:12:36.000Z
|
unit_tests/test_utils.py
|
oprinmarius/charm-cinder-ceph
|
0be26a2684737bb0683e00b282b26d9d5af279b4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
unit_tests/test_utils.py
|
oprinmarius/charm-cinder-ceph
|
0be26a2684737bb0683e00b282b26d9d5af279b4
|
[
"ECL-2.0",
"Apache-2.0"
] | 8
|
2016-11-01T19:24:23.000Z
|
2020-11-02T17:36:14.000Z
|
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
import os
import yaml
from unittest.mock import patch
def load_config():
'''
Walk backwards from __file__ looking for config.yaml, then load and return the
'options' section.
'''
config = None
f = __file__
while config is None:
d = os.path.dirname(f)
if os.path.isfile(os.path.join(d, 'config.yaml')):
config = os.path.join(d, 'config.yaml')
break
f = d
if not config:
logging.error('Could not find config.yaml in any parent directory '
'of %s. ' % __file__)
raise Exception
return yaml.safe_load(open(config).read())['options']
def get_default_config():
'''
Load default charm config from config.yaml return as a dict.
If no default is set in config.yaml, its value is None.
'''
default_config = {}
config = load_config()
for k, v in config.items():
if 'default' in v:
default_config[k] = v['default']
else:
default_config[k] = None
return default_config
class CharmTestCase(unittest.TestCase):
def setUp(self, obj, patches):
super(CharmTestCase, self).setUp()
self.patches = patches
self.obj = obj
self.test_config = TestConfig()
self.test_relation = TestRelation()
self.patch_all()
def patch(self, method):
_m = patch.object(self.obj, method)
mock = _m.start()
self.addCleanup(_m.stop)
return mock
def patch_all(self):
for method in self.patches:
setattr(self, method, self.patch(method))
class TestConfig(object):
def __init__(self):
self.config = get_default_config()
def get(self, attr):
try:
return self.config[attr]
except KeyError:
return None
def get_all(self):
return self.config
def set(self, attr, value):
if attr not in self.config:
raise KeyError
self.config[attr] = value
class TestRelation(object):
def __init__(self, relation_data={}):
self.relation_data = relation_data
def set(self, relation_data):
self.relation_data = relation_data
def get(self, attr=None, unit=None, rid=None):
if attr is None:
return self.relation_data
elif attr in self.relation_data:
return self.relation_data[attr]
return None
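# Minimal usage sketch (hypothetical key/value, not from this charm's config.yaml):
#   cfg = TestConfig()
#   cfg.set('some-existing-option', 'new-value')   # raises KeyError for unknown keys
#   cfg.get('some-existing-option')                # -> 'new-value'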
| 26.321739
| 77
| 0.6333
|
48f39cfd43d1550a0a36789868a18828fae90df8
| 5,155
|
py
|
Python
|
bigml/api_handlers/batchtopicdistributionhandler.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 137
|
2015-01-12T06:04:10.000Z
|
2022-03-06T21:00:04.000Z
|
bigml/api_handlers/batchtopicdistributionhandler.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 78
|
2015-01-13T18:28:51.000Z
|
2022-03-04T19:18:28.000Z
|
bigml/api_handlers/batchtopicdistributionhandler.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 144
|
2015-01-16T06:13:33.000Z
|
2022-03-29T17:53:16.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2016-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for batch topic distributions' REST calls
https://bigml.com/developers/batchtopicdistributions
"""
try:
import simplejson as json
except ImportError:
import json
from bigml.api_handlers.resourcehandler import ResourceHandlerMixin
from bigml.api_handlers.resourcehandler import check_resource_type
from bigml.constants import BATCH_TOPIC_DISTRIBUTION_PATH, TOPIC_MODEL_PATH
class BatchTopicDistributionHandlerMixin(ResourceHandlerMixin):
"""This class is used by the BigML class as
a mixin that provides the REST calls models. It should not
be instantiated independently.
"""
def __init__(self):
"""Initializes the BatchTopidDistributionHandler. This class is
intended to be used as a mixin on ResourceHandler, that inherits its
attributes and basic method from BigMLConnection, and must not be
instantiated independently.
"""
self.batch_topic_distribution_url = self.prediction_base_url + \
BATCH_TOPIC_DISTRIBUTION_PATH
def create_batch_topic_distribution(self, topic_model, dataset,
args=None, wait_time=3, retries=10):
"""Creates a new batch topic distribution.
"""
create_args = {}
if args is not None:
create_args.update(args)
origin_resources_checked = self.check_origins(
dataset, topic_model, create_args, model_types=[TOPIC_MODEL_PATH],
wait_time=wait_time, retries=retries)
if origin_resources_checked:
body = json.dumps(create_args)
return self._create(self.batch_topic_distribution_url, body)
return
def get_batch_topic_distribution(self, batch_topic_distribution,
query_string=''):
"""Retrieves a batch topic distribution.
The batch_topic_distribution parameter should be a string
containing the batch_topic_distribution id or the dict
returned by create_batch_topic_distribution.
As batch_topic_distribution is an evolving object that is processed
until it reaches the FINISHED or FAULTY state, the function will
return a dict that encloses the batch_topic_distribution values
and state info available at the time it is called.
"""
check_resource_type(batch_topic_distribution,
BATCH_TOPIC_DISTRIBUTION_PATH,
message="A batch topic distribution id is needed.")
return self.get_resource(batch_topic_distribution,
query_string=query_string)
def download_batch_topic_distribution(self,
batch_topic_distribution,
filename=None, retries=10):
"""Retrieves the batch topic distribution file.
Downloads topic distributions, that are stored in a remote CSV file.
If a path is given in filename, the contents of the file are
downloaded and saved locally. A file-like object is returned
otherwise.
"""
check_resource_type(batch_topic_distribution,
BATCH_TOPIC_DISTRIBUTION_PATH,
message="A batch topic distribution id is needed.")
return self._download_resource(batch_topic_distribution, filename,
retries=retries)
def list_batch_topic_distributions(self, query_string=''):
"""Lists all your batch topic distributions.
"""
return self._list(self.batch_topic_distribution_url, query_string)
def update_batch_topic_distribution(self, batch_topic_distribution,
changes):
"""Updates a batch topic distributions.
"""
check_resource_type(batch_topic_distribution,
BATCH_TOPIC_DISTRIBUTION_PATH,
message="A batch topic distribution id is needed.")
return self.update_resource(batch_topic_distribution, changes)
def delete_batch_topic_distribution(self, batch_topic_distribution):
"""Deletes a batch topic distribution.
"""
check_resource_type(batch_topic_distribution,
BATCH_TOPIC_DISTRIBUTION_PATH,
message="A batch topic distribution id is needed.")
return self.delete_resource(batch_topic_distribution)
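# Hypothetical usage through the BigML connection class that mixes this handler in
# (resource ids and the output file name are placeholders):
#   from bigml.api import BigML
#   api = BigML()
#   btd = api.create_batch_topic_distribution('topicmodel/5f0...', 'dataset/5f0...',
#                                              {'name': 'my batch distribution'})
#   api.download_batch_topic_distribution(btd, filename='distributions.csv')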
| 41.24
| 79
| 0.660524
|
ed9af062b4928f1243ded41c7a8e3d051dae8f7f
| 4,748
|
py
|
Python
|
lensesio/lenses.py
|
rsaggino/lenses-python
|
325c221a9e34fa24a949672459409186db682447
|
[
"Apache-2.0"
] | 13
|
2018-04-23T11:58:01.000Z
|
2019-06-22T20:17:56.000Z
|
lensesio/lenses.py
|
rsaggino/lenses-python
|
325c221a9e34fa24a949672459409186db682447
|
[
"Apache-2.0"
] | 10
|
2019-12-19T04:47:49.000Z
|
2022-03-24T10:12:42.000Z
|
lensesio/lenses.py
|
Landoop/lenses-python
|
645b44c333e119dcbcd29f2d0e07ffdd6f66dbc0
|
[
"Apache-2.0"
] | 5
|
2020-02-17T12:08:20.000Z
|
2022-03-09T15:32:39.000Z
|
#!/usr/bin/env python3
from lensesio.pulsar.pulsar_client import SetupPulsar
from lensesio.core.exception import lenses_exception
from lensesio.registry.schemas import SchemaRegistry
from lensesio.data.processors import DataProcessor
from lensesio.data.connectors import DataConnector
from lensesio.data.consumers import DataConsumers
from threading import Thread, enumerate, RLock
from lensesio.kafka.quotas import KafkaQuotas
from lensesio.flows.flows import LensesFlows
from lensesio.kafka.topics import KafkaTopic
from lensesio.data.topology import Topology
from lensesio.core.admin import AdminPanel
from lensesio.core.basic_auth import Basic
from lensesio.kafka.acls import KafkaACL
from lensesio.data.policy import Policy
from lensesio.data.sql import SQLExec
from sys import modules as sys_mods
from sys import exit
import platform
ThreadLock = RLock()
active_threads = {
'sql': {
"t": 0,
},
'subscribe': {
"t": 0,
},
'pulsar_consumer': {
"t": 0,
},
'pulsar_reader': {
"t": 0,
},
"thread_lock": ThreadLock
}
class main(
Basic, KafkaTopic, SchemaRegistry, SQLExec,
KafkaQuotas, Policy, DataProcessor, DataConnector,
KafkaACL, LensesFlows, lenses_exception,
DataConsumers, Topology, AdminPanel, SetupPulsar,
):
def __init__(
self,
auth_type="basic",
url=None,
username=None,
password=None,
krb_service=None,
service_account=None,
verify_cert=True):
if auth_type is None:
return
self.active_threads = active_threads
try:
if auth_type not in ['basic', 'service', 'krb5']:
print('''
Parameters:
Mandatory:
auth_type=basic/krb5/service
url=lenses endpoint
Optional:
username (
if auth_type is basic
)
password (
if username was defined
)
service_account (
if auth_type is basic
)
krb_service (
if auth_type is krb5 and platform
is either one of linux, darwin
)
''')
exit(1)
except NameError:
print("Please provide auth_type [basic, krb5, service]")
exit(1)
self.auth_type = auth_type
self.url = url
if self.auth_type == 'basic':
Basic.__init__(self, url=url, username=username, password=password, verify_cert=verify_cert)
self.connect()
if self.auth_type == 'service':
Basic.__init__(self, url=url, service_account=service_account, verify_cert=verify_cert)
self.serviceConnect()
elif self.auth_type == 'krb5':
if platform.system().lower() not in ['linux', 'linux2', 'darwin']:
msg = "Error: gssapi kerberos integration is not supported for "
print(msg + platform.system())
exit(1)
try:
from lensesio.core.krb_auth import krb5
self.krb5 = krb5
self.krb5.__init__(self, url=url, service=krb_service)
self.krb5.KrbAuth(self)
except NameError:
print("Kerberos client lib is not installed")
return None
if self.ConnectionValidation() == 1:
print("Could not login to lenses. Please check the auth options")
exit(1)
AdminPanel.__init__(self, verify_cert=verify_cert)
Topology.__init__(self, verify_cert=verify_cert)
KafkaTopic.__init__(self, verify_cert=verify_cert)
SchemaRegistry.__init__(self, verify_cert=verify_cert)
SQLExec.__init__(self, active_threads=active_threads, verify_cert=verify_cert)
KafkaQuotas.__init__(self, verify_cert=verify_cert)
Policy.__init__(self, verify_cert=verify_cert)
DataProcessor.__init__(self, verify_cert=verify_cert)
DataConnector.__init__(self, verify_cert=verify_cert)
KafkaACL.__init__(self, verify_cert=verify_cert)
DataConsumers.__init__(self, verify_cert=verify_cert)
def InitPulsarClient(self, host, **kwargs):
try:
self.Pulsar = SetupPulsar.__init__(self, active_threads, host)
except NameError:
print("Pulsar client lib is not installed")
return None
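# Hypothetical instantiation (endpoint and credentials are placeholders):
#   lenses = main(auth_type="basic", url="http://localhost:24015",
#                 username="admin", password="admin")
# after which the Kafka, schema-registry, SQL and admin helpers from the mixed-in classes
# become available on the instance.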
| 34.405797
| 104
| 0.587616
|
c03c8b8234653fda30c73d65e75cf62e121809ce
| 6,691
|
py
|
Python
|
apps/user/views.py
|
Devin6Tam/site_demo
|
c8766b574da0f0c8259995f56cc1a201fc2281be
|
[
"Apache-2.0"
] | null | null | null |
apps/user/views.py
|
Devin6Tam/site_demo
|
c8766b574da0f0c8259995f56cc1a201fc2281be
|
[
"Apache-2.0"
] | 10
|
2021-03-19T01:23:35.000Z
|
2022-03-12T00:21:44.000Z
|
apps/user/views.py
|
Devin6Tam/site_demo
|
c8766b574da0f0c8259995f56cc1a201fc2281be
|
[
"Apache-2.0"
] | null | null | null |
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.template import loader
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.core.paginator import Paginator
from django.views import generic
import logging
from django import forms
from django.conf import settings
from .models import User, ConfirmString
from .forms import UserForm, RegisterForm
import hashlib
import datetime
# Create your views here.
logger = logging.getLogger(__name__)
def hash_code(s, salt='site_demo'):  # add some salt
h = hashlib.sha256()
s += salt
h.update(s.encode())  # update() only accepts bytes
return h.hexdigest()
def make_confirm_string(user):
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
code = hash_code(user.name, now)
ConfirmString.objects.create(code=code, user=user,)
return code
def send_email(email, code):
from django.core.mail import EmailMultiAlternatives
subject = '来自www.mzbloc.com的注册确认邮件'
text_content = '''感谢注册www.mzbloc.com,欢迎加入我们,让我们一起专注于Python、Django和机器学习技术的分享!\
如果你看到这条消息,说明你的邮箱服务器不提供HTML链接功能,请联系管理员!'''
html_content = '''
<p>感谢注册<a href="http://{}/user/confirm/?code={}" target=blank>www.mzbloc.com</a>,\
这里是demo示例展示,专注于Python、Django和机器学习技术的分享!</p>
<p>请点击站点链接完成注册确认!</p>
<p>此链接有效期为{}天!</p>
'''.format('127.0.0.1:8000', code, settings.CONFIRM_DAYS)
msg = EmailMultiAlternatives(subject, text_content, settings.EMAIL_HOST_USER, [email])
msg.attach_alternative(html_content, "text/html")
msg.send()
def index(request):
if not request.session.get('is_login', None):
return redirect('user:login')
return render(request, 'login/index.html')
def login(request):
if request.method == "POST":
login_form = UserForm(request.POST)
message = '请检查填写的内容!'
if login_form.is_valid():
username = login_form.cleaned_data.get('username')
password = login_form.cleaned_data.get('password')
try:
user = User.objects.get(name=username)
except:
message = '用户不存在!'
return render(request, 'login/login.html', {'message': message})
if not user.has_confirmed:
message = '该用户还未经过邮件确认!'
return render(request, 'login/login.html', locals())
if user.password == hash_code(password):
print(username, password)
request.session['is_login'] = True
request.session['user_id'] = user.id
request.session['user_name'] = user.name
return redirect('user:index')
else:
message = '密码不正确!'
return render(request, 'login/login.html', locals())
else:
return render(request, 'login/login.html', locals())
# return redirect('user:index')
login_form = UserForm()
return render(request, 'login/login.html', locals())
def login1(request):
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
print(username, password)
message = '请检查填写的内容!'
if username.strip() and password:
# validate the username characters
# validate the password length
# more validation could go here...
try:
user = User.objects.get(name=username)
except:
message = '用户不存在!'
return render(request, 'login/login.html', {'message': message})
if user.password == password:
print(username, password)
return redirect('user:index')
else:
message = '密码不正确!'
return render(request, 'login/login.html', {'message': message})
else:
return render(request, 'login/login.html', {'message': message})
# return redirect('user:index')
return render(request, 'login/login.html')
def register(request):
if request.session.get('is_login', None):
return redirect('user:index')
if request.method == 'POST':
register_form = RegisterForm(request.POST)
message = "请检查填写的内容!"
if register_form.is_valid():
username = register_form.cleaned_data.get('username')
password1 = register_form.cleaned_data.get('password1')
password2 = register_form.cleaned_data.get('password2')
email = register_form.cleaned_data.get('email')
sex = register_form.cleaned_data.get('sex')
if password1 != password2:
message = '两次输入的密码不同!'
return render(request, 'login/register.html', locals())
else:
same_name_user = User.objects.filter(name=username)
if same_name_user:
message = '用户名已经存在'
return render(request, 'login/register.html', locals())
same_email_user = User.objects.filter(email=email)
if same_email_user:
message = '该邮箱已经被注册了!'
return render(request, 'login/register.html', locals())
new_user = User()
new_user.name = username
new_user.password = hash_code(password1)
new_user.email = email
new_user.sex = sex
new_user.save()
code = make_confirm_string(new_user)
send_email(email, code)
message = '请前往邮箱进行确认!'
return redirect('user:login')
else:
return render(request, 'login/register.html', locals())
register_form = RegisterForm()
return render(request, 'login/register.html', locals())
def user_confirm(request):
code = request.GET.get('code', None)
message = ''
try:
confirm = ConfirmString.objects.get(code=code)
except:
message = '无效的确认请求!'
return render(request, 'login/confirm.html', locals())
c_time = confirm.c_time
now = datetime.datetime.now()
if now > c_time + datetime.timedelta(settings.CONFIRM_DAYS):
confirm.user.delete()
message = '您的邮件已经过期!请重新注册!'
return render(request, 'login/confirm.html', locals())
else:
confirm.user.has_confirmed = True
confirm.user.save()
confirm.delete()
message = '感谢确认,请使用账户登录!'
return render(request, 'login/confirm.html', locals())
def logout(request):
if request.session.get('is_login', None):
request.session.flush()
return redirect('user:login')
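# Illustrative note on the password handling above (not part of the original file):
# hash_code('s3cret') returns the SHA-256 hex digest of 's3cret' + 'site_demo', and the same
# call is used both when storing a password at registration and when checking it at login.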
| 36.167568
| 102
| 0.595277
|
37db85fc237e532dfed465775bf343b661086046
| 468
|
py
|
Python
|
adv/irfan.py.r.lt.py
|
betairylia/dl
|
4eb06837b13decc569066a43b58e205f3b716ff8
|
[
"Apache-2.0"
] | null | null | null |
adv/irfan.py.r.lt.py
|
betairylia/dl
|
4eb06837b13decc569066a43b58e205f3b716ff8
|
[
"Apache-2.0"
] | null | null | null |
adv/irfan.py.r.lt.py
|
betairylia/dl
|
4eb06837b13decc569066a43b58e205f3b716ff8
|
[
"Apache-2.0"
] | null | null | null |
import adv_test
from adv import *
def module():
return Irfan
class Irfan(Adv):
pass
if __name__ == '__main__':
conf = {}
conf['acl'] = """
`rotation
"""
conf['rotation_init'] = """
c4fs c4fs c1
"""
conf['rotation'] = """
S1 C4FS C4FS C1- S1 C2- S2 C4FS C5- S3 C3- S1 C4FS
C5- S2 C1- S1 C4FS C4FS C1- S1 C4FS C4- S3 C3- S1 C1- S2 C4FS C5-
"""
adv_test.test(module(), conf, verbose=0, mass=0)
| 18.72
| 73
| 0.536325
|
e18f193bb093fe91cf4bd03537a64da4f46ae21c
| 1,581
|
py
|
Python
|
modules/audio_processing/test/py_quality_assessment/quality_assessment/evaluation.py
|
Aexyn/webrtc2
|
daea5bf2deb843567a792f22ea2047a037e09d78
|
[
"DOC",
"BSD-3-Clause"
] | 2
|
2018-01-16T13:29:45.000Z
|
2018-08-10T09:15:23.000Z
|
modules/audio_processing/test/py_quality_assessment/quality_assessment/evaluation.py
|
Aexyn/webrtc2
|
daea5bf2deb843567a792f22ea2047a037e09d78
|
[
"DOC",
"BSD-3-Clause"
] | null | null | null |
modules/audio_processing/test/py_quality_assessment/quality_assessment/evaluation.py
|
Aexyn/webrtc2
|
daea5bf2deb843567a792f22ea2047a037e09d78
|
[
"DOC",
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Evaluator of the APM module.
"""
import logging
class ApmModuleEvaluator(object):
"""APM evaluator class.
"""
def __init__(self):
pass
@classmethod
def Run(cls, evaluation_score_workers, apm_output_filepath,
reference_input_filepath, output_path):
"""Runs the evaluation.
Iterates over the given evaluation score workers.
Args:
evaluation_score_workers: list of EvaluationScore instances.
apm_output_filepath: path to the audio track file with the APM output.
reference_input_filepath: path to the reference audio track file.
output_path: output path.
Returns:
A dict of evaluation score name and score pairs.
"""
# Init.
scores = {}
for evaluation_score_worker in evaluation_score_workers:
logging.info(' computing <%s> score', evaluation_score_worker.NAME)
evaluation_score_worker.SetReferenceSignalFilepath(
reference_input_filepath)
evaluation_score_worker.SetTestedSignalFilepath(
apm_output_filepath)
evaluation_score_worker.Run(output_path)
scores[evaluation_score_worker.NAME] = evaluation_score_worker.score
return scores
| 30.403846
| 76
| 0.73561
|
dc454d862b0a9b3d03b293696e6929f783b1a959
| 4,119
|
py
|
Python
|
education/urls.py
|
Amirsorouri00/neolej
|
8fa18f2c1a38b0a59ed7eeeed7ed37ef7b9dad97
|
[
"MIT"
] | null | null | null |
education/urls.py
|
Amirsorouri00/neolej
|
8fa18f2c1a38b0a59ed7eeeed7ed37ef7b9dad97
|
[
"MIT"
] | null | null | null |
education/urls.py
|
Amirsorouri00/neolej
|
8fa18f2c1a38b0a59ed7eeeed7ed37ef7b9dad97
|
[
"MIT"
] | null | null | null |
from django.urls import include, path
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from education.views.rest.workshop import test1 as workshop_test1, test2 as workshop_test2, WorkshopAPI
from education.views.rest.workshop_file import test2 as workshop_file_test2
from education.views.rest.discount import test2 as discount_file_test2, test1 as discount_file_test1
from education.views.rest.payment import test1 as invoice_test1, test2 as invoice_test2, WorkshopInvoiceApi, WorkshopPaymentApi
from education.views.rest.discount import WorkshopDateDiscountAPI, WorkshopPersonalDiscountAPI, WorkshopRaceDiscountAPI
from education.views.rest.buy_workshop import BuyAPI
app_name = 'education'
urlpatterns = [
# path('form/', include(([
# ], 'education'), namespace='form')),
path('rest/', include(([
path('workshop/', include(([
path('test2/', workshop_test2, name='rest_workshop_test2'),
path('test1/', workshop_test1, name='rest_workshop_test1'),
path('<int:uuid>/', WorkshopAPI.as_view(), name='rest_workshop_put'),
path('', WorkshopAPI.as_view(), name='rest_workshop'),
path('buy/', include(([
path('', BuyAPI.as_view(), name='rest_workshop_buy_post'),
path('<int:workshop_uuid>/<int:user_uuid>/', BuyAPI.as_view(), name='rest_workshop_buy_delete'),
# path('test2/', discount_file_test2, name='rest_discount_test2'),
# path('test1/', discount_file_test1, name='rest_discount_test1'),
path('invoice/', include(([
path('', WorkshopInvoiceApi.as_view(), name='rest_workshop_invoice'),
path('<int:uuid>/', WorkshopInvoiceApi.as_view(), name='rest_workshop_invoice_pud'),
# path('test2/', discount_file_test2, name='rest_discount_test2'),
# path('test1/', discount_file_test1, name='rest_discount_test1'),
], 'education'), namespace='rest_workshop_invoice')),
path('payment/', include(([
path('', WorkshopPaymentApi.as_view(), name='rest_workshop_payment'),
path('<int:uuid>/', WorkshopPaymentApi.as_view(), name='rest_workshop_payment_pud'),
# path('test2/', discount_file_test2, name='rest_discount_test2'),
# path('test1/', discount_file_test1, name='rest_discount_test1'),
], 'education'), namespace='rest_workshop_payment')),
], 'education'), namespace='rest_workshop_buy')),
path('discount/', include(([
path('personal/', WorkshopPersonalDiscountAPI.as_view(), name='rest_workshop_personal_discount'),
path('personal/<str:uuid>/', WorkshopPersonalDiscountAPI.as_view(), name='rest_workshop_personal_discount_pud'),
path('date/', WorkshopDateDiscountAPI.as_view(), name='rest_workshop_date_discount'),
path('date/<str:uuid>/', WorkshopDateDiscountAPI.as_view(), name='rest_workshop_date_discount_pud'),
path('race/', WorkshopRaceDiscountAPI.as_view(), name='rest_workshop_race_discount'),
path('race/<str:uuid>/', WorkshopRaceDiscountAPI.as_view(), name='rest_workshop_race_discount_pud'),
# path('test2/', discount_file_test2, name='rest_discount_test2'),
# path('test1/', discount_file_test1, name='rest_discount_test1'),
], 'education'), namespace='rest_workshop_discount')),
], 'education'), namespace='rest_workshops')),
path('workshop_file/', include(([
path('test2/', workshop_file_test2, name='rest_workshop_file_test2'),
], 'education'), namespace='rest_workshop_files')),
path('discount/', include(([
path('test2/', discount_file_test2, name='rest_discount_test2'),
path('test1/', discount_file_test1, name='rest_discount_test1'),
], 'education'), namespace='rest_discount')),
], 'education'), namespace='rest')),
]
| 56.424658
| 128
| 0.648701
|
68e3d57fa5f425ddf4de5ca89deb24ea37bc3844
| 473
|
py
|
Python
|
web/servicecalls/staffservice_gets.py
|
vacoj/mbodjango
|
e9a6df563862c587e4cc2c2713ed7f8ea0a6e4e3
|
[
"MIT"
] | 8
|
2015-10-27T12:38:54.000Z
|
2018-02-23T03:03:24.000Z
|
web/servicecalls/staffservice_gets.py
|
vacoj/mbodjango
|
e9a6df563862c587e4cc2c2713ed7f8ea0a6e4e3
|
[
"MIT"
] | 3
|
2015-10-28T22:23:58.000Z
|
2016-01-13T04:05:04.000Z
|
web/servicecalls/staffservice_gets.py
|
vacoj/mbodjango
|
e9a6df563862c587e4cc2c2713ed7f8ea0a6e4e3
|
[
"MIT"
] | 9
|
2015-09-28T17:32:17.000Z
|
2018-02-01T00:01:04.000Z
|
from helpers.mbosoap.StaffService import StaffServiceCalls
def GetStaff():
stafflist = StaffServiceCalls().GetStaff(staffIds=None,
staffUsername=None,
staffPassword=None,
siteIds=None,
filters=None,
sessionTypeId=None,
startDateTime=None,
locationId=None)
return stafflist
| 31.533333
| 59
| 0.484144
|
3b8de7e8b81bdf78277eee8853733092adc13c35
| 4,525
|
py
|
Python
|
maskrcnn_benchmark/utils/comm.py
|
CityU-AIM-Group/NLTE
|
a111390bcc38bd0c759a3a9d971a7d9defce88fb
|
[
"MIT"
] | 1
|
2022-03-30T14:04:55.000Z
|
2022-03-30T14:04:55.000Z
|
maskrcnn_benchmark/utils/comm.py
|
CityU-AIM-Group/NLTE
|
a111390bcc38bd0c759a3a9d971a7d9defce88fb
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/utils/comm.py
|
CityU-AIM-Group/NLTE
|
a111390bcc38bd0c759a3a9d971a7d9defce88fb
|
[
"MIT"
] | null | null | null |
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import time
import torch
import torch.distributed as dist
import operator
from numbers import Number
from collections import OrderedDict
class ParamDict(OrderedDict):
"""A dictionary where the values are Tensors, meant to represent weights of
a model. This subclass lets you perform arithmetic on weights directly."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _prototype(self, other, op):
if isinstance(other, Number):
return ParamDict({k: op(v, other) for k, v in self.items()})
elif isinstance(other, dict):
return ParamDict({k: op(self[k], other[k]) for k in self})
else:
raise NotImplementedError
def __add__(self, other):
return self._prototype(other, operator.add)
def __rmul__(self, other):
return self._prototype(other, operator.mul)
__mul__ = __rmul__
def __neg__(self):
return ParamDict({k: -v for k, v in self.items()})
def __rsub__(self, other):
# a - b := a + (-b)
return self.__add__(other.__neg__())
__sub__ = __rsub__
def __truediv__(self, other):
return self._prototype(other, operator.truediv)
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.IntTensor([tensor.numel()]).to("cuda")
size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
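# --- Illustrative sketch (assumption: running under torch.distributed with one
# --- dict of predictions per rank); shows how all_gather is typically combined
# --- with is_main_process() to merge per-rank results.
def _example_merge_predictions(predictions_per_rank):
    all_predictions = all_gather(predictions_per_rank)
    if not is_main_process():
        return None
    merged = {}
    for partial in all_predictions:
        merged.update(partial)
    return merged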
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
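# --- Illustrative sketch (assumption: loss_dict maps names to scalar tensors
# --- computed on the current rank); reduce_dict averages them across ranks so
# --- only rank 0 needs the reduced values for logging.
def _example_log_losses(loss_dict):
    loss_dict_reduced = reduce_dict(loss_dict)
    total_loss = sum(loss for loss in loss_dict_reduced.values())
    if is_main_process():
        print("total loss:", total_loss.item())
    return total_loss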
| 29.00641
| 84
| 0.647072
|
0e4a23289a950a7bc9fb07d096c19dd7ba8bb634
| 12,670
|
py
|
Python
|
portextract.py
|
byte-of-reyn/portextract
|
923b97d73b58f55467d8abe37f9ed00e603cbacf
|
[
"MIT"
] | null | null | null |
portextract.py
|
byte-of-reyn/portextract
|
923b97d73b58f55467d8abe37f9ed00e603cbacf
|
[
"MIT"
] | null | null | null |
portextract.py
|
byte-of-reyn/portextract
|
923b97d73b58f55467d8abe37f9ed00e603cbacf
|
[
"MIT"
] | null | null | null |
"""
Script used to extract L2 port configuration from provided Cisco configuration backup files
"""
#!/usr/bin/python
import sys
import os.path
import re
from xlsxwriter.workbook import Workbook
VERSION = '0.02'
#excel header list
excel_head = [
'port_desc',
'port_speed',
'port_duplex',
'port_status',
'port_mode',
'port_access_vlan',
'port_trunk_native_vlan',
'port_trunk_allow_vlan',
'port_stp_type',
'port_channel_group',
'port_mtu',
'port_bpduguard',
'port_portfast'
]
#init excel strings, ready for entry
def_port_vals = {
excel_head[0] : 'n/a',
excel_head[1] : 'n/a',
excel_head[2] : 'n/a',
excel_head[3] : 'n/a',
excel_head[4] : 'n/a',
excel_head[5] : 'n/a',
excel_head[6] : 'n/a',
excel_head[7] : 'n/a',
excel_head[8] : 'n/a',
excel_head[9] : 'n/a',
excel_head[10] : '1500',
excel_head[11] : 'n/a',
excel_head[12] : 'n/a'
}
db_dev = {} #database for device interfaces, vlans, VRFs, etc.
db_err = [] #contains a listing of runtime errors
"""
Checks for file validity on native OS
"""
def is_valid_file(file):
valid = None
if os.path.isfile(file):
valid = True
return valid
"""
Dumps config file into buffer line by line
"""
def buffer_file(files):
buffer = {}
for file in files["in_files"]:
#file check
if is_valid_file(file) is not None:
print("Performing configuration extraction from file", file, "...")
line_buffer = ""
try:
config_file = open(file, 'r')
#extract config file content to buffer
while True:
line = config_file.readline()
if len(line) == 0:#EOF
break
else:
line_buffer += line
buffer[file] = line_buffer
config_file.close()
except:
print("File access error:", file)
break
else:
print("Skipping file read for", file, ". Access error...")
return buffer
"""
Searches array elements for string match
Return Type: Tuple
"""
def array_string_search(str_var, arr_regex):
#holds arr location of matched regex
    loc = 0
res = False
if str_var:
try:
for regex in arr_regex:
res = re.search((re.compile(regex)), str_var)
if res:
try:
res = regex, res.group(2), loc
except:
res = regex, res.group(0), loc
break
loc = loc + 1
except:
            pass  # TODO: fix up error return
return res
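# Example (sketch): with the interface regex list defined in interface_extract(),
# array_string_search('interface GigabitEthernet1/0/1', arr_regex) returns a tuple
# of (matched_regex, 'GigabitEthernet1/0/1', 0); it returns False when nothing matches.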
"""
Interface extraction in preparation for parsing
"""
def interface_extract(file_buffer):
#initial config file parse
flag_int = False #denotes if currently in interface
#holder for device interface details
devices = {}
#start/end int regex for search
arr_regex = [
r'(^[Ii]nterface) (.*\d+)',
r'(^[Ii]nterface) (.*\d+/*\d*/*\d*)',
r'^[\s]+.*',
r'^([Rr]outer) ((eigrp)*(bgp)*(ospf)*) \d+'
]
#console status update
print("Extracting configuration elements from provided configuration files...")
#outer loop for each provided buffer/config file
for filename, buffer in file_buffer.items():
#reset temporary variables
interfaces = {}
sub_cmd = []
curr_int = ''
#Split solid config file based on newline returns
lines = buffer.split('\n')
#inner loop to parse buffer lines
for line in lines:
#search line for commands of interest
str_match = array_string_search(line, arr_regex)
#interface sub-command parse
if flag_int:
#current config line matches regex
if str_match:
#line beginning with 'interface' found
if (str_match[2] == 0) or (str_match[2] == 1):
sub_cmd = None
curr_int = str_match[1].strip()
interfaces[curr_int] = []
#sub cmd found under interface
elif str_match[2] == 2: #sub-cmd found
if sub_cmd:
interfaces[curr_int].append(str_match[1])
else:
interfaces[curr_int].append(str_match[1])
                    #router config found (regex index 3), drop out of interface parsing
                    elif str_match[2] == 3:
                        flag_int = False
else:#end of int found
if sub_cmd:
interfaces[curr_int] = sub_cmd
flag_int = False
#no regex match - end of interface configuration
if not str_match:
if sub_cmd:
interfaces[curr_int].append(sub_cmd)
sub_cmd = None
flag_int = False
#search for next interface
else:
if str_match:
if str_match[0] == arr_regex[0]:#new interface found
curr_int = str_match[1].strip()
interfaces[curr_int] = []
flag_int = True
                    #router config found (regex index 3), stop scanning interfaces
                    elif str_match[2] == 3:
                        break
#place interfaces into device dictionary
devices[filename] = interfaces
#return parsed interface buffer
return devices
"""
Interface extraction in preparation for parsing.
Searches through defined dictionary of regexp for excel column allocation
Uses dic key as excel headers
"""
def interface_parse(devices):
#init temp holders
parsed_device = {}
#initialize regex patterns
arr_regex = [
r'(^\s+description )(.*)',
r'(^\s+speed )(.*)',
r'(^\s+duplex )(.*)',
r'(^\s+)([no]*\s+shutdown)',
r'(^\s+switchport mode )(.*)',
r'(^\s+switchport access vlan )(.*)',
r'(^\s+switchport trunk native vlan )(.*)',
r'(^\s+switchport trunk allowed vlan )(\d+.*)',
r'(^\s+spanning-tree port type )(.*)',
r'(^\s+channel-group )(.*)',
r'(^\s+mtu )(\d+)',
r'(^\s+spanning-tree bpduguard)(enable)\s+',
r'(^\s+spanning-tree )(portfast)\s+',
]
#console status update
print("Parsing extracted interface items...")
#outside regex to match additional VLANs permitted over trunk
trunk_allowed_add = r'(^\s+switchport trunk allowed vlan add )(\d+.*)'
#outer loop to iterate through each device
for key, port_arr in devices.items():
#temporary holder for stripped configuration elements
interfaces = {}
#inner loop to iterate through the device interface array
for port in port_arr:
#instantiate dictionary for this port
interfaces[port] = {}
for val in def_port_vals:
interfaces[port][val] = "n/a"
#check for valid commands and insert into port dictionary
for item in port_arr[port]:
cmd_match = array_string_search(item, arr_regex) #pass regex_arr to search function
#standard switchport configuration item identified
if cmd_match:
interfaces[port][excel_head[cmd_match[2]]] = cmd_match[1]
#check for 'switchport trunk allowed add' line item
res = re.search((re.compile(trunk_allowed_add)), item)
#append allowed VLANs onto previous cell string
if res:
vlan = interfaces[port][excel_head[7]]
vlan = (vlan + "," + res.group(2))
interfaces[port][excel_head[7]] = vlan
#add cleaned output to dictionary
parsed_device[key] = interfaces
#return parsed dictionary
return parsed_device
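# Sketch of the returned structure (keys come from excel_head, values from the
# parsed config; the file and port names below are only examples):
#   parsed_device = {
#       'switch1.cfg': {
#           'GigabitEthernet1/0/1': {'port_desc': 'uplink', 'port_mode': 'trunk', ...},
#       },
#   }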
"""
Check for minimum init args passed from cmd line
"""
def argument_check():
in_flag = False
out_flag = False
files = {
"in_files" : [],
"out_file" : []
}
if len(sys.argv) > 2:
for arg in sys.argv:
#check for input start
if arg == '-i':
in_flag = True
out_flag = False
continue
#set for output start
if arg == '-o':
out_flag = True
in_flag = False
continue
#check input file validity
if in_flag and arg != '-i':
if os.path.exists(arg):
files["in_files"].append(arg)
else:
print("Input file", arg, "does not exist. Skipping file...")
continue
if out_flag and arg != '-o':
files["out_file"].append(arg)
continue
return files
"""
Output sorted port information to .xlsx file
"""
def xlsx_output(devices, out_file_name):
try:
#create the new file/worksheet
workbook = Workbook(out_file_name, {'constant_memory':True})
#console status update
print("Writing output to", out_file_name, "...")
#outer loop to iterate through devices
for device, interfaces in devices.items():
#init vars for temp location in worksheet
xls_col = 0
xls_row = 0
#new worksheet per device config file
worksheet = workbook.add_worksheet(device)
#print("\n\n\nDEVICE:",device)
#create headers in current worksheet
worksheet.write_string(xls_row, xls_col, ("Interfaces"))
for header in excel_head:
xls_col = xls_col + 1
worksheet.write_string(xls_row, xls_col, (header.strip()))
xls_row = 1
#inner loop to iterate through interfaces and their configuration elements
#fill columns with port data
for port, children in interfaces.items():
xls_col = 0
worksheet.write_string(xls_row, xls_col, (port.strip()))
#print("PORT:",port.strip())
#insert string into current cell
for header in excel_head:
xls_col = xls_col + 1
worksheet.write_string(xls_row, xls_col, children[header])
#move to next row
xls_row = xls_row + 1
#close the workbook
workbook.close()
#console status update
print("Process complete!")
except:
print("Error: Workbook updates failed.")
print("Check if your destination excel file is open at the moment...")
sys.exit(1)
#main function
def main():
#verify provided arguments
files = argument_check()
if files:
if len(files["in_files"]) < 0:
print("Error: Input file/s not provided.")
print("python portextract.py -i input -o output")
sys.exit(1)
if len(files["out_file"]) > 0:
out_file_name = files["out_file"][0]
else:
print("Error: Output file not provided.")
print("python portextract.py -i input -o output")
sys.exit(1)
else:
print("python portextract.py -i input -o output")
sys.exit(1)
#buffer file to temp var
file_buff = buffer_file(files)
if file_buff:
#interface extraction and parse for excel output
db_dev["interfaces"] = interface_extract(file_buff)
#sort interfaces for excel input
db_dev["interfaces_xls"] = interface_parse(db_dev["interfaces"])
#output file to .xlsx
xlsx_output(db_dev["interfaces_xls"], out_file_name)
else:
#empty file buffer
print("Empty file buffer. Please check your provided configuration files...")
sys.exit(1)
#call main method
if __name__ == '__main__':
main()
| 32.570694
| 100
| 0.522889
|
a21715d49e787dac76567c089cfef33369227f86
| 10,333
|
py
|
Python
|
networks/utils.py
|
xupei0610/PFPN
|
7ad049f84d1cc03200bb6cea383d5c00721f9ac4
|
[
"MIT"
] | 9
|
2020-12-07T02:32:18.000Z
|
2022-02-07T02:38:54.000Z
|
networks/utils.py
|
xupei0610/PFPN
|
7ad049f84d1cc03200bb6cea383d5c00721f9ac4
|
[
"MIT"
] | null | null | null |
networks/utils.py
|
xupei0610/PFPN
|
7ad049f84d1cc03200bb6cea383d5c00721f9ac4
|
[
"MIT"
] | 1
|
2022-01-01T01:45:01.000Z
|
2022-01-01T01:45:01.000Z
|
from .ops import *
import numpy as np
import tensorflow_probability as tfp
def discount(val, factor, boostrap_val, normalize=False):
result = [None]*len(val)
v_ = boostrap_val
for t in reversed(range(len(val))):
v_ = val[t] + factor * v_
result[t] = v_
if normalize:
result = np.subtract(result, np.mean(result))
result = np.divide(result, np.std(result)+1e-6)
return list(result[::-1])
return result
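# Worked example (sketch): discount([1, 1, 1], 0.9, 0) accumulates the rewards
# backwards: v_2 = 1, v_1 = 1 + 0.9*1 = 1.9, v_0 = 1 + 0.9*1.9 = 2.71,
# giving [2.71, 1.9, 1.0].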
def build_conv_fc_net(trainable, last_layer, net_shape,
weight_initializer, activator, last_activator,
init_name_index=1):
for i in range(len(net_shape)):
if not hasattr(net_shape[i], "__len__") or len(net_shape[i]) == 1:
index = 2 if last_layer.shape[1].value is None else 1
last_layer = fc_layer(
"fc{}".format(i+init_name_index), last_layer,
last_layer.shape[index:].num_elements(), net_shape[i],
weight_initializer=weight_initializer,
activator=last_activator if i+1 == len(net_shape) else activator,
trainable=trainable
)
else:
assert(len(net_shape[i]) == 4)
last_layer = conv_layer(
"conv{}".format(i+init_name_index), last_layer,
# in channel (NHWC), out channel
last_layer.shape[-1], net_shape[i][0],
# kernel size, stride size, padding type
net_shape[i][1], net_shape[i][2], net_shape[i][3],
weight_initializer=weight_initializer,
activator=last_activator if i+1 == len(net_shape) else activator,
trainable=trainable
)
return last_layer
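# Sketch of the expected net_shape format (values below are only examples):
# scalar entries build fully-connected layers, 4-tuples build conv layers as
# (out_channels, kernel_size, stride, padding), e.g.
#   net_shape = [(32, 8, 4, "SAME"), (64, 4, 2, "SAME"), 512]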
def online_normalizer(trainable, shape, moving_average=False):
mean = tf.get_variable("mean", shape=shape, dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
std = tf.get_variable("std", shape=shape, dtype=tf.float32,
trainable=False,
initializer=tf.ones_initializer())
if not moving_average:
count = tf.get_variable("counter", shape=(), dtype=tf.float32,
trainable=False,
initializer=tf.constant_initializer(1e-4))
if trainable:
if moving_average:
def update_op(X):
decay = tf.constant(0.9999, dtype=tf.float32)
s = tf.cast(tf.train.get_or_create_global_step(), dtype=tf.float32)
decay = tf.minimum(decay, (1+s)/(10+s))
m, v = tf.nn.moments(X, axes=[0])
new_mean = decay*mean+(1-decay)*m
new_std = tf.maximum(1e-6, decay*std+(1-decay)*tf.sqrt(v))
return [(mean, new_mean), (std, new_std)]
else:
def update_op(X):
batch_mean, batch_var = tf.nn.moments(X, axes=[0])
batch_count = tf.cast(tf.shape(X)[0], count.dtype)
delta = batch_mean - mean
new_count = count + batch_count
m_a = tf.math.square(std) * count
m_b = batch_var * batch_count
M2 = m_a + m_b + tf.math.square(delta) * count * batch_count / new_count
new_std = tf.maximum(1e-6, tf.sqrt(M2 / new_count))
new_mean = mean + delta * batch_count / new_count
return [(mean, new_mean), (std, new_std), (count, new_count)]
return mean, std, update_op
return mean, std, None
class MixtureGaussianDistribution(object):
def __init__(self, logits, loc, scale, normalize_output):
self._identical = True
if hasattr(logits, "__len__"):
assert(len(logits) == len(loc) and len(loc) == len(scale))
self._identical = False
self.logits, self.loc, self.scale = logits, loc, scale
self.normalize_output = normalize_output
cont_dist = tf.distributions.Normal
if self._identical:
self.dis_dist_ = tf.distributions.Categorical(logits=logits)
self.dis_dist = tfp.distributions.RelaxedOneHotCategorical(1.0, logits=logits)
self.dis_dist.probs = self.dis_dist.distribution.probs
self.dis_dist.logits = self.dis_dist.distribution.logits
self.sample_dist = cont_dist(loc, scale, allow_nan_stats=False)
else:
raise NotImplementedError
def prob(self, value, name="prob"):
with tf.name_scope(name):
p = tf.exp(self.log_prob(value))
return p
def log_prob(self, value, name="log_prob"):
@tf.custom_gradient
def foo(p):
def grad(dy):
# the movement of particles may cause target prob to be 0 (log prob to -inf)
# for a proper choice of hyperparameters, this barely happens
return tf.where(tf.logical_or(tf.is_nan(dy), tf.is_inf(dy)),
tf.zeros_like(dy), dy
)
return p, grad
if self._identical:
if self.normalize_output:
if hasattr(value, "__len__"):
value, value_before_tanh = value
else:
value_before_tanh = tf.math.atanh(value)
else:
value_before_tanh = value
p = self.sample_dist.prob(tf.expand_dims(value_before_tanh, -1))
p = tf.reduce_sum(self.dis_dist.probs*p, axis=-1)
p = foo(p)
lp = tf.log(p)
if self.normalize_output:
lp -= 2*(np.log(2) - value_before_tanh - tf.nn.softplus(-2*value_before_tanh))
lp = tf.reduce_sum(lp, axis=-1)
else:
lp = []
for dis, cont, a in zip(self.dis_dist.policy, self.sample_dist, value):
p = cont.prob(a)
p = dis.probs*p
p = tf.reduce_sum(p, axis=1)
p = foo(p)
lp.append(tf.log(p))
lp = tf.add_n(lp)
return lp
def entropy(self, name="entropy"):
v = self.dis_dist.logits - tf.reduce_max(self.dis_dist.logits, axis=-1, keepdims=True)
s0 = tf.exp(v)
s1 = tf.reduce_sum(s0, axis=-1, keepdims=True)
p = s0 / s1
return tf.reduce_sum(p * (tf.log(s1) - v), axis=-1)
def sample(self, n):
assert(n == 1)
if self._identical:
if self.normalize_output: # rsample
d = self.dis_dist.sample(n)
w = tf.reshape(d, [-1]+list(self.logits.shape[1:]))
p = self.sample_dist.sample(tf.shape(w)[0])
self.dis_action = tf.argmax(w, axis=-1)
m = tf.one_hot(self.dis_action, w.shape[-1], dtype=w.dtype)
if self.normalize_output:
tanh_p = tf.nn.tanh(p)
@tf.custom_gradient
def mask2(w, p):
y = m*p
tanh_t = tf.reduce_sum(m*tanh_p, axis=-1, keepdims=True)
def grad(dy):
gap = (tanh_p-tanh_t)/tf.maximum(1e-6, 1-tanh_t**2)
return gap*dy, m*dy
return y, grad
s_ = mask2(w, p)
s_ = tf.reduce_sum(s_, -1)
s_ = tf.reshape(s_, [n, -1]+list(self.logits.shape[1:-1]))
p = tanh_p
@tf.custom_gradient
def mask(w, p):
y = m*p
t = tf.reduce_sum(y, axis=-1, keepdims=True)
def grad(dy):
gap = p-t
return gap*dy, m*dy
return y, grad
sample = mask(w, p)
sample = tf.reduce_sum(sample, -1)
sample = tf.reshape(sample, [n, -1]+list(self.logits.shape[1:-1]))
else:
d = self.dis_dist_.sample(n)
self.dis_action = w = tf.reshape(d, [-1]+list(self.logits.shape[1:-1]))
p = self.sample_dist.sample(tf.shape(w)[0])
mask = tf.one_hot(self.dis_action, self.logits.shape[-1], dtype=p.dtype)
sample = tf.reduce_sum(mask*p, -1)
sample = tf.reshape(sample, [n, -1]+list(self.logits.shape[1:-1]))
return sample
else:
raise NotImplementedError
if self.normalize_output:
return sample, s_
else:
return sample
def mean(self):
if not hasattr(self, "_determinstic_action"):
if self._identical:
if self.normalize_output: # rsample
t = 1.
w = tf.nn.softmax(self.logits / t)
p = tf.expand_dims(self.sample_dist.mean(), 0)
self.dis_action = tf.argmax(w, axis=-1)
m = tf.one_hot(self.dis_action, w.shape[-1], dtype=w.dtype)
if self.normalize_output: p = tf.nn.tanh(p)
@tf.custom_gradient
def mask(w, p):
y = m*p
t = tf.reduce_sum(y, axis=-1, keepdims=True)
def grad(dy):
gap = p-t
return gap*dy, tf.reduce_sum(m*dy, 0, keepdims=True)
return y, grad
cont_action = tf.reduce_sum(mask(w, p), -1)
else:
dis_action = tf.argmax(self.logits, axis=-1)
dis_action = tf.transpose(dis_action, [1, 0])
cont_action = tf.batch_gather(self.loc, dis_action)
cont_action = tf.transpose(cont_action, [1, 0])
else:
raise NotImplementedError
dis_action = [tf.argmax(p.logits, axis=1) for p in self.policy]
cont_action = []
for d, a in zip(self.sample_dist, dis_action):
a = tf.gather(d.loc, a)
cont_action.append(a)
cont_action = tf.stack(cont_action, axis=1)
self._determinstic_action = cont_action
return self._determinstic_action
| 43.599156
| 94
| 0.517275
|
4c3c3864553c6b28605056cd44403b6b95d5533a
| 1,187
|
py
|
Python
|
generate_release_docs.py
|
madhuri7112/Singularity
|
11a533ecf2baaa1a4a74404b3de435e8d5b7d1a3
|
[
"Apache-2.0"
] | 692
|
2015-01-02T02:30:23.000Z
|
2022-03-18T08:16:05.000Z
|
generate_release_docs.py
|
madhuri7112/Singularity
|
11a533ecf2baaa1a4a74404b3de435e8d5b7d1a3
|
[
"Apache-2.0"
] | 1,399
|
2015-01-01T10:52:44.000Z
|
2022-03-17T18:27:23.000Z
|
generate_release_docs.py
|
mikebell90/Singularity
|
290d647ee3cd5ddfbf381d09d22fdce1896e3388
|
[
"Apache-2.0"
] | 280
|
2015-01-02T02:30:33.000Z
|
2022-03-03T21:08:33.000Z
|
import argparse
import requests
def base_text(release):
return """## Changes in `{0}`
Check out the [{0} milestone](https://github.com/HubSpot/Singularity/issues?q=milestone%3A{0}+is%3Aclosed) to see new features / bugfixes in detail.
### New Features
### Improvements
### Bug Fixes
### Documentation
""".format(release)
def main(args):
pulls = [p for p in requests.get('https://api.github.com/repos/HubSpot/Singularity/pulls?state=closed&per_page=200&sort=updated&direction=desc').json() if 'milestone' in p and p['milestone'] and args.release.encode('utf-8') == p['milestone']['title']]
    print('Found {0} pull requests'.format(len(pulls)))
message = base_text(args.release)
for p in pulls:
message = message + '- [{0}]({1}) - {2}'.format(p['number'], p['html_url'], p['title']) + '\n'
outfile = open('Docs/releases/{0}.md'.format(args.release), 'w')
outfile.write(message)
outfile.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate release docs from milestone')
parser.add_argument('-r', '--release', dest='release', help='release version')
args = parser.parse_args()
main(args)
| 32.081081
| 255
| 0.671441
|
b606e1d5ab0004be323e09e3605707561f1dc50e
| 16,535
|
py
|
Python
|
ForVictoria.py
|
njhovorka/mini-rpg
|
c3af63839925a873302b2fbe89b692c29f7fea8f
|
[
"MIT"
] | null | null | null |
ForVictoria.py
|
njhovorka/mini-rpg
|
c3af63839925a873302b2fbe89b692c29f7fea8f
|
[
"MIT"
] | null | null | null |
ForVictoria.py
|
njhovorka/mini-rpg
|
c3af63839925a873302b2fbe89b692c29f7fea8f
|
[
"MIT"
] | null | null | null |
import pygame
import random
pygame.init()
win = pygame.display.set_mode((500,480))
pygame.display.set_caption('For Victoria')
idle_list = [[pygame.image.load('images/heavy_idle1.png'), pygame.image.load('images/heavy_idle1.png'), pygame.image.load('images/heavy_idle2.png'), pygame.image.load('images/heavy_idle2.png')] , \
[pygame.image.load('images/captain_idle1.png'), pygame.image.load('images/captain_idle1.png'), pygame.image.load('images/captain_idle2.png'), pygame.image.load('images/captain_idle2.png')], \
[pygame.image.load('images/engineer_idle1.png'), pygame.image.load('images/engineer_idle1.png'), pygame.image.load('images/engineer_idle2.png'), pygame.image.load('images/engineer_idle2.png')], \
[pygame.image.load('images/bot_idle1.png'), pygame.image.load('images/bot_idle1.png'), pygame.image.load('images/bot_idle2.png'), pygame.image.load('images/bot_idle2.png')], \
[pygame.image.load('images/rifler_idle1.png'), pygame.image.load('images/rifler_idle1.png'), pygame.image.load('images/rifler_idle2.png'), pygame.image.load('images/rifler_idle2.png')], \
[pygame.image.load('images/alchemist_idle1.png'), pygame.image.load('images/alchemist_idle1.png'), pygame.image.load('images/alchemist_idle2.png'), pygame.image.load('images/alchemist_idle2.png')]]
firing_list = [[pygame.image.load('images/heavy_firing1.png'), pygame.image.load('images/heavy_firing2.png'), pygame.image.load('images/heavy_firing1.png'), pygame.image.load('images/heavy_firing2.png')], \
[pygame.image.load('images/captain_firing1.png'), pygame.image.load('images/captain_firing2.png'), pygame.image.load('images/captain_firing1.png'), pygame.image.load('images/captain_firing2.png')], \
[pygame.image.load('images/engineer_firing1.png'), pygame.image.load('images/engineer_firing2.png'), pygame.image.load('images/engineer_firing1.png'), pygame.image.load('images/engineer_firing2.png')], \
[pygame.image.load('images/bot_firing1.png'), pygame.image.load('images/bot_firing2.png'), pygame.image.load('images/bot_firing1.png'), pygame.image.load('images/bot_firing2.png')],\
[pygame.image.load('images/rifler_firing1.png'), pygame.image.load('images/rifler_firing1.png'), pygame.image.load('images/rifler_firing2.png'), pygame.image.load('images/rifler_firing1.png')], \
[pygame.image.load('images/alchemist_firing1.png'), pygame.image.load('images/alchemist_firing2.png'), pygame.image.load('images/alchemist_firing1.png'), pygame.image.load('images/alchemist_firing2.png')]]
action_list = ["Pick Action: 'a', 'h', 's'", "Pick Target: 1-4"]
hp_list = [20, 20, 20, 20]
enemy_idle = []
enemy_firing = []
action_count = 0
bg = pygame.image.load('images/background.png')
fighter_pick = pygame.image.load('images/fighter_pick.png')
invalid_pic = pygame.image.load('images/invalid_pic.png')
clock = pygame.time.Clock()
tick_count = 0
invalid = False
combat_page = False
reinforcement_text = False
fighter_list = []
fighters = []
target_list = []
y_lineup = 30
x_lineup = 30
setup = True
run = True
enemy_setup = False
combat_buffer = False
turn_pick = False
turn_pick_buffer = False
target_pick = False
combat = False
delay = 0
turn_attack = False
turn_heal = False
turn_special = False
hp_list = []
bot_count = 4
death_delay = [False, False, False, False]
death_count = 0
rifler_buff = False
heavy_buff = False
alchemist_buff = False
captain_buff = False
engineer_buff = False
bot_choice = ['a', 'd', 'b']
class fighter(object):
def __init__(self, x, y, width, height, name, idle_index, firing_index):
self.x = x
self.y = y
self.name = name
self.width = width
self.height = height
self.idle = True
self.firing = False
self.tick_count = 0
self.idle_index = idle_index
self.firing_index = firing_index
self.hp = 10
self.dead = False
self.defend = False
self.delay = False
self.rifler_buff = False
self.heavy_buff = False
self.alchemist_buff = False
self.captain_buff = False
self.engineer_buff = False
def draw(self, win):
if self.tick_count == 3:
if self.idle:
win.blit(idle_list[self.idle_index][self.tick_count], (self.x, self.y))
elif self.firing:
win.blit(firing_list[self.firing_index][self.tick_count], (self.x, self.y))
self.tick_count = 0
elif self.idle:
win.blit(idle_list[self.idle_index][self.tick_count], (self.x, self.y))
self.tick_count += 1
elif self.firing:
win.blit(firing_list[self.firing_index][self.tick_count], (self.x, self.y))
self.tick_count += 1
if self.captain_buff:
pygame.draw.rect(win, (255, 0, 0), (self.x - 5, self.y + 40, 4, 4))
if self.rifler_buff:
pygame.draw.rect(win, (0, 0, 255), (self.x - 5, self.y + 45, 4, 4))
if self.heavy_buff:
pygame.draw.rect(win, (0, 255, 0), (self.x - 5, self.y + 50, 4, 4))
if self.engineer_buff:
pygame.draw.rect(win, (255, 255, 0), (self.x - 5, self.y + 55, 4, 4))
if self.alchemist_buff:
pygame.draw.rect(win, (255, 0, 255), (self.x - 5, self.y + 60, 4, 4))
def heal(self, target):
healed_person = (fighters[target - 1])
healed_person_2 = (fighters[random.randint(0, 3)])
healed_person.hp += 1
if self.name == 'alchemist':
healed_person.hp += 1
if self.captain_buff:
heal = [10]
for x in fighters:
try:
if x.hp < heal[0].hp:
heal[0] = x
except:
heal[0] = 10
healed_person_2.hp = heal[0]
if self.name == 'alchemist':
healed_person_2.hp += 1
self.captain_buff = False
return
def attack(self, target):
if self.idle_index == 3:
attacked_person = (fighters[target - 1])
attacked_person.hp -= 1
if attacked_person.rifler_buff:
self.hp -= 2
attacked_person.rifler_buff = False
if attacked_person.heavy_buff:
attacked_person.hp += 1
attacked_person.heavy_buff = False
elif self.idle_index != 3:
attacked_person = (fighters[target + 3])
attacked_person.hp -= 1
if self.engineer_buff:
attacked_person.hp -= 2
self.engineer_buff = False
if self.idle_index == 4 and self.rifler_buff:
attacked_person.hp -= 1
self.rifler_buff = False
if self.captain_buff:
attacked_person = (fighters[random.randint(0,3)])
attacked_person.hp -= 1
self.captain_buff = False
return
class bot(fighter):
def __init__(self, x, y, width, height, name, idle_index, firing_index):
fighter.__init__(self, x, y, width, height, name, idle_index, firing_index)
def __repr__(self):
return(self.name)
def buff(self, target):
return
class alchemist(fighter):
def __init__(self, x, y, width, height, name, idle_index, firing_index):
fighter.__init__(self, x, y, width, height, name, idle_index, firing_index)
def __repr__(self):
return(self.name)
def special(self, target):
fighters[target - 1].alchemist_buff = True
return
class heavy(fighter):
def __init__(self, x, y, width, height, name, idle_index, firing_index):
fighter.__init__(self, x, y, width, height, name, idle_index, firing_index)
def __repr__(self):
return(self.name)
def special(self, target):
fighters[target - 1].heavy_buff = True
return
class captain(fighter):
def __init__(self, x, y, width, height, name, idle_index, firing_index):
fighter.__init__(self, x, y, width, height, name, idle_index, firing_index)
def __repr__(self):
return(self.name)
def special(self, target):
fighters[target - 1].captain_buff = True
return
class engineer(fighter):
def __init__(self, x, y, width, height, name, idle_index, firing_index):
fighter.__init__(self, x, y, width, height, name, idle_index, firing_index)
def __repr__(self):
return(self.name)
def special(self, target):
fighters[target - 1].engineer_buff = True
return
class rifler(fighter):
def __init__(self, x, y, width, height, name, idle_index, firing_index):
fighter.__init__(self, x, y, width, height, name, idle_index, firing_index)
def __repr__(self):
return(self.name)
def special(self, target):
fighters[target - 1].rifler_buff = True
return
enemy_list = [bot(385, 25, 96, 96, 'Bot No. 1', 3, 3), bot(385, 130, 96, 96, 'Bot No. 2', 3, 3), \
bot(385, 240, 96, 96, 'Bot No. 3', 3, 3), bot(385, 340, 96, 96, 'Bot No. 4', 3, 3)]
def drawWindow():
global tick_count
win.blit(bg, (0,0))
if not setup:
for x in fighters:
if x.hp > 0 and (not x.delay):
x.draw(win)
if setup:
win.blit(fighter_pick, (130, 450))
if reinforcement_text:
text = font.render('reinforcement: Bot No. ' + str(bot_count), 1, (255,255,255))
win.blit(text, (125,15))
if turn_pick or target_pick:
turn_text = font.render(action_list[action_count], 1, (255,255,255))
win.blit(turn_text, (125,450))
hp_x = 10
hp_y = 45
enemy_x = 475
if len(fighters) >= 4:
hp_list = [fighters[0].hp, fighters[1].hp, fighters[2].hp, fighters[3].hp]
for k in hp_list:
text = font.render(str(k) , 1, (0,0,0))
win.blit(text, (hp_x, hp_y))
hp_y += 105
hp_y = 45
for k in fighters:
if k.idle_index == 3:
enemy_text = font.render(str(k.hp) , 1, (0,0,0))
win.blit(enemy_text, (enemy_x, hp_y))
hp_y += 105
pygame.display.update()
#Main Loop--------------------------------------------------
font = pygame.font.SysFont('comicsans', 30, True)
while run:
keys = pygame.key.get_pressed()
if setup:
invalid = False
if keys[pygame.K_c]:
fighter_list.append('c')
if keys[pygame.K_h]:
fighter_list.append('h')
if keys[pygame.K_e]:
fighter_list.append('e')
if keys[pygame.K_r]:
fighter_list.append('r')
if keys[pygame.K_a]:
fighter_list.append('a')
if len(fighter_list) == 4:
turn_pick_buffer = True
setup = False
for x in fighter_list:
if x == 'h':
new = heavy(x_lineup, y_lineup, 96, 96, 'heavy', 0, 0)
fighters.append(new)
if x == 'a':
new = alchemist(x_lineup, y_lineup, 96, 96, 'alchemist', 5, 5)
fighters.append(new)
if x == 'r':
new = rifler(x_lineup, y_lineup, 96, 96, 'rifler', 4, 4)
fighters.append(new)
if x == 'e':
new = engineer(x_lineup, y_lineup, 96, 96, 'engineer', 2, 2)
fighters.append(new)
if x == 'c':
new = captain(x_lineup, y_lineup, 96, 96, 'captain', 1, 1)
fighters.append(new)
y_lineup += 105
for x in enemy_list:
fighters.append(x)
x.hp = 8
if combat:
for x in fighters:
x.idle = False
x.firing = True
if turn_attack and x.idle_index == 2:
x.idle = True
x.firing = False
delay += 1
if delay == 10:
for k in fighters:
if k.idle_index == 3 and not k.delay:
choice = bot_choice[random.randint(0,2)]
if choice == 'd':
k.defend = True
if choice == 'a':
k.attack(random.randint (1,4))
if choice == 'b':
k.buff(random.randint(1,4))
#automate bot!
if turn_attack:
for x in fighters:
if x.idle_index != 3 and x.idle_index != 2:
x.attack(target_list[0])
if x.idle_index == 0:
x.attack(target_list[0])
elif x.idle_index == 2 and x.captain_buff:
x.attack(target_list[0])
if turn_heal:
for x in fighters:
if x.idle_index != 0 and x.idle_index != 3:
x.heal(target_list[0])
elif x.idle_index == 0:
x.heal(fighters.index(x) + 1)
if x.hp > 10:
x.hp = 10
for x in fighters:
if x.hp > 10:
x.hp = 10
if turn_special:
for k in fighters:
if k.idle_index != 3:
k.special(target_list[0])
if k.alchemist_buff:
valid = False
while not valid:
index = random.randint(0,3)
if index != target_list:
valid = True
k.special(index)
alchemist_buff = False
for k in fighters:
if k.delay:
k.delay = False
if k.hp <= 0:
if k.idle_index == 3:
bot_count += 1
new_bot = bot(k.x, k.y, 96, 96, ('Bot No: ' + str(bot_count)), 3, 3)
fighters[fighters.index(k)] = new_bot
new_bot.hp = 4 + bot_count
#new_bot.hp = 8 for not scaling
new_bot.tick_count = fighters[0].tick_count
reinforcement_text = True
new_bot.delay = True
else:
del(fighters[fighters.index(k)])
k.idle = True
k.firing = False
target_list = []
combat = False
turn_attack = False
turn_heal = False
turn_special = False
turn_pick = True
delay = 0
if target_pick:
action_count = 1
if keys[pygame.K_1]:
target_list.append(1)
if keys[pygame.K_2]:
target_list.append(2)
if keys[pygame.K_3]:
target_list.append(3)
if keys[pygame.K_4]:
target_list.append(4)
if len(target_list) == 1:
combat = True
target_pick = False
if turn_pick:
action_count = 0
if keys[pygame.K_a]:
turn_pick = False
turn_attack = True
target_pick = True
if keys[pygame.K_h]:
turn_pick = False
turn_heal = True
target_pick = True
if keys[pygame.K_s]:
turn_pick = False
turn_special = True
target_pick = True
if turn_pick_buffer:
turn_pick = True
turn_pick_buffer = False
tick_count += 1
clock.tick(4)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
main_menu = True
drawWindow()
pygame.quit()
| 34.519833
| 220
| 0.526822
|
6d37c28ba06a4c472baf21f58569c13385c5432b
| 7,220
|
py
|
Python
|
neutron/common/rpc.py
|
glove747/liberty-neutron
|
35a4c85e781d10da4521565c3a367e4ecb50739d
|
[
"Apache-2.0"
] | null | null | null |
neutron/common/rpc.py
|
glove747/liberty-neutron
|
35a4c85e781d10da4521565c3a367e4ecb50739d
|
[
"Apache-2.0"
] | null | null | null |
neutron/common/rpc.py
|
glove747/liberty-neutron
|
35a4c85e781d10da4521565c3a367e4ecb50739d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2012 OpenStack Foundation.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_messaging import serializer as om_serializer
from oslo_service import service
from neutron.common import exceptions
from neutron import context
LOG = logging.getLogger(__name__)
TRANSPORT = None
NOTIFIER = None
ALLOWED_EXMODS = [
exceptions.__name__,
]
EXTRA_EXMODS = []
TRANSPORT_ALIASES = {
'neutron.openstack.common.rpc.impl_fake': 'fake',
'neutron.openstack.common.rpc.impl_qpid': 'qpid',
'neutron.openstack.common.rpc.impl_kombu': 'rabbit',
'neutron.openstack.common.rpc.impl_zmq': 'zmq',
'neutron.rpc.impl_fake': 'fake',
'neutron.rpc.impl_qpid': 'qpid',
'neutron.rpc.impl_kombu': 'rabbit',
'neutron.rpc.impl_zmq': 'zmq',
}
# NOTE(salv-orlando): I am afraid this is a global variable. While not ideal,
# globals like this are widely used throughout the code base. It should be set
# to True if the RPC server is not running in the current process space. This
# will prevent create_connection from creating connections to the AMQP server.
RPC_DISABLED = False
def init(conf):
global TRANSPORT, NOTIFIER
exmods = get_allowed_exmods()
TRANSPORT = oslo_messaging.get_transport(conf,
allowed_remote_exmods=exmods,
aliases=TRANSPORT_ALIASES)
serializer = RequestContextSerializer()
NOTIFIER = oslo_messaging.Notifier(TRANSPORT, serializer=serializer)
def cleanup():
global TRANSPORT, NOTIFIER
assert TRANSPORT is not None
assert NOTIFIER is not None
TRANSPORT.cleanup()
TRANSPORT = NOTIFIER = None
def add_extra_exmods(*args):
EXTRA_EXMODS.extend(args)
def clear_extra_exmods():
del EXTRA_EXMODS[:]
def get_allowed_exmods():
return ALLOWED_EXMODS + EXTRA_EXMODS
def get_client(target, version_cap=None, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
return oslo_messaging.RPCClient(TRANSPORT,
target,
version_cap=version_cap,
serializer=serializer)
def get_server(target, endpoints, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
return oslo_messaging.get_rpc_server(TRANSPORT, target, endpoints,
'eventlet', serializer)
def get_notifier(service=None, host=None, publisher_id=None):
assert NOTIFIER is not None
if not publisher_id:
publisher_id = "%s.%s" % (service, host or cfg.CONF.host)
return NOTIFIER.prepare(publisher_id=publisher_id)
class RequestContextSerializer(om_serializer.Serializer):
"""This serializer is used to convert RPC common context into
Neutron Context.
"""
def __init__(self, base=None):
super(RequestContextSerializer, self).__init__()
self._base = base
def serialize_entity(self, ctxt, entity):
if not self._base:
return entity
return self._base.serialize_entity(ctxt, entity)
def deserialize_entity(self, ctxt, entity):
if not self._base:
return entity
return self._base.deserialize_entity(ctxt, entity)
def serialize_context(self, ctxt):
return ctxt.to_dict()
def deserialize_context(self, ctxt):
rpc_ctxt_dict = ctxt.copy()
user_id = rpc_ctxt_dict.pop('user_id', None)
if not user_id:
user_id = rpc_ctxt_dict.pop('user', None)
tenant_id = rpc_ctxt_dict.pop('tenant_id', None)
if not tenant_id:
tenant_id = rpc_ctxt_dict.pop('project_id', None)
return context.Context(user_id, tenant_id, **rpc_ctxt_dict)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service enables rpc by listening to queues based on topic and host.
"""
def __init__(self, host, topic, manager=None, serializer=None):
super(Service, self).__init__()
self.host = host
self.topic = topic
self.serializer = serializer
if manager is None:
self.manager = self
else:
self.manager = manager
def start(self):
super(Service, self).start()
self.conn = create_connection(new=True)
LOG.debug("Creating Consumer connection for Service %s",
self.topic)
endpoints = [self.manager]
self.conn.create_consumer(self.topic, endpoints)
# Hook to allow the manager to do other initializations after
# the rpc connection is created.
if callable(getattr(self.manager, 'initialize_service_hook', None)):
self.manager.initialize_service_hook(self)
# Consume from all consumers in threads
self.conn.consume_in_threads()
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.conn.close()
except Exception:
pass
super(Service, self).stop()
class Connection(object):
def __init__(self):
super(Connection, self).__init__()
self.servers = []
def create_consumer(self, topic, endpoints, fanout=False):
target = oslo_messaging.Target(
topic=topic, server=cfg.CONF.host, fanout=fanout)
server = get_server(target, endpoints)
self.servers.append(server)
def consume_in_threads(self):
for server in self.servers:
server.start()
return self.servers
def close(self):
for server in self.servers:
server.stop()
for server in self.servers:
server.wait()
class VoidConnection(object):
def create_consumer(self, topic, endpoints, fanout=False):
pass
def consume_in_threads(self):
pass
def close(self):
pass
# functions
def create_connection(new=True):
    # NOTE(salv-orlando): This is a clever interpretation of the factory design
    # pattern aimed at preventing plugins from initializing RPC servers upon
    # initialization when they are running in the REST over HTTP API server.
    # The educated reader will readily see that this is a fairly dirty hack
    # to avoid having to change the initialization process of every plugin.
if RPC_DISABLED:
return VoidConnection()
return Connection()
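# Illustrative sketch (assumption): a process that only serves the REST API can
# flip the module-level flag before plugins initialize, e.g.
#     from neutron.common import rpc
#     rpc.RPC_DISABLED = True
#     conn = rpc.create_connection()  # returns a no-op VoidConnection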
| 31.12069
| 79
| 0.669114
|
4a87e4400116e42a0e3618d5d7323e645ed4e58a
| 772
|
py
|
Python
|
parser/fase2/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionMathematical/TrimScale.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | null | null | null |
parser/fase2/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionMathematical/TrimScale.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | null | null | null |
parser/fase2/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionMathematical/TrimScale.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 4
|
2020-12-19T17:12:13.000Z
|
2021-01-07T20:29:53.000Z
|
import math
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
class TrimScale(Instruccion):
def __init__(self, valor, tipo, strGram, linea, columna):
Instruccion.__init__(self,tipo,linea,columna, strGram)
self.valor = valor
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
        arbol.consola.append('Function in progress...')
def analizar(self, tabla, arbol):
pass
def traducir(self, tabla, arbol):
retorno = self.valor.traducir(tabla,arbol)
#print(retorno.temporalAnterior)
#print(type(self.valor))
#print(self.valor.opIzq.traducir(tabla,arbol).temporalAnterior)
return f"TRIM_SCALE({self.valor.traducir(tabla,arbol).temporalAnterior})"
| 35.090909
| 81
| 0.680052
|
8fd14ff13559c76f4be64565ce9189db84986baf
| 405
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/densitymapbox/_radiussrc.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/densitymapbox/_radiussrc.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/densitymapbox/_radiussrc.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class RadiussrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="radiussrc", parent_name="densitymapbox", **kwargs):
super(RadiussrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| 33.75
| 87
| 0.681481
|
ac82aaf7fcdf6ff7c3b51050d18d476facf37e10
| 660
|
py
|
Python
|
tests/helpers/settings.py
|
AdamSwenson/TwitterProject
|
8c5dc7a57eac611b555058736d609f2f204cb836
|
[
"MIT"
] | null | null | null |
tests/helpers/settings.py
|
AdamSwenson/TwitterProject
|
8c5dc7a57eac611b555058736d609f2f204cb836
|
[
"MIT"
] | 6
|
2020-03-24T17:34:24.000Z
|
2021-12-13T20:14:34.000Z
|
tests/helpers/settings.py
|
AdamSwenson/TwitterProject
|
8c5dc7a57eac611b555058736d609f2f204cb836
|
[
"MIT"
] | null | null | null |
"""
This defines global variables for testing.
Most importantly, this exposes settings.Session
which is the scoped session maker for anything
needing to use sqlalchemy's session for testing
Usage for factories
# Use the not-so-global scoped_session
# Warning: DO NOT USE common.Session()!
sqlalchemy_session = settings.Session
Created by adam on 6/27/18
"""
__author__ = 'adam'
from sqlalchemy import orm, create_engine
Session = orm.scoped_session(orm.sessionmaker())
# an Engine, which the Session will use for connection
# resources
engine = create_engine( 'sqlite://' )
# create a configured "Session" class
Session.configure(bind=engine)
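# Illustrative factory sketch (assumes factory_boy is installed; the model class
# below is hypothetical):
#     import factory
#     class TweetFactory(factory.alchemy.SQLAlchemyModelFactory):
#         class Meta:
#             model = Tweet                 # hypothetical mapped class
#             sqlalchemy_session = Session  # the scoped session defined above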
| 23.571429
| 54
| 0.766667
|
e7bfaca2198763692d2f4fe6bb84fc7cdc55484d
| 10,217
|
py
|
Python
|
discord/ui/button.py
|
Vondyy/pycord
|
26e11e8a8f6ae95bf81d86261a2526c6f8e3432d
|
[
"MIT"
] | 4
|
2021-11-13T14:47:41.000Z
|
2021-12-20T23:55:10.000Z
|
discord/ui/button.py
|
pycord/pycord
|
3d098a82d2a1450297bd187eb4f4d06ba30478fe
|
[
"MIT"
] | null | null | null |
discord/ui/button.py
|
pycord/pycord
|
3d098a82d2a1450297bd187eb4f4d06ba30478fe
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import inspect
import os
from typing import TYPE_CHECKING, Callable, Optional, Tuple, Type, TypeVar, Union
from ..components import Button as ButtonComponent
from ..enums import ButtonStyle, ComponentType
from ..partial_emoji import PartialEmoji, _EmojiTag
from .item import Item, ItemCallbackType
__all__ = (
"Button",
"button",
)
if TYPE_CHECKING:
from ..emoji import Emoji
from .view import View
B = TypeVar("B", bound="Button")
V = TypeVar("V", bound="View", covariant=True)
class Button(Item[V]):
"""Represents a UI button.
.. versionadded:: 2.0
Parameters
------------
style: :class:`discord.ButtonStyle`
The style of the button.
custom_id: Optional[:class:`str`]
The ID of the button that gets received during an interaction.
If this button is for a URL, it does not have a custom ID.
url: Optional[:class:`str`]
The URL this button sends you to.
disabled: :class:`bool`
Whether the button is disabled or not.
label: Optional[:class:`str`]
The label of the button, if any.
emoji: Optional[Union[:class:`.PartialEmoji`, :class:`.Emoji`, :class:`str`]]
The emoji of the button, if available.
row: Optional[:class:`int`]
The relative row this button belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
"""
__item_repr_attributes__: Tuple[str, ...] = (
"style",
"url",
"disabled",
"label",
"emoji",
"row",
)
def __init__(
self,
*,
style: ButtonStyle = ButtonStyle.secondary,
label: Optional[str] = None,
disabled: bool = False,
custom_id: Optional[str] = None,
url: Optional[str] = None,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
row: Optional[int] = None,
):
super().__init__()
if custom_id is not None and url is not None:
raise TypeError("cannot mix both url and custom_id with Button")
self._provided_custom_id = custom_id is not None
if url is None and custom_id is None:
custom_id = os.urandom(16).hex()
if url is not None:
style = ButtonStyle.link
if emoji is not None:
if isinstance(emoji, str):
emoji = PartialEmoji.from_str(emoji)
elif isinstance(emoji, _EmojiTag):
emoji = emoji._to_partial()
else:
raise TypeError(f"expected emoji to be str, Emoji, or PartialEmoji not {emoji.__class__}")
self._underlying = ButtonComponent._raw_construct(
type=ComponentType.button,
custom_id=custom_id,
url=url,
disabled=disabled,
label=label,
style=style,
emoji=emoji,
)
self.row = row
@property
def style(self) -> ButtonStyle:
""":class:`discord.ButtonStyle`: The style of the button."""
return self._underlying.style
@style.setter
def style(self, value: ButtonStyle):
self._underlying.style = value
@property
def custom_id(self) -> Optional[str]:
"""Optional[:class:`str`]: The ID of the button that gets received during an interaction.
If this button is for a URL, it does not have a custom ID.
"""
return self._underlying.custom_id
@custom_id.setter
def custom_id(self, value: Optional[str]):
if value is not None and not isinstance(value, str):
raise TypeError("custom_id must be None or str")
self._underlying.custom_id = value
@property
def url(self) -> Optional[str]:
"""Optional[:class:`str`]: The URL this button sends you to."""
return self._underlying.url
@url.setter
def url(self, value: Optional[str]):
if value is not None and not isinstance(value, str):
raise TypeError("url must be None or str")
self._underlying.url = value
@property
def disabled(self) -> bool:
""":class:`bool`: Whether the button is disabled or not."""
return self._underlying.disabled
@disabled.setter
def disabled(self, value: bool):
self._underlying.disabled = bool(value)
@property
def label(self) -> Optional[str]:
"""Optional[:class:`str`]: The label of the button, if available."""
return self._underlying.label
@label.setter
def label(self, value: Optional[str]):
self._underlying.label = str(value) if value is not None else value
@property
def emoji(self) -> Optional[PartialEmoji]:
"""Optional[:class:`.PartialEmoji`]: The emoji of the button, if available."""
return self._underlying.emoji
@emoji.setter
def emoji(self, value: Optional[Union[str, Emoji, PartialEmoji]]): # type: ignore
if value is None:
self._underlying.emoji = None
elif isinstance(value, str):
self._underlying.emoji = PartialEmoji.from_str(value)
elif isinstance(value, _EmojiTag):
self._underlying.emoji = value._to_partial()
else:
raise TypeError(f"expected str, Emoji, or PartialEmoji, received {value.__class__} instead")
@classmethod
def from_component(cls: Type[B], button: ButtonComponent) -> B:
return cls(
style=button.style,
label=button.label,
disabled=button.disabled,
custom_id=button.custom_id,
url=button.url,
emoji=button.emoji,
row=None,
)
@property
def type(self) -> ComponentType:
return self._underlying.type
def to_component_dict(self):
return self._underlying.to_dict()
def is_dispatchable(self) -> bool:
return self.custom_id is not None
def is_persistent(self) -> bool:
if self.style is ButtonStyle.link:
return self.url is not None
return super().is_persistent()
def refresh_component(self, button: ButtonComponent) -> None:
self._underlying = button
def button(
*,
label: Optional[str] = None,
custom_id: Optional[str] = None,
disabled: bool = False,
style: ButtonStyle = ButtonStyle.secondary,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
row: Optional[int] = None,
) -> Callable[[ItemCallbackType], ItemCallbackType]:
"""A decorator that attaches a button to a component.
The function being decorated should have three parameters, ``self`` representing
the :class:`discord.ui.View`, the :class:`discord.ui.Button` being pressed and
the :class:`discord.Interaction` you receive.
.. note::
Buttons with a URL cannot be created with this function.
Consider creating a :class:`Button` manually instead.
This is because buttons with a URL do not have a callback
associated with them since Discord does not do any processing
with it.
Parameters
------------
label: Optional[:class:`str`]
The label of the button, if any.
custom_id: Optional[:class:`str`]
The ID of the button that gets received during an interaction.
It is recommended not to set this parameter to prevent conflicts.
style: :class:`.ButtonStyle`
The style of the button. Defaults to :attr:`.ButtonStyle.grey`.
disabled: :class:`bool`
Whether the button is disabled or not. Defaults to ``False``.
emoji: Optional[Union[:class:`str`, :class:`.Emoji`, :class:`.PartialEmoji`]]
The emoji of the button. This can be in string form or a :class:`.PartialEmoji`
or a full :class:`.Emoji`.
row: Optional[:class:`int`]
The relative row this button belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
"""
def decorator(func: ItemCallbackType) -> ItemCallbackType:
if not inspect.iscoroutinefunction(func):
raise TypeError("button function must be a coroutine function")
func.__discord_ui_model_type__ = Button
func.__discord_ui_model_kwargs__ = {
"style": style,
"custom_id": custom_id,
"url": None,
"disabled": disabled,
"label": label,
"emoji": emoji,
"row": row,
}
return func
return decorator
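# Illustrative usage sketch (the label, style and callback body are made up;
# assumes the View machinery from this package):
#     class Confirm(View):
#         @button(label="Confirm", style=ButtonStyle.green)
#         async def confirm(self, button: Button, interaction):
#             await interaction.response.send_message("Confirmed")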
| 35.231034
| 106
| 0.646374
|
1eada36f90d10c6b95e2ef6899d7e1ee064d1fca
| 186
|
py
|
Python
|
tests/test_package.py
|
davidt0x/lca_onnx
|
11123c6d7e14daa6071222c6c5205ea49ce60946
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_package.py
|
davidt0x/lca_onnx
|
11123c6d7e14daa6071222c6c5205ea49ce60946
|
[
"BSD-3-Clause"
] | 2
|
2021-11-29T08:26:29.000Z
|
2021-12-15T08:24:29.000Z
|
tests/test_package.py
|
davidt0x/lca_onnx
|
11123c6d7e14daa6071222c6c5205ea49ce60946
|
[
"BSD-3-Clause"
] | 1
|
2021-10-13T15:47:04.000Z
|
2021-10-13T15:47:04.000Z
|
import lca_onnx as m
import lca_onnx._core as cpp
def test_pybind11():
assert cpp.add(1, 2) == 3
assert cpp.subtract(1, 2) == -1
def test_version():
assert m.__version__
| 15.5
| 35
| 0.672043
|
b5d41715f774c8ab3755c00764c48d85f4312c38
| 72,863
|
py
|
Python
|
sympy/physics/quantum/spin.py
|
sid21g/sympy
|
440d289ee1e8b54e5e3e5e7262e06651f5ec5878
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/physics/quantum/spin.py
|
sid21g/sympy
|
440d289ee1e8b54e5e3e5e7262e06651f5ec5878
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/physics/quantum/spin.py
|
sid21g/sympy
|
440d289ee1e8b54e5e3e5e7262e06651f5ec5878
|
[
"BSD-3-Clause"
] | 1
|
2020-02-06T17:54:20.000Z
|
2020-02-06T17:54:20.000Z
|
"""Quantum mechanical angular momemtum."""
from __future__ import print_function, division
from sympy import (Add, binomial, cos, exp, Expr, factorial, I, Integer, Mul,
pi, Rational, S, sin, simplify, sqrt, Sum, symbols, sympify,
Tuple, Dummy)
from sympy.core.compatibility import unicode, range
from sympy.matrices import zeros
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympy.printing.pretty.pretty_symbology import pretty_symbol
from sympy.physics.quantum.qexpr import QExpr
from sympy.physics.quantum.operator import (HermitianOperator, Operator,
UnitaryOperator)
from sympy.physics.quantum.state import Bra, Ket, State
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.physics.quantum.constants import hbar
from sympy.physics.quantum.hilbert import ComplexSpace, DirectSumHilbertSpace
from sympy.physics.quantum.tensorproduct import TensorProduct
from sympy.physics.quantum.cg import CG
from sympy.physics.quantum.qapply import qapply
__all__ = [
'm_values',
'Jplus',
'Jminus',
'Jx',
'Jy',
'Jz',
'J2',
'Rotation',
'WignerD',
'JxKet',
'JxBra',
'JyKet',
'JyBra',
'JzKet',
'JzBra',
'JzOp',
'J2Op',
'JxKetCoupled',
'JxBraCoupled',
'JyKetCoupled',
'JyBraCoupled',
'JzKetCoupled',
'JzBraCoupled',
'couple',
'uncouple'
]
def m_values(j):
j = sympify(j)
size = 2*j + 1
if not size.is_Integer or not size > 0:
raise ValueError(
            'Only integer or half-integer values allowed for j, got: %r' % j
)
return size, [j - i for i in range(int(2*j + 1))]
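# Worked example: m_values(Rational(3, 2)) returns (4, [3/2, 1/2, -1/2, -3/2]),
# i.e. the dimension 2*j + 1 together with the allowed m values from +j down to -j.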
#-----------------------------------------------------------------------------
# Spin Operators
#-----------------------------------------------------------------------------
class SpinOpBase(object):
"""Base class for spin operators."""
@classmethod
def _eval_hilbert_space(cls, label):
# We consider all j values so our space is infinite.
return ComplexSpace(S.Infinity)
@property
def name(self):
return self.args[0]
def _print_contents(self, printer, *args):
return '%s%s' % (unicode(self.name), self._coord)
def _print_contents_pretty(self, printer, *args):
a = stringPict(unicode(self.name))
b = stringPict(self._coord)
return self._print_subscript_pretty(a, b)
def _print_contents_latex(self, printer, *args):
return r'%s_%s' % ((unicode(self.name), self._coord))
def _represent_base(self, basis, **options):
j = options.get('j', S.Half)
size, mvals = m_values(j)
result = zeros(size, size)
for p in range(size):
for q in range(size):
me = self.matrix_element(j, mvals[p], j, mvals[q])
result[p, q] = me
return result
def _apply_op(self, ket, orig_basis, **options):
state = ket.rewrite(self.basis)
# If the state has only one term
if isinstance(state, State):
ret = (hbar*state.m)*state
# state is a linear combination of states
elif isinstance(state, Sum):
ret = self._apply_operator_Sum(state, **options)
else:
ret = qapply(self*state)
if ret == self*state:
raise NotImplementedError
return ret.rewrite(orig_basis)
def _apply_operator_JxKet(self, ket, **options):
return self._apply_op(ket, 'Jx', **options)
def _apply_operator_JxKetCoupled(self, ket, **options):
return self._apply_op(ket, 'Jx', **options)
def _apply_operator_JyKet(self, ket, **options):
return self._apply_op(ket, 'Jy', **options)
def _apply_operator_JyKetCoupled(self, ket, **options):
return self._apply_op(ket, 'Jy', **options)
def _apply_operator_JzKet(self, ket, **options):
return self._apply_op(ket, 'Jz', **options)
def _apply_operator_JzKetCoupled(self, ket, **options):
return self._apply_op(ket, 'Jz', **options)
def _apply_operator_TensorProduct(self, tp, **options):
# Uncoupling operator is only easily found for coordinate basis spin operators
# TODO: add methods for uncoupling operators
if not (isinstance(self, JxOp) or isinstance(self, JyOp) or isinstance(self, JzOp)):
raise NotImplementedError
result = []
for n in range(len(tp.args)):
arg = []
arg.extend(tp.args[:n])
arg.append(self._apply_operator(tp.args[n]))
arg.extend(tp.args[n + 1:])
result.append(tp.__class__(*arg))
return Add(*result).expand()
# TODO: move this to qapply_Mul
def _apply_operator_Sum(self, s, **options):
new_func = qapply(self*s.function)
if new_func == self*s.function:
raise NotImplementedError
return Sum(new_func, *s.limits)
def _eval_trace(self, **options):
#TODO: use options to use different j values
#For now eval at default basis
# is it efficient to represent each time
# to do a trace?
return self._represent_default_basis().trace()
class JplusOp(SpinOpBase, Operator):
"""The J+ operator."""
_coord = '+'
basis = 'Jz'
def _eval_commutator_JminusOp(self, other):
return 2*hbar*JzOp(self.name)
def _apply_operator_JzKet(self, ket, **options):
j = ket.j
m = ket.m
if m.is_Number and j.is_Number:
if m >= j:
return S.Zero
return hbar*sqrt(j*(j + S.One) - m*(m + S.One))*JzKet(j, m + S.One)
def _apply_operator_JzKetCoupled(self, ket, **options):
j = ket.j
m = ket.m
jn = ket.jn
coupling = ket.coupling
if m.is_Number and j.is_Number:
if m >= j:
return S.Zero
return hbar*sqrt(j*(j + S.One) - m*(m + S.One))*JzKetCoupled(j, m + S.One, jn, coupling)
def matrix_element(self, j, m, jp, mp):
result = hbar*sqrt(j*(j + S.One) - mp*(mp + S.One))
result *= KroneckerDelta(m, mp + 1)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _eval_rewrite_as_xyz(self, *args, **kwargs):
return JxOp(args[0]) + I*JyOp(args[0])
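# Editor's sketch (not in the original file): the ladder action coded in
# _apply_operator_JzKet above implies J+|1,0> = hbar*sqrt(2)*|1,1>.  The hypothetical
# helper below reproduces that by applying a JplusOp instance with qapply; JzKet is
# defined later in this module and is resolved at call time.
def _example_jplus_ladder():
    return qapply(JplusOp('J')*JzKet(1, 0))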
class JminusOp(SpinOpBase, Operator):
"""The J- operator."""
_coord = '-'
basis = 'Jz'
def _apply_operator_JzKet(self, ket, **options):
j = ket.j
m = ket.m
if m.is_Number and j.is_Number:
if m <= -j:
return S.Zero
return hbar*sqrt(j*(j + S.One) - m*(m - S.One))*JzKet(j, m - S.One)
def _apply_operator_JzKetCoupled(self, ket, **options):
j = ket.j
m = ket.m
jn = ket.jn
coupling = ket.coupling
if m.is_Number and j.is_Number:
if m <= -j:
return S.Zero
return hbar*sqrt(j*(j + S.One) - m*(m - S.One))*JzKetCoupled(j, m - S.One, jn, coupling)
def matrix_element(self, j, m, jp, mp):
result = hbar*sqrt(j*(j + S.One) - mp*(mp - S.One))
result *= KroneckerDelta(m, mp - 1)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _eval_rewrite_as_xyz(self, *args, **kwargs):
return JxOp(args[0]) - I*JyOp(args[0])
class JxOp(SpinOpBase, HermitianOperator):
"""The Jx operator."""
_coord = 'x'
basis = 'Jx'
def _eval_commutator_JyOp(self, other):
return I*hbar*JzOp(self.name)
def _eval_commutator_JzOp(self, other):
return -I*hbar*JyOp(self.name)
def _apply_operator_JzKet(self, ket, **options):
jp = JplusOp(self.name)._apply_operator_JzKet(ket, **options)
jm = JminusOp(self.name)._apply_operator_JzKet(ket, **options)
return (jp + jm)/Integer(2)
def _apply_operator_JzKetCoupled(self, ket, **options):
jp = JplusOp(self.name)._apply_operator_JzKetCoupled(ket, **options)
jm = JminusOp(self.name)._apply_operator_JzKetCoupled(ket, **options)
return (jp + jm)/Integer(2)
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
jp = JplusOp(self.name)._represent_JzOp(basis, **options)
jm = JminusOp(self.name)._represent_JzOp(basis, **options)
return (jp + jm)/Integer(2)
def _eval_rewrite_as_plusminus(self, *args, **kwargs):
return (JplusOp(args[0]) + JminusOp(args[0]))/2
class JyOp(SpinOpBase, HermitianOperator):
"""The Jy operator."""
_coord = 'y'
basis = 'Jy'
def _eval_commutator_JzOp(self, other):
return I*hbar*JxOp(self.name)
def _eval_commutator_JxOp(self, other):
        return -I*hbar*JzOp(self.name)  # [Jy, Jx] = -I*hbar*Jz
def _apply_operator_JzKet(self, ket, **options):
jp = JplusOp(self.name)._apply_operator_JzKet(ket, **options)
jm = JminusOp(self.name)._apply_operator_JzKet(ket, **options)
return (jp - jm)/(Integer(2)*I)
def _apply_operator_JzKetCoupled(self, ket, **options):
jp = JplusOp(self.name)._apply_operator_JzKetCoupled(ket, **options)
jm = JminusOp(self.name)._apply_operator_JzKetCoupled(ket, **options)
return (jp - jm)/(Integer(2)*I)
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
jp = JplusOp(self.name)._represent_JzOp(basis, **options)
jm = JminusOp(self.name)._represent_JzOp(basis, **options)
return (jp - jm)/(Integer(2)*I)
def _eval_rewrite_as_plusminus(self, *args, **kwargs):
return (JplusOp(args[0]) - JminusOp(args[0]))/(2*I)
class JzOp(SpinOpBase, HermitianOperator):
"""The Jz operator."""
_coord = 'z'
basis = 'Jz'
def _eval_commutator_JxOp(self, other):
return I*hbar*JyOp(self.name)
def _eval_commutator_JyOp(self, other):
return -I*hbar*JxOp(self.name)
def _eval_commutator_JplusOp(self, other):
return hbar*JplusOp(self.name)
def _eval_commutator_JminusOp(self, other):
return -hbar*JminusOp(self.name)
def matrix_element(self, j, m, jp, mp):
result = hbar*mp
result *= KroneckerDelta(m, mp)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
class J2Op(SpinOpBase, HermitianOperator):
"""The J^2 operator."""
_coord = '2'
def _eval_commutator_JxOp(self, other):
return S.Zero
def _eval_commutator_JyOp(self, other):
return S.Zero
def _eval_commutator_JzOp(self, other):
return S.Zero
def _eval_commutator_JplusOp(self, other):
return S.Zero
def _eval_commutator_JminusOp(self, other):
return S.Zero
def _apply_operator_JxKet(self, ket, **options):
j = ket.j
return hbar**2*j*(j + 1)*ket
def _apply_operator_JxKetCoupled(self, ket, **options):
j = ket.j
return hbar**2*j*(j + 1)*ket
def _apply_operator_JyKet(self, ket, **options):
j = ket.j
return hbar**2*j*(j + 1)*ket
def _apply_operator_JyKetCoupled(self, ket, **options):
j = ket.j
return hbar**2*j*(j + 1)*ket
def _apply_operator_JzKet(self, ket, **options):
j = ket.j
return hbar**2*j*(j + 1)*ket
def _apply_operator_JzKetCoupled(self, ket, **options):
j = ket.j
return hbar**2*j*(j + 1)*ket
def matrix_element(self, j, m, jp, mp):
result = (hbar**2)*j*(j + 1)
result *= KroneckerDelta(m, mp)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _print_contents_pretty(self, printer, *args):
a = prettyForm(unicode(self.name))
b = prettyForm(u'2')
return a**b
def _print_contents_latex(self, printer, *args):
return r'%s^2' % str(self.name)
def _eval_rewrite_as_xyz(self, *args, **kwargs):
return JxOp(args[0])**2 + JyOp(args[0])**2 + JzOp(args[0])**2
def _eval_rewrite_as_plusminus(self, *args, **kwargs):
a = args[0]
return JzOp(a)**2 + \
S.Half*(JplusOp(a)*JminusOp(a) + JminusOp(a)*JplusOp(a))
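# Editor's sketch (not part of the upstream module): on a single spin space, J^2
# acts as the scalar hbar**2*j*(j + 1), so its Jz-basis matrix should be that
# scalar times the (2*j + 1)-dimensional identity.  The helper name is hypothetical.
def _example_j2_matrix(j=S.Half):
    mat = J2Op('J')._represent_default_basis(j=j)
    expected_scalar = hbar**2*j*(j + 1)
    return mat, expected_scalar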
class Rotation(UnitaryOperator):
"""Wigner D operator in terms of Euler angles.
Defines the rotation operator in terms of the Euler angles defined by
the z-y-z convention for a passive transformation. That is the coordinate
axes are rotated first about the z-axis, giving the new x'-y'-z' axes. Then
this new coordinate system is rotated about the new y'-axis, giving new
x''-y''-z'' axes. Then this new coordinate system is rotated about the
z''-axis. Conventions follow those laid out in [1]_.
Parameters
==========
alpha : Number, Symbol
First Euler Angle
beta : Number, Symbol
Second Euler angle
gamma : Number, Symbol
Third Euler angle
Examples
========
A simple example rotation operator:
>>> from sympy import pi
>>> from sympy.physics.quantum.spin import Rotation
>>> Rotation(pi, 0, pi/2)
R(pi,0,pi/2)
With symbolic Euler angles and calculating the inverse rotation operator:
>>> from sympy import symbols
>>> a, b, c = symbols('a b c')
>>> Rotation(a, b, c)
R(a,b,c)
>>> Rotation(a, b, c).inverse()
R(-c,-b,-a)
See Also
========
WignerD: Symbolic Wigner-D function
D: Wigner-D function
d: Wigner small-d function
References
==========
.. [1] Varshalovich, D A, Quantum Theory of Angular Momentum. 1988.
"""
@classmethod
def _eval_args(cls, args):
args = QExpr._eval_args(args)
if len(args) != 3:
raise ValueError('3 Euler angles required, got: %r' % args)
return args
@classmethod
def _eval_hilbert_space(cls, label):
# We consider all j values so our space is infinite.
return ComplexSpace(S.Infinity)
@property
def alpha(self):
return self.label[0]
@property
def beta(self):
return self.label[1]
@property
def gamma(self):
return self.label[2]
def _print_operator_name(self, printer, *args):
return 'R'
def _print_operator_name_pretty(self, printer, *args):
if printer._use_unicode:
return prettyForm(u'\N{SCRIPT CAPITAL R}' + u' ')
else:
return prettyForm("R ")
def _print_operator_name_latex(self, printer, *args):
return r'\mathcal{R}'
def _eval_inverse(self):
return Rotation(-self.gamma, -self.beta, -self.alpha)
@classmethod
def D(cls, j, m, mp, alpha, beta, gamma):
"""Wigner D-function.
Returns an instance of the WignerD class corresponding to the Wigner-D
function specified by the parameters.
Parameters
===========
j : Number
Total angular momentum
m : Number
Eigenvalue of angular momentum along axis after rotation
mp : Number
Eigenvalue of angular momentum along rotated axis
alpha : Number, Symbol
First Euler angle of rotation
beta : Number, Symbol
Second Euler angle of rotation
gamma : Number, Symbol
Third Euler angle of rotation
Examples
========
Return the Wigner-D matrix element for a defined rotation, both
numerical and symbolic:
>>> from sympy.physics.quantum.spin import Rotation
>>> from sympy import pi, symbols
>>> alpha, beta, gamma = symbols('alpha beta gamma')
>>> Rotation.D(1, 1, 0,pi, pi/2,-pi)
WignerD(1, 1, 0, pi, pi/2, -pi)
See Also
========
WignerD: Symbolic Wigner-D function
"""
return WignerD(j, m, mp, alpha, beta, gamma)
@classmethod
def d(cls, j, m, mp, beta):
"""Wigner small-d function.
Returns an instance of the WignerD class corresponding to the Wigner-D
function specified by the parameters with the alpha and gamma angles
given as 0.
Parameters
===========
j : Number
Total angular momentum
m : Number
Eigenvalue of angular momentum along axis after rotation
mp : Number
Eigenvalue of angular momentum along rotated axis
beta : Number, Symbol
Second Euler angle of rotation
Examples
========
Return the Wigner-D matrix element for a defined rotation, both
numerical and symbolic:
>>> from sympy.physics.quantum.spin import Rotation
>>> from sympy import pi, symbols
>>> beta = symbols('beta')
>>> Rotation.d(1, 1, 0, pi/2)
WignerD(1, 1, 0, 0, pi/2, 0)
See Also
========
WignerD: Symbolic Wigner-D function
"""
return WignerD(j, m, mp, 0, beta, 0)
def matrix_element(self, j, m, jp, mp):
result = self.__class__.D(
jp, m, mp, self.alpha, self.beta, self.gamma
)
result *= KroneckerDelta(j, jp)
return result
def _represent_base(self, basis, **options):
j = sympify(options.get('j', S.Half))
# TODO: move evaluation up to represent function/implement elsewhere
evaluate = sympify(options.get('doit'))
size, mvals = m_values(j)
result = zeros(size, size)
for p in range(size):
for q in range(size):
me = self.matrix_element(j, mvals[p], j, mvals[q])
if evaluate:
result[p, q] = me.doit()
else:
result[p, q] = me
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _apply_operator_uncoupled(self, state, ket, **options):
a = self.alpha
b = self.beta
g = self.gamma
j = ket.j
m = ket.m
if j.is_number:
s = []
size = m_values(j)
sz = size[1]
for mp in sz:
r = Rotation.D(j, m, mp, a, b, g)
z = r.doit()
s.append(z*state(j, mp))
return Add(*s)
else:
if options.pop('dummy', True):
mp = Dummy('mp')
else:
mp = symbols('mp')
return Sum(Rotation.D(j, m, mp, a, b, g)*state(j, mp), (mp, -j, j))
def _apply_operator_JxKet(self, ket, **options):
return self._apply_operator_uncoupled(JxKet, ket, **options)
def _apply_operator_JyKet(self, ket, **options):
return self._apply_operator_uncoupled(JyKet, ket, **options)
def _apply_operator_JzKet(self, ket, **options):
return self._apply_operator_uncoupled(JzKet, ket, **options)
def _apply_operator_coupled(self, state, ket, **options):
a = self.alpha
b = self.beta
g = self.gamma
j = ket.j
m = ket.m
jn = ket.jn
coupling = ket.coupling
if j.is_number:
s = []
size = m_values(j)
sz = size[1]
for mp in sz:
r = Rotation.D(j, m, mp, a, b, g)
z = r.doit()
s.append(z*state(j, mp, jn, coupling))
return Add(*s)
else:
if options.pop('dummy', True):
mp = Dummy('mp')
else:
mp = symbols('mp')
return Sum(Rotation.D(j, m, mp, a, b, g)*state(
j, mp, jn, coupling), (mp, -j, j))
def _apply_operator_JxKetCoupled(self, ket, **options):
return self._apply_operator_coupled(JxKetCoupled, ket, **options)
def _apply_operator_JyKetCoupled(self, ket, **options):
return self._apply_operator_coupled(JyKetCoupled, ket, **options)
def _apply_operator_JzKetCoupled(self, ket, **options):
return self._apply_operator_coupled(JzKetCoupled, ket, **options)
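# Editor's sketch (not in the original source): Rotation.D and Rotation.d return
# symbolic WignerD objects that only evaluate once .doit() is called (and only for
# numeric, integer or half-integer j).  The helper below is hypothetical and simply
# packages the unevaluated and evaluated small-d element for inspection.
def _example_wigner_small_d(j, m, mp, beta):
    element = Rotation.d(j, m, mp, beta)
    return element, element.doit()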
class WignerD(Expr):
r"""Wigner-D function
The Wigner D-function gives the matrix elements of the rotation
operator in the jm-representation. For the Euler angles `\alpha`,
`\beta`, `\gamma`, the D-function is defined such that:
.. math ::
<j,m| \mathcal{R}(\alpha, \beta, \gamma ) |j',m'> = \delta_{jj'} D(j, m, m', \alpha, \beta, \gamma)
Where the rotation operator is as defined by the Rotation class [1]_.
The Wigner D-function defined in this way gives:
.. math ::
D(j, m, m', \alpha, \beta, \gamma) = e^{-i m \alpha} d(j, m, m', \beta) e^{-i m' \gamma}
Where d is the Wigner small-d function, which is given by Rotation.d.
    The Wigner small-d function gives the component of the Wigner
    D-function that is determined by the second Euler angle, that is, the
    factor `d(j, m, m', \beta)` in the decomposition above. The full
    Wigner D-function is given by Rotation.D.
Note that to evaluate the D-function, the j, m and mp parameters must
be integer or half integer numbers.
Parameters
==========
j : Number
Total angular momentum
m : Number
Eigenvalue of angular momentum along axis after rotation
mp : Number
Eigenvalue of angular momentum along rotated axis
alpha : Number, Symbol
First Euler angle of rotation
beta : Number, Symbol
Second Euler angle of rotation
gamma : Number, Symbol
Third Euler angle of rotation
Examples
========
Evaluate the Wigner-D matrix elements of a simple rotation:
>>> from sympy.physics.quantum.spin import Rotation
>>> from sympy import pi
>>> rot = Rotation.D(1, 1, 0, pi, pi/2, 0)
>>> rot
WignerD(1, 1, 0, pi, pi/2, 0)
>>> rot.doit()
sqrt(2)/2
Evaluate the Wigner-d matrix elements of a simple rotation
>>> rot = Rotation.d(1, 1, 0, pi/2)
>>> rot
WignerD(1, 1, 0, 0, pi/2, 0)
>>> rot.doit()
-sqrt(2)/2
See Also
========
Rotation: Rotation operator
References
==========
.. [1] Varshalovich, D A, Quantum Theory of Angular Momentum. 1988.
"""
is_commutative = True
def __new__(cls, *args, **hints):
if not len(args) == 6:
raise ValueError('6 parameters expected, got %s' % args)
args = sympify(args)
evaluate = hints.get('evaluate', False)
if evaluate:
return Expr.__new__(cls, *args)._eval_wignerd()
return Expr.__new__(cls, *args)
@property
def j(self):
return self.args[0]
@property
def m(self):
return self.args[1]
@property
def mp(self):
return self.args[2]
@property
def alpha(self):
return self.args[3]
@property
def beta(self):
return self.args[4]
@property
def gamma(self):
return self.args[5]
def _latex(self, printer, *args):
if self.alpha == 0 and self.gamma == 0:
return r'd^{%s}_{%s,%s}\left(%s\right)' % \
(
printer._print(self.j), printer._print(
self.m), printer._print(self.mp),
printer._print(self.beta) )
return r'D^{%s}_{%s,%s}\left(%s,%s,%s\right)' % \
(
printer._print(
self.j), printer._print(self.m), printer._print(self.mp),
printer._print(self.alpha), printer._print(self.beta), printer._print(self.gamma) )
def _pretty(self, printer, *args):
top = printer._print(self.j)
bot = printer._print(self.m)
bot = prettyForm(*bot.right(','))
bot = prettyForm(*bot.right(printer._print(self.mp)))
pad = max(top.width(), bot.width())
top = prettyForm(*top.left(' '))
bot = prettyForm(*bot.left(' '))
if pad > top.width():
top = prettyForm(*top.right(' '*(pad - top.width())))
if pad > bot.width():
bot = prettyForm(*bot.right(' '*(pad - bot.width())))
if self.alpha == 0 and self.gamma == 0:
args = printer._print(self.beta)
s = stringPict('d' + ' '*pad)
else:
args = printer._print(self.alpha)
args = prettyForm(*args.right(','))
args = prettyForm(*args.right(printer._print(self.beta)))
args = prettyForm(*args.right(','))
args = prettyForm(*args.right(printer._print(self.gamma)))
s = stringPict('D' + ' '*pad)
args = prettyForm(*args.parens())
s = prettyForm(*s.above(top))
s = prettyForm(*s.below(bot))
s = prettyForm(*s.right(args))
return s
def doit(self, **hints):
hints['evaluate'] = True
return WignerD(*self.args, **hints)
def _eval_wignerd(self):
j = sympify(self.j)
m = sympify(self.m)
mp = sympify(self.mp)
alpha = sympify(self.alpha)
beta = sympify(self.beta)
gamma = sympify(self.gamma)
if not j.is_number:
raise ValueError(
'j parameter must be numerical to evaluate, got %s' % j)
r = 0
if beta == pi/2:
# Varshalovich Equation (5), Section 4.16, page 113, setting
# alpha=gamma=0.
for k in range(2*j + 1):
if k > j + mp or k > j - m or k < mp - m:
continue
r += (S.NegativeOne)**k*binomial(j + mp, k)*binomial(j - mp, k + m - mp)
r *= (S.NegativeOne)**(m - mp) / 2**j*sqrt(factorial(j + m) *
factorial(j - m) / (factorial(j + mp)*factorial(j - mp)))
else:
# Varshalovich Equation(5), Section 4.7.2, page 87, where we set
# beta1=beta2=pi/2, and we get alpha=gamma=pi/2 and beta=phi+pi,
# then we use the Eq. (1), Section 4.4. page 79, to simplify:
# d(j, m, mp, beta+pi) = (-1)**(j-mp)*d(j, m, -mp, beta)
# This happens to be almost the same as in Eq.(10), Section 4.16,
# except that we need to substitute -mp for mp.
size, mvals = m_values(j)
for mpp in mvals:
r += Rotation.d(j, m, mpp, pi/2).doit()*(cos(-mpp*beta) + I*sin(-mpp*beta))*\
Rotation.d(j, mpp, -mp, pi/2).doit()
# Empirical normalization factor so results match Varshalovich
# Tables 4.3-4.12
# Note that this exact normalization does not follow from the
# above equations
r = r*I**(2*j - m - mp)*(-1)**(2*m)
# Finally, simplify the whole expression
r = simplify(r)
r *= exp(-I*m*alpha)*exp(-I*mp*gamma)
return r
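# Editor's note (sketch, not part of the upstream file): the beta = pi/2 branch of
# _eval_wignerd above reproduces the tabulated values, e.g. the docstring example
# d^1_{1,0}(pi/2) = -sqrt(2)/2.  The hypothetical helper below packages that check.
def _example_small_d_table_value():
    value = Rotation.d(1, 1, 0, pi/2).doit()
    return value, value == -sqrt(2)/2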
Jx = JxOp('J')
Jy = JyOp('J')
Jz = JzOp('J')
J2 = J2Op('J')
Jplus = JplusOp('J')
Jminus = JminusOp('J')
#-----------------------------------------------------------------------------
# Spin States
#-----------------------------------------------------------------------------
class SpinState(State):
"""Base class for angular momentum states."""
_label_separator = ','
def __new__(cls, j, m):
j = sympify(j)
m = sympify(m)
if j.is_number:
if 2*j != int(2*j):
raise ValueError(
'j must be integer or half-integer, got: %s' % j)
if j < 0:
raise ValueError('j must be >= 0, got: %s' % j)
if m.is_number:
if 2*m != int(2*m):
raise ValueError(
'm must be integer or half-integer, got: %s' % m)
if j.is_number and m.is_number:
if abs(m) > j:
raise ValueError('Allowed values for m are -j <= m <= j, got j, m: %s, %s' % (j, m))
if int(j - m) != j - m:
raise ValueError('Both j and m must be integer or half-integer, got j, m: %s, %s' % (j, m))
return State.__new__(cls, j, m)
@property
def j(self):
return self.label[0]
@property
def m(self):
return self.label[1]
@classmethod
def _eval_hilbert_space(cls, label):
return ComplexSpace(2*label[0] + 1)
def _represent_base(self, **options):
j = self.j
m = self.m
alpha = sympify(options.get('alpha', 0))
beta = sympify(options.get('beta', 0))
gamma = sympify(options.get('gamma', 0))
size, mvals = m_values(j)
result = zeros(size, 1)
# TODO: Use KroneckerDelta if all Euler angles == 0
# breaks finding angles on L930
for p, mval in enumerate(mvals):
if m.is_number:
result[p, 0] = Rotation.D(
self.j, mval, self.m, alpha, beta, gamma).doit()
else:
result[p, 0] = Rotation.D(self.j, mval,
self.m, alpha, beta, gamma)
return result
def _eval_rewrite_as_Jx(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jx, JxBra, **options)
return self._rewrite_basis(Jx, JxKet, **options)
def _eval_rewrite_as_Jy(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jy, JyBra, **options)
return self._rewrite_basis(Jy, JyKet, **options)
def _eval_rewrite_as_Jz(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jz, JzBra, **options)
return self._rewrite_basis(Jz, JzKet, **options)
def _rewrite_basis(self, basis, evect, **options):
from sympy.physics.quantum.represent import represent
j = self.j
args = self.args[2:]
if j.is_number:
if isinstance(self, CoupledSpinState):
if j == int(j):
start = j**2
else:
start = (2*j - 1)*(2*j + 1)/4
else:
start = 0
vect = represent(self, basis=basis, **options)
result = Add(
*[vect[start + i]*evect(j, j - i, *args) for i in range(2*j + 1)])
if isinstance(self, CoupledSpinState) and options.get('coupled') is False:
return uncouple(result)
return result
else:
i = 0
mi = symbols('mi')
# make sure not to introduce a symbol already in the state
while self.subs(mi, 0) != self:
i += 1
mi = symbols('mi%d' % i)
break
# TODO: better way to get angles of rotation
if isinstance(self, CoupledSpinState):
test_args = (0, mi, (0, 0))
else:
test_args = (0, mi)
if isinstance(self, Ket):
angles = represent(
self.__class__(*test_args), basis=basis)[0].args[3:6]
else:
angles = represent(self.__class__(
*test_args), basis=basis)[0].args[0].args[3:6]
if angles == (0, 0, 0):
return self
else:
state = evect(j, mi, *args)
lt = Rotation.D(j, mi, self.m, *angles)
return Sum(lt*state, (mi, -j, j))
def _eval_innerproduct_JxBra(self, bra, **hints):
result = KroneckerDelta(self.j, bra.j)
if bra.dual_class() is not self.__class__:
result *= self._represent_JxOp(None)[bra.j - bra.m]
else:
result *= KroneckerDelta(
self.j, bra.j)*KroneckerDelta(self.m, bra.m)
return result
def _eval_innerproduct_JyBra(self, bra, **hints):
result = KroneckerDelta(self.j, bra.j)
if bra.dual_class() is not self.__class__:
result *= self._represent_JyOp(None)[bra.j - bra.m]
else:
result *= KroneckerDelta(
self.j, bra.j)*KroneckerDelta(self.m, bra.m)
return result
def _eval_innerproduct_JzBra(self, bra, **hints):
result = KroneckerDelta(self.j, bra.j)
if bra.dual_class() is not self.__class__:
result *= self._represent_JzOp(None)[bra.j - bra.m]
else:
result *= KroneckerDelta(
self.j, bra.j)*KroneckerDelta(self.m, bra.m)
return result
def _eval_trace(self, bra, **hints):
# One way to implement this method is to assume the basis set k is
# passed.
# Then we can apply the discrete form of Trace formula here
# Tr(|i><j| ) = \Sum_k <k|i><j|k>
#then we do qapply() on each each inner product and sum over them.
# OR
# Inner product of |i><j| = Trace(Outer Product).
# we could just use this unless there are cases when this is not true
return (bra*self).doit()
class JxKet(SpinState, Ket):
"""Eigenket of Jx.
See JzKet for the usage of spin eigenstates.
See Also
========
JzKet: Usage of spin states
"""
@classmethod
def dual_class(self):
return JxBra
@classmethod
def coupled_class(self):
return JxKetCoupled
def _represent_default_basis(self, **options):
return self._represent_JxOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_base(**options)
def _represent_JyOp(self, basis, **options):
return self._represent_base(alpha=pi*Rational(3, 2), **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(beta=pi/2, **options)
class JxBra(SpinState, Bra):
"""Eigenbra of Jx.
See JzKet for the usage of spin eigenstates.
See Also
========
JzKet: Usage of spin states
"""
@classmethod
def dual_class(self):
return JxKet
@classmethod
def coupled_class(self):
return JxBraCoupled
class JyKet(SpinState, Ket):
"""Eigenket of Jy.
See JzKet for the usage of spin eigenstates.
See Also
========
JzKet: Usage of spin states
"""
@classmethod
def dual_class(self):
return JyBra
@classmethod
def coupled_class(self):
return JyKetCoupled
def _represent_default_basis(self, **options):
return self._represent_JyOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_base(gamma=pi/2, **options)
def _represent_JyOp(self, basis, **options):
return self._represent_base(**options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(alpha=pi*Rational(3, 2), beta=-pi/2, gamma=pi/2, **options)
class JyBra(SpinState, Bra):
"""Eigenbra of Jy.
See JzKet for the usage of spin eigenstates.
See Also
========
JzKet: Usage of spin states
"""
@classmethod
def dual_class(self):
return JyKet
@classmethod
def coupled_class(self):
return JyBraCoupled
class JzKet(SpinState, Ket):
"""Eigenket of Jz.
Spin state which is an eigenstate of the Jz operator. Uncoupled states,
that is states representing the interaction of multiple separate spin
states, are defined as a tensor product of states.
Parameters
==========
j : Number, Symbol
Total spin angular momentum
m : Number, Symbol
Eigenvalue of the Jz spin operator
Examples
========
*Normal States:*
Defining simple spin states, both numerical and symbolic:
>>> from sympy.physics.quantum.spin import JzKet, JxKet
>>> from sympy import symbols
>>> JzKet(1, 0)
|1,0>
>>> j, m = symbols('j m')
>>> JzKet(j, m)
|j,m>
Rewriting the JzKet in terms of eigenkets of the Jx operator:
    Note that the resulting eigenstates are JxKet's
>>> JzKet(1,1).rewrite("Jx")
|1,-1>/2 - sqrt(2)*|1,0>/2 + |1,1>/2
Get the vector representation of a state in terms of the basis elements
of the Jx operator:
>>> from sympy.physics.quantum.represent import represent
>>> from sympy.physics.quantum.spin import Jx, Jz
>>> represent(JzKet(1,-1), basis=Jx)
Matrix([
[ 1/2],
[sqrt(2)/2],
[ 1/2]])
Apply innerproducts between states:
>>> from sympy.physics.quantum.innerproduct import InnerProduct
>>> from sympy.physics.quantum.spin import JxBra
>>> i = InnerProduct(JxBra(1,1), JzKet(1,1))
>>> i
<1,1|1,1>
>>> i.doit()
1/2
*Uncoupled States:*
Define an uncoupled state as a TensorProduct between two Jz eigenkets:
>>> from sympy.physics.quantum.tensorproduct import TensorProduct
>>> j1,m1,j2,m2 = symbols('j1 m1 j2 m2')
>>> TensorProduct(JzKet(1,0), JzKet(1,1))
|1,0>x|1,1>
>>> TensorProduct(JzKet(j1,m1), JzKet(j2,m2))
|j1,m1>x|j2,m2>
A TensorProduct can be rewritten, in which case the eigenstates that make
    up the tensor product are rewritten to the new basis:
>>> TensorProduct(JzKet(1,1),JxKet(1,1)).rewrite('Jz')
|1,1>x|1,-1>/2 + sqrt(2)*|1,1>x|1,0>/2 + |1,1>x|1,1>/2
The represent method for TensorProduct's gives the vector representation of
the state. Note that the state in the product basis is the equivalent of the
tensor product of the vector representation of the component eigenstates:
>>> represent(TensorProduct(JzKet(1,0),JzKet(1,1)))
Matrix([
[0],
[0],
[0],
[1],
[0],
[0],
[0],
[0],
[0]])
>>> represent(TensorProduct(JzKet(1,1),JxKet(1,1)), basis=Jz)
Matrix([
[ 1/2],
[sqrt(2)/2],
[ 1/2],
[ 0],
[ 0],
[ 0],
[ 0],
[ 0],
[ 0]])
See Also
========
JzKetCoupled: Coupled eigenstates
sympy.physics.quantum.tensorproduct.TensorProduct: Used to specify uncoupled states
uncouple: Uncouples states given coupling parameters
couple: Couples uncoupled states
"""
@classmethod
def dual_class(self):
return JzBra
@classmethod
def coupled_class(self):
return JzKetCoupled
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_base(beta=pi*Rational(3, 2), **options)
def _represent_JyOp(self, basis, **options):
return self._represent_base(alpha=pi*Rational(3, 2), beta=pi/2, gamma=pi/2, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(**options)
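# Editor's sketch (not in the original module): the JzKet docstring above shows a
# state re-expressed in the Jx basis; this hypothetical helper wraps the same
# represent() call so the resulting (2*j + 1)-component column vector can be reused.
def _example_jzket_in_jx_basis(j=1, m=-1):
    from sympy.physics.quantum.represent import represent  # local import, as in _rewrite_basis
    return represent(JzKet(j, m), basis=Jx)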
class JzBra(SpinState, Bra):
"""Eigenbra of Jz.
See the JzKet for the usage of spin eigenstates.
See Also
========
JzKet: Usage of spin states
"""
@classmethod
def dual_class(self):
return JzKet
@classmethod
def coupled_class(self):
return JzBraCoupled
# Method used primarily to create coupled_n and coupled_jn by __new__ in
# CoupledSpinState
# This same method is also used by the uncouple method, and is separated from
# the CoupledSpinState class to maintain consistency in defining coupling
def _build_coupled(jcoupling, length):
n_list = [ [n + 1] for n in range(length) ]
coupled_jn = []
coupled_n = []
for n1, n2, j_new in jcoupling:
coupled_jn.append(j_new)
coupled_n.append( (n_list[n1 - 1], n_list[n2 - 1]) )
n_sort = sorted(n_list[n1 - 1] + n_list[n2 - 1])
n_list[n_sort[0] - 1] = n_sort
return coupled_n, coupled_jn
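# Editor's illustration (not part of the upstream source): _build_coupled turns a
# jcoupling scheme into the coupled_n / coupled_jn pair consumed by CoupledSpinState
# below.  For ((1, 2, 2), (1, 3, 2)) over three spaces, spaces 1 and 2 couple first
# and that pair then couples to space 3.  The helper name is hypothetical.
def _example_build_coupled():
    coupled_n, coupled_jn = _build_coupled(((1, 2, 2), (1, 3, 2)), 3)
    # Tracing the loop above gives coupled_n == [([1], [2]), ([1, 2], [3])]
    # and coupled_jn == [2, 2].
    return coupled_n, coupled_jn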
class CoupledSpinState(SpinState):
"""Base class for coupled angular momentum states."""
def __new__(cls, j, m, jn, *jcoupling):
# Check j and m values using SpinState
SpinState(j, m)
# Build and check coupling scheme from arguments
if len(jcoupling) == 0:
# Use default coupling scheme
jcoupling = []
for n in range(2, len(jn)):
jcoupling.append( (1, n, Add(*[jn[i] for i in range(n)])) )
jcoupling.append( (1, len(jn), j) )
elif len(jcoupling) == 1:
# Use specified coupling scheme
jcoupling = jcoupling[0]
else:
raise TypeError("CoupledSpinState only takes 3 or 4 arguments, got: %s" % (len(jcoupling) + 3) )
# Check arguments have correct form
if not (isinstance(jn, list) or isinstance(jn, tuple) or isinstance(jn, Tuple)):
raise TypeError('jn must be Tuple, list or tuple, got %s' %
jn.__class__.__name__)
if not (isinstance(jcoupling, list) or isinstance(jcoupling, tuple) or isinstance(jcoupling, Tuple)):
raise TypeError('jcoupling must be Tuple, list or tuple, got %s' %
jcoupling.__class__.__name__)
if not all(isinstance(term, list) or isinstance(term, tuple) or isinstance(term, Tuple) for term in jcoupling):
raise TypeError(
'All elements of jcoupling must be list, tuple or Tuple')
if not len(jn) - 1 == len(jcoupling):
raise ValueError('jcoupling must have length of %d, got %d' %
(len(jn) - 1, len(jcoupling)))
if not all(len(x) == 3 for x in jcoupling):
raise ValueError('All elements of jcoupling must have length 3')
# Build sympified args
j = sympify(j)
m = sympify(m)
jn = Tuple( *[sympify(ji) for ji in jn] )
jcoupling = Tuple( *[Tuple(sympify(
n1), sympify(n2), sympify(ji)) for (n1, n2, ji) in jcoupling] )
# Check values in coupling scheme give physical state
if any(2*ji != int(2*ji) for ji in jn if ji.is_number):
raise ValueError('All elements of jn must be integer or half-integer, got: %s' % jn)
if any(n1 != int(n1) or n2 != int(n2) for (n1, n2, _) in jcoupling):
raise ValueError('Indices in jcoupling must be integers')
if any(n1 < 1 or n2 < 1 or n1 > len(jn) or n2 > len(jn) for (n1, n2, _) in jcoupling):
raise ValueError('Indices must be between 1 and the number of coupled spin spaces')
if any(2*ji != int(2*ji) for (_, _, ji) in jcoupling if ji.is_number):
raise ValueError('All coupled j values in coupling scheme must be integer or half-integer')
coupled_n, coupled_jn = _build_coupled(jcoupling, len(jn))
jvals = list(jn)
for n, (n1, n2) in enumerate(coupled_n):
j1 = jvals[min(n1) - 1]
j2 = jvals[min(n2) - 1]
j3 = coupled_jn[n]
if sympify(j1).is_number and sympify(j2).is_number and sympify(j3).is_number:
if j1 + j2 < j3:
raise ValueError('All couplings must have j1+j2 >= j3, '
'in coupling number %d got j1,j2,j3: %d,%d,%d' % (n + 1, j1, j2, j3))
if abs(j1 - j2) > j3:
raise ValueError("All couplings must have |j1+j2| <= j3, "
"in coupling number %d got j1,j2,j3: %d,%d,%d" % (n + 1, j1, j2, j3))
if int(j1 + j2) == j1 + j2:
pass
jvals[min(n1 + n2) - 1] = j3
if len(jcoupling) > 0 and jcoupling[-1][2] != j:
raise ValueError('Last j value coupled together must be the final j of the state')
# Return state
return State.__new__(cls, j, m, jn, jcoupling)
def _print_label(self, printer, *args):
label = [printer._print(self.j), printer._print(self.m)]
for i, ji in enumerate(self.jn, start=1):
label.append('j%d=%s' % (
i, printer._print(ji)
))
for jn, (n1, n2) in zip(self.coupled_jn[:-1], self.coupled_n[:-1]):
label.append('j(%s)=%s' % (
','.join(str(i) for i in sorted(n1 + n2)), printer._print(jn)
))
return ','.join(label)
def _print_label_pretty(self, printer, *args):
label = [self.j, self.m]
for i, ji in enumerate(self.jn, start=1):
symb = 'j%d' % i
symb = pretty_symbol(symb)
symb = prettyForm(symb + '=')
item = prettyForm(*symb.right(printer._print(ji)))
label.append(item)
for jn, (n1, n2) in zip(self.coupled_jn[:-1], self.coupled_n[:-1]):
n = ','.join(pretty_symbol("j%d" % i)[-1] for i in sorted(n1 + n2))
symb = prettyForm('j' + n + '=')
item = prettyForm(*symb.right(printer._print(jn)))
label.append(item)
return self._print_sequence_pretty(
label, self._label_separator, printer, *args
)
def _print_label_latex(self, printer, *args):
label = [self.j, self.m]
for i, ji in enumerate(self.jn, start=1):
label.append('j_{%d}=%s' % (i, printer._print(ji)) )
for jn, (n1, n2) in zip(self.coupled_jn[:-1], self.coupled_n[:-1]):
n = ','.join(str(i) for i in sorted(n1 + n2))
label.append('j_{%s}=%s' % (n, printer._print(jn)) )
return self._print_sequence(
label, self._label_separator, printer, *args
)
@property
def jn(self):
return self.label[2]
@property
def coupling(self):
return self.label[3]
@property
def coupled_jn(self):
return _build_coupled(self.label[3], len(self.label[2]))[1]
@property
def coupled_n(self):
return _build_coupled(self.label[3], len(self.label[2]))[0]
@classmethod
def _eval_hilbert_space(cls, label):
j = Add(*label[2])
if j.is_number:
return DirectSumHilbertSpace(*[ ComplexSpace(x) for x in range(int(2*j + 1), 0, -2) ])
else:
# TODO: Need hilbert space fix, see issue 5732
# Desired behavior:
#ji = symbols('ji')
#ret = Sum(ComplexSpace(2*ji + 1), (ji, 0, j))
# Temporary fix:
return ComplexSpace(2*j + 1)
def _represent_coupled_base(self, **options):
evect = self.uncoupled_class()
if not self.j.is_number:
raise ValueError(
'State must not have symbolic j value to represent')
if not self.hilbert_space.dimension.is_number:
raise ValueError(
'State must not have symbolic j values to represent')
result = zeros(self.hilbert_space.dimension, 1)
if self.j == int(self.j):
start = self.j**2
else:
start = (2*self.j - 1)*(1 + 2*self.j)/4
result[start:start + 2*self.j + 1, 0] = evect(
self.j, self.m)._represent_base(**options)
return result
def _eval_rewrite_as_Jx(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jx, JxBraCoupled, **options)
return self._rewrite_basis(Jx, JxKetCoupled, **options)
def _eval_rewrite_as_Jy(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jy, JyBraCoupled, **options)
return self._rewrite_basis(Jy, JyKetCoupled, **options)
def _eval_rewrite_as_Jz(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jz, JzBraCoupled, **options)
return self._rewrite_basis(Jz, JzKetCoupled, **options)
class JxKetCoupled(CoupledSpinState, Ket):
"""Coupled eigenket of Jx.
See JzKetCoupled for the usage of coupled spin eigenstates.
See Also
========
JzKetCoupled: Usage of coupled spin states
"""
@classmethod
def dual_class(self):
return JxBraCoupled
@classmethod
def uncoupled_class(self):
return JxKet
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_coupled_base(**options)
def _represent_JyOp(self, basis, **options):
return self._represent_coupled_base(alpha=pi*Rational(3, 2), **options)
def _represent_JzOp(self, basis, **options):
return self._represent_coupled_base(beta=pi/2, **options)
class JxBraCoupled(CoupledSpinState, Bra):
"""Coupled eigenbra of Jx.
See JzKetCoupled for the usage of coupled spin eigenstates.
See Also
========
JzKetCoupled: Usage of coupled spin states
"""
@classmethod
def dual_class(self):
return JxKetCoupled
@classmethod
def uncoupled_class(self):
return JxBra
class JyKetCoupled(CoupledSpinState, Ket):
"""Coupled eigenket of Jy.
See JzKetCoupled for the usage of coupled spin eigenstates.
See Also
========
JzKetCoupled: Usage of coupled spin states
"""
@classmethod
def dual_class(self):
return JyBraCoupled
@classmethod
def uncoupled_class(self):
return JyKet
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_coupled_base(gamma=pi/2, **options)
def _represent_JyOp(self, basis, **options):
return self._represent_coupled_base(**options)
def _represent_JzOp(self, basis, **options):
return self._represent_coupled_base(alpha=pi*Rational(3, 2), beta=-pi/2, gamma=pi/2, **options)
class JyBraCoupled(CoupledSpinState, Bra):
"""Coupled eigenbra of Jy.
See JzKetCoupled for the usage of coupled spin eigenstates.
See Also
========
JzKetCoupled: Usage of coupled spin states
"""
@classmethod
def dual_class(self):
return JyKetCoupled
@classmethod
def uncoupled_class(self):
return JyBra
class JzKetCoupled(CoupledSpinState, Ket):
r"""Coupled eigenket of Jz
Spin state that is an eigenket of Jz which represents the coupling of
separate spin spaces.
The arguments for creating instances of JzKetCoupled are ``j``, ``m``,
``jn`` and an optional ``jcoupling`` argument. The ``j`` and ``m`` options
are the total angular momentum quantum numbers, as used for normal states
(e.g. JzKet).
    The other required parameter is ``jn``, which is a tuple defining the `j_n`
angular momentum quantum numbers of the product spaces. So for example, if
a state represented the coupling of the product basis state
`\left|j_1,m_1\right\rangle\times\left|j_2,m_2\right\rangle`, the ``jn``
for this state would be ``(j1,j2)``.
The final option is ``jcoupling``, which is used to define how the spaces
specified by ``jn`` are coupled, which includes both the order these spaces
are coupled together and the quantum numbers that arise from these
couplings. The ``jcoupling`` parameter itself is a list of lists, such that
each of the sublists defines a single coupling between the spin spaces. If
there are N coupled angular momentum spaces, that is ``jn`` has N elements,
then there must be N-1 sublists. Each of these sublists making up the
``jcoupling`` parameter have length 3. The first two elements are the
indices of the product spaces that are considered to be coupled together.
For example, if we want to couple `j_1` and `j_4`, the indices would be 1
and 4. If a state has already been coupled, it is referenced by the
smallest index that is coupled, so if `j_2` and `j_4` has already been
coupled to some `j_{24}`, then this value can be coupled by referencing it
with index 2. The final element of the sublist is the quantum number of the
coupled state. So putting everything together, into a valid sublist for
``jcoupling``, if `j_1` and `j_2` are coupled to an angular momentum space
with quantum number `j_{12}` with the value ``j12``, the sublist would be
    ``(1,2,j12)``. N-1 of these sublists are used in the list for
    ``jcoupling``.
    Note the ``jcoupling`` parameter is optional; if it is not specified, the
    default coupling is taken. This default value is to couple the spaces in
order and take the quantum number of the coupling to be the maximum value.
For example, if the spin spaces are `j_1`, `j_2`, `j_3`, `j_4`, then the
default coupling couples `j_1` and `j_2` to `j_{12}=j_1+j_2`, then,
`j_{12}` and `j_3` are coupled to `j_{123}=j_{12}+j_3`, and finally
`j_{123}` and `j_4` to `j=j_{123}+j_4`. The jcoupling value that would
correspond to this is:
``((1,2,j1+j2),(1,3,j1+j2+j3))``
Parameters
==========
args : tuple
The arguments that must be passed are ``j``, ``m``, ``jn``, and
``jcoupling``. The ``j`` value is the total angular momentum. The ``m``
        value is the eigenvalue of the Jz spin operator. The ``jn`` list gives
        the j values of the angular momentum spaces coupled together. The
``jcoupling`` parameter is an optional parameter defining how the spaces
are coupled together. See the above description for how these coupling
parameters are defined.
Examples
========
Defining simple spin states, both numerical and symbolic:
>>> from sympy.physics.quantum.spin import JzKetCoupled
>>> from sympy import symbols
>>> JzKetCoupled(1, 0, (1, 1))
|1,0,j1=1,j2=1>
>>> j, m, j1, j2 = symbols('j m j1 j2')
>>> JzKetCoupled(j, m, (j1, j2))
|j,m,j1=j1,j2=j2>
Defining coupled spin states for more than 2 coupled spaces with various
coupling parameters:
>>> JzKetCoupled(2, 1, (1, 1, 1))
|2,1,j1=1,j2=1,j3=1,j(1,2)=2>
>>> JzKetCoupled(2, 1, (1, 1, 1), ((1,2,2),(1,3,2)) )
|2,1,j1=1,j2=1,j3=1,j(1,2)=2>
>>> JzKetCoupled(2, 1, (1, 1, 1), ((2,3,1),(1,2,2)) )
|2,1,j1=1,j2=1,j3=1,j(2,3)=1>
Rewriting the JzKetCoupled in terms of eigenkets of the Jx operator:
    Note that the resulting eigenstates are JxKetCoupled
>>> JzKetCoupled(1,1,(1,1)).rewrite("Jx")
|1,-1,j1=1,j2=1>/2 - sqrt(2)*|1,0,j1=1,j2=1>/2 + |1,1,j1=1,j2=1>/2
The rewrite method can be used to convert a coupled state to an uncoupled
state. This is done by passing coupled=False to the rewrite function:
>>> JzKetCoupled(1, 0, (1, 1)).rewrite('Jz', coupled=False)
-sqrt(2)*|1,-1>x|1,1>/2 + sqrt(2)*|1,1>x|1,-1>/2
Get the vector representation of a state in terms of the basis elements
of the Jx operator:
>>> from sympy.physics.quantum.represent import represent
>>> from sympy.physics.quantum.spin import Jx
>>> from sympy import S
>>> represent(JzKetCoupled(1,-1,(S(1)/2,S(1)/2)), basis=Jx)
Matrix([
[ 0],
[ 1/2],
[sqrt(2)/2],
[ 1/2]])
See Also
========
JzKet: Normal spin eigenstates
uncouple: Uncoupling of coupling spin states
couple: Coupling of uncoupled spin states
"""
@classmethod
def dual_class(self):
return JzBraCoupled
@classmethod
def uncoupled_class(self):
return JzKet
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_coupled_base(beta=pi*Rational(3, 2), **options)
def _represent_JyOp(self, basis, **options):
return self._represent_coupled_base(alpha=pi*Rational(3, 2), beta=pi/2, gamma=pi/2, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_coupled_base(**options)
class JzBraCoupled(CoupledSpinState, Bra):
"""Coupled eigenbra of Jz.
See the JzKetCoupled for the usage of coupled spin eigenstates.
See Also
========
JzKetCoupled: Usage of coupled spin states
"""
@classmethod
def dual_class(self):
return JzKetCoupled
@classmethod
def uncoupled_class(self):
return JzBra
#-----------------------------------------------------------------------------
# Coupling/uncoupling
#-----------------------------------------------------------------------------
def couple(expr, jcoupling_list=None):
""" Couple a tensor product of spin states
This function can be used to couple an uncoupled tensor product of spin
states. All of the eigenstates to be coupled must be of the same class. It
will return a linear combination of eigenstates that are subclasses of
CoupledSpinState determined by Clebsch-Gordan angular momentum coupling
coefficients.
Parameters
==========
expr : Expr
An expression involving TensorProducts of spin states to be coupled.
Each state must be a subclass of SpinState and they all must be the
same class.
jcoupling_list : list or tuple
Elements of this list are sub-lists of length 2 specifying the order of
the coupling of the spin spaces. The length of this must be N-1, where N
is the number of states in the tensor product to be coupled. The
elements of this sublist are the same as the first two elements of each
sublist in the ``jcoupling`` parameter defined for JzKetCoupled. If this
parameter is not specified, the default value is taken, which couples
the first and second product basis spaces, then couples this new coupled
        space to the third product space, etc.
Examples
========
Couple a tensor product of numerical states for two spaces:
>>> from sympy.physics.quantum.spin import JzKet, couple
>>> from sympy.physics.quantum.tensorproduct import TensorProduct
>>> couple(TensorProduct(JzKet(1,0), JzKet(1,1)))
-sqrt(2)*|1,1,j1=1,j2=1>/2 + sqrt(2)*|2,1,j1=1,j2=1>/2
Numerical coupling of three spaces using the default coupling method, i.e.
first and second spaces couple, then this couples to the third space:
>>> couple(TensorProduct(JzKet(1,1), JzKet(1,1), JzKet(1,0)))
sqrt(6)*|2,2,j1=1,j2=1,j3=1,j(1,2)=2>/3 + sqrt(3)*|3,2,j1=1,j2=1,j3=1,j(1,2)=2>/3
Perform this same coupling, but we define the coupling to first couple
the first and third spaces:
>>> couple(TensorProduct(JzKet(1,1), JzKet(1,1), JzKet(1,0)), ((1,3),(1,2)) )
sqrt(2)*|2,2,j1=1,j2=1,j3=1,j(1,3)=1>/2 - sqrt(6)*|2,2,j1=1,j2=1,j3=1,j(1,3)=2>/6 + sqrt(3)*|3,2,j1=1,j2=1,j3=1,j(1,3)=2>/3
Couple a tensor product of symbolic states:
>>> from sympy import symbols
>>> j1,m1,j2,m2 = symbols('j1 m1 j2 m2')
>>> couple(TensorProduct(JzKet(j1,m1), JzKet(j2,m2)))
Sum(CG(j1, m1, j2, m2, j, m1 + m2)*|j,m1 + m2,j1=j1,j2=j2>, (j, m1 + m2, j1 + j2))
"""
a = expr.atoms(TensorProduct)
for tp in a:
# Allow other tensor products to be in expression
if not all([ isinstance(state, SpinState) for state in tp.args]):
continue
# If tensor product has all spin states, raise error for invalid tensor product state
if not all([state.__class__ is tp.args[0].__class__ for state in tp.args]):
raise TypeError('All states must be the same basis')
expr = expr.subs(tp, _couple(tp, jcoupling_list))
return expr
def _couple(tp, jcoupling_list):
states = tp.args
coupled_evect = states[0].coupled_class()
# Define default coupling if none is specified
if jcoupling_list is None:
jcoupling_list = []
for n in range(1, len(states)):
jcoupling_list.append( (1, n + 1) )
# Check jcoupling_list valid
if not len(jcoupling_list) == len(states) - 1:
raise TypeError('jcoupling_list must be length %d, got %d' %
(len(states) - 1, len(jcoupling_list)))
if not all( len(coupling) == 2 for coupling in jcoupling_list):
raise ValueError('Each coupling must define 2 spaces')
if any([n1 == n2 for n1, n2 in jcoupling_list]):
raise ValueError('Spin spaces cannot couple to themselves')
if all([sympify(n1).is_number and sympify(n2).is_number for n1, n2 in jcoupling_list]):
j_test = [0]*len(states)
for n1, n2 in jcoupling_list:
if j_test[n1 - 1] == -1 or j_test[n2 - 1] == -1:
raise ValueError('Spaces coupling j_n\'s are referenced by smallest n value')
j_test[max(n1, n2) - 1] = -1
# j values of states to be coupled together
jn = [state.j for state in states]
mn = [state.m for state in states]
# Create coupling_list, which defines all the couplings between all
# the spaces from jcoupling_list
coupling_list = []
n_list = [ [i + 1] for i in range(len(states)) ]
for j_coupling in jcoupling_list:
# Least n for all j_n which is coupled as first and second spaces
n1, n2 = j_coupling
# List of all n's coupled in first and second spaces
j1_n = list(n_list[n1 - 1])
j2_n = list(n_list[n2 - 1])
coupling_list.append( (j1_n, j2_n) )
# Set new j_n to be coupling of all j_n in both first and second spaces
n_list[ min(n1, n2) - 1 ] = sorted(j1_n + j2_n)
if all(state.j.is_number and state.m.is_number for state in states):
# Numerical coupling
# Iterate over difference between maximum possible j value of each coupling and the actual value
diff_max = [ Add( *[ jn[n - 1] - mn[n - 1] for n in coupling[0] +
coupling[1] ] ) for coupling in coupling_list ]
result = []
for diff in range(diff_max[-1] + 1):
# Determine available configurations
n = len(coupling_list)
tot = binomial(diff + n - 1, diff)
for config_num in range(tot):
diff_list = _confignum_to_difflist(config_num, diff, n)
# Skip the configuration if non-physical
# This is a lazy check for physical states given the loose restrictions of diff_max
if any( [ d > m for d, m in zip(diff_list, diff_max) ] ):
continue
# Determine term
cg_terms = []
coupled_j = list(jn)
jcoupling = []
for (j1_n, j2_n), coupling_diff in zip(coupling_list, diff_list):
j1 = coupled_j[ min(j1_n) - 1 ]
j2 = coupled_j[ min(j2_n) - 1 ]
j3 = j1 + j2 - coupling_diff
coupled_j[ min(j1_n + j2_n) - 1 ] = j3
m1 = Add( *[ mn[x - 1] for x in j1_n] )
m2 = Add( *[ mn[x - 1] for x in j2_n] )
m3 = m1 + m2
cg_terms.append( (j1, m1, j2, m2, j3, m3) )
jcoupling.append( (min(j1_n), min(j2_n), j3) )
# Better checks that state is physical
if any([ abs(term[5]) > term[4] for term in cg_terms ]):
continue
if any([ term[0] + term[2] < term[4] for term in cg_terms ]):
continue
if any([ abs(term[0] - term[2]) > term[4] for term in cg_terms ]):
continue
coeff = Mul( *[ CG(*term).doit() for term in cg_terms] )
state = coupled_evect(j3, m3, jn, jcoupling)
result.append(coeff*state)
return Add(*result)
else:
# Symbolic coupling
cg_terms = []
jcoupling = []
sum_terms = []
coupled_j = list(jn)
for j1_n, j2_n in coupling_list:
j1 = coupled_j[ min(j1_n) - 1 ]
j2 = coupled_j[ min(j2_n) - 1 ]
if len(j1_n + j2_n) == len(states):
j3 = symbols('j')
else:
j3_name = 'j' + ''.join(["%s" % n for n in j1_n + j2_n])
j3 = symbols(j3_name)
coupled_j[ min(j1_n + j2_n) - 1 ] = j3
m1 = Add( *[ mn[x - 1] for x in j1_n] )
m2 = Add( *[ mn[x - 1] for x in j2_n] )
m3 = m1 + m2
cg_terms.append( (j1, m1, j2, m2, j3, m3) )
jcoupling.append( (min(j1_n), min(j2_n), j3) )
sum_terms.append((j3, m3, j1 + j2))
coeff = Mul( *[ CG(*term) for term in cg_terms] )
state = coupled_evect(j3, m3, jn, jcoupling)
return Sum(coeff*state, *sum_terms)
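# Editor's sketch (not in the original file): coupling two spin-1/2 Jz kets with the
# couple() function above should give the usual m = 0 combination of the singlet
# |0,0> and triplet |1,0> coupled states.  The helper name is hypothetical.
def _example_couple_two_spin_half():
    up = JzKet(S.Half, S.Half)
    down = JzKet(S.Half, -S.Half)
    return couple(TensorProduct(up, down))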
def uncouple(expr, jn=None, jcoupling_list=None):
""" Uncouple a coupled spin state
Gives the uncoupled representation of a coupled spin state. Arguments must
be either a spin state that is a subclass of CoupledSpinState or a spin
state that is a subclass of SpinState and an array giving the j values
of the spaces that are to be coupled
Parameters
==========
expr : Expr
The expression containing states that are to be coupled. If the states
are a subclass of SpinState, the ``jn`` and ``jcoupling`` parameters
must be defined. If the states are a subclass of CoupledSpinState,
``jn`` and ``jcoupling`` will be taken from the state.
jn : list or tuple
The list of the j-values that are coupled. If state is a
CoupledSpinState, this parameter is ignored. This must be defined if
state is not a subclass of CoupledSpinState. The syntax of this
parameter is the same as the ``jn`` parameter of JzKetCoupled.
jcoupling_list : list or tuple
The list defining how the j-values are coupled together. If state is a
CoupledSpinState, this parameter is ignored. This must be defined if
state is not a subclass of CoupledSpinState. The syntax of this
parameter is the same as the ``jcoupling`` parameter of JzKetCoupled.
Examples
========
Uncouple a numerical state using a CoupledSpinState state:
>>> from sympy.physics.quantum.spin import JzKetCoupled, uncouple
>>> from sympy import S
>>> uncouple(JzKetCoupled(1, 0, (S(1)/2, S(1)/2)))
sqrt(2)*|1/2,-1/2>x|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>x|1/2,-1/2>/2
Perform the same calculation using a SpinState state:
>>> from sympy.physics.quantum.spin import JzKet
>>> uncouple(JzKet(1, 0), (S(1)/2, S(1)/2))
sqrt(2)*|1/2,-1/2>x|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>x|1/2,-1/2>/2
Uncouple a numerical state of three coupled spaces using a CoupledSpinState state:
>>> uncouple(JzKetCoupled(1, 1, (1, 1, 1), ((1,3,1),(1,2,1)) ))
|1,-1>x|1,1>x|1,1>/2 - |1,0>x|1,0>x|1,1>/2 + |1,1>x|1,0>x|1,0>/2 - |1,1>x|1,1>x|1,-1>/2
Perform the same calculation using a SpinState state:
>>> uncouple(JzKet(1, 1), (1, 1, 1), ((1,3,1),(1,2,1)) )
|1,-1>x|1,1>x|1,1>/2 - |1,0>x|1,0>x|1,1>/2 + |1,1>x|1,0>x|1,0>/2 - |1,1>x|1,1>x|1,-1>/2
Uncouple a symbolic state using a CoupledSpinState state:
>>> from sympy import symbols
>>> j,m,j1,j2 = symbols('j m j1 j2')
>>> uncouple(JzKetCoupled(j, m, (j1, j2)))
Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>x|j2,m2>, (m1, -j1, j1), (m2, -j2, j2))
Perform the same calculation using a SpinState state
>>> uncouple(JzKet(j, m), (j1, j2))
Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>x|j2,m2>, (m1, -j1, j1), (m2, -j2, j2))
"""
a = expr.atoms(SpinState)
for state in a:
expr = expr.subs(state, _uncouple(state, jn, jcoupling_list))
return expr
def _uncouple(state, jn, jcoupling_list):
if isinstance(state, CoupledSpinState):
jn = state.jn
coupled_n = state.coupled_n
coupled_jn = state.coupled_jn
evect = state.uncoupled_class()
elif isinstance(state, SpinState):
if jn is None:
raise ValueError("Must specify j-values for coupled state")
if not (isinstance(jn, list) or isinstance(jn, tuple)):
raise TypeError("jn must be list or tuple")
if jcoupling_list is None:
# Use default
jcoupling_list = []
for i in range(1, len(jn)):
jcoupling_list.append(
(1, 1 + i, Add(*[jn[j] for j in range(i + 1)])) )
if not (isinstance(jcoupling_list, list) or isinstance(jcoupling_list, tuple)):
raise TypeError("jcoupling must be a list or tuple")
if not len(jcoupling_list) == len(jn) - 1:
raise ValueError("Must specify 2 fewer coupling terms than the number of j values")
coupled_n, coupled_jn = _build_coupled(jcoupling_list, len(jn))
evect = state.__class__
else:
raise TypeError("state must be a spin state")
j = state.j
m = state.m
coupling_list = []
j_list = list(jn)
# Create coupling, which defines all the couplings between all the spaces
for j3, (n1, n2) in zip(coupled_jn, coupled_n):
# j's which are coupled as first and second spaces
j1 = j_list[n1[0] - 1]
j2 = j_list[n2[0] - 1]
# Build coupling list
coupling_list.append( (n1, n2, j1, j2, j3) )
# Set new value in j_list
j_list[min(n1 + n2) - 1] = j3
if j.is_number and m.is_number:
diff_max = [ 2*x for x in jn ]
diff = Add(*jn) - m
n = len(jn)
tot = binomial(diff + n - 1, diff)
result = []
for config_num in range(tot):
diff_list = _confignum_to_difflist(config_num, diff, n)
if any( [ d > p for d, p in zip(diff_list, diff_max) ] ):
continue
cg_terms = []
for coupling in coupling_list:
j1_n, j2_n, j1, j2, j3 = coupling
m1 = Add( *[ jn[x - 1] - diff_list[x - 1] for x in j1_n ] )
m2 = Add( *[ jn[x - 1] - diff_list[x - 1] for x in j2_n ] )
m3 = m1 + m2
cg_terms.append( (j1, m1, j2, m2, j3, m3) )
coeff = Mul( *[ CG(*term).doit() for term in cg_terms ] )
state = TensorProduct(
*[ evect(j, j - d) for j, d in zip(jn, diff_list) ] )
result.append(coeff*state)
return Add(*result)
else:
# Symbolic coupling
m_str = "m1:%d" % (len(jn) + 1)
mvals = symbols(m_str)
cg_terms = [(j1, Add(*[mvals[n - 1] for n in j1_n]),
j2, Add(*[mvals[n - 1] for n in j2_n]),
j3, Add(*[mvals[n - 1] for n in j1_n + j2_n])) for j1_n, j2_n, j1, j2, j3 in coupling_list[:-1] ]
cg_terms.append(*[(j1, Add(*[mvals[n - 1] for n in j1_n]),
j2, Add(*[mvals[n - 1] for n in j2_n]),
j, m) for j1_n, j2_n, j1, j2, j3 in [coupling_list[-1]] ])
cg_coeff = Mul(*[CG(*cg_term) for cg_term in cg_terms])
sum_terms = [ (m, -j, j) for j, m in zip(jn, mvals) ]
state = TensorProduct( *[ evect(j, m) for j, m in zip(jn, mvals) ] )
return Sum(cg_coeff*state, *sum_terms)
def _confignum_to_difflist(config_num, diff, list_len):
# Determines configuration of diffs into list_len number of slots
diff_list = []
for n in range(list_len):
prev_diff = diff
# Number of spots after current one
rem_spots = list_len - n - 1
# Number of configurations of distributing diff among the remaining spots
rem_configs = binomial(diff + rem_spots - 1, diff)
while config_num >= rem_configs:
config_num -= rem_configs
diff -= 1
rem_configs = binomial(diff + rem_spots - 1, diff)
diff_list.append(prev_diff - diff)
return diff_list
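# Worked example for _confignum_to_difflist (descriptive comment only, not part
# of the original source): distributing diff = 2 over list_len = 2 slots gives
# binomial(2 + 2 - 1, 2) = 3 configurations, enumerated as
#     config_num = 0  ->  [0, 2]
#     config_num = 1  ->  [1, 1]
#     config_num = 2  ->  [2, 0]
# The numeric branch of _uncouple loops over exactly these configurations,
# skipping any that exceed diff_max, and builds one Clebsch-Gordan term per
# admissible split of the projection deficit Add(*jn) - m.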
| 34.048131
| 131
| 0.580363
|
3f7469488053355fd4e40e749641bd484d069e65
| 45,874
|
py
|
Python
|
pandas/core/indexes/interval.py
|
mwaskom/pandas
|
d1010643fea058ba43c2c7124af75cc462ccf242
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | 3
|
2017-02-09T20:01:04.000Z
|
2021-08-11T00:33:41.000Z
|
pandas/core/indexes/interval.py
|
mwaskom/pandas
|
d1010643fea058ba43c2c7124af75cc462ccf242
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/core/indexes/interval.py
|
mwaskom/pandas
|
d1010643fea058ba43c2c7124af75cc462ccf242
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null |
""" define the IntervalIndex """
import numpy as np
from pandas.core.dtypes.missing import notna, isna
from pandas.core.dtypes.generic import ABCPeriodIndex
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.cast import maybe_convert_platform
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_list_like,
is_datetime_or_timedelta_dtype,
is_integer_dtype,
is_object_dtype,
is_categorical_dtype,
is_float_dtype,
is_interval_dtype,
is_scalar,
is_float,
is_number,
is_integer)
from pandas.core.indexes.base import (
Index, _ensure_index,
default_pprint, _index_shared_docs)
from pandas._libs import Timestamp, Timedelta
from pandas._libs.interval import (
Interval, IntervalMixin, IntervalTree,
intervals_to_interval_bounds)
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexes.multi import MultiIndex
from pandas.compat.numpy import function as nv
from pandas.core.common import (
_all_not_none, _any_none, _asarray_tuplesafe, _count_not_none,
is_bool_indexer, _maybe_box_datetimelike, _not_none)
from pandas.util._decorators import cache_readonly, Appender
from pandas.core.config import get_option
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
target_klass='IntervalIndex or list of Intervals'))
_VALID_CLOSED = set(['left', 'right', 'both', 'neither'])
def _get_next_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype):
return label + np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError('cannot determine next label for type %r'
% type(label))
def _get_prev_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype):
return label - np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
        raise TypeError('cannot determine previous label for type %r'
% type(label))
def _get_interval_closed_bounds(interval):
"""
Given an Interval or IntervalIndex, return the corresponding interval with
closed bounds.
"""
left, right = interval.left, interval.right
if interval.open_left:
left = _get_next_label(left)
if interval.open_right:
right = _get_prev_label(right)
return left, right
def _new_IntervalIndex(cls, d):
""" This is called upon unpickling,
rather than the default which doesn't
have arguments and breaks __new__ """
return cls.from_arrays(**d)
class IntervalIndex(IntervalMixin, Index):
"""
Immutable Index implementing an ordered, sliceable set. IntervalIndex
represents an Index of intervals that are all closed on the same side.
.. versionadded:: 0.20.0
.. warning::
The indexing behaviors are provisional and may change in
a future version of pandas.
Attributes
----------
left, right : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, optional
Whether the intervals are closed on the left-side, right-side, both or
neither. Defaults to 'right'.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
Copy the meta-data
mid
values
is_non_overlapping_monotonic
Methods
-------
from_arrays
from_tuples
from_breaks
from_intervals
contains
Examples
---------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
closed='right', dtype='interval[int64]')
It may also be constructed using one of the constructor
methods :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, :meth:`IntervalIndex.from_intervals`
and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
Notes
------
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/advanced.html#intervalindex>`_
for more.
See Also
--------
Index : The base pandas Index type
Interval : A bounded slice-like interval
interval_range : Function to create a fixed frequency
IntervalIndex, IntervalIndex.from_arrays, IntervalIndex.from_breaks,
IntervalIndex.from_intervals, IntervalIndex.from_tuples
cut, qcut : convert arrays of continuous data into categoricals/series of
``Interval``.
"""
_typ = 'intervalindex'
_comparables = ['name']
_attributes = ['name', 'closed']
_allow_index_ops = True
# we would like our indexing holder to defer to us
_defer_to_indexing = True
_mask = None
def __new__(cls, data, closed=None,
name=None, copy=False, dtype=None,
fastpath=False, verify_integrity=True):
if fastpath:
return cls._simple_new(data.left, data.right, closed, name,
copy=copy, verify_integrity=False)
if name is None and hasattr(data, 'name'):
name = data.name
if isinstance(data, IntervalIndex):
left = data.left
right = data.right
closed = data.closed
else:
# don't allow scalars
if is_scalar(data):
cls._scalar_data_error(data)
data = maybe_convert_platform(data)
left, right, infer_closed = intervals_to_interval_bounds(data)
if _all_not_none(closed, infer_closed) and closed != infer_closed:
# GH 18421
msg = ("conflicting values for closed: constructor got "
"'{closed}', inferred from data '{infer_closed}'"
.format(closed=closed, infer_closed=infer_closed))
raise ValueError(msg)
closed = closed or infer_closed
return cls._simple_new(left, right, closed, name,
copy=copy, verify_integrity=verify_integrity)
@classmethod
def _simple_new(cls, left, right, closed=None, name=None,
copy=False, verify_integrity=True):
result = IntervalMixin.__new__(cls)
if closed is None:
closed = 'right'
left = _ensure_index(left, copy=copy)
right = _ensure_index(right, copy=copy)
# coerce dtypes to match if needed
if is_float_dtype(left) and is_integer_dtype(right):
right = right.astype(left.dtype)
if is_float_dtype(right) and is_integer_dtype(left):
left = left.astype(right.dtype)
if type(left) != type(right):
raise ValueError("must not have differing left [{}] "
"and right [{}] types".format(
type(left), type(right)))
if isinstance(left, ABCPeriodIndex):
raise ValueError("Period dtypes are not supported, "
"use a PeriodIndex instead")
result._left = left
result._right = right
result._closed = closed
result.name = name
if verify_integrity:
result._validate()
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, left=None, right=None, **kwargs):
if left is None:
# no values passed
left, right = self.left, self.right
elif right is None:
# only single value passed, could be an IntervalIndex
# or array of Intervals
if not isinstance(left, IntervalIndex):
left = type(self).from_intervals(left)
left, right = left.left, left.right
else:
# both left and right are values
pass
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['verify_integrity'] = False
return self._simple_new(left, right, **attributes)
def _validate(self):
"""
Verify that the IntervalIndex is valid.
"""
if self.closed not in _VALID_CLOSED:
raise ValueError("invalid options for 'closed': %s" % self.closed)
if len(self.left) != len(self.right):
raise ValueError('left and right must have the same length')
left_mask = notna(self.left)
right_mask = notna(self.right)
if not (left_mask == right_mask).all():
raise ValueError('missing values must be missing in the same '
'location both left and right sides')
if not (self.left[left_mask] <= self.right[left_mask]).all():
raise ValueError('left side of interval must be <= right side')
self._mask = ~left_mask
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return self._isnan.any()
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@cache_readonly
def _engine(self):
return IntervalTree(self.left, self.right, closed=self.closed)
@property
def _constructor(self):
return type(self).from_intervals
def __contains__(self, key):
"""
return a boolean if this key is IN the index
We *only* accept an Interval
Parameters
----------
key : Interval
Returns
-------
boolean
"""
if not isinstance(key, Interval):
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
def contains(self, key):
"""
return a boolean if this key is IN the index
        Unlike ``__contains__``, keys are not restricted to Interval
        objects; scalars that fall inside an interval are accepted as well.
Parameters
----------
key : int, float, Interval
Returns
-------
boolean
"""
try:
self.get_loc(key)
return True
except KeyError:
return False
@classmethod
def from_breaks(cls, breaks, closed='right', name=None, copy=False):
"""
Construct an IntervalIndex from an array of splits
Parameters
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, optional
Whether the intervals are closed on the left-side, right-side, both
or neither. Defaults to 'right'.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
copy the data
Examples
--------
>>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]]
closed='right',
dtype='interval[int64]')
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex
IntervalIndex.from_arrays : Construct an IntervalIndex from a left and
right array
IntervalIndex.from_intervals : Construct an IntervalIndex from an array
of Interval objects
IntervalIndex.from_tuples : Construct an IntervalIndex from a
list/array of tuples
"""
breaks = maybe_convert_platform(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed,
name=name, copy=copy)
@classmethod
def from_arrays(cls, left, right, closed='right', name=None, copy=False):
"""
        Construct an IntervalIndex from a left and right array
Parameters
----------
left : array-like (1-dimensional)
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, optional
Whether the intervals are closed on the left-side, right-side, both
or neither. Defaults to 'right'.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
copy the data
Examples
--------
>>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]]
closed='right',
dtype='interval[int64]')
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex
IntervalIndex.from_breaks : Construct an IntervalIndex from an array of
splits
IntervalIndex.from_intervals : Construct an IntervalIndex from an array
of Interval objects
IntervalIndex.from_tuples : Construct an IntervalIndex from a
list/array of tuples
"""
left = maybe_convert_platform(left)
right = maybe_convert_platform(right)
return cls._simple_new(left, right, closed, name=name,
copy=copy, verify_integrity=True)
@classmethod
def from_intervals(cls, data, name=None, copy=False):
"""
Construct an IntervalIndex from a 1d array of Interval objects
Parameters
----------
data : array-like (1-dimensional)
Array of Interval objects. All intervals must be closed on the same
sides.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
by-default copy the data, this is compat only and ignored
Examples
--------
>>> pd.IntervalIndex.from_intervals([pd.Interval(0, 1),
... pd.Interval(1, 2)])
IntervalIndex([(0, 1], (1, 2]]
closed='right', dtype='interval[int64]')
        The generic Index constructor works identically when it infers an array
of all intervals:
>>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)])
IntervalIndex([(0, 1], (1, 2]]
closed='right', dtype='interval[int64]')
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex
IntervalIndex.from_arrays : Construct an IntervalIndex from a left and
right array
IntervalIndex.from_breaks : Construct an IntervalIndex from an array of
splits
IntervalIndex.from_tuples : Construct an IntervalIndex from a
list/array of tuples
"""
if isinstance(data, IntervalIndex):
left, right, closed = data.left, data.right, data.closed
name = name or data.name
else:
data = maybe_convert_platform(data)
left, right, closed = intervals_to_interval_bounds(data)
return cls.from_arrays(left, right, closed, name=name, copy=False)
@classmethod
def from_tuples(cls, data, closed='right', name=None, copy=False):
"""
Construct an IntervalIndex from a list/array of tuples
Parameters
----------
data : array-like (1-dimensional)
Array of tuples
closed : {'left', 'right', 'both', 'neither'}, optional
Whether the intervals are closed on the left-side, right-side, both
or neither. Defaults to 'right'.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
by-default copy the data, this is compat only and ignored
Examples
--------
>>> pd.IntervalIndex.from_tuples([(0, 1), (1,2)])
IntervalIndex([(0, 1], (1, 2]],
closed='right', dtype='interval[int64]')
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex
IntervalIndex.from_arrays : Construct an IntervalIndex from a left and
right array
IntervalIndex.from_breaks : Construct an IntervalIndex from an array of
splits
IntervalIndex.from_intervals : Construct an IntervalIndex from an array
of Interval objects
"""
if len(data):
left, right = [], []
else:
left = right = data
for d in data:
if isna(d):
left.append(np.nan)
right.append(np.nan)
continue
l, r = d
left.append(l)
right.append(r)
# TODO
# if we have nulls and we previous had *only*
# integer data, then we have changed the dtype
return cls.from_arrays(left, right, closed, name=name, copy=False)
def to_tuples(self):
return Index(_asarray_tuplesafe(zip(self.left, self.right)))
@cache_readonly
def _multiindex(self):
return MultiIndex.from_arrays([self.left, self.right],
names=['left', 'right'])
@property
def left(self):
return self._left
@property
def right(self):
return self._right
@property
def closed(self):
return self._closed
def __len__(self):
return len(self.left)
@cache_readonly
def values(self):
"""
Returns the IntervalIndex's data as a numpy array of Interval
objects (with dtype='object')
"""
left = self.left
right = self.right
mask = self._isnan
closed = self._closed
result = np.empty(len(left), dtype=object)
for i in range(len(left)):
if mask[i]:
result[i] = np.nan
else:
result[i] = Interval(left[i], right[i], closed)
return result
def __array__(self, result=None):
""" the array interface, return my values """
return self.values
def __array_wrap__(self, result, context=None):
# we don't want the superclass implementation
return result
def _array_values(self):
return self.values
def __reduce__(self):
d = dict(left=self.left,
right=self.right)
d.update(self._get_attributes_dict())
return _new_IntervalIndex, (self.__class__, d), None
@Appender(_index_shared_docs['copy'])
def copy(self, deep=False, name=None):
left = self.left.copy(deep=True) if deep else self.left
right = self.right.copy(deep=True) if deep else self.right
name = name if name is not None else self.name
closed = self.closed
return type(self).from_arrays(left, right, closed=closed, name=name)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
if copy:
self = self.copy()
return self
elif is_object_dtype(dtype):
return Index(self.values, dtype=object)
elif is_categorical_dtype(dtype):
from pandas import Categorical
return Categorical(self, ordered=True)
raise ValueError('Cannot cast IntervalIndex to dtype %s' % dtype)
@cache_readonly
def dtype(self):
return IntervalDtype.construct_from_string(str(self.left.dtype))
@property
def inferred_type(self):
return 'interval'
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
        # we don't use an explicit engine
# so return the bytes here
return (self.left.memory_usage(deep=deep) +
self.right.memory_usage(deep=deep))
@cache_readonly
def mid(self):
"""Returns the mid-point of each interval in the index as an array
"""
try:
return Index(0.5 * (self.left.values + self.right.values))
except TypeError:
# datetime safe version
delta = self.right.values - self.left.values
return Index(self.left.values + 0.5 * delta)
@cache_readonly
def is_monotonic(self):
return self._multiindex.is_monotonic
@cache_readonly
def is_monotonic_increasing(self):
return self._multiindex.is_monotonic_increasing
@cache_readonly
def is_monotonic_decreasing(self):
return self._multiindex.is_monotonic_decreasing
@cache_readonly
def is_unique(self):
return self._multiindex.is_unique
@cache_readonly
def is_non_overlapping_monotonic(self):
# must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
# or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
# we already require left <= right
# strict inequality for closed == 'both'; equality implies overlapping
# at a point when both sides of intervals are included
if self.closed == 'both':
return bool((self.right[:-1] < self.left[1:]).all() or
(self.left[:-1] > self.right[1:]).all())
# non-strict inequality when closed != 'both'; at least one side is
# not included in the intervals, so equality does not imply overlapping
return bool((self.right[:-1] <= self.left[1:]).all() or
(self.left[:-1] >= self.right[1:]).all())
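    # Worked example (descriptive comment only, not part of the original
    # source): IntervalIndex.from_breaks([0, 1, 2]) gives (0, 1], (1, 2];
    # with closed='right' the shared breakpoint 1 belongs to only one
    # interval, the non-strict check right[:-1] <= left[1:] holds (1 <= 1),
    # and the property is True.  With closed='both' the same breaks give
    # [0, 1], [1, 2]; the point 1 lies in both intervals, the strict check
    # 1 < 1 fails, and the property is False.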
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if kind == 'iloc':
return super(IntervalIndex, self)._convert_scalar_indexer(
key, kind=kind)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
"""
locs = self.get_indexer_for(keyarr)
# we have missing values
if (locs == -1).any():
raise KeyError
return locs
def _maybe_cast_indexed(self, key):
"""
we need to cast the key, which could be a scalar
or an array-like to the type of our subtype
"""
if isinstance(key, IntervalIndex):
return key
subtype = self.dtype.subtype
if is_float_dtype(subtype):
if is_integer(key):
key = float(key)
elif isinstance(key, (np.ndarray, Index)):
key = key.astype('float64')
elif is_integer_dtype(subtype):
if is_integer(key):
key = int(key)
return key
def _check_method(self, method):
if method is None:
return
if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:
raise NotImplementedError(
'method {} not yet implemented for '
'IntervalIndex'.format(method))
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError('can only get slices from an IntervalIndex if '
'bounds are non-overlapping and all monotonic '
'increasing or decreasing')
if isinstance(label, IntervalMixin):
raise NotImplementedError
if ((side == 'left' and self.left.is_monotonic_increasing) or
(side == 'right' and self.left.is_monotonic_decreasing)):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
def _get_loc_only_exact_matches(self, key):
if isinstance(key, Interval):
if not self.is_unique:
raise ValueError("cannot index with a slice Interval"
" and a non-unique index")
# TODO: this expands to a tuple index, see if we can
# do better
return Index(self._multiindex.values).get_loc(key)
raise KeyError
def _find_non_overlapping_monotonic_bounds(self, key):
if isinstance(key, IntervalMixin):
start = self._searchsorted_monotonic(
key.left, 'left', exclude_label=key.open_left)
stop = self._searchsorted_monotonic(
key.right, 'right', exclude_label=key.open_right)
elif isinstance(key, slice):
# slice
start, stop = key.start, key.stop
if (key.step or 1) != 1:
raise NotImplementedError("cannot slice with a slice step")
if start is None:
start = 0
else:
start = self._searchsorted_monotonic(start, 'left')
if stop is None:
stop = len(self)
else:
stop = self._searchsorted_monotonic(stop, 'right')
else:
# scalar or index-like
start = self._searchsorted_monotonic(key, 'left')
stop = self._searchsorted_monotonic(key, 'right')
return start, stop
def get_loc(self, key, method=None):
"""Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}, optional
* default: matches where the label is within an interval only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
---------
>>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
>>> index = pd.IntervalIndex.from_intervals([i1, i2])
>>> index.get_loc(1)
0
        You can also supply an interval or a location for a point inside an
interval.
>>> index.get_loc(pd.Interval(0, 2))
array([0, 1], dtype=int64)
>>> index.get_loc(1.5)
1
If a label is in several intervals, you get the locations of all the
relevant intervals.
>>> i3 = pd.Interval(0, 2)
>>> overlapping_index = pd.IntervalIndex.from_intervals([i2, i3])
>>> overlapping_index.get_loc(1.5)
array([0, 1], dtype=int64)
"""
self._check_method(method)
original_key = key
key = self._maybe_cast_indexed(key)
if self.is_non_overlapping_monotonic:
if isinstance(key, Interval):
left = self._maybe_cast_slice_bound(key.left, 'left', None)
right = self._maybe_cast_slice_bound(key.right, 'right', None)
key = Interval(left, right, key.closed)
else:
key = self._maybe_cast_slice_bound(key, 'left', None)
start, stop = self._find_non_overlapping_monotonic_bounds(key)
if start is None or stop is None:
return slice(start, stop)
elif start + 1 == stop:
return start
elif start < stop:
return slice(start, stop)
else:
raise KeyError(original_key)
else:
# use the interval tree
if isinstance(key, Interval):
left, right = _get_interval_closed_bounds(key)
return self._engine.get_loc_interval(left, right)
else:
return self._engine.get_loc(key)
def get_value(self, series, key):
if is_bool_indexer(key):
loc = key
elif is_list_like(key):
loc = self.get_indexer(key)
elif isinstance(key, slice):
if not (key.step is None or key.step == 1):
raise ValueError("cannot support not-default "
"step in a slice")
try:
loc = self.get_loc(key)
except TypeError:
# we didn't find exact intervals
# or are non-unique
raise ValueError("unable to slice with "
"this key: {}".format(key))
else:
loc = self.get_loc(key)
return series.iloc[loc]
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
self._check_method(method)
target = _ensure_index(target)
target = self._maybe_cast_indexed(target)
if self.equals(target):
return np.arange(len(self), dtype='intp')
if self.is_non_overlapping_monotonic:
start, stop = self._find_non_overlapping_monotonic_bounds(target)
start_plus_one = start + 1
if not ((start_plus_one < stop).any()):
return np.where(start_plus_one == stop, start, -1)
if not self.is_unique:
raise ValueError("cannot handle non-unique indices")
# IntervalIndex
if isinstance(target, IntervalIndex):
indexer = self._get_reindexer(target)
# non IntervalIndex
else:
indexer = np.concatenate([self.get_loc(i) for i in target])
return _ensure_platform_int(indexer)
def _get_reindexer(self, target):
"""
Return an indexer for a target IntervalIndex with self
"""
# find the left and right indexers
lindexer = self._engine.get_indexer(target.left.values)
rindexer = self._engine.get_indexer(target.right.values)
# we want to return an indexer on the intervals
# however, our keys could provide overlapping of multiple
# intervals, so we iterate thru the indexers and construct
# a set of indexers
indexer = []
n = len(self)
for i, (l, r) in enumerate(zip(lindexer, rindexer)):
target_value = target[i]
# matching on the lhs bound
if (l != -1 and
self.closed == 'right' and
target_value.left == self[l].right):
l += 1
            # matching on the rhs bound
if (r != -1 and
self.closed == 'left' and
target_value.right == self[r].left):
r -= 1
# not found
if l == -1 and r == -1:
indexer.append(np.array([-1]))
elif r == -1:
indexer.append(np.arange(l, n))
elif l == -1:
# care about left/right closed here
value = self[i]
# target.closed same as self.closed
if self.closed == target.closed:
if target_value.left < value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'left'
elif self.closed == 'right':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'right'
elif self.closed == 'left':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
indexer.append(np.arange(0, r + 1))
else:
indexer.append(np.arange(l, r + 1))
return np.concatenate(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = self._maybe_cast_indexed(_ensure_index(target))
return super(IntervalIndex, self).get_indexer_non_unique(target)
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
return self._shallow_copy(values)
def delete(self, loc):
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
return self._shallow_copy(new_left, new_right)
def insert(self, loc, item):
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError('inserted item must be closed on the same '
'side as the index')
left_insert = item.left
right_insert = item.right
elif is_scalar(item) and isna(item):
# GH 18295
left_insert = right_insert = item
else:
raise ValueError('can only insert Interval objects and NA into '
'an IntervalIndex')
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
return self._shallow_copy(new_left, new_right)
def _as_like_interval_index(self, other, error_msg):
self._assert_can_do_setop(other)
other = _ensure_index(other)
if (not isinstance(other, IntervalIndex) or
self.closed != other.closed):
raise ValueError(error_msg)
return other
def _concat_same_dtype(self, to_concat, name):
"""
assert that we all have the same .closed
we allow a 0-len index here as well
"""
if not len({i.closed for i in to_concat if len(i)}) == 1:
msg = ('can only append two IntervalIndex objects '
'that are closed on the same side')
raise ValueError(msg)
return super(IntervalIndex, self)._concat_same_dtype(to_concat, name)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
left, right = self.left, self.right
if fill_value is None:
fill_value = self._na_value
mask = indices == -1
if not mask.any():
# we won't change dtype here in this case
# if we don't need
allow_fill = False
taker = lambda x: x.take(indices, allow_fill=allow_fill,
fill_value=fill_value)
try:
new_left = taker(left)
new_right = taker(right)
except ValueError:
            # we need to coerce; might have NA's in an
# integer dtype
new_left = taker(left.astype(float))
new_right = taker(right.astype(float))
return self._shallow_copy(new_left, new_right)
def __getitem__(self, value):
mask = self._isnan[value]
if is_scalar(mask) and mask:
return self._na_value
left = self.left[value]
right = self.right[value]
# scalar
if not isinstance(left, Index):
return Interval(left, right, self.closed)
return self._shallow_copy(left, right)
# __repr__ associated methods are based on MultiIndex
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
from pandas.io.formats.format import IntervalArrayFormatter
return IntervalArrayFormatter(values=self,
na_rep=na_rep,
justify='all').get_result()
def _format_data(self, name=None):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option(
'display.max_seq_items') or n) // 10, 10)
formatter = str
if n == 0:
summary = '[]'
elif n == 1:
first = formatter(self[0])
summary = '[{}]'.format(first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[{}, {}]'.format(first, last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = '[{} ... {}]'.format(', '.join(head),
', '.join(tail))
else:
head = []
tail = [formatter(x) for x in self]
summary = '[{}]'.format(', '.join(tail))
return summary + self._format_space()
def _format_attrs(self):
attrs = [('closed', repr(self.closed))]
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
attrs.append(('dtype', "'%s'" % self.dtype))
return attrs
def _format_space(self):
return "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
def argsort(self, *args, **kwargs):
return np.lexsort((self.right, self.left))
def equals(self, other):
if self.is_(other):
return True
# if we can coerce to an II
# then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
            other = Index(getattr(other, 'values', other))
return (self.left.equals(other.left) and
self.right.equals(other.right) and
self.closed == other.closed)
def _setop(op_name):
def func(self, other):
msg = ('can only do set operations between two IntervalIndex '
'objects that are closed on the same side')
other = self._as_like_interval_index(other, msg)
result = getattr(self._multiindex, op_name)(other._multiindex)
result_name = self.name if self.name == other.name else None
return type(self).from_tuples(result.values, closed=self.closed,
name=result_name)
return func
union = _setop('union')
intersection = _setop('intersection')
difference = _setop('difference')
symmetric_difference = _setop('symmetric_difference')
# TODO: arithmetic operations
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint):
"""helper for interval_range to check if start/end are valid types"""
return any([is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None])
def _is_type_compatible(a, b):
"""helper for interval_range to check type compat of start/end/freq"""
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return ((is_number(a) and is_number(b)) or
(is_ts_compat(a) and is_ts_compat(b)) or
(is_td_compat(a) and is_td_compat(b)) or
_any_none(a, b))
def interval_range(start=None, end=None, periods=None, freq=None,
name=None, closed='right'):
"""
Return a fixed frequency IntervalIndex
Parameters
----------
start : numeric or datetime-like, default None
Left bound for generating intervals
end : numeric or datetime-like, default None
Right bound for generating intervals
periods : integer, default None
Number of periods to generate
freq : numeric, string, or DateOffset, default None
The length of each interval. Must be consistent with the type of start
and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
for numeric and 'D' (calendar daily) for datetime-like.
name : string, default None
Name of the resulting IntervalIndex
closed : string, default 'right'
options are: 'left', 'right', 'both', 'neither'
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
Returns
-------
rng : IntervalIndex
Examples
--------
Numeric ``start`` and ``end`` is supported.
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
closed='right', dtype='interval[int64]')
Additionally, datetime-like input is also supported.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
end=pd.Timestamp('2017-01-04'))
IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
(2017-01-03, 2017-01-04]]
closed='right', dtype='interval[datetime64[ns]]')
    The ``freq`` parameter specifies the frequency between the left and right
endpoints of the individual intervals within the ``IntervalIndex``. For
numeric ``start`` and ``end``, the frequency must also be numeric.
>>> pd.interval_range(start=0, periods=4, freq=1.5)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
closed='right', dtype='interval[float64]')
Similarly, for datetime-like ``start`` and ``end``, the frequency must be
convertible to a DateOffset.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
periods=3, freq='MS')
IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
(2017-03-01, 2017-04-01]]
closed='right', dtype='interval[datetime64[ns]]')
The ``closed`` parameter specifies which endpoints of the individual
intervals within the ``IntervalIndex`` are closed.
>>> pd.interval_range(end=5, periods=4, closed='both')
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]]
closed='both', dtype='interval[int64]')
See Also
--------
IntervalIndex : an Index of intervals that are all closed on the same side.
"""
if _count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
start = _maybe_box_datetimelike(start)
end = _maybe_box_datetimelike(end)
endpoint = next(_not_none(start, end))
if not _is_valid_endpoint(start):
msg = 'start must be numeric or datetime-like, got {start}'
raise ValueError(msg.format(start=start))
if not _is_valid_endpoint(end):
msg = 'end must be numeric or datetime-like, got {end}'
raise ValueError(msg.format(end=end))
if is_float(periods):
periods = int(periods)
elif not is_integer(periods) and periods is not None:
msg = 'periods must be a number, got {periods}'
raise TypeError(msg.format(periods=periods))
freq = freq or (1 if is_number(endpoint) else 'D')
if not is_number(freq):
try:
freq = to_offset(freq)
except ValueError:
raise ValueError('freq must be numeric or convertible to '
'DateOffset, got {freq}'.format(freq=freq))
# verify type compatibility
if not all([_is_type_compatible(start, end),
_is_type_compatible(start, freq),
_is_type_compatible(end, freq)]):
raise TypeError("start, end, freq need to be type compatible")
if is_number(endpoint):
if periods is None:
periods = int((end - start) // freq)
if start is None:
start = end - periods * freq
# force end to be consistent with freq (lower if freq skips over end)
end = start + periods * freq
# end + freq for inclusive endpoint
breaks = np.arange(start, end + freq, freq)
elif isinstance(endpoint, Timestamp):
# add one to account for interval endpoints (n breaks = n-1 intervals)
if periods is not None:
periods += 1
breaks = date_range(start=start, end=end, periods=periods, freq=freq)
else:
# add one to account for interval endpoints (n breaks = n-1 intervals)
if periods is not None:
periods += 1
breaks = timedelta_range(start=start, end=end, periods=periods,
freq=freq)
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
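# Worked example for the numeric branch above (descriptive comment only, not
# part of the original module): interval_range(start=0, periods=4, freq=1.5)
# computes end = 0 + 4 * 1.5 = 6.0, breaks = np.arange(0, 6.0 + 1.5, 1.5)
# = [0.0, 1.5, 3.0, 4.5, 6.0], and from_breaks turns those 5 breaks into the
# 4 intervals (0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0].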
| 34.336826
| 79
| 0.582051
|
522468155ac7c64ffb929ce098546aad885e3b0d
| 5,625
|
py
|
Python
|
cogdl/datasets/gtn_data.py
|
cenyk1230/cogdl
|
fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce
|
[
"MIT"
] | null | null | null |
cogdl/datasets/gtn_data.py
|
cenyk1230/cogdl
|
fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce
|
[
"MIT"
] | null | null | null |
cogdl/datasets/gtn_data.py
|
cenyk1230/cogdl
|
fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce
|
[
"MIT"
] | null | null | null |
import os.path as osp
import pickle
import numpy as np
import torch
from cogdl.data import Graph, Dataset
from cogdl.utils import download_url, untar
from . import register_dataset
class GTNDataset(Dataset):
r"""The network datasets "ACM", "DBLP" and "IMDB" from the
`"Graph Transformer Networks"
<https://arxiv.org/abs/1911.06455>`_ paper.
Args:
root (string): Root directory where the dataset should be saved.
name (string): The name of the dataset (:obj:`"gtn-acm"`,
:obj:`"gtn-dblp"`, :obj:`"gtn-imdb"`).
"""
def __init__(self, root, name):
self.name = name
self.url = f"https://github.com/cenyk1230/gtn-data/blob/master/{name}.zip?raw=true"
super(GTNDataset, self).__init__(root)
self.data = torch.load(self.processed_paths[0])
self.num_edge = len(self.data.adj)
self.num_nodes = self.data.x.shape[0]
@property
def raw_file_names(self):
names = ["edges.pkl", "labels.pkl", "node_features.pkl"]
return names
@property
def processed_file_names(self):
return ["data.pt"]
@property
def num_classes(self):
return torch.max(self.data.train_target).item() + 1
def read_gtn_data(self, folder):
edges = pickle.load(open(osp.join(folder, "edges.pkl"), "rb"))
labels = pickle.load(open(osp.join(folder, "labels.pkl"), "rb"))
node_features = pickle.load(open(osp.join(folder, "node_features.pkl"), "rb"))
data = Graph()
data.x = torch.from_numpy(node_features).type(torch.FloatTensor)
num_nodes = edges[0].shape[0]
node_type = np.zeros((num_nodes), dtype=int)
assert len(edges) == 4
assert len(edges[0].nonzero()) == 2
node_type[edges[0].nonzero()[0]] = 0
node_type[edges[0].nonzero()[1]] = 1
node_type[edges[1].nonzero()[0]] = 1
node_type[edges[1].nonzero()[1]] = 0
node_type[edges[2].nonzero()[0]] = 0
node_type[edges[2].nonzero()[1]] = 2
node_type[edges[3].nonzero()[0]] = 2
node_type[edges[3].nonzero()[1]] = 0
print(node_type)
data.pos = torch.from_numpy(node_type)
edge_list = []
for i, edge in enumerate(edges):
edge_tmp = torch.from_numpy(np.vstack((edge.nonzero()[0], edge.nonzero()[1]))).type(torch.LongTensor)
edge_list.append(edge_tmp)
data.edge_index = torch.cat(edge_list, 1)
A = []
for i, edge in enumerate(edges):
edge_tmp = torch.from_numpy(np.vstack((edge.nonzero()[0], edge.nonzero()[1]))).type(torch.LongTensor)
value_tmp = torch.ones(edge_tmp.shape[1]).type(torch.FloatTensor)
A.append((edge_tmp, value_tmp))
edge_tmp = torch.stack((torch.arange(0, num_nodes), torch.arange(0, num_nodes))).type(torch.LongTensor)
value_tmp = torch.ones(num_nodes).type(torch.FloatTensor)
A.append((edge_tmp, value_tmp))
data.adj = A
data.train_node = torch.from_numpy(np.array(labels[0])[:, 0]).type(torch.LongTensor)
data.train_target = torch.from_numpy(np.array(labels[0])[:, 1]).type(torch.LongTensor)
data.valid_node = torch.from_numpy(np.array(labels[1])[:, 0]).type(torch.LongTensor)
data.valid_target = torch.from_numpy(np.array(labels[1])[:, 1]).type(torch.LongTensor)
data.test_node = torch.from_numpy(np.array(labels[2])[:, 0]).type(torch.LongTensor)
data.test_target = torch.from_numpy(np.array(labels[2])[:, 1]).type(torch.LongTensor)
y = np.zeros((num_nodes), dtype=int)
x_index = torch.cat((data.train_node, data.valid_node, data.test_node))
y_index = torch.cat((data.train_target, data.valid_target, data.test_target))
y[x_index.numpy()] = y_index.numpy()
data.y = torch.from_numpy(y)
self.data = data
def get(self, idx):
assert idx == 0
return self.data
def apply_to_device(self, device):
self.data.x = self.data.x.to(device)
self.data.train_node = self.data.train_node.to(device)
self.data.valid_node = self.data.valid_node.to(device)
self.data.test_node = self.data.test_node.to(device)
self.data.train_target = self.data.train_target.to(device)
self.data.valid_target = self.data.valid_target.to(device)
self.data.test_target = self.data.test_target.to(device)
new_adj = []
for (t1, t2) in self.data.adj:
new_adj.append((t1.to(device), t2.to(device)))
self.data.adj = new_adj
def download(self):
download_url(self.url, self.raw_dir, name=self.name + ".zip")
untar(self.raw_dir, self.name + ".zip")
def process(self):
self.read_gtn_data(self.raw_dir)
torch.save(self.data, self.processed_paths[0])
def __repr__(self):
return "{}()".format(self.name)
@register_dataset("gtn-acm")
class ACM_GTNDataset(GTNDataset):
def __init__(self, data_path="data"):
dataset = "gtn-acm"
path = osp.join(data_path, dataset)
super(ACM_GTNDataset, self).__init__(path, dataset)
@register_dataset("gtn-dblp")
class DBLP_GTNDataset(GTNDataset):
def __init__(self, data_path="data"):
dataset = "gtn-dblp"
path = osp.join(data_path, dataset)
super(DBLP_GTNDataset, self).__init__(path, dataset)
@register_dataset("gtn-imdb")
class IMDB_GTNDataset(GTNDataset):
def __init__(self, data_path="data"):
dataset = "gtn-imdb"
path = osp.join(data_path, dataset)
super(IMDB_GTNDataset, self).__init__(path, dataset)
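if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: build the ACM
    # variant (the first call downloads gtn-acm.zip and caches the processed
    # Graph under data/gtn-acm) and inspect the heterogeneous graph it wraps.
    dataset = ACM_GTNDataset(data_path="data")
    graph = dataset.get(0)
    print("nodes:", dataset.num_nodes,
          "classes:", dataset.num_classes,
          "feature matrix:", tuple(graph.x.shape))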
| 36.525974
| 113
| 0.632889
|
c41c4d5328470a51353999abe04ca1c1ed9eef82
| 2,237
|
py
|
Python
|
salvia/wallet/puzzles/prefarm/make_prefarm_ph.py
|
mikando/salvia-blockchain
|
02181d0b5a063374f01eea951570dbc661bddc34
|
[
"Apache-2.0"
] | null | null | null |
salvia/wallet/puzzles/prefarm/make_prefarm_ph.py
|
mikando/salvia-blockchain
|
02181d0b5a063374f01eea951570dbc661bddc34
|
[
"Apache-2.0"
] | null | null | null |
salvia/wallet/puzzles/prefarm/make_prefarm_ph.py
|
mikando/salvia-blockchain
|
02181d0b5a063374f01eea951570dbc661bddc34
|
[
"Apache-2.0"
] | null | null | null |
from clvm.casts import int_from_bytes
from clvm_tools import binutils
from salvia.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from salvia.types.blockchain_format.program import Program
from salvia.types.condition_opcodes import ConditionOpcode
from salvia.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from salvia.util.condition_tools import parse_sexp_to_conditions
from salvia.util.ints import uint32
address1 = "txch15gx26ndmacfaqlq8m0yajeggzceu7cvmaz4df0hahkukes695rss6lej7h" # Gene wallet (m/12381/8444/2/42):
address2 = "txch1c2cguswhvmdyz9hr3q6hak2h6p9dw4rz82g4707k2xy2sarv705qcce4pn" # Mariano address (m/12381/8444/2/0)
ph1 = decode_puzzle_hash(address1)
ph2 = decode_puzzle_hash(address2)
pool_amounts = int(calculate_pool_reward(uint32(0)) / 2)
farmer_amounts = int(calculate_base_farmer_reward(uint32(0)) / 2)
assert pool_amounts * 2 == calculate_pool_reward(uint32(0))
assert farmer_amounts * 2 == calculate_base_farmer_reward(uint32(0))
def make_puzzle(amount: int) -> int:
puzzle = f"(q . ((51 0x{ph1.hex()} {amount}) (51 0x{ph2.hex()} {amount})))"
# print(puzzle)
puzzle_prog = Program.to(binutils.assemble(puzzle))
print("Program: ", puzzle_prog)
puzzle_hash = puzzle_prog.get_tree_hash()
solution = "()"
prefix = "xslv"
print("PH", puzzle_hash)
print(f"Address: {encode_puzzle_hash(puzzle_hash, prefix)}")
result = puzzle_prog.run(solution)
error, result_human = parse_sexp_to_conditions(result)
total_salvia = 0
if error:
print(f"Error: {error}")
else:
assert result_human is not None
for cvp in result_human:
assert len(cvp.vars) == 2
total_salvia += int_from_bytes(cvp.vars[1])
print(
f"{ConditionOpcode(cvp.opcode).name}: {encode_puzzle_hash(cvp.vars[0], prefix)},"
f" amount: {int_from_bytes(cvp.vars[1])}"
)
return total_salvia
total_salvia = 0
print("Pool address: ")
total_salvia += make_puzzle(pool_amounts)
print("\nFarmer address: ")
total_salvia += make_puzzle(farmer_amounts)
assert total_salvia == calculate_base_farmer_reward(uint32(0)) + calculate_pool_reward(uint32(0))
| 36.080645
| 114
| 0.734019
|
254b6dede099cb3cb058a353025fcea46e8213ae
| 1,409
|
py
|
Python
|
tests/integration/fenics_tests/tutorials/page_9.py
|
muh-hassani/pyiron_continuum
|
44777f91a6c5defb0e5f8a7ad74908368bc37aba
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integration/fenics_tests/tutorials/page_9.py
|
muh-hassani/pyiron_continuum
|
44777f91a6c5defb0e5f8a7ad74908368bc37aba
|
[
"BSD-3-Clause"
] | 50
|
2021-02-19T19:33:38.000Z
|
2022-03-22T14:26:04.000Z
|
tests/integration/fenics_tests/tutorials/page_9.py
|
muh-hassani/pyiron_continuum
|
44777f91a6c5defb0e5f8a7ad74908368bc37aba
|
[
"BSD-3-Clause"
] | 1
|
2021-09-11T09:25:19.000Z
|
2021-09-11T09:25:19.000Z
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from fenics import *
from ufl import nabla_div
def linear_elasticity():
# Scaled variables
    L = 1
W = 0.2
mu = 1
rho = 1
delta = W / L
gamma = 0.4 * delta ** 2
beta = 1.25
lambda_ = beta
g = gamma
# Create mesh and define function space
mesh = BoxMesh(Point(0, 0, 0), Point(L, W, W), 10, 3, 3)
V = VectorFunctionSpace(mesh, 'P', 1)
# Define boundary condition
tol = 1E-14
def clamped_boundary(x, on_boundary):
return on_boundary and x[0] < tol
bc = DirichletBC(V, Constant((0, 0, 0)), clamped_boundary)
# Define strain and stress
def epsilon(u):
return 0.5 * (nabla_grad(u) + nabla_grad(u).T)
# return sym(nabla_grad(u))
def sigma(u):
return lambda_ * nabla_div(u) * Identity(d) + 2 * mu * epsilon(u)
# Define variational problem
u = TrialFunction(V)
d = u.geometric_dimension() # space dimension
v = TestFunction(V)
f = Constant((0, 0, -rho * g))
T = Constant((0, 0, 0))
a = inner(sigma(u), epsilon(v)) * dx
L = dot(f, v) * dx + dot(T, v) * ds
# Compute solution
u = Function(V)
solve(a == L, u, bc)
return u.compute_vertex_values(mesh)
| 26.092593
| 108
| 0.605394
|
c2ed93936cef3e5050f2a18c10ba079293fdadb6
| 14,655
|
py
|
Python
|
onnx/backend/test/runner/__init__.py
|
How-Wang/onnx
|
c940fa3fea84948e46603cab2f86467291443beb
|
[
"Apache-2.0"
] | 1
|
2022-02-04T07:45:14.000Z
|
2022-02-04T07:45:14.000Z
|
onnx/backend/test/runner/__init__.py
|
How-Wang/onnx
|
c940fa3fea84948e46603cab2f86467291443beb
|
[
"Apache-2.0"
] | null | null | null |
onnx/backend/test/runner/__init__.py
|
How-Wang/onnx
|
c940fa3fea84948e46603cab2f86467291443beb
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import functools
import glob
import os
import re
import shutil
import sys
import tarfile
import tempfile
import time
import unittest
import numpy as np # type: ignore
import onnx
from onnx import helper, numpy_helper, NodeProto, ModelProto, TypeProto
from onnx.backend.base import Backend
from urllib.request import urlretrieve
from ..loader import load_model_tests
from ..case.test_case import TestCase
from .item import TestItem
from typing import Optional, Pattern, Set, Dict, Text, Type, Sequence, Any, Callable, Union, Iterable, List
class BackendIsNotSupposedToImplementIt(unittest.SkipTest):
pass
def retry_excute(times: int) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
assert times >= 1
def wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
@functools.wraps(func)
def wrapped(*args: Any, **kwargs: Any) -> Any:
for i in range(1, times + 1):
try:
return func(*args, **kwargs)
except Exception:
print('{} times tried'.format(i))
if i == times:
raise
time.sleep(5 * i)
return wrapped
return wrapper
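# Usage sketch for the decorator above (descriptive comment only, not part of
# the original module): retry_excute(times) wraps a callable so that it is
# attempted up to ``times`` times; every failure except the last sleeps
# 5 * attempt-number seconds before retrying, and the final failure re-raises.
#
#     @retry_excute(3)
#     def fetch(url, dest):        # hypothetical helper
#         urlretrieve(url, dest)
#
# download_model further down is decorated exactly this way to survive
# transient network errors while fetching model tarballs.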
class Runner(object):
def __init__(self, backend: Type[Backend], parent_module: Optional[str] = None) -> None:
self.backend = backend
self._parent_module = parent_module
self._include_patterns: Set[Pattern[Text]] = set()
self._exclude_patterns: Set[Pattern[Text]] = set()
self._xfail_patterns: Set[Pattern[Text]] = set()
# This is the source of the truth of all test functions.
# Properties `test_cases`, `test_suite` and `tests` will be
# derived from it.
# {category: {name: func}}
self._test_items: Dict[Text, Dict[Text, TestItem]] = defaultdict(dict)
for rt in load_model_tests(kind='node'):
self._add_model_test(rt, 'Node')
for rt in load_model_tests(kind='real'):
self._add_model_test(rt, 'Real')
for rt in load_model_tests(kind='simple'):
self._add_model_test(rt, 'Simple')
for ct in load_model_tests(kind='pytorch-converted'):
self._add_model_test(ct, 'PyTorchConverted')
for ot in load_model_tests(kind='pytorch-operator'):
self._add_model_test(ot, 'PyTorchOperator')
def _get_test_case(self, name: Text) -> Type[unittest.TestCase]:
test_case = type(str(name), (unittest.TestCase,), {})
if self._parent_module:
test_case.__module__ = self._parent_module
return test_case
# TODO: Use proper type annotation rather than string
# once we drop Python 3.6 support. See
# https://www.python.org/dev/peps/pep-0563/.
def include(self, pattern: Text) -> 'Runner':
self._include_patterns.add(re.compile(pattern))
return self
def exclude(self, pattern: Text) -> 'Runner':
self._exclude_patterns.add(re.compile(pattern))
return self
def xfail(self, pattern: Text) -> 'Runner':
self._xfail_patterns.add(re.compile(pattern))
return self
def enable_report(self) -> 'Runner':
import pytest # type: ignore
for category, items_map in self._test_items.items():
for name, item in items_map.items():
item.func = pytest.mark.onnx_coverage(item.proto, category)(item.func)
return self
@property
def _filtered_test_items(self) -> Dict[Text, Dict[Text, TestItem]]:
filtered: Dict[Text, Dict[Text, TestItem]] = {}
for category, items_map in self._test_items.items():
filtered[category] = {}
for name, item in items_map.items():
if (self._include_patterns
and (not any(include.search(name)
for include in self._include_patterns))):
item.func = unittest.skip(
'no matched include pattern'
)(item.func)
for exclude in self._exclude_patterns:
if exclude.search(name):
item.func = unittest.skip(
'matched exclude pattern "{}"'.format(
exclude.pattern)
)(item.func)
for xfail in self._xfail_patterns:
if xfail.search(name):
item.func = unittest.expectedFailure(item.func)
filtered[category][name] = item
return filtered
@property
def test_cases(self) -> Dict[str, Type[unittest.TestCase]]:
'''
List of test cases to be applied on the parent scope
Example usage:
globals().update(BackendTest(backend).test_cases)
'''
test_cases = {}
for category, items_map in self._filtered_test_items.items():
test_case_name = str('OnnxBackend{}Test').format(category)
test_case = self._get_test_case(test_case_name)
for name, item in sorted(items_map.items()):
setattr(test_case, name, item.func)
test_cases[test_case_name] = test_case
return test_cases
@property
def test_suite(self) -> unittest.TestSuite:
'''
TestSuite that can be run by TestRunner
Example usage:
unittest.TextTestRunner().run(BackendTest(backend).test_suite)
'''
suite = unittest.TestSuite()
for case in sorted(self.test_cases.values()):
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(case))
return suite
# For backward compatibility (we used to expose `.tests`)
@property
def tests(self) -> Type[unittest.TestCase]:
'''
One single unittest.TestCase that hosts all the test functions
Example usage:
onnx_backend_tests = BackendTest(backend).tests
'''
tests = self._get_test_case('OnnxBackendTest')
for items_map in sorted(self._filtered_test_items.values()):
for name, item in sorted(items_map.items()):
setattr(tests, name, item.func)
return tests
@classmethod
def assert_similar_outputs(cls, ref_outputs: Sequence[Any], outputs: Sequence[Any], rtol: float, atol: float) -> None:
np.testing.assert_equal(len(outputs), len(ref_outputs))
for i in range(len(outputs)):
if isinstance(outputs[i], (list, tuple)):
for j in range(len(outputs[i])):
cls.assert_similar_outputs(ref_outputs[i][j], outputs[i][j], rtol, atol)
else:
np.testing.assert_equal(outputs[i].dtype, ref_outputs[i].dtype)
if ref_outputs[i].dtype == np.object:
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
else:
np.testing.assert_allclose(
outputs[i],
ref_outputs[i],
rtol=rtol,
atol=atol)
@classmethod
@retry_excute(3)
def download_model(cls, model_test: TestCase, model_dir: Text, models_dir: Text) -> None:
# On Windows, NamedTemporaryFile can not be opened for a
# second time
download_file = tempfile.NamedTemporaryFile(delete=False)
try:
download_file.close()
print('Start downloading model {} from {}'.format(
model_test.model_name,
model_test.url))
urlretrieve(model_test.url, download_file.name)
print('Done')
with tarfile.open(download_file.name) as t:
t.extractall(models_dir)
except Exception as e:
print('Failed to prepare data for model {}: {}'.format(
model_test.model_name, e))
raise
finally:
os.remove(download_file.name)
@classmethod
def prepare_model_data(cls, model_test: TestCase) -> Text:
onnx_home = os.path.expanduser(os.getenv('ONNX_HOME', os.path.join('~', '.onnx')))
models_dir = os.getenv('ONNX_MODELS',
os.path.join(onnx_home, 'models'))
model_dir: Text = os.path.join(models_dir, model_test.model_name)
if not os.path.exists(os.path.join(model_dir, 'model.onnx')):
if os.path.exists(model_dir):
bi = 0
while True:
dest = '{}.old.{}'.format(model_dir, bi)
if os.path.exists(dest):
bi += 1
continue
shutil.move(model_dir, dest)
break
os.makedirs(model_dir)
cls.download_model(model_test=model_test, model_dir=model_dir, models_dir=models_dir)
return model_dir
def _add_test(self,
category: Text,
test_name: Text,
test_func: Callable[..., Any],
report_item: List[Optional[Union[ModelProto, NodeProto]]],
devices: Iterable[Text] = ('CPU', 'CUDA'),
) -> None:
# We don't prepend the 'test_' prefix to improve greppability
if not test_name.startswith('test_'):
raise ValueError(
'Test name must start with test_: {}'.format(test_name))
def add_device_test(device: Text) -> None:
device_test_name = '{}_{}'.format(test_name, device.lower())
if device_test_name in self._test_items[category]:
raise ValueError(
'Duplicated test name "{}" in category "{}"'.format(
device_test_name, category))
@unittest.skipIf( # type: ignore
not self.backend.supports_device(device),
"Backend doesn't support device {}".format(device))
@functools.wraps(test_func)
def device_test_func(*args: Any, **kwargs: Any) -> Any:
try:
return test_func(*args, device=device, **kwargs)
except BackendIsNotSupposedToImplementIt as e:
# hacky verbose reporting
if '-v' in sys.argv or '--verbose' in sys.argv:
print('Test {} is effectively skipped: {}'.format(
device_test_name, e))
self._test_items[category][device_test_name] = TestItem(
device_test_func, report_item)
for device in devices:
add_device_test(device)
def _add_model_test(self, model_test: TestCase, kind: Text) -> None:
        # the model is loaded at runtime; note it may never be loaded
        # at all if the test is skipped
model_marker: List[Optional[Union[ModelProto, NodeProto]]] = [None]
def run(test_self: Any, device: Text) -> None:
if model_test.model_dir is None:
model_dir = self.prepare_model_data(model_test)
else:
model_dir = model_test.model_dir
model_pb_path = os.path.join(model_dir, 'model.onnx')
model = onnx.load(model_pb_path)
model_marker[0] = model
if hasattr(self.backend, 'is_compatible') \
and callable(self.backend.is_compatible) \
and not self.backend.is_compatible(model):
raise unittest.SkipTest('Not compatible with backend')
prepared_model = self.backend.prepare(model, device)
assert prepared_model is not None
# TODO after converting all npz files to protobuf, we can delete this.
for test_data_npz in glob.glob(
os.path.join(model_dir, 'test_data_*.npz')):
test_data = np.load(test_data_npz, encoding='bytes')
inputs = list(test_data['inputs'])
outputs = list(prepared_model.run(inputs))
ref_outputs = test_data['outputs']
self.assert_similar_outputs(ref_outputs, outputs,
rtol=model_test.rtol,
atol=model_test.atol)
for test_data_dir in glob.glob(
os.path.join(model_dir, "test_data_set*")):
inputs = []
inputs_num = len(glob.glob(os.path.join(test_data_dir, 'input_*.pb')))
for i in range(inputs_num):
input_file = os.path.join(test_data_dir, 'input_{}.pb'.format(i))
self._load_proto(input_file, inputs, model.graph.input[i].type)
ref_outputs = []
ref_outputs_num = len(glob.glob(os.path.join(test_data_dir, 'output_*.pb')))
for i in range(ref_outputs_num):
output_file = os.path.join(test_data_dir, 'output_{}.pb'.format(i))
self._load_proto(output_file, ref_outputs, model.graph.output[i].type)
outputs = list(prepared_model.run(inputs))
self.assert_similar_outputs(ref_outputs, outputs,
rtol=model_test.rtol,
atol=model_test.atol)
self._add_test(kind + 'Model', model_test.name, run, model_marker)
def _load_proto(self, proto_filename: Text, target_list: List[Union[np.ndarray, List[Any]]], model_type_proto: TypeProto) -> None:
with open(proto_filename, 'rb') as f:
protobuf_content = f.read()
if model_type_proto.HasField('sequence_type'):
sequence = onnx.SequenceProto()
sequence.ParseFromString(protobuf_content)
target_list.append(numpy_helper.to_list(sequence))
elif model_type_proto.HasField('tensor_type'):
tensor = onnx.TensorProto()
tensor.ParseFromString(protobuf_content)
target_list.append(numpy_helper.to_array(tensor))
elif model_type_proto.HasField('optional_type'):
optional = onnx.OptionalProto()
optional.ParseFromString(protobuf_content)
target_list.append(numpy_helper.to_optional(optional))
else:
print('Loading proto of that specific type (Map/Sparse Tensor) is currently not supported')
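# Hedged usage sketch (not part of the original file): BackendTest is the
# public entry point that drives the runner above; it generates the per-model,
# per-device unittest cases that _add_test registers. "MyBackend" and
# "my_package" are placeholder assumptions for a concrete
# onnx.backend.base.Backend subclass, not names defined in this file.
import unittest
import onnx.backend.test
from my_package import MyBackend  # hypothetical backend implementation
backend_test = onnx.backend.test.BackendTest(MyBackend, __name__)
backend_test.include(r'test_relu')  # optionally narrow the generated suite
globals().update(backend_test.enable_report().test_cases)
if __name__ == '__main__':
    unittest.main()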
| 42.601744
| 134
| 0.583623
|
7095bdcfb2b29c4c897e71b7184aac06a00bbc86
| 19
|
py
|
Python
|
ion/runner.py
|
tor4z/ion
|
aa60ac23246f5fab5a5199704a65f7068d316179
|
[
"MIT"
] | null | null | null |
ion/runner.py
|
tor4z/ion
|
aa60ac23246f5fab5a5199704a65f7068d316179
|
[
"MIT"
] | null | null | null |
ion/runner.py
|
tor4z/ion
|
aa60ac23246f5fab5a5199704a65f7068d316179
|
[
"MIT"
] | null | null | null |
def run():
pass
| 9.5
| 10
| 0.526316
|
afd08d8f34e1e97f9d69458483ba9bef998f13f2
| 135
|
py
|
Python
|
main.py
|
thiago-rezende/se-covid-assignment
|
ea1b9f9c40bf5671cad43ee0cc4d297262d60f63
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
thiago-rezende/se-covid-assignment
|
ea1b9f9c40bf5671cad43ee0cc4d297262d60f63
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
thiago-rezende/se-covid-assignment
|
ea1b9f9c40bf5671cad43ee0cc4d297262d60f63
|
[
"Apache-2.0"
] | null | null | null |
# Application entry point
from horus.application import Application
if __name__ == "__main__":
app = Application()
app.run()
| 16.875
| 41
| 0.711111
|
fbad8dd55a681ac1c270ad1b0a6788711c694594
| 955
|
py
|
Python
|
temp.py
|
nadavleh/CNN-from-scratch
|
b865a15bae844b16d345eda9a5d40b08b07ccf04
|
[
"MIT"
] | null | null | null |
temp.py
|
nadavleh/CNN-from-scratch
|
b865a15bae844b16d345eda9a5d40b08b07ccf04
|
[
"MIT"
] | null | null | null |
temp.py
|
nadavleh/CNN-from-scratch
|
b865a15bae844b16d345eda9a5d40b08b07ccf04
|
[
"MIT"
] | null | null | null |
from cnn_layers import *
import numpy as np
# b = convLayer3D(6, (6,5,5), (-0.1,0.1))
# a = np.ones((6,12,12))
# b.forwardProp(a)
# import skimage.data
# from matplotlib import pyplot as plt
# # Reading the image
# img = skimage.data.chelsea()
# # Converting the image into gray.
# img = skimage.color.rgb2gray(img)
# plt.figure(0)
# plt.imshow(img, cmap = 'gray')
a = np.array([[i for i in range(k,k+10)] for k in range(0,100,10)])
b = np.array([[i for i in range(k,k+10)] for k in range(100,200,10)])
ab = np.zeros((2,10,10))
ab[0,:,:] = a
ab[1,:,:] = b
img2 = np.array([i for i in range(64)])
img2 = img2.reshape((8,8))
img1 = np.array([i for i in range(64,64+64)])
img1 = img1.reshape((8,8))
img = np.array([img2,img1])
# res = ab.flatten()
# print("the 3D matrix", ab)
# print("flatten() result", res)
m = maxPool()
a,b = m.forwardProp(ab)
| 13.642857
| 70
| 0.558115
|
ad2e8cc739c65f73b2c8fe63503b9a89fbaaad48
| 1,394
|
py
|
Python
|
python/cuml/neighbors/__init__.py
|
tzemicheal/cuml
|
377f3c2773e3fc64d93d3d64a3f2fcd6c8759044
|
[
"Apache-2.0"
] | 1
|
2021-01-01T10:52:18.000Z
|
2021-01-01T10:52:18.000Z
|
python/cuml/neighbors/__init__.py
|
tzemicheal/cuml
|
377f3c2773e3fc64d93d3d64a3f2fcd6c8759044
|
[
"Apache-2.0"
] | null | null | null |
python/cuml/neighbors/__init__.py
|
tzemicheal/cuml
|
377f3c2773e3fc64d93d3d64a3f2fcd6c8759044
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.import_utils import has_dask
from cuml.neighbors.nearest_neighbors import NearestNeighbors
from cuml.neighbors.nearest_neighbors import kneighbors_graph
from cuml.neighbors.kneighbors_classifier import KNeighborsClassifier
from cuml.neighbors.kneighbors_regressor import KNeighborsRegressor
VALID_METRICS = {
"brute": set([
"l2", "euclidean",
"l1", "cityblock", "manhattan", "taxicab",
"braycurtis", "canberra",
"minkowski", "lp",
"chebyshev", "linf",
"jensenshannon",
"cosine", "correlation",
"inner_product", "sqeuclidean"
]),
"sparse": set(["euclidean", "l2", "inner_product"]),
"ivfflat": set(["l2", "euclidean"]),
"ivfpq": set(["l2", "euclidean"]),
"ivfsq": set(["l2", "euclidean"])
}
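# Hedged usage sketch (not part of the original module): VALID_METRICS above
# records which distance metrics each nearest-neighbors algorithm accepts. A
# minimal brute-force query could look like this; the toy data, parameter
# values, and the choice of "euclidean" are assumptions for illustration only.
import numpy as np
from cuml.neighbors import NearestNeighbors
X = np.random.rand(100, 8).astype(np.float32)  # 100 points in 8 dimensions
nn = NearestNeighbors(n_neighbors=3, algorithm="brute", metric="euclidean")
nn.fit(X)
distances, indices = nn.kneighbors(X)  # 3 nearest neighbours of every row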
| 34.85
| 74
| 0.697274
|
d323dbb12ab50a187d78829f8c255460431eddf8
| 458
|
py
|
Python
|
databases/seeds/user_table_seeder.py
|
vaibhavmule/orm
|
8eb7b4667dc97870df46ef7a6724b21d5fb58fdb
|
[
"MIT"
] | null | null | null |
databases/seeds/user_table_seeder.py
|
vaibhavmule/orm
|
8eb7b4667dc97870df46ef7a6724b21d5fb58fdb
|
[
"MIT"
] | null | null | null |
databases/seeds/user_table_seeder.py
|
vaibhavmule/orm
|
8eb7b4667dc97870df46ef7a6724b21d5fb58fdb
|
[
"MIT"
] | null | null | null |
"""UserTableSeeder Seeder."""
from src.masonite.orm.seeds import Seeder
from src.masonite.orm.factories import Factory as factory
from app.User import User
factory.register(User, lambda faker: {'email': faker.email()})
class UserTableSeeder(Seeder):
def run(self):
"""Run the database seeds."""
print('run table seeder for users')
factory(User, 5).create({
'name': 'Joe',
'password': 'joe',
})
| 25.444444
| 62
| 0.631004
|
b2670f945ea4985e2a0172449cf451256e089be2
| 1,937
|
py
|
Python
|
scripts/data/make_handlabel_data_7keypooints.py
|
Xingyu-Romantic/MoveNet.Paddle
|
aa6c36b5027092ad38a793117b7ee52bb2c94a18
|
[
"MIT"
] | 87
|
2021-11-13T11:05:55.000Z
|
2022-03-30T11:00:45.000Z
|
scripts/data/make_handlabel_data_7keypooints.py
|
Dyian-snow/movenet.pytorch
|
95ec8535245228aa4335243e68722810e50bcaf8
|
[
"MIT"
] | 18
|
2021-11-16T01:13:19.000Z
|
2022-03-31T16:04:31.000Z
|
scripts/data/make_handlabel_data_7keypooints.py
|
Dyian-snow/movenet.pytorch
|
95ec8535245228aa4335243e68722810e50bcaf8
|
[
"MIT"
] | 28
|
2021-11-13T11:22:05.000Z
|
2022-03-29T10:02:09.000Z
|
"""
@Fire
https://github.com/fire717
"""
import os
import json
import pickle
import cv2
import numpy as np
import glob
read_dir = "label5"
save_dir = "croped"
output_name = 'croped/%s.json' % read_dir
output_img_dir = "croped/imgs"
if __name__ == '__main__':
imgs = glob.glob(read_dir+'/*.jpg')
print(len(imgs))
new_label = []
for i, img_path in enumerate(imgs):
img_name = os.path.basename(img_path)
img = cv2.imread(img_path)
h,w = img.shape[:2]
label_path = img_path[:-3]+'txt'
with open(label_path, 'r') as f:
lines = f.readlines()
if len(lines)!=8:
continue
keypoints = []
for line in lines[1:]:
x,y = [float(x) for x in line.strip().split(' ')[:2]]
keypoints.extend([x,y,2])
# center = [(keypoints[2*3]+keypoints[4*3])/2,
# (keypoints[2*3+1]+keypoints[4*3+1])/2]
min_key_x = np.min(np.array(keypoints)[[0,3,6,9,12,15,18]])
max_key_x = np.max(np.array(keypoints)[[0,3,6,9,12,15,18]])
min_key_y = np.min(np.array(keypoints)[[1,4,7,10,13,16,19]])
max_key_y = np.max(np.array(keypoints)[[1,4,7,10,13,16,19]])
center = [(min_key_x+max_key_x)/2, (min_key_y+max_key_y)/2]
save_item = {
"img_name":img_name,
"keypoints":keypoints,
"center":center,
"other_centers":[],
"other_keypoints":[[] for _ in range(7)],
}
# print(save_item)
new_label.append(save_item)
cv2.imwrite(os.path.join(output_img_dir, img_name), img)
with open(output_name,'w') as f:
json.dump(new_label, f, ensure_ascii=False)
print('Total write ', len(new_label))
| 24.833333
| 69
| 0.512648
|
a5d1f1397b63fc37f5f4dc112c25930c88f6424a
| 1,177
|
py
|
Python
|
Deprecated Source Code/stockExpectedReturn.py
|
TFSM00/Efficient-Frontier-Calculator
|
6277c7c2d85f4639900931739e54d010057e22a6
|
[
"MIT"
] | null | null | null |
Deprecated Source Code/stockExpectedReturn.py
|
TFSM00/Efficient-Frontier-Calculator
|
6277c7c2d85f4639900931739e54d010057e22a6
|
[
"MIT"
] | null | null | null |
Deprecated Source Code/stockExpectedReturn.py
|
TFSM00/Efficient-Frontier-Calculator
|
6277c7c2d85f4639900931739e54d010057e22a6
|
[
"MIT"
] | null | null | null |
from stockStatistics import stockStatistics
import pandas as pd
tickers = ["AAPL","GOOG","AMZN","MSFT","INTC","IBM","ORCL","CSCO","NVDA"]
def Average_StdDev_Data(tickerList):
"""
Returns a dataframe with the average and std. deviation for every ticker
"""
data = stockStatistics(tickerList)
average = []
stdev = []
for key in data.keys():
average.append(data.get(key)[0]) #average is first value
stdev.append(data.get(key)[1]) #stdev is the second value
table = pd.DataFrame([average,stdev], index=["Expected Return Rate","Standard Deviation"],columns=tickerList)
return table
def stdDeviation(tickerList):
"""
Returns the standard deviations as a list
"""
data = stockStatistics(tickerList)
stdev = []
for key in data.keys():
stdev.append(data.get(key)[1])
return stdev
def average(tickerList):
"""
Returns the averages as a list
"""
data = stockStatistics(tickerList)
average = []
for key in data.keys():
        average.append(data.get(key)[0])  # average is the first value
return average
if __name__ == '__main__':
print(Average_StdDev_Data(tickers))
| 23.54
| 113
| 0.641461
|
be73a970fabf6a6900473be5d3508048aee1ffc3
| 307
|
py
|
Python
|
vaultier/vaultier/tasks.py
|
dz0ny/Vaultier
|
e23d86c7576f4785b4e369242d7b5f7125e4d8c6
|
[
"BSD-3-Clause"
] | 30
|
2015-07-13T11:11:23.000Z
|
2021-01-25T14:21:18.000Z
|
vaultier/vaultier/tasks.py
|
corpusops/vaultier
|
3baef4346add0b3bdff322257467f74b2a0c856c
|
[
"BSD-3-Clause"
] | null | null | null |
vaultier/vaultier/tasks.py
|
corpusops/vaultier
|
3baef4346add0b3bdff322257467f74b2a0c856c
|
[
"BSD-3-Clause"
] | 31
|
2015-08-10T12:10:16.000Z
|
2020-09-18T09:43:28.000Z
|
from __future__ import absolute_import
from vaultier.statistics.statistics import StatisticsManager
from celery import shared_task
@shared_task()
def task_statistics_collector():
"""
Collects statistics from database and sends them to vaultier.org
"""
StatisticsManager.send_statistics()
| 23.615385
| 68
| 0.791531
|
0f876536ce64275220dd54bb29ee85255b3862b3
| 6,285
|
py
|
Python
|
old_tests/test_forcefield.py
|
schmolly/timemachine
|
7d13a0406dc2d09ac67892988641ba4965bfb206
|
[
"Apache-2.0"
] | 3
|
2020-01-28T21:19:54.000Z
|
2020-01-29T16:03:44.000Z
|
old_tests/test_forcefield.py
|
schmolly/timemachine
|
7d13a0406dc2d09ac67892988641ba4965bfb206
|
[
"Apache-2.0"
] | null | null | null |
old_tests/test_forcefield.py
|
schmolly/timemachine
|
7d13a0406dc2d09ac67892988641ba4965bfb206
|
[
"Apache-2.0"
] | null | null | null |
import os
import unittest
import ast
from ff import forcefield
from ff import system
from ff import openmm_converter
import itertools
import numpy as np
import pathlib
from rdkit import Chem
from simtk.openmm import app
from simtk.openmm.app import PDBFile
def get_masses(m):
masses = []
for a in m.GetAtoms():
masses.append(a.GetMass())
return masses
class TestForcefield(unittest.TestCase):
def get_smirnoff(self):
cwd = pathlib.Path(__file__).parent.parent.absolute()
fpath = os.path.join(cwd, 'ff', "smirnoff_1.1.0.py")
ff = forcefield.Forcefield(fpath)
return ff
def test_forcefield(self):
cwd = pathlib.Path(__file__).parent.parent.absolute()
fpath = os.path.join(cwd, 'ff', "smirnoff_1.1.0.py")
ff = forcefield.Forcefield(fpath)
ff_res = ff.serialize()
ff_raw = ast.literal_eval(open(fpath).read())
self.assertDictEqual(ff_res, ff_raw)
def test_params_and_groups(self):
itercount = itertools.count()
handle = {
'Angle': {'params': [
['[*:1]~[#6X4:2]-[*:3]', next(itercount)/5, next(itercount)/5],
['[#1:1]-[#6X4:2]-[#1:3]', next(itercount)/5,next(itercount)/5],
['[*;r3:1]1~;@[*;r3:2]~;@[*;r3:3]1', next(itercount)/5, next(itercount)/5],
]},
'Bond': {'params': [['[#6X4:1]-[#6X4:2]', next(itercount)/5, next(itercount)/5],
['[#6X4:1]-[#6X3:2]', next(itercount)/5, next(itercount)/5],
['[#6X4:1]-[#6X3:2]=[#8X1+0]', next(itercount)/5, next(itercount)/5],
]},
'Improper': {'params': [['[*:1]~[#6X3:2](~[*:3])~[*:4]', next(itercount)/5, next(itercount)/5, next(itercount)/5],
['[*:1]~[#6X3:2](~[#8X1:3])~[#8:4]', next(itercount)/5, next(itercount)/5, next(itercount)/5]
]},
'Proper': {'params': [['[*:1]-[#6X4:2]-[#6X4:3]-[*:4]', [[next(itercount)/5, next(itercount)/5, next(itercount)/5]]],
['[#6X4:1]-[#6X4:2]-[#6X4:3]-[#6X4:4]', [[next(itercount)/5, next(itercount)/5, next(itercount)/5], [next(itercount)/5, next(itercount)/5, next(itercount)/5], [next(itercount)/5, next(itercount)/5, next(itercount)/5]]]
]},
'vdW': {'params': [['[#1:1]', next(itercount)/5, next(itercount)/5],
['[#1:1]-[#6X4]', next(itercount)/5, next(itercount)/5],
],
'props': {'combining_rules': 'Lorentz-Berthelot', 'method': 'cutoff', 'potential': 'Lennard-Jones-12-6', 'scale12': 0.0, 'scale13': 0.0, 'scale14': 0.5, 'scale15': 1.0}
},
'GBSA': {
'params': [
['[*:1]', next(itercount)/5, next(itercount)/5],
],
'props': {
'solvent_dielectric' : 78.3, # matches OBC2,
'solute_dielectric' : 1.0,
'probe_radius' : 0.14,
'surface_tension' : 28.3919551,
'dielectric_offset' : 0.009,
# GBOBC1
'alpha' : 0.8,
'beta' : 0.0,
'gamma' : 2.909125
}
},
'SimpleCharges': {
'params': [
['[#1:1]', next(itercount)/5],
['[#1:1]-[#6X4]', next(itercount)/5],
]
}
}
ff = forcefield.Forcefield(handle)
# the -1 is for exclusions
np.testing.assert_equal(np.arange(next(itercount))/5, ff.params[:-1])
ref_groups = [0,1, 0,1, 0,1, 2,3, 2,3, 2,3, 4,5,6, 4,5,6, 7,8,9, 7,8,9, 7,8,9, 7,8,9, 10,11, 10,11, 12,13, 14,14]
np.testing.assert_equal(ref_groups, ff.param_groups[:-1])
def test_parameterization(self):
ff = self.get_smirnoff()
mol1 = Chem.AddHs(Chem.MolFromSmiles("O=C(N)C[C@H](N)C(=O)O"))
nrg_fns_1 = ff.parameterize(mol1)
# mol2 = Chem.AddHs(Chem.MolFromSmiles("O=C(C)Oc1ccccc1C(=O)O"))
# nrg_fns_2 = ff.parameterize(mol2)
def test_merging(self):
ff = self.get_smirnoff()
mol1 = Chem.AddHs(Chem.MolFromSmiles("O=C(N)C[C@H](N)C(=O)O"))
nrg_fns1 = ff.parameterize(mol1)
mol1_masses = get_masses(mol1)
mol2 = Chem.AddHs(Chem.MolFromSmiles("O=C(C)Oc1ccccc1C(=O)O"))
nrg_fns2 = ff.parameterize(mol2)
mol2_masses = get_masses(mol2)
system1 = system.System(nrg_fns1, ff.params, ff.param_groups, mol1_masses)
system2 = system.System(nrg_fns2, ff.params, ff.param_groups, mol2_masses)
system3 = system1.merge(system2)
np.testing.assert_equal(system3.masses, np.concatenate([system1.masses, system2.masses]))
np.testing.assert_equal(system3.params, np.concatenate([system1.params, system2.params]))
np.testing.assert_equal(system3.param_groups, np.concatenate([system1.param_groups, system2.param_groups]))
system3.make_gradients(3, np.float64)
def test_merging_with_openmm(self):
ff = self.get_smirnoff()
mol1 = Chem.AddHs(Chem.MolFromSmiles("O=C(N)C[C@H](N)C(=O)O"))
nrg_fns1 = ff.parameterize(mol1)
mol1_masses = get_masses(mol1)
host_pdb = app.PDBFile("examples/BRD4_minimized.pdb")
amber_ff = app.ForceField('amber99sbildn.xml', 'amber99_obc.xml')
protein_system = amber_ff.createSystem(
host_pdb.topology,
nonbondedMethod=app.NoCutoff,
constraints=None,
rigidWater=False
)
system1 = system.System(nrg_fns1, ff.params, ff.param_groups, mol1_masses)
protein_sys = openmm_converter.deserialize_system(protein_system)
combined_system = protein_sys.merge(system1)
np.testing.assert_equal(combined_system.masses, np.concatenate([protein_sys.masses, system1.masses]))
np.testing.assert_equal(combined_system.params, np.concatenate([protein_sys.params, system1.params]))
np.testing.assert_equal(combined_system.param_groups, np.concatenate([protein_sys.param_groups, system1.param_groups]))
# open("examples/BRD4_minimized.pdb")
# print(system)
| 39.528302
| 241
| 0.561814
|
5b4e201d76ab3689bc8847d6141b2fff1a852174
| 3,341
|
py
|
Python
|
script/update.py
|
pranayaryal/electron
|
a7052efaf4fc6bb2aeedd6579e662e98aa2237dd
|
[
"MIT"
] | 2
|
2018-06-23T22:04:12.000Z
|
2018-06-28T08:59:52.000Z
|
script/update.py
|
pranayaryal/electron
|
a7052efaf4fc6bb2aeedd6579e662e98aa2237dd
|
[
"MIT"
] | 8
|
2020-06-26T00:58:18.000Z
|
2020-11-04T04:11:46.000Z
|
script/update.py
|
pranayaryal/electron
|
a7052efaf4fc6bb2aeedd6579e662e98aa2237dd
|
[
"MIT"
] | 1
|
2018-10-05T17:29:23.000Z
|
2018-10-05T17:29:23.000Z
|
#!/usr/bin/env python
import argparse
import os
import platform
import subprocess
import sys
from lib.config import get_target_arch, PLATFORM
from lib.util import get_host_arch, import_vs_env
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
os.chdir(SOURCE_ROOT)
if PLATFORM != 'win32' and platform.architecture()[0] != '64bit':
print 'Electron is required to be built on a 64bit machine'
return 1
update_external_binaries()
return update_gyp()
def parse_args():
parser = argparse.ArgumentParser(description='Update build configurations')
parser.add_argument('--defines', default='',
help='The build variables passed to gyp')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--msvs', action='store_true',
help='Generate Visual Studio project')
group.add_argument('--xcode', action='store_true',
help='Generate XCode project')
return parser.parse_args()
def update_external_binaries():
uf = os.path.join('script', 'update-external-binaries.py')
subprocess.check_call([sys.executable, uf])
def update_gyp():
  # Since gyp doesn't support specifying link_settings per configuration,
  # we are not able to link to different libraries in the "Debug" and
  # "Release" configurations.
  # To work around this, we generate the configuration twice: once for the
  # "Debug" config and once for the "Release" config. The settings are
  # controlled by the variable "libchromiumcontent_component", which is
  # defined before running gyp.
target_arch = get_target_arch()
return (run_gyp(target_arch, 0) or run_gyp(target_arch, 1))
def run_gyp(target_arch, component):
# Update the VS build env.
import_vs_env(target_arch)
env = os.environ.copy()
if PLATFORM == 'linux' and target_arch != get_host_arch():
env['GYP_CROSSCOMPILE'] = '1'
elif PLATFORM == 'win32':
env['GYP_MSVS_VERSION'] = '2017'
python = sys.executable
if sys.platform == 'cygwin':
# Force using win32 python on cygwin.
python = os.path.join('vendor', 'python_26', 'python.exe')
gyp = os.path.join('vendor', 'gyp', 'gyp_main.py')
gyp_pylib = os.path.join(os.path.dirname(gyp), 'pylib')
# Avoid using the old gyp lib in system.
env['PYTHONPATH'] = os.path.pathsep.join([gyp_pylib,
env.get('PYTHONPATH', '')])
# Whether to build for Mac App Store.
if os.environ.has_key('MAS_BUILD'):
mas_build = 1
else:
mas_build = 0
defines = [
'-Dlibchromiumcontent_component={0}'.format(component),
'-Dtarget_arch={0}'.format(target_arch),
'-Dhost_arch={0}'.format(get_host_arch()),
'-Dlibrary=static_library',
'-Dmas_build={0}'.format(mas_build),
]
# Add the defines passed from command line.
args = parse_args()
for define in [d.strip() for d in args.defines.split(' ')]:
if define:
defines += ['-D' + define]
generator = 'ninja'
if args.msvs:
generator = 'msvs-ninja'
elif args.xcode:
generator = 'xcode-ninja'
return subprocess.call([python, gyp, '-f', generator, '--depth', '.',
'electron.gyp', '-Icommon.gypi'] + defines, env=env)
if __name__ == '__main__':
sys.exit(main())
| 31.518868
| 78
| 0.675846
|
4c3564fd1062c3a7d2134b57a274eddc564fd172
| 2,717
|
py
|
Python
|
src/transformers/configuration_pegasus.py
|
abufadl/transformers
|
c84bb6eb92b654e04a82fada26417fcdab45f3af
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/configuration_pegasus.py
|
abufadl/transformers
|
c84bb6eb92b654e04a82fada26417fcdab45f3af
|
[
"Apache-2.0"
] | 2
|
2020-09-03T13:54:34.000Z
|
2020-09-25T19:01:29.000Z
|
src/transformers/configuration_pegasus.py
|
abufadl/transformers
|
c84bb6eb92b654e04a82fada26417fcdab45f3af
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 Google and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PEGASUS model configuration """
from .configuration_bart import BART_CONFIG_ARGS_DOC, BartConfig
from .file_utils import add_start_docstrings_to_callable
from .utils import logging
logger = logging.get_logger(__name__)
# These config values do not vary between checkpoints
DEFAULTS = dict(
vocab_size=96103,
max_position_embeddings=512,
d_model=1024,
encoder_ffn_dim=4096,
decoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_attention_heads=16,
encoder_layers=16,
decoder_layers=16,
dropout=0.1,
attention_dropout=0.1,
activation_dropout=0.1,
pad_token_id=0,
eos_token_id=1,
is_encoder_decoder=True,
normalize_before=True,
scale_embedding=True,
normalize_embedding=False,
add_final_layer_norm=True,
static_position_embeddings=True,
num_beams=8,
activation_function="relu",
)
# Config values that vary between checkpoints: for testing and conversion
max_gen_length = {
# See appendix C of paper
"xsum": 64,
"cnn_dailymail": 128,
"newsroom": 128,
"wikihow": 256,
"multi_news": 256,
"reddit_tifu": 128,
"big_patent": 256,
"arxiv": 256,
"pubmed": 256,
"gigaword": 32,
"aeslc": 32,
"billsum": 256,
"large": 256, # @sshleifer chose arbitrarily
}
max_model_length = {
"xsum": 512,
"cnn_dailymail": 1024,
"newsroom": 512,
"wikihow": 512,
"multi_news": 1024,
"reddit_tifu": 512,
"big_patent": 1024,
"arxiv": 1024,
"pubmed": 1024,
"gigaword": 128,
"aeslc": 512,
"billsum": 1024,
"large": 1024,
}
expected_alpha = {
"multinews": 0.9,
"wikihow": 0.6,
"reddit_tifu": 0.6,
"big_patent": 0.7,
"gigaword": 0.6,
"aeslc": 0.6,
"billsum": 0.6,
} # otherwise 0.8
@add_start_docstrings_to_callable(BART_CONFIG_ARGS_DOC)
class PegasusConfig(BartConfig):
r"""
:class:`~transformers.PegasusConfig` is the configuration class to store the configuration of a
`PegasusModel`.
"""
model_type = "pegasus"
# The implementation of the config object is in BartConfig
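# Hedged sketch (not part of the original file): one plausible way the tables
# above could be combined into a checkpoint-specific config. The "xsum" key and
# the mapping of max_gen_length/expected_alpha onto the max_length and
# length_penalty generation arguments are assumptions about how a conversion
# script might use these values, not documented transformers behaviour.
task = "xsum"
cfg_kwargs = dict(DEFAULTS)  # shared architecture values
cfg_kwargs.update(
    max_position_embeddings=max_model_length[task],  # checkpoint input length
    max_length=max_gen_length[task],                  # generation length cap
    length_penalty=expected_alpha.get(task, 0.8),     # 0.8 unless listed above
)
config = PegasusConfig(**cfg_kwargs)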
| 27.17
| 99
| 0.691572
|
8733e19444aa1963a9d23833ad9a02af35ca6e36
| 1,294
|
py
|
Python
|
tensorflow_io/ffmpeg/__init__.py
|
sshrdp/io
|
1b0f8087fa1faa76db1951148288d7f960fed6f6
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:11:23.000Z
|
2019-10-10T06:11:23.000Z
|
tensorflow_io/ffmpeg/__init__.py
|
VonRosenchild/io
|
de05464e53672389119a6215fea9ceacf7f77203
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_io/ffmpeg/__init__.py
|
VonRosenchild/io
|
de05464e53672389119a6215fea9ceacf7f77203
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:11:24.000Z
|
2019-10-10T06:11:24.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FFmpeg Dataset.
@@AudioDataset
@@VideoDataset
@@decode_video
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_io.ffmpeg.python.ops.ffmpeg_ops import AudioDataset
from tensorflow_io.ffmpeg.python.ops.ffmpeg_ops import VideoDataset
from tensorflow_io.ffmpeg.python.ops.ffmpeg_ops import decode_video
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"AudioDataset",
"VideoDataset",
"decode_video",
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| 33.179487
| 80
| 0.746522
|
b96022b3d1d84a918bc30e04598aa937223a5506
| 3,641
|
py
|
Python
|
tests/core/test_hook.py
|
tomekr/cement
|
fece8629c48bcd598fd61d8aa7457a5df4c4f831
|
[
"BSD-3-Clause"
] | 826
|
2015-01-09T13:23:35.000Z
|
2022-03-18T01:19:40.000Z
|
tests/core/test_hook.py
|
tomekr/cement
|
fece8629c48bcd598fd61d8aa7457a5df4c4f831
|
[
"BSD-3-Clause"
] | 316
|
2015-01-14T10:35:22.000Z
|
2022-03-08T17:18:10.000Z
|
tests/core/test_hook.py
|
tomekr/cement
|
fece8629c48bcd598fd61d8aa7457a5df4c4f831
|
[
"BSD-3-Clause"
] | 112
|
2015-01-10T15:04:26.000Z
|
2022-03-16T08:11:58.000Z
|
"""Tests for cement.core.hook."""
from unittest.mock import Mock
from pytest import raises
from cement.core.exc import FrameworkError
from cement.core.foundation import TestApp
# module tests
class TestHookManager(object):
pass
# app functionality and coverage tests
def test_define():
with TestApp() as app:
app.hook.define('test_hook')
# is it defined?
assert app.hook.defined('test_hook')
# registering again should throw exception
with raises(FrameworkError, match='Hook name .* already defined!'):
app.hook.define('test_hook')
def test_register_and_run():
def hook_one():
return 'kapla 1'
def hook_two():
return 'kapla 2'
def hook_three():
return 'kapla 3'
with TestApp() as app:
app.hook.define('test_hook')
app.hook.register('test_hook', hook_one, weight=99)
app.hook.register('test_hook', hook_two, weight=-1)
app.hook.register('test_hook', hook_three, weight=-99)
assert len(app.hook.__hooks__['test_hook']) == 3
# and run it... track results to verify weight run order
results = []
for res in app.hook.run('test_hook'):
results.append(res)
assert results == ['kapla 3', 'kapla 2', 'kapla 1']
def test_register_hook_name_not_defined():
with TestApp() as app:
ret = app.hook.register('bogus_hook', print)
assert ret is False
def test_run_bad_hook():
with TestApp() as app:
with raises(FrameworkError, match='Hook name .* is not defined!'):
for res in app.hook.run('some_bogus_hook'):
pass
def test_framework_hooks():
test_hook = Mock(return_value='bogus')
test_hook.__name__ = 'bogusname'
test_hook_again = Mock(return_value='fake')
test_hook_again.__name__ = 'bogusname'
class MyApp(TestApp):
class Meta:
hooks = [
('pre_setup', test_hook),
('post_setup', test_hook),
('pre_run', test_hook),
('post_run', test_hook),
('pre_argument_parsing', test_hook),
('post_argument_parsing', test_hook),
('pre_close', test_hook),
('post_close', test_hook),
('signal', test_hook),
('pre_render', test_hook),
('pre_render', test_hook_again),
('post_render', test_hook),
('post_render', test_hook),
]
with MyApp() as app:
# Pre- and post- setup
assert test_hook.call_count == 2
test_hook.reset_mock()
# Pre/post run (+ pre/post argparse)
# App has no controller, so it also parses args here
app.run()
assert test_hook.call_count == 4
test_hook.reset_mock()
# pre/post render
# two hooks each, one is test_hook_again
app.render({1: 'bogus'})
assert test_hook.call_count == 3
assert test_hook_again.call_count == 1
test_hook.reset_mock()
test_hook_again.reset_mock()
# TODO: Test that signal hook gets called properly
# pre/post close
assert test_hook.call_count == 2
def test_generate_type_hook():
def my_generator():
for i in [1, 1, 1]:
yield i
with TestApp() as app:
app.hook.define('test_hook')
app.hook.register('test_hook', my_generator)
app.run()
for res in app.hook.run('test_hook'):
assert res == 1
def test_list():
with TestApp() as app:
assert 'pre_setup' in app.hook.list()
| 27.171642
| 75
| 0.592145
|
50458d53406099c4bf8e9d47bc50aa981ef2484b
| 90
|
py
|
Python
|
ciphey/__init__.py
|
usama7628674/Ciphey
|
e18801c506e93e7e9377d0bbc6870ecd84ae2f61
|
[
"MIT"
] | 1
|
2021-05-30T19:55:00.000Z
|
2021-05-30T19:55:00.000Z
|
ciphey/__init__.py
|
usama7628674/Ciphey
|
e18801c506e93e7e9377d0bbc6870ecd84ae2f61
|
[
"MIT"
] | null | null | null |
ciphey/__init__.py
|
usama7628674/Ciphey
|
e18801c506e93e7e9377d0bbc6870ecd84ae2f61
|
[
"MIT"
] | null | null | null |
from . import common
from . import iface
from . import basemods
from . import __main__
| 11.25
| 22
| 0.744444
|
deaf136c5924ed815bb2b4f78dd974d08e76afd3
| 17,758
|
py
|
Python
|
uds/message/nrc.py
|
mdabrowski1990/uds
|
1aee0c1de446ee3dd461706949504f2c218db1e8
|
[
"MIT"
] | 18
|
2021-03-28T22:39:18.000Z
|
2022-02-13T21:50:37.000Z
|
uds/message/nrc.py
|
mdabrowski1990/uds
|
1aee0c1de446ee3dd461706949504f2c218db1e8
|
[
"MIT"
] | 153
|
2021-02-09T09:27:05.000Z
|
2022-03-29T06:09:15.000Z
|
uds/message/nrc.py
|
mdabrowski1990/uds
|
1aee0c1de446ee3dd461706949504f2c218db1e8
|
[
"MIT"
] | 1
|
2021-05-13T16:01:46.000Z
|
2021-05-13T16:01:46.000Z
|
"""
Module with an entire Negative Response Code (NRC) data parameters implementation.
.. note:: Explanation of :ref:`NRC <knowledge-base-nrc>` values meaning is located in appendix A1 of
ISO 14229-1 standard.
"""
__all__ = ["NRC"]
from aenum import unique
from uds.utilities import ByteEnum, ValidatedEnum, ExtendableEnum
@unique
class NRC(ByteEnum, ValidatedEnum, ExtendableEnum):
"""
Negative Response Codes (NRC) values.
`Negative Response Code <knowledge-base-nrc>` is a data parameter located in the last byte of a negative response
message. NRC informs why a server is not sending a positive response message.
"""
# PositiveResponse = 0x00
# This NRC shall not be used in a negative response message as positiveResponse parameter value is reserved
# for server internal implementation. Refer to 8.7.5 of ISO 14229-1 for more details.
GeneralReject = 0x10 # noqa: F841
"""GeneralReject (0x10) NRC indicates that the requested action has been rejected by the server."""
ServiceNotSupported = 0x11 # noqa: F841
"""ServiceNotSupported (0x11) NRC indicates that the requested action will not be taken because the server does not
support the requested service."""
SubFunctionNotSupported = 0x12 # noqa: F841
"""SubFunctionNotSupported (0x12) NRC indicates that the requested action will not be taken because the server
does not support the service specific parameters of the request message."""
IncorrectMessageLengthOrInvalidFormat = 0x13 # noqa: F841
"""IncorrectMessageLengthOrInvalidFormat (0x13) NRC indicates that the requested action will not be taken because
the length of the received request message does not match the prescribed length for the specified service or
the format of the parameters do not match the prescribed format for the specified service."""
ResponseTooLong = 0x14 # noqa: F841
"""ResponseTooLong (0x14) NRC shall be reported by the server if the response to be generated exceeds the maximum
number of bytes available by the underlying network layer. This could occur if the response message exceeds
the maximum size allowed by the underlying transport protocol or if the response message exceeds the server buffer
size allocated for that purpose."""
BusyRepeatRequest = 0x21 # noqa: F841
"""BusyRepeatRequest (0x21) NRC indicates that the server is temporarily too busy to perform the requested
operation. In this circumstance the client shall perform repetition of the “identical request message”
or “another request message”. The repetition of the request shall be delayed by a time specified in the respective
implementation documents."""
ConditionsNotCorrect = 0x22 # noqa: F841
"""ConditionsNotCorrect (0x22) NRC indicates that the requested action will not be taken because the server
prerequisite conditions are not met."""
RequestSequenceError = 0x24 # noqa: F841
"""RequestSequenceError (0x24) NRC indicates that the requested action will not be taken because the server
expects a different sequence of request messages or message as sent by the client. This may occur when sequence
sensitive requests are issued in the wrong order."""
NoResponseFromSubnetComponent = 0x25 # noqa: F841
"""NoResponseFromSubnetComponent (0x25) NRC indicates that the server has received the request but the requested
action could not be performed by the server as a subnet component which is necessary to supply the requested
information did not respond within the specified time."""
FailurePreventsExecutionOfRequestedAction = 0x26 # noqa: F841
"""FailurePreventsExecutionOfRequestedAction (0x26) NRC indicates that the requested action will not be taken
because a failure condition, identified by a DTC (with at least one DTC status bit for TestFailed, Pending,
Confirmed or TestFailedSinceLastClear set to 1), has occurred and that this failure condition prevents
the server from performing the requested action."""
RequestOutOfRange = 0x31 # noqa: F841
"""RequestOutOfRange (0x31) NRC indicates that the requested action will not be taken because the server has
detected that the request message contains a parameter which attempts to substitute a value beyond its range of
authority (e.g. attempting to substitute a data byte of 111 when the data is only defined to 100), or which
attempts to access a DataIdentifier/RoutineIdentifer that is not supported or not supported in active session."""
SecurityAccessDenied = 0x33 # noqa: F841
"""SecurityAccessDenied (0x33) NRC indicates that the requested action will not be taken because the server’s
security strategy has not been satisfied by the client."""
AuthenticationRequired = 0x34 # noqa: F841
"""AuthenticationRequired (0x34) NRC indicates that the requested service will not be taken because the client
has insufficient rights based on its Authentication state."""
InvalidKey = 0x35 # noqa: F841
"""InvalidKey (0x35) NRC indicates that the server has not given security access because the key sent by
the client did not match with the key in the server’s memory. This counts as an attempt to gain security."""
ExceedNumberOfAttempts = 0x36 # noqa: F841
"""ExceedNumberOfAttempts (0x36) NRC indicates that the requested action will not be taken because the client
has unsuccessfully attempted to gain security access more times than the server’s security strategy will allow."""
RequiredTimeDelayNotExpired = 0x37 # noqa: F841
"""RequiredTimeDelayNotExpired (0x37) NRC indicates that the requested action will not be taken because
the client’s latest attempt to gain security access was initiated before the server’s required timeout period had
elapsed."""
SecureDataTransmissionRequired = 0x38 # noqa: F841
"""SecureDataTransmissionRequired (0x38) NRC indicates that the requested service will not be taken because
the requested action is required to be sent using a secured communication channel."""
SecureDataTransmissionNotAllowed = 0x39 # noqa: F841
"""SecureDataTransmissionNotAllowed (0x39) NRC indicates that this message was received using
the SecuredDataTransmission (SID 0x84) service. However, the requested action is not allowed to be sent using
the SecuredDataTransmission (0x84) service."""
SecureDataVerificationFailed = 0x3A # noqa: F841
"""SecureDataVerificationFailed (0x3A) NRC indicates that the message failed in the security sub-layer."""
CertificateVerificationFailed_InvalidTimePeriod = 0x50 # noqa: F841
"""CertificateVerificationFailed_InvalidTimePeriod (0x50) NRC indicates that date and time of the server does not
match the validity period of the Certificate."""
CertificateVerificationFailed_InvalidSignature = 0x51 # noqa: F841
"""CertificateVerificationFailed_InvalidSignature (0x51) NRC indicates that signature of the Certificate could
not be verified."""
CertificateVerificationFailed_InvalidChainOfTrust = 0x52 # noqa: F841
"""CertificateVerificationFailed_InvalidChainOfTrust (0x52) NRC indicates that The Certificate could not be
verified against stored information about the issuing authority."""
CertificateVerificationFailed_InvalidType = 0x53 # noqa: F841
"""CertificateVerificationFailed_InvalidType (0x53) NRC indicates that the Certificate does not match the current
requested use case."""
CertificateVerificationFailed_InvalidFormat = 0x54 # noqa: F841
"""CertificateVerificationFailed_InvalidFormat (0x54) NRC indicates that the Certificate could not be evaluated
because the format requirement has not been met."""
CertificateVerificationFailed_InvalidContent = 0x55 # noqa: F841
"""CertificateVerificationFailed_InvalidContent (0x55) NRC indicates that the Certificate could not be verified
because the content does not match."""
CertificateVerificationFailed_InvalidScope = 0x56 # noqa: F841
"""CertificateVerificationFailed_InvalidScope (0x56) NRC indicates that the scope of the Certificate does not match
the contents of the server."""
CertificateVerificationFailed_InvalidCertificate = 0x57 # noqa: F841
"""CertificateVerificationFailed_InvalidCertificate (0x57) NRC indicates that the Certificate received from client
is invalid, because the server has revoked access for some reason."""
OwnershipVerificationFailed = 0x58 # noqa: F841
"""OwnershipVerificationFailed (0x58) NRC indicates that delivered Ownership does not match the provided challenge
or could not verified with the own private key."""
ChallengeCalculationFailed = 0x59 # noqa: F841
"""ChallengeCalculationFailed (0x59) NRC indicates that the challenge could not be calculated on the server side."""
SettingAccessRightsFailed = 0x5A # noqa: F841
"""SettingAccessRightsFailed (0x5A) NRC indicates that the server could not set the access rights."""
SessionKeyCreationOrDerivationFailed = 0x5B # noqa: F841
"""SessionKeyCreationOrDerivationFailed (0x5B) NRC indicates that the server could not create or derive
a session key."""
ConfigurationDataUsageFailed = 0x5C # noqa: F841
"""ConfigurationDataUsageFailed (0x5C) NRC indicates that the server could not work with the provided
configuration data."""
DeAuthenticationFailed = 0x5D # noqa: F841
"""DeAuthenticationFailed (0x5D) NRC indicates that DeAuthentication was not successful, server could still be
unprotected."""
UploadDownloadNotAccepted = 0x70 # noqa: F841
"""UploadDownloadNotAccepted (0x70) NRC indicates that an attempt to upload/download to a server’s memory cannot
be accomplished due to some fault conditions."""
TransferDataSuspended = 0x71 # noqa: F841
"""TransferDataSuspended (0x71) NRC indicates that a data transfer operation was halted due to some fault.
The active transferData sequence shall be aborted."""
GeneralProgrammingFailure = 0x72 # noqa: F841
"""GeneralProgrammingFailure (0x72) NRC indicates that the server detected an error when erasing or programming
a memory location in the permanent memory device (e.g. Flash Memory)."""
WrongBlockSequenceCounter = 0x73 # noqa: F841
"""WrongBlockSequenceCounter (0x73) NRC indicates that the server detected an error in the sequence of
blockSequenceCounter values. Note that the repetition of a TransferData request message with a blockSequenceCounter
equal to the one included in the previous TransferData request message shall be accepted by the server."""
    RequestCorrectlyReceived_ResponsePending = 0x78  # noqa: F841
    """RequestCorrectlyReceived_ResponsePending (0x78) NRC indicates that the request message was received correctly,
and that all parameters in the request message were valid (these checks can be delayed until after sending this NRC
if executing the boot software), but the action to be performed is not yet completed and the server is not yet
ready to receive another request. As soon as the requested service has been completed, the server shall send
a positive response message or negative response message with a response code different from this."""
SubFunctionNotSupportedInActiveSession = 0x7E # noqa: F841
"""SubFunctionNotSupportedInActiveSession (0x7E) NRC indicates that the requested action will not be taken because
the server does not support the requested SubFunction in the session currently active. This NRC shall only be used
when the requested SubFunction is known to be supported in another session, otherwise response code
SubFunctionNotSupported shall be used."""
ServiceNotSupportedInActiveSession = 0x7F # noqa: F841
"""ServiceNotSupportedInActiveSession (0x7F) NRC indicates that the requested action will not be taken because
the server does not support the requested service in the session currently active. This NRC shall only be used when
the requested service is known to be supported in another session, otherwise response code serviceNotSupported
shall be used."""
RpmTooHigh = 0x81 # noqa: F841
"""RpmTooHigh (0x81) NRC indicates that the requested action will not be taken because the server prerequisite
condition for RPM is not met (current RPM is above a preprogrammed maximum threshold)."""
RpmTooLow = 0x82 # noqa: F841
"""RpmTooLow (0x82) NRC indicates that the requested action will not be taken because the server prerequisite
condition for RPM is not met (current RPM is below a preprogrammed minimum threshold)."""
EngineIsRunning = 0x83 # noqa: F841
"""EngineIsRunning (0x83) NRC is required for those actuator tests which cannot be actuated while the Engine
is running. This is different from RPM too high negative response, and shall be allowed."""
EngineIsNotRunning = 0x84 # noqa: F841
"""EngineIsNotRunning (0x84) NRC is required for those actuator tests which cannot be actuated unless the Engine
is running. This is different from RPM too low negative response, and shall be allowed."""
EngineRunTimeTooLow = 0x85 # noqa: F841
"""EngineRunTimeTooLow (0x85) NRC indicates that the requested action will not be taken because the server
prerequisite condition for engine run time is not met (current engine run time is below a preprogrammed limit)."""
    TemperatureTooHigh = 0x86  # noqa: F841
    """TemperatureTooHigh (0x86) NRC indicates that the requested action will not be taken because the server
prerequisite condition for temperature is not met (current temperature is above a preprogrammed maximum
threshold)."""
TemperatureTooLow = 0x87 # noqa: F841
"""TemperatureTooLow (0x87) NRC indicates that the requested action will not be taken because the server
prerequisite condition for temperature is not met (current temperature is below a preprogrammed minimum
threshold)."""
VehicleSpeedTooHigh = 0x88 # noqa: F841
"""VehicleSpeedTooHigh (0x88) NRC indicates that the requested action will not be taken because the server
prerequisite condition for vehicle speed is not met (current VS is above a preprogrammed maximum threshold)."""
VehicleSpeedTooLow = 0x89 # noqa: F841
"""VehicleSpeedTooLow (0x89) NRC indicates that the requested action will not be taken because the server
prerequisite condition for vehicle speed is not met (current VS is below a preprogrammed minimum threshold)."""
ThrottleOrPedalTooHigh = 0x8A # noqa: F841
"""ThrottleOrPedalTooHigh (0x8A) NRC indicates that the requested action will not be taken because the server
prerequisite condition for throttle/pedal position is not met (current throttle/pedal position is above
a preprogrammed maximum threshold)."""
ThrottleOrPedalTooLow = 0x8B # noqa: F841
"""ThrottleOrPedalTooLow (0x8B) NRC indicates that the requested action will not be taken because the server
prerequisite condition for throttle/pedal position is not met (current throttle/pedal position is below
a preprogrammed minimum threshold)."""
TransmissionRangeNotInNeutral = 0x8C # noqa: F841
"""TransmissionRangeNotInNeutral (0x8C) NRC indicates that the requested action will not be taken because
the server prerequisite condition for being in neutral is not met (current transmission range is not in neutral)."""
TransmissionRangeNotInGear = 0x8D # noqa: F841
"""TransmissionRangeNotInGear (0x8D) NRC indicates that the requested action will not be taken because
the server prerequisite condition for being in gear is not met (current transmission range is not in gear)."""
BrakeSwitchOrSwitchesNotClosed = 0x8F # noqa: F841
"""BrakeSwitchOrSwitchesNotClosed (0x8F) NRC indicates that for safety reasons, this is required for certain
tests before it begins, and shall be maintained for the entire duration of the test."""
ShifterLeverNotInPark = 0x90 # noqa: F841
"""ShifterLeverNotInPark (0x90) NRC indicates that for safety reasons, this is required for certain tests before
it begins, and shall be maintained for the entire duration of the test."""
    TorqueConvertClutchLocked = 0x91  # noqa: F841
    """TorqueConvertClutchLocked (0x91) NRC indicates that the requested action will not be taken because the server
prerequisite condition for torque converter clutch is not met (current torque converter clutch status above
a preprogrammed limit or locked)."""
VoltageTooHigh = 0x92 # noqa: F841
"""VoltageTooHigh (0x92) NRC indicates that the requested action will not be taken because the server prerequisite
condition for voltage at the primary pin of the server (ECU) is not met (current voltage is above a preprogrammed
maximum threshold)."""
VoltageTooLow = 0x93 # noqa: F841
"""VoltageTooLow (0x93) NRC indicates that the requested action will not be taken because the server prerequisite
condition for voltage at the primary pin of the server (ECU) is not met (current voltage is below a preprogrammed
minimum threshold)."""
ResourceTemporarilyNotAvailable = 0x94 # noqa: F841
"""ResourceTemporarilyNotAvailable (0x94) NRC indicates that the server has received the request but the requested
action could not be performed by the server because an application which is necessary to supply the requested
information is temporality not available. This NRC is in general supported by each diagnostic service, as not
otherwise stated in the data link specific implementation document, therefore it is not listed in the list of
applicable response codes of the diagnostic services."""
# TODO: add alias
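# Hedged usage sketch (not part of the original module): NRC behaves like an
# integer-valued enum, so the raw NRC byte of a negative response message can
# be mapped back to its symbolic name. Any extra helpers provided by
# ValidatedEnum or ExtendableEnum are left out of this sketch as assumptions.
raw_nrc = 0x31                     # last byte of a negative response message
code = NRC(raw_nrc)                # -> NRC.RequestOutOfRange
print(code.name, hex(code.value))  # RequestOutOfRange 0x31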
| 76.214592
| 120
| 0.768837
|
710319a6f514f246ad36204b0d678dd54a257995
| 413
|
py
|
Python
|
backend/patient_frog_28586/wsgi.py
|
crowdbotics-apps/patient-frog-28586
|
882a4431d2aae31c535ad0845f17fee5503a231c
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/patient_frog_28586/wsgi.py
|
crowdbotics-apps/patient-frog-28586
|
882a4431d2aae31c535ad0845f17fee5503a231c
|
[
"FTL",
"AML",
"RSA-MD"
] | 20
|
2021-07-08T11:44:38.000Z
|
2021-07-08T11:44:40.000Z
|
backend/patient_frog_28586/wsgi.py
|
crowdbotics-apps/patient-frog-28586
|
882a4431d2aae31c535ad0845f17fee5503a231c
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for patient_frog_28586 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'patient_frog_28586.settings')
application = get_wsgi_application()
| 24.294118
| 78
| 0.79661
|
db2c97ae0f84d7ef10b854b5656dd53835714ecd
| 54
|
py
|
Python
|
mpl_scipub/__init__.py
|
dormrod/matplotlib_wrapper
|
40ef4c20ac0d394cbbbfcb91da92c2c919271c6b
|
[
"MIT"
] | 1
|
2020-08-02T00:13:01.000Z
|
2020-08-02T00:13:01.000Z
|
mpl_scipub/__init__.py
|
dormrod/mpl_scipub
|
40ef4c20ac0d394cbbbfcb91da92c2c919271c6b
|
[
"MIT"
] | null | null | null |
mpl_scipub/__init__.py
|
dormrod/mpl_scipub
|
40ef4c20ac0d394cbbbfcb91da92c2c919271c6b
|
[
"MIT"
] | null | null | null |
from .plotter import Plot
from .dataset import DataSet
| 27
| 28
| 0.833333
|
1a381b8511c7524f3f8331b5c64c3e07385898b1
| 2,512
|
py
|
Python
|
tools/build/genids.py
|
fraca7/dsremap
|
fb8f4fb13e74b512ed0cac05387fbe9694faebcf
|
[
"MIT"
] | 8
|
2020-09-06T02:15:10.000Z
|
2022-01-12T22:49:20.000Z
|
tools/build/genids.py
|
fraca7/dsremap
|
fb8f4fb13e74b512ed0cac05387fbe9694faebcf
|
[
"MIT"
] | 5
|
2021-03-29T20:37:46.000Z
|
2021-09-19T13:20:24.000Z
|
tools/build/genids.py
|
fraca7/dsremap
|
fb8f4fb13e74b512ed0cac05387fbe9694faebcf
|
[
"MIT"
] | 2
|
2020-09-16T01:45:49.000Z
|
2021-06-12T12:38:15.000Z
|
#!/usr/bin/env python3
import os
import time
import codecs
import getopt
import sys
import pickle
"""
Generates the C header file that #defines the numerical message log IDs
"""
def write_ids(defname, dstname, strname):
current = 0
strings = {}
seen = set()
with codecs.getreader('utf-8')(open(defname, 'rb')) as src:
with codecs.getwriter('utf-8')(open(dstname, 'wb')) as dst:
name = os.path.basename(dstname).upper().replace('.', '_')
dst.write('\n#ifndef %s\n' % name)
dst.write('#define %s\n\n' % name)
for line in src:
line = line.strip()
if not line:
dst.write('\n')
continue
if line.startswith('#'):
dst.write('/* %s */\n' % line[1:].strip())
continue
idx = line.index(' ')
name, value = line[:idx], line[idx:].strip()
if value in seen:
raise ValueError('Duplicate string ID "%s"' % value)
seen.add(value)
dst.write('#define %s %d // %s\n' % (name, current, value))
strings[current] = value
current += 1
if current == 65536:
                    raise ValueError('Too many string IDs')
dst.write('#endif\n')
with open(strname, 'wb') as fileobj:
pickle.dump(strings, fileobj)
def usage(code=1):
print('Usage: %s -o output_filename -s strings_filename input_filename' % sys.argv[0])
sys.exit(code)
def main(argv):
try:
opts, args = getopt.getopt(argv, 'ho:s:', ['help', 'output=', 'strings='])
except getopt.Error as exc:
print(exc)
usage()
if len(args) == 0:
print('No input file specified')
usage()
if len(args) > 1:
print('Too many arguments')
usage()
outname = None
strname = None
for opt, val in opts:
if opt in ('-h', '--help'):
usage(0)
if opt in ('-o', '--output'):
outname = val
if opt in ('-s', '--strings'):
strname = val
if outname is None:
print('No output file specified')
usage()
if strname is None:
print('No strings file specified')
usage()
try:
write_ids(args[0], outname, strname)
except Exception as exc:
print(exc)
usage()
if __name__ == '__main__':
main(sys.argv[1:])
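# Hedged example (not part of the original script): the input file consists of
# "# section comment" lines and "NAME message text" lines. The names and
# messages below are invented purely to illustrate the format:
#
#   # Boot messages
#   MSG_BOOT_OK Boot sequence completed
#   MSG_BOOT_FAIL Boot sequence failed
#
# which write_ids() would turn into a header along the lines of:
#
#   /* Boot messages */
#   #define MSG_BOOT_OK 0 // Boot sequence completed
#   #define MSG_BOOT_FAIL 1 // Boot sequence failed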
| 26.166667
| 90
| 0.506768
|
d24142157aafef9fec1e4c59909c8fdd6d0c4adf
| 2,086
|
py
|
Python
|
PLAYLIST DURATION/playlistduration.py
|
Pratyay360/python-youtube
|
4d9200851705728e433c79680b11435648c87d35
|
[
"MIT"
] | 1
|
2021-06-16T14:27:20.000Z
|
2021-06-16T14:27:20.000Z
|
PLAYLIST DURATION/playlistduration.py
|
Pratyay360/python-youtube
|
4d9200851705728e433c79680b11435648c87d35
|
[
"MIT"
] | null | null | null |
PLAYLIST DURATION/playlistduration.py
|
Pratyay360/python-youtube
|
4d9200851705728e433c79680b11435648c87d35
|
[
"MIT"
] | null | null | null |
# CREATED BY PRATYAY
import os
import re
from datetime import timedelta
from googleapiclient.discovery import build
'''
ENTER API KEY COLLECTED FROM https://developers.google.com/youtube/v3/getting-started
'''
api_key = os.environ.get('API_KEY_HERE')
youtube = build('youtube', 'v3', developerKey=api_key)
hours_pattern = re.compile(r'(\d+)H')
minutes_pattern = re.compile(r'(\d+)M')
seconds_pattern = re.compile(r'(\d+)S')
total_seconds = 0
nextPageToken = None
while True:
pl_request = youtube.playlistItems().list(
part='contentDetails',
##
## Enter playlist id
# https://www.youtube.com/playlist?list=PL-kIBfSqQg3vm9LJsLW-ct_egdWKv3WKR
        # This is a YouTube playlist link; the part after
        # https://www.youtube.com/playlist?list= is the playlist id.
playlistId="PLAYLIST_ID_HERE",
maxResults=5000,
pageToken=nextPageToken
)
pl_response = pl_request.execute()
vid_ids = []
for item in pl_response['items']:
vid_ids.append(item['contentDetails']['videoId'])
vid_request = youtube.videos().list(
part="contentDetails",
id=','.join(vid_ids)
)
vid_response = vid_request.execute()
for item in vid_response['items']:
duration = item['contentDetails']['duration']
hours = hours_pattern.search(duration)
minutes = minutes_pattern.search(duration)
seconds = seconds_pattern.search(duration)
hours = int(hours.group(1)) if hours else 0
minutes = int(minutes.group(1)) if minutes else 0
seconds = int(seconds.group(1)) if seconds else 0
video_seconds = timedelta(
hours=hours,
minutes=minutes,
seconds=seconds
).total_seconds()
total_seconds += video_seconds
nextPageToken = pl_response.get('nextPageToken')
if not nextPageToken:
break
total_seconds = int(total_seconds)
minutes, seconds = divmod(total_seconds, 60)
hours, minutes = divmod(minutes, 60)
print(f'{hours}:{minutes}:{seconds}')
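# Hedged helper sketch (not part of the original script): as the comments above
# explain, the playlist id is the value of the "list" query parameter of a
# playlist URL, so it can be extracted with the standard library. The example
# URL is the one already quoted in the comments above.
from urllib.parse import urlparse, parse_qs
def playlist_id_from_url(url):
    """Return the 'list' query parameter of a YouTube playlist URL."""
    return parse_qs(urlparse(url).query)["list"][0]
print(playlist_id_from_url(
    "https://www.youtube.com/playlist?list=PL-kIBfSqQg3vm9LJsLW-ct_egdWKv3WKR"
))  # -> PL-kIBfSqQg3vm9LJsLW-ct_egdWKv3WKR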
| 26.405063
| 85
| 0.655801
|
22fa2f249da5df80119685484986ca4a0a849762
| 4,638
|
py
|
Python
|
LongTermMemory/src/LongTermMemoryServer.py
|
emschimmel/BrainPi
|
96c0ccb727507bcce4d48292f77bb80a77164a0c
|
[
"Apache-2.0"
] | 4
|
2017-11-12T12:55:32.000Z
|
2020-03-05T06:12:25.000Z
|
LongTermMemory/src/LongTermMemoryServer.py
|
emschimmel/BrainPi
|
96c0ccb727507bcce4d48292f77bb80a77164a0c
|
[
"Apache-2.0"
] | 21
|
2017-11-24T10:18:29.000Z
|
2018-01-24T10:09:34.000Z
|
LongTermMemory/src/LongTermMemoryServer.py
|
emschimmel/BrainPi
|
96c0ccb727507bcce4d48292f77bb80a77164a0c
|
[
"Apache-2.0"
] | null | null | null |
import signal
import sys
import consul
from multiprocessing.managers import SyncManager
sys.path.append('./gen-py')
from LongMemory import LongMemoryService
from LongMemory.ttypes import *
from ThriftException.ttypes import *
from Memory.PersonMemory import PersonMemory
from Memory.AutorisationActions import AutorisationActions
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from ThriftException.ttypes import BadHashException
from ThriftException.ttypes import LoginFailedException
from ThriftException.ttypes import UniqueFailedException
sys.path.append('../../')
import config
import logging
import random
import statsd
port = random.randint(58850, 58860)
stat = statsd.StatsClient(config.statsd_ip, config.statsd_port)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
class LongTermMemoryThriftServer:
def __init__(self):
self.log = {}
@stat.timer("LongTermMemory.loginCall")
def loginCall(self, loginobject):
try:
person = AutorisationActions().login(loginobject)
return person
except BadHashException as bad:
raise BadHashException
except LoginFailedException as fail:
raise LoginFailedException
@stat.timer("LongTermMemory.getPersonConfig")
def getPersonConfig(self, uniquename):
try:
return PersonMemory().getPerson(uniquename)
except Exception as ex:
print('invalid request %s' % ex)
@stat.timer("LongTermMemory.storeNewPerson")
def storeNewPerson(self, person):
try:
PersonMemory().storeNewPerson(person=person)
except UniqueFailedException as unique:
raise unique
except Exception as ex:
print('invalid request %s' % ex)
@stat.timer("LongTermMemory.updatePerson")
def updatePerson(self, field, person):
try:
PersonMemory().updatePerson(uniquename=person.uniquename, field=field, person=person)
except Exception as ex:
print('invalid request %s' % ex)
@stat.timer("LongTermMemory.updateActionConfig")
def updateActionConfig(self, uniquename, action, user_config):
try:
PersonMemory().updateActionConfig(uniquename=uniquename, action=action, user_config=user_config)
except Exception as ex:
print('invalid request %s' % ex)
@stat.timer("LongTermMemory.changePassword")
def changePassword(self, username, password):
try:
AutorisationActions().changePassword(username, password)
except BadHashException as bad:
raise bad
@stat.timer("LongTermMemory.getAll")
def getAll(self):
try:
return PersonMemory().getAll()
except Exception as ex:
print('invalid request %s' % ex)
def get_ip():
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('255.255.255.255', 1)) # isn't reachable intentionally
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
def create_server():
handler = LongTermMemoryThriftServer()
return TServer.TSimpleServer(
LongMemoryService.Processor(handler),
TSocket.TServerSocket(port=port),
TTransport.TBufferedTransportFactory(),
TBinaryProtocol.TBinaryProtocolFactory()
)
def register():
log.info("register started")
c = consul.Consul(host=config.consul_ip, port=config.consul_port)
check = consul.Check.tcp(host=get_ip(), port=port, interval=config.consul_interval,
timeout=config.consul_timeout, deregister=unregister())
c.agent.service.register(name="long-term-memory", service_id="long-term-memory-%d" % port, port=port, check=check)
log.info("services: " + str(c.agent.services()))
def unregister():
log.info("unregister started")
c = consul.Consul(host=config.consul_ip, port=config.consul_port)
c.agent.service.deregister("long-term-memory-%d" % port)
c.agent.service.deregister("long-term-memory")
log.info("services: " + str(c.agent.services()))
def interupt_manager():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def main(args=None):
manager = SyncManager()
manager.start(interupt_manager)
try:
server = create_server()
register()
server.serve()
finally:
unregister()
print('finally Long term memory Shutting down')
manager.shutdown()
if __name__ == '__main__':
main()
| 31.55102
| 118
| 0.68564
|
5a711d138047e6627d1cbd27de035918f9afcea9
| 215
|
py
|
Python
|
tests/setup_test/test.py
|
khurrumsaleem/plotter
|
a8abcda71c20f6f51f635636338a3ee95a78733e
|
[
"MIT"
] | 17
|
2019-03-30T14:44:08.000Z
|
2022-03-15T16:00:47.000Z
|
tests/setup_test/test.py
|
khurrumsaleem/plotter
|
a8abcda71c20f6f51f635636338a3ee95a78733e
|
[
"MIT"
] | 55
|
2019-03-22T18:44:19.000Z
|
2022-03-03T19:11:06.000Z
|
tests/setup_test/test.py
|
khurrumsaleem/plotter
|
a8abcda71c20f6f51f635636338a3ee95a78733e
|
[
"MIT"
] | 11
|
2019-03-22T22:52:56.000Z
|
2021-03-18T22:59:00.000Z
|
from openmc_plotter.main_window import MainWindow, _openmcReload
def test_window(qtbot):
_openmcReload()
mw = MainWindow()
mw.loadGui()
mw.plotIm.figure.savefig("test.png")
qtbot.addWidget(mw)
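# Usage note (assumption, not taken from the original repo docs): this test relies
# on the pytest-qt plugin, which supplies the `qtbot` fixture, so it would be run
# with something like `pytest tests/setup_test/test.py` in an environment where
# pytest-qt and the openmc_plotter package are installed.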
| 21.5
| 64
| 0.72093
|
a9420af5c49eeb7db1623f370c6e48e76b87b7d2
| 2,452
|
py
|
Python
|
trax/tf_numpy/numpy_impl/dtypes.py
|
Nishant-Pall/trax
|
f714a271111578d4e6b3ac445eef86ca03dc7fa6
|
[
"Apache-2.0"
] | 2
|
2020-08-08T14:38:53.000Z
|
2021-03-04T01:00:17.000Z
|
trax/tf_numpy/numpy/dtypes.py
|
ZachT1711/trax
|
a0a3dd8d49e53fc48bb24cc08c10a8a53517e7bc
|
[
"Apache-2.0"
] | null | null | null |
trax/tf_numpy/numpy/dtypes.py
|
ZachT1711/trax
|
a0a3dd8d49e53fc48bb24cc08c10a8a53517e7bc
|
[
"Apache-2.0"
] | 1
|
2020-03-06T06:36:36.000Z
|
2020-03-06T06:36:36.000Z
|
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dtypes and dtype utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# We use numpy's dtypes instead of TF's, because the user expects to use them
# with numpy facilities such as `np.dtype(np.int64)` and
# `if x.dtype.type is np.int64`.
# pylint: disable=unused-import
# pylint: disable=g-bad-import-order
from numpy import bool_
from numpy import int_
from numpy import int16
from numpy import int32
from numpy import int64
from numpy import int8
from numpy import uint16
from numpy import uint32
from numpy import uint64
from numpy import uint8
from numpy import float_
from numpy import float16
from numpy import float32
from numpy import float64
from numpy import complex_
from numpy import complex64
from numpy import complex128
from numpy import inexact
from numpy import iinfo
from numpy import issubdtype
from numpy import inf
# TODO(wangpeng): Make bfloat16 a numpy dtype instead of using TF's
from tensorflow.compat.v2 import bfloat16
# pylint: enable=g-bad-import-order
# pylint: enable=unused-import
_to_float32 = {
np.dtype('float64'): np.dtype('float32'),
np.dtype('complex128'): np.dtype('complex64'),
}
_allow_float64 = True
def is_allow_float64():
return _allow_float64
def set_allow_float64(b):
global _allow_float64
_allow_float64 = b
def canonicalize_dtype(dtype):
if not is_allow_float64():
return _to_float32.get(dtype, dtype)
else:
return dtype
def _result_type(*arrays_and_dtypes):
dtype = np.result_type(*arrays_and_dtypes)
return canonicalize_dtype(dtype)
def default_float_type():
"""Gets the default float type.
Returns:
If `is_allow_float64()` is true, returns float64; otherwise returns float32.
"""
if is_allow_float64():
return float64
else:
return float32
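# Illustrative sketch (not part of the original module): how the helpers above are
# expected to interact, using only names defined in this file.
def _canonicalize_example():
    set_allow_float64(False)
    # 64-bit types are canonicalized down to their 32-bit counterparts
    assert canonicalize_dtype(np.dtype('float64')) == np.dtype('float32')
    assert _result_type(np.int32, 1.0) == np.dtype('float32')
    set_allow_float64(True)
    assert default_float_type() is float64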
| 24.767677
| 80
| 0.766721
|
9526056407692c0d76e952e26eea096b8f9a29c2
| 814
|
py
|
Python
|
MLProjects/Phishing-URL-Detection/phishingurldetector/phishingurldetector/urls.py
|
evidawei/HacktoberFest_2021
|
3c950c6a6451ac732c4090f374c7dc4b6ef36c50
|
[
"MIT"
] | 33
|
2021-10-01T17:51:53.000Z
|
2022-03-20T11:30:09.000Z
|
MLProjects/Phishing-URL-Detection/phishingurldetector/phishingurldetector/urls.py
|
evidawei/HacktoberFest_2021
|
3c950c6a6451ac732c4090f374c7dc4b6ef36c50
|
[
"MIT"
] | 69
|
2021-10-01T09:07:22.000Z
|
2021-10-20T02:21:12.000Z
|
MLProjects/Phishing-URL-Detection/phishingurldetector/phishingurldetector/urls.py
|
evidawei/HacktoberFest_2021
|
3c950c6a6451ac732c4090f374c7dc4b6ef36c50
|
[
"MIT"
] | 187
|
2021-10-01T09:06:51.000Z
|
2022-01-29T03:18:30.000Z
|
"""phishingurldetector URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('', include('phishingapp.urls')),
path('admin/', admin.site.urls),
]
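# Hypothetical sketch (not part of this project): the app-level URLconf included
# above, phishingapp/urls.py, would typically follow the same pattern, e.g.
#   from django.urls import path
#   from . import views
#   urlpatterns = [path('', views.index, name='index')]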
| 33.916667
| 77
| 0.707617
|
d5b726a2b257a4922f376184299305300848ca92
| 9,913
|
py
|
Python
|
tierpsy/analysis/int_ske_orient/checkFinalOrientation.py
|
SKFnordquist/tierpsy-tracker
|
7ed935fdd3ee5920914effb1bf52dec45b7d54fc
|
[
"MIT"
] | 1
|
2019-02-24T23:45:36.000Z
|
2019-02-24T23:45:36.000Z
|
tierpsy/analysis/int_ske_orient/checkFinalOrientation.py
|
SKFnordquist/tierpsy-tracker
|
7ed935fdd3ee5920914effb1bf52dec45b7d54fc
|
[
"MIT"
] | null | null | null |
tierpsy/analysis/int_ske_orient/checkFinalOrientation.py
|
SKFnordquist/tierpsy-tracker
|
7ed935fdd3ee5920914effb1bf52dec45b7d54fc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 15:37:12 2016
@author: ajaver
"""
import glob
import os
import numpy as np
import pandas as pd
import tables
from scipy.signal import savgol_filter
from tierpsy.analysis.ske_orient.checkHeadOrientation import isWormHTSwitched
from tierpsy.helper.misc import print_flush
def getHeadProbMov(
skeletons_file,
trajectories_worm,
max_gap_allowed=10,
window_std=25,
segment4angle=5,
min_block_size=250):
skel_group = (
trajectories_worm['skeleton_id'].min(),
trajectories_worm['skeleton_id'].max())
with tables.File(skeletons_file, 'r') as fid:
good_skeletons = trajectories_worm['int_map_id'].values != -1
skeletons_id = trajectories_worm['skeleton_id'].values[good_skeletons]
dd = fid.get_node('/skeleton').shape
skeletons = np.full((len(good_skeletons), dd[1], dd[2]), np.nan)
if len(skeletons_id) > 0:
skeletons[good_skeletons, :, :] = fid.get_node(
'/skeleton')[skeletons_id, :, :]
else:
return np.nan, skel_group
#savgol_filter does not accept even windows
if window_std % 2 == 0:
window_std += 1
#get the indexes of valid skeletons
skel_ind_valid = ~np.isnan(skeletons[:, 0, 0])
#smooth on time to reduce the variance but only on the points that are going to be used to calculate the angles
for nn in [0, segment4angle, -segment4angle -1 , -1]:
for ii in range(2):
dat = pd.Series(skeletons[:, nn, ii]).fillna(method='ffill').fillna(method='bfill')
dat = savgol_filter(dat, window_std, 3)
skeletons[:, nn, ii] = dat
#total range in coordinates of the head movement
top = np.max(skeletons[:, 0], axis=0)
bot = np.min(skeletons[:, 0], axis=0)
head_path_range = np.sqrt(np.sum((top-bot)**2))
    #half of the average head-tail distance
ht_dist = np.sqrt(((skeletons[:, 0] - skeletons[:, -1])**2).sum(axis=1))
ht_dist_avg = np.nanmean(ht_dist)/2
#%%
if head_path_range < ht_dist_avg:
#here I find that it works better to use a very large window since the movement is less
window_std = window_std*50
        #make sure the window is at most half of the size of the valid-skeleton window
dd, = np.where(skel_ind_valid)
bot, top = dd[0], dd[-1]
N = max(1, (top-bot + 1)//2)
window_std = min(window_std, N)
is_switch_skel, roll_std = isWormHTSwitched(skeletons,
segment4angle=segment4angle,
max_gap_allowed=max_gap_allowed,
window_std = window_std,
min_block_size=min_block_size)
#%%
#calculate the head tail probability at each point and get the average
p_mov = roll_std['head_angle']/(roll_std['head_angle'] +roll_std['tail_angle'])
    #remove values that are too small; if the std is zero we cannot really calculate these values
dd = roll_std['head_angle'] + roll_std['tail_angle']
ind_valid = skel_ind_valid & (dd > 1e-3)
p_mov = p_mov.values[ind_valid]
if p_mov.size == 0:
import pdb
pdb.set_trace()
#average using only the indexes of valid skeletons
p_mov_avg = np.nanmean(p_mov)
return p_mov_avg, skel_group
def searchIntPeaks(
median_int,
peak_search_limits=[
0.054,
0.192,
0.269,
0.346]):
    '''
    Look for local extrema in the intensity profile: first a minimum between 0 and
    search limit 0, then a maximum between that minimum and search limit 1, then
    another minimum, and finally another maximum. The same search is mirrored from
    the tail end of the profile.
    '''
length_resampling = median_int.shape[0]
peaks_ind = []
hh = 0
tt = length_resampling
for ii, ds in enumerate(peak_search_limits):
peak_lim = round(ds * length_resampling)
func_search = np.argmin if ii % 2 == 0 else np.argmax
hh = func_search(median_int[hh:peak_lim]) + hh
dd = length_resampling - peak_lim
tt = func_search(median_int[dd:tt]) + dd
peaks_ind.append((hh, tt))
return peaks_ind
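# Worked example (illustrative, assuming a profile resampled to 100 points): the
# search limits become round(0.054*100)=5, 19, 27 and 35, so on the head side the
# loop alternates argmin/argmax over median_int[0:5], [hh:19], [hh:27] and [hh:35],
# while the tail side mirrors the same windows from the other end of the profile
# (starting at indices 95, 81, 73 and 65).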
def getHeadProvInt(
intensities_file,
trajectories_worm,
min_block_size,
peak_search_limits):
    '''
    Calculate the probability that the worm is in the correct head/tail orientation
    according to its median intensity profile.
    '''
with tables.File(intensities_file, 'r') as fid:
int_map_id = trajectories_worm.loc[
trajectories_worm['int_map_id'] != -1, 'int_map_id']
if int_map_id.size == 0 or int_map_id.size < min_block_size:
            # the number of maps is too small; return NaNs, nothing to do here
return np.nan, np.nan, []
worm_int = fid.get_node(
            '/straighten_worm_intensity_median')[int_map_id].astype(np.float64)
worm_int -= np.median(worm_int, axis=1)[:, np.newaxis]
# get the median intensity profile
median_int = np.median(worm_int, axis=0)
        # search for the peaks in the intensity profile (the head typically has a
        # minimum, followed by a maximum, then a minimum and then a maximum)
peaks_ind = searchIntPeaks(median_int,
peak_search_limits=peak_search_limits)
# calculate the distance between the second minima and the second maxima
headbot2neck = median_int[peaks_ind[3][0]] - median_int[peaks_ind[2][0]]
headbot2neck = 0 if headbot2neck < 0 else headbot2neck
tailbot2waist = median_int[peaks_ind[3][1]] - median_int[peaks_ind[2][1]]
tailbot2waist = 0 if tailbot2waist < 0 else tailbot2waist
p_int_bot = headbot2neck / (headbot2neck + tailbot2waist)
# calculate the distance between the second minima and the first maxima
headtop2bot = median_int[peaks_ind[1][0]] - median_int[peaks_ind[2][0]]
headtop2bot = 0 if headtop2bot < 0 else headtop2bot
tailtop2bot = median_int[peaks_ind[1][1]] - median_int[peaks_ind[2][1]]
tailtop2bot = 0 if tailtop2bot < 0 else tailtop2bot
p_int_top = headtop2bot / (headtop2bot + tailtop2bot)
int_group = (np.min(int_map_id), np.max(int_map_id))
# #%%
# plt.figure()
# plt.title(base_name)
# plt.plot(median_int, label ='0.3')
#
# strC = 'rgck'
# for ii, dd in enumerate(peaks_ind):
# for xx in dd:
# plt.plot(xx, median_int[xx], 'o' + strC[ii])
return p_int_top, p_int_bot, int_group
def checkFinalOrientation(
skeletons_file,
intensities_file,
trajectories_worm,
min_block_size,
head_tail_param):
peak_search_limits = [0.054, 0.192, 0.269, 0.346]
p_mov, skel_group = getHeadProbMov(
skeletons_file,
trajectories_worm,
**head_tail_param)
p_int_top, p_int_bot, int_group = getHeadProvInt(
intensities_file, trajectories_worm, min_block_size, peak_search_limits=peak_search_limits)
    # The weights used here give p_tot > 0.5 as long as the std of the head is at
    # least twice that of the tail; if it is less, the intensity terms take a larger
    # role. The difference between the second minimum (trachea) and the second
    # maximum (neck), given by p_int_bot, seems to be the better predictor, since
    # the second maximum typically does not exist in the tail. However, the
    # difference between the first maximum (head tip) and the second minimum
    # (trachea), given by p_int_top, can also be important, therefore both weights
    # are similar.
p_tot = 0.75 * p_mov + 0.15 * p_int_bot + 0.1 * p_int_top
    # if it is nan, both intensity changes were negative; set the probability
    # equal to p_mov
if p_tot != p_tot:
p_tot = p_mov
return p_tot, [skel_group], [int_group]
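# Worked example with illustrative numbers: p_mov = 0.9, p_int_bot = 0.2 and
# p_int_top = 0.3 give p_tot = 0.75*0.9 + 0.15*0.2 + 0.1*0.3 = 0.735, so the
# movement term dominates unless the intensity evidence strongly disagrees.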
#%%
if __name__ == '__main__':
from tierpsy.analysis.int_ske_orient.correctHeadTailIntensity import switchBlocks
check_dir = '/Users/ajaver/Desktop/Videos/single_worm/agar_1/MaskedVideos/'
head_tail_param = {
'max_gap_allowed': 10,
'window_std': 25,
'segment4angle': 5,
'min_block_size': 250}
#peak_search_limits = [0.054, 0.192, 0.269, 0.346]
all_median = []
for ff in glob.glob(os.path.join(check_dir, '*')):
ff = ff.replace('MaskedVideos', 'Results')
base_name = os.path.split(ff)[1].rpartition('.')[0]
trajectories_file = ff[:-5] + '_trajectories.hdf5'
skeletons_file = ff[:-5] + '_skeletons.hdf5'
intensities_file = ff[:-5] + '_intensities.hdf5'
# check the file finished in the correct step
# with tables.File(skeletons_file, 'r') as fid:
# assert fid.get_node('/skeleton')._v_attrs['has_finished'] >= 4
with pd.HDFStore(skeletons_file, 'r') as fid:
trajectories_data = fid['/trajectories_data']
grouped_trajectories = trajectories_data.groupby('worm_index_joined')
tot_worms = len(grouped_trajectories)
# variables to report progress
base_name = skeletons_file.rpartition(
'.')[0].rpartition(os.sep)[-1].rpartition('_')[0]
print_flush(
base_name +
" Checking if the final Head-Tail orientation is correct")
for index_n, (worm_index, trajectories_worm) in enumerate(
grouped_trajectories):
            p_tot, skel_group, int_group = checkFinalOrientation(
                skeletons_file, intensities_file, trajectories_worm,
                head_tail_param['min_block_size'], head_tail_param)
if p_tot < 0.5:
switchBlocks(
skel_group,
skeletons_file,
int_group,
intensities_file)
| 35.530466
| 121
| 0.630082
|
e25373abb1704dba604263e3208608756befdea6
| 142
|
py
|
Python
|
src/141A.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | 2
|
2016-08-19T09:47:03.000Z
|
2016-10-01T10:15:03.000Z
|
src/141A.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | null | null | null |
src/141A.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | 1
|
2015-07-01T23:57:32.000Z
|
2015-07-01T23:57:32.000Z
|
# coding: utf-8
s1 = input()
s2 = input()
s = input()
if sorted(list(s1)+list(s2)) == sorted(list(s)):
print('YES')
else:
print('NO')
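# Example (illustrative): s1 = 'Bob', s2 = 'alice', s = 'Bobalice' -> the sorted
# character multisets match, so the program prints YES; s = 'aliceBoZ' would give
# NO because the letter counts differ.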
| 15.777778
| 48
| 0.56338
|
f03284717bc211e6600613a99d2dc6cbf51fcafa
| 16,743
|
py
|
Python
|
models/stl10/cnn.py
|
mathczh/GANL2L
|
fdffbcb1547cf8f3a7287a4a21d3f4871f3e4e42
|
[
"MIT"
] | null | null | null |
models/stl10/cnn.py
|
mathczh/GANL2L
|
fdffbcb1547cf8f3a7287a4a21d3f4871f3e4e42
|
[
"MIT"
] | null | null | null |
models/stl10/cnn.py
|
mathczh/GANL2L
|
fdffbcb1547cf8f3a7287a4a21d3f4871f3e4e42
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch
from ..OrthConv import *
from ..SNConv import *
from ..SphereConv import *
__all__ = ['cnn_D','cnn_G','cnnv1_D','cnnv1_G','cnnv2_D','cnnv2_G','set_use_bias_cnn']
USE_BIAS = False
def set_use_bias_cnn(x):
global USE_BIAS
USE_BIAS = x
class CNN_D(nn.Module):
def __init__(self, isize, nc, ndf, ngpu, n_extra_layers=0, norm_type = 'none', loss_type='wgan', version = 1):
super(CNN_D, self).__init__()
self.ngpu = ngpu
main = nn.Sequential()
# input is nc x isize x isize
if 'OR' in norm_type:
if 'Mani' in norm_type:
self.myConv2d = Orth_Plane_Mani_Conv2d
else:
self.myConv2d = Orth_Plane_Conv2d
elif 'UVR' in norm_type:
if 'Mani' in norm_type:
self.myConv2d = Orth_UV_Mani_Conv2d
else:
self.myConv2d = Orth_UV_Conv2d
elif 'WN' in norm_type:
self.myConv2d = WN_Conv2d
elif 'SN' in norm_type:
self.myConv2d = SNConv2d
elif 'Sphere' in norm_type:
self.myConv2d = Sphere_Conv2d
else:
self.myConv2d = nn.Conv2d
if version == 1:
if 'OR' in norm_type:
main.add_module('initial_4x4conv_{0}-{1}'.format(nc, ndf),
GroupOrthConv(self.myConv2d, nc, ndf, 4, 2, 1, bias=USE_BIAS))
else:
main.add_module('initial_4x4conv_{0}-{1}'.format(nc, ndf),
self.myConv2d(nc, ndf, 4, 2, 1, bias=USE_BIAS))
if "BN" in norm_type:
main.add_module('initial_{0}_4x4batchnorm'.format(ndf),
nn.BatchNorm2d(ndf))
elif "LN" in norm_type:
main.add_module('initial_{0}_4x4layernorm'.format(ndf),
nn.LayerNorm((ndf,isize//2,isize//2)))
main.add_module('initial_4x4relu_{0}'.format(ndf),
nn.LeakyReLU(0.2, inplace=True))
if version == 2:
# 3*3 block
if 'OR' in norm_type:
main.add_module('initial_3x3conv_{0}-{1}'.format(nc, ndf),
GroupOrthConv(self.myConv2d, nc, ndf, 3, 1, 1, bias=USE_BIAS))
else:
main.add_module('initial_3x3conv_{0}-{1}'.format(nc, ndf),
self.myConv2d(nc, ndf, 3, 1, 1, bias=USE_BIAS))
if "BN" in norm_type:
main.add_module('initial_{0}_3x3batchnorm'.format(ndf),
nn.BatchNorm2d(ndf))
elif "LN" in norm_type:
main.add_module('initial_{0}_3x3layernorm'.format(ndf),
nn.LayerNorm((ndf,isize,isize)))
main.add_module('initial_3x3relu_{0}'.format(ndf),
nn.LeakyReLU(0.1, inplace=True))
# 4*4 block
main.add_module('initial_4x4conv_{0}-{1}'.format(ndf, ndf),
self.myConv2d(ndf, ndf, 4, 2, 1, bias=USE_BIAS))
if "BN" in norm_type:
main.add_module('initial_{0}_4x4batchnorm'.format(ndf),
nn.BatchNorm2d(ndf))
elif "LN" in norm_type:
main.add_module('initial_{0}_4x4layernorm'.format(ndf),
nn.LayerNorm((ndf,isize//2,isize//2)))
main.add_module('initial_4x4relu_{0}'.format(ndf),
nn.LeakyReLU(0.1, inplace=True))
csize, cndf = isize // 2, ndf
# Extra layers
for t in range(n_extra_layers):
main.add_module('extra-layers-{0}-{1}_conv'.format(t, cndf),
self.myConv2d(cndf, cndf, 3, 1, 1, bias=USE_BIAS))
if "BN" in norm_type:
main.add_module('extra-layers-{0}-{1}_batchnorm'.format(t, cndf),
nn.BatchNorm2d(cndf))
elif "LN" in norm_type:
main.add_module('extra-layers-{0}-{1}_layernorm'.format(t, cndf),
nn.LayerNorm((cndf,csize,csize)))
main.add_module('extra-layers-{0}-{1}_relu'.format(t, cndf),
nn.LeakyReLU(0.2, inplace=True))
while csize > 6:
in_feat = cndf
out_feat = cndf * 2
##############################################################
# V1
##############################################################
if version == 1:
main.add_module('pyramid_{0}-{1}_4x4conv'.format(in_feat, out_feat),
self.myConv2d(in_feat, out_feat, 4, 2, 1, bias=USE_BIAS))
if "BN" in norm_type:
main.add_module('pyramid_{0}_4x4batchnorm'.format(out_feat),
nn.BatchNorm2d(out_feat))
elif "LN" in norm_type:
main.add_module('pyramid_{0}_4x4layernorm'.format(out_feat),
nn.LayerNorm((out_feat,csize//2,csize//2)))
main.add_module('pyramid_{0}_4x4relu'.format(out_feat),
nn.LeakyReLU(0.2, inplace=True))
##############################################################
# V2
##############################################################
if version == 2:
# 3*3 block
main.add_module('pyramid_{0}-{1}_3x3conv'.format(in_feat, out_feat),
self.myConv2d(in_feat, out_feat, 3, 1, 1, bias=USE_BIAS))
if "BN" in norm_type:
main.add_module('pyramid_{0}_3x3batchnorm'.format(out_feat),
nn.BatchNorm2d(out_feat))
elif "LN" in norm_type:
main.add_module('pyramid_{0}_3x3layernorm'.format(out_feat),
nn.LayerNorm((out_feat,csize,csize)))
main.add_module('pyramid_{0}_3x3relu'.format(out_feat),
nn.LeakyReLU(0.1, inplace=True))
# 4*4 block
main.add_module('pyramid_{0}-{1}_4x4conv'.format(out_feat, out_feat),
self.myConv2d(out_feat, out_feat, 4, 2, 1, bias=USE_BIAS))
if "BN" in norm_type:
main.add_module('pyramid_{0}_4x4batchnorm'.format(out_feat),
nn.BatchNorm2d(out_feat))
elif "LN" in norm_type:
main.add_module('pyramid_{0}_4x4layernorm'.format(out_feat),
nn.LayerNorm((out_feat,csize//2,csize//2)))
main.add_module('pyramid_{0}_4x4relu'.format(out_feat),
nn.LeakyReLU(0.1, inplace=True))
cndf = cndf * 2
csize = csize // 2
# state size. K x 4 x 4
if version == 1:
main.add_module('final_{0}-{1}_4x4conv'.format(cndf, 1),
self.myConv2d(cndf, 1, 6, 1, 0, bias=USE_BIAS))
if version == 2:
in_feat = cndf
out_feat = cndf * 2
# 3*3 block
main.add_module('final_{0}-{1}_3x3conv'.format(in_feat, out_feat),
self.myConv2d(in_feat, out_feat, 3, 1, 1, bias=USE_BIAS))
if "BN" in norm_type:
main.add_module('final_{0}_3x3batchnorm'.format(out_feat),
nn.BatchNorm2d(out_feat))
elif "LN" in norm_type:
main.add_module('final_{0}_3x3layernorm'.format(out_feat),
nn.LayerNorm((out_feat,4,4)))
main.add_module('final_{0}_3x3relu'.format(out_feat),
nn.LeakyReLU(0.1, inplace=True))
main.add_module('final_{0}-{1}_4x4conv'.format(out_feat, 1),
self.myConv2d(out_feat, 1, 6, 1, 0, bias=USE_BIAS))
cndf = cndf * 2
if loss_type == "dcgan":
main.add_module('final_sigmoid', nn.Sigmoid())
self.main = main
for m_name, m in self.named_modules():
print(m_name+': '+m.__class__.__name__)
            if isinstance(m, nn.Conv2d): # special conv layers have their own init
                m.weight.data.normal_(0,0.02)
                if m.bias is not None:   # bias may be disabled via USE_BIAS
                    m.bias.data.fill_(0)
            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.LayerNorm):
                m.weight.data.normal_(1.0,0.02)
                m.bias.data.fill_(0)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.squeeze()
def showBNInfo(self):
for m_name, m in self.named_modules():
if isinstance(m, nn.BatchNorm2d):
print("BN weight: max: %f; mean: %f; min: %f; var: %f" %( m.weight.data.max(), m.weight.data.mean(), m.weight.data.min(),m.weight.data.var()))
print("BN bias: max: %f; mean: %f; min: %f; var: %f" %( m.bias.data.max(), m.bias.data.mean(), m.bias.data.min(), m.bias.data.var()))
def showOrthInfo(self):
ss = []
for m_name, m in self.named_modules():
if hasattr(m, 'showOrthInfo') and isinstance(m, self.myConv2d):
s = m.showOrthInfo()
s,_ = s.sort()
ss.append(s.cpu().numpy())
return ss
# elif isinstance(m, nn.BatchNorm2d):
# print("BN Wei: ",m.weight.data," BN Bias: ",m.bias.data)
def setmode(self, mode):
for m_name, m in self.named_modules():
if hasattr(m, 'setmode') and isinstance(m, self.myConv2d):
m.setmode(mode)
def project(self):
for m_name, m in self.named_modules():
if hasattr(m, 'project') and isinstance(m, self.myConv2d):
m.project()
def update_sigma(self,**kwargs):
"""
        Only for SN and UVR (modes 1 and 2)
"""
for m_name, m in self.named_modules():
if hasattr(m, 'update_sigma') and isinstance(m, self.myConv2d):
m.update_sigma(**kwargs)
def log_spectral(self):
"""
        Only for SN and UVR (modes 1 and 2)
"""
log_s = 0
for m_name, m in self.named_modules():
if hasattr(m, 'log_spectral') and isinstance(m, self.myConv2d):
log_s = log_s + m.log_spectral()
return log_s
def orth_penalty(self):
penalty = 0
for m_name, m in self.named_modules():
if hasattr(m, 'orth_penalty') and isinstance(m, self.myConv2d):
penalty = penalty + m.orth_penalty()
return penalty
def spectral_penalty(self):
penalty = 0
for m_name, m in self.named_modules():
if hasattr(m, 'spectral_penalty') and isinstance(m, self.myConv2d):
penalty = penalty + m.spectral_penalty()
return penalty
def cnn_D(isize, nc, ndf, ngpu, n_extra_layers=0, norm_type = 'none', loss_type='wgan'):
model = CNN_D(isize, nc, ndf, ngpu, n_extra_layers, norm_type, loss_type, version = 1)
return model
cnnv1_D = cnn_D
def cnnv2_D(isize, nc, ndf, ngpu, n_extra_layers=0, norm_type = 'none', loss_type='wgan'):
model = CNN_D(isize, nc, ndf, ngpu, n_extra_layers, norm_type, loss_type, version = 2)
return model
class CNN_G(nn.Module):
def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
super(CNN_G, self).__init__()
self.ngpu = ngpu
assert isize % 16 == 0, "isize has to be a multiple of 16"
cngf, tisize = ngf//2, 4
while tisize != isize:
cngf = cngf * 2
tisize = tisize * 2
main = nn.Sequential()
# input is Z, going into a convolution
main.add_module('initial_{0}-{1}_convt'.format(nz, cngf),
nn.ConvTranspose2d(nz, cngf, 6, 1, 0, bias=USE_BIAS))
main.add_module('initial_{0}_batchnorm'.format(cngf),
nn.BatchNorm2d(cngf))
main.add_module('initial_{0}_relu'.format(cngf),
nn.ReLU(True))
csize, cndf = 4, cngf
while csize < isize//2:
main.add_module('pyramid_{0}-{1}_convt'.format(cngf, cngf//2),
nn.ConvTranspose2d(cngf, cngf//2, 4, 2, 1, bias=USE_BIAS))
main.add_module('pyramid_{0}_batchnorm'.format(cngf//2),
nn.BatchNorm2d(cngf//2))
main.add_module('pyramid_{0}_relu'.format(cngf//2),
nn.ReLU(True))
cngf = cngf // 2
csize = csize * 2
# Extra layers
for t in range(n_extra_layers):
main.add_module('extra-layers-{0}-{1}_conv'.format(t, cngf),
nn.Conv2d(cngf, cngf, 3, 1, 1, bias=USE_BIAS))
main.add_module('extra-layers-{0}-{1}_batchnorm'.format(t, cngf),
nn.BatchNorm2d(cngf))
main.add_module('extra-layers-{0}-{1}_relu'.format(t, cngf),
nn.ReLU(True))
main.add_module('final_{0}-{1}_convt'.format(cngf, nc),
nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=USE_BIAS))
main.add_module('final_{0}_tanh'.format(nc),
nn.Tanh())
self.main = main
for m_name, m in self.named_modules():
            if isinstance(m, nn.ConvTranspose2d): # special conv layers have their own init
                m.weight.data.normal_(0,0.02)
                if m.bias is not None:   # bias may be disabled via USE_BIAS
                    m.bias.data.fill_(0)
            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.LayerNorm):
                m.weight.data.normal_(1.0,0.02)
                m.bias.data.fill_(0)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
def cnn_G(isize, nz, nc, ngf, ngpu, n_extra_layers=0):
model = CNN_G(isize, nz, nc, ngf, ngpu, n_extra_layers)
return model
cnnv1_G = cnn_G
class CNNv2_G(nn.Module):
def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
super(CNNv2_G, self).__init__()
self.ngpu = ngpu
assert isize % 16 == 0, "isize has to be a multiple of 16"
cngf, tisize = ngf//2, 6
while tisize != isize:
cngf = cngf * 2
tisize = tisize * 2
cngf = cngf * 2
tisize = tisize * 2
main = nn.Sequential()
# input is Z, going into a convolution
main.add_module('initial_{0}-{1}_convt'.format(nz, cngf),
nn.ConvTranspose2d(nz, cngf, 6, 1, 0))
main.add_module('initial_{0}_batchnorm'.format(cngf),
nn.BatchNorm2d(cngf))
main.add_module('initial_{0}_relu'.format(cngf),
nn.ReLU(True))
csize, cndf = 6, cngf
while csize <= isize//2:
main.add_module('pyramid_{0}-{1}_convt'.format(cngf, cngf//2),
nn.ConvTranspose2d(cngf, cngf//2, 4, 2, 1))
main.add_module('pyramid_{0}_batchnorm'.format(cngf//2),
nn.BatchNorm2d(cngf//2))
main.add_module('pyramid_{0}_relu'.format(cngf//2),
nn.ReLU(True))
cngf = cngf // 2
csize = csize * 2
main.add_module('final_{0}-{1}_convt'.format(cngf, nc),
nn.ConvTranspose2d(cngf, nc, 3, 1, 1))
main.add_module('final_{0}_tanh'.format(nc),
nn.Tanh())
self.main = main
for m_name, m in self.named_modules():
            if isinstance(m, nn.ConvTranspose2d): # special conv layers have their own init
m.weight.data.normal_(0,0.02)
m.bias.data.fill_(0)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.LayerNorm):
m.weight.data.normal_(1.0,0.02)
m.bias.data.fill_(0)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
def cnnv2_G(isize, nz, nc, ngf, ngpu):
model = CNNv2_G(isize, nz, nc, ngf, ngpu)
return model
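# Illustrative construction sketch (not part of the original module; the sizes
# below are assumptions chosen to satisfy the v2 architecture, not values taken
# from any training script).
def _example_build():
    netG = cnnv2_G(isize=96, nz=128, nc=3, ngf=64, ngpu=1)
    netD = cnnv2_D(isize=96, nc=3, ndf=64, ngpu=1, norm_type='none', loss_type='wgan')
    z = torch.randn(8, 128, 1, 1)
    fake = netG(z)        # expected shape: (8, 3, 96, 96)
    return netD(fake)     # expected shape: (8,)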
| 42.820972
| 158
| 0.51747
|