**Dataset schema** (113 columns):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

In the sample rows below, quality-signal names are abbreviated by dropping the `qsc_code_`/`qsc_codepython_` prefix and the `_quality_signal` suffix, and zero-valued signals are omitted. The 41 trailing unsuffixed `qsc_*` duplicate columns are listed as raw values in schema order.
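The table can be explored programmatically. A minimal sketch, assuming access via the `datasets` library — the dataset path used below is a placeholder, since this preview does not name it:

```python
# Minimal sketch: stream one row and inspect the columns listed above.
# "example/code-dataset" is a placeholder, not the real dataset path.
from datasets import load_dataset

ds = load_dataset("example/code-dataset", split="train", streaming=True)
row = next(iter(ds))

print(row["max_stars_repo_name"], row["size"], row["lang"])
print(row["content"][:200])  # first 200 characters of the stored file

# collect just the quality-signal columns
signals = {k: v for k, v in row.items() if k.endswith("_quality_signal")}
print(len(signals), "quality signals")
```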
**Row 1**

- hexsha `6007f9657a1d3a19cb045cca61bc7716d4f2e22f` · size 144 · ext py · lang Python
- max_stars: `gomoku/networks/__init__.py` @ `IllIIIllll/reinforcement-learning-omok` (head `1c76ba76c203a3b7c99095fde0626aff45b1b94b`), licenses ["Apache-2.0"], count 1, events 2020-07-07T14:41:35.000Z – 2020-07-07T14:41:35.000Z
- max_issues: same path, repo, and head; licenses ["Apache-2.0"], count 1, events 2020-08-27T08:22:03.000Z – 2020-08-27T08:22:03.000Z
- max_forks: `gomoku/networks/__init__.py` @ `IllIIIllll/gomoku` (head `1c76ba76c203a3b7c99095fde0626aff45b1b94b`), licenses ["Apache-2.0"], count null, events null – null

Content:

```python
# © 2020 지성. all rights reserved.
# <[email protected]>
# Apache License 2.0
from .small import *
from .medium import *
from .large import *
```

- avg_line_length 20.571429 · max_line_length 33 · alphanum_fraction 0.708333
- Nonzero quality signals: num_words 22, num_chars 144, mean_word_length 4.681818, frac_words_unique 0.818182, frac_chars_top_2grams 0.194175, frac_chars_digital 0.05042, frac_chars_whitespace 0.173611, size_file_byte 144, num_lines 7, num_chars_line_max 34, num_chars_line_mean 20.571429, frac_chars_alphabet 0.806723, frac_chars_comments 0.506944, cate_ast 1, cate_var_zero true, frac_lines_import 1, score_lines_no_logic 1; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective 0 · hits 5
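The three layout metrics can be re-derived from `content`. A sketch under assumed definitions — the dataset's exact counting rules (bytes vs. characters, blank-line handling) are not documented in this preview, so small deviations are possible:

```python
# Re-derive avg_line_length, max_line_length and alphanum_fraction for a row.
# Assumed definitions: "size" is the UTF-8 byte count, lines split on newlines.
# Row 1 reports avg_line_length = 20.571429 = 144 / 7, which matches
# bytes / lines (the preview shows 6 visible lines, so one blank line was
# likely lost in extraction).
def layout_metrics(content: str) -> tuple[float, int, float]:
    n_bytes = len(content.encode("utf-8"))
    lines = content.splitlines()
    avg_line_length = n_bytes / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / n_bytes
    return avg_line_length, max_line_length, alphanum_fraction
```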
**Row 2**

- hexsha `600ca7297733fdb91cfe20784d6ed193a6eb6593` · size 3,239 · ext py · lang Python
- max_stars: `portal/migrations/0007_auto_20170824_1341.py` @ `nickmvincent/ugc-val-est` (head `b5cceda14ef5830f1befaddfccfd90a694c9677a`), licenses ["MIT"], count 2, events 2019-11-13T19:56:05.000Z – 2020-09-05T03:21:14.000Z
- max_issues: same path, repo, and head; licenses ["MIT"], count 6, events 2018-03-02T16:49:20.000Z – 2021-06-10T18:55:02.000Z
- max_forks: same path, repo, and head; licenses ["MIT"], count null, events null – null

Content (indentation restored from the flattened preview):

```python
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-24 13:41
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('portal', '0006_auto_20170824_0950'),
    ]

    operations = [
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='num_question_comments',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='question_score',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='title',
            field=models.CharField(default='', max_length=1182),
        ),
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='title_coleman_liau_index',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='title_length',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='title_lexicon_count',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='title_percent_punctuation',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='title_percent_spaces',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='title_percent_uppercase',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='title_sentence_count',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='sampledstackoverflowpost',
            name='title_starts_capitalized',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='sampledredditthread',
            name='title',
            field=models.CharField(default='', max_length=1182),
        ),
        migrations.AlterField(
            model_name='stackoverflowanswer',
            name='owner_user_id',
            field=models.IntegerField(blank=True, db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name='stackoverflowanswer',
            name='parent_id',
            field=models.IntegerField(db_index=True),
        ),
        migrations.AlterField(
            model_name='stackoverflowquestion',
            name='accepted_answer_id',
            field=models.IntegerField(blank=True, db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name='stackoverflowquestion',
            name='owner_user_id',
            field=models.IntegerField(db_index=True),
        ),
    ]
```

- avg_line_length 33.739583 · max_line_length 76 · alphanum_fraction 0.592467
- Nonzero quality signals: num_words 271, num_chars 3,239, mean_word_length 6.874539, frac_words_unique 0.280443, frac_chars_top_2grams 0.077295, frac_chars_top_3grams 0.160494, frac_chars_top_4grams 0.15942, frac_chars_dupe_5grams 0.782072, frac_chars_dupe_6grams 0.782072, frac_chars_dupe_7grams 0.719807, frac_chars_dupe_8grams 0.600107, frac_chars_dupe_9grams 0.600107, frac_chars_dupe_10grams 0.600107, frac_chars_digital 0.022143, frac_chars_whitespace 0.302871, size_file_byte 3,239, num_lines 95, num_chars_line_max 77, num_chars_line_mean 34.094737, frac_chars_alphabet 0.802923, frac_chars_comments 0.020994, frac_lines_dupe_lines 0.75, cate_autogen 1, frac_chars_string_length 0.207386, frac_chars_long_word_length 0.140783, cate_ast 1, cate_var_zero false, frac_lines_import 0.022727, score_lines_no_logic 0.056818; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective 0 · hits 5
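The high `frac_chars_dupe_*grams` values on row 2 reflect its repetitive `AddField` blocks. A sketch of one plausible reading of this signal — the dataset's exact definition (word vs. character n-grams, counting all occurrences vs. repeats only) is an assumption here:

```python
# One plausible reading of frac_chars_dupe_ngrams: the fraction of word
# characters covered by word n-grams that occur more than once.
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int) -> float:
    words = text.split()
    if len(words) < n:
        return 0.0
    grams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(grams)
    # characters inside any n-gram that appears at least twice
    dupe_chars = sum(sum(len(w) for w in g) for g in grams if counts[g] > 1)
    total_chars = sum(len(w) for w in words)
    return dupe_chars / total_chars if total_chars else 0.0
```

On boilerplate-heavy files like the migration above, nearly every 5-gram repeats, which is consistent with the reported value of about 0.78.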
**Row 3**

- hexsha `602d85326ffa11df7e1d924f6cb4bf41ac71b284` · size 984 · ext py · lang Python
- path `install.py` @ `X-lab-3D/PANDORA` (head `02912a03022e814ff8e0ae8ec52f5075f0e2e381`), licenses ["Apache-2.0"] for stars, issues, and forks alike
- max_stars: count null, events null – null
- max_issues: count 1, events 2022-03-14T19:51:26.000Z – 2022-03-14T19:51:26.000Z
- max_forks: count null, events null – null

Content (indentation restored; the "dependenciess" typo is in the stored file):

```python
import os

dirs = [
    './PANDORA_files', './PANDORA_files/data', './PANDORA_files/data/csv_pkl_files',
    './PANDORA_files/data/csv_pkl_files/mhcseqs', './PANDORA_files/data/PDBs',
    './PANDORA_files/data/PDBs/pMHCI', './PANDORA_files/data/PDBs/pMHCII',
    './PANDORA_files/data/PDBs/Bad', './PANDORA_files/data/PDBs/Bad/pMHCI',
    './PANDORA_files/data/PDBs/Bad/pMHCII', './PANDORA_files/data/PDBs/IMGT_retrieved',
    './PANDORA_files/data/outputs',
    './test/test_data/PDBs/Bad', './test/test_data/PDBs/Bad/pMHCI',
    './test/test_data/PDBs/Bad/pMHCII', './test/test_data/csv_pkl_files'
]

for D in dirs:
    try:
        os.mkdir(D)
    except OSError:
        print('Could not make directory: ' + D)

# Install dependenciess
# os.popen("alias KEY_MODELLER='XXXX'").read()
# os.popen("conda install -y -c salilab modeller").read()
# os.popen("conda install -y -c bioconda muscle").read()
# os.popen("pip install -e ./").read()
```

- avg_line_length 35.142857 · max_line_length 91 · alphanum_fraction 0.646341
- Nonzero quality signals: num_words 134, num_chars 984, mean_word_length 4.567164, frac_words_unique 0.335821, frac_chars_top_2grams 0.235294, frac_chars_top_3grams 0.287582, frac_chars_top_4grams 0.228758, frac_chars_dupe_5grams 0.477124, frac_chars_dupe_6grams 0.169935, frac_chars_dupe_7grams 0.081699, frac_chars_whitespace 0.166667, size_file_byte 984, num_lines 27, num_chars_line_max 92, num_chars_line_mean 36.444444, frac_chars_alphabet 0.746341, frac_chars_comments 0.21748, frac_chars_string_length 0.669725, frac_chars_long_word_length 0.589777, cate_ast 1, cate_var_zero false, frac_lines_import 0.0625, score_lines_no_logic 0.0625, frac_lines_print 0.0625; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective 0 · hits 5
**Row 4**

- hexsha `6063184472ef835deb60c56bca4bcbb89e09d477` · size 136 · ext py · lang Python
- path `python/testData/inspections/PyTypeCheckerInspection/ModuleTypeParameter/a.py` @ `06needhamt/intellij-community` (head `63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b`), licenses ["Apache-2.0"] throughout
- max_stars: count 2, events 2019-04-28T07:48:50.000Z – 2020-12-11T14:18:08.000Z
- max_issues: count null, events null – null
- max_forks: count null, events null – null

Content (indentation restored):

```python
import module
from types import ModuleType


def foo(m: ModuleType):
    pass


def bar(m):
    return m.__name__


foo(module)
bar(module)
```

- avg_line_length 12.363636 · max_line_length 28 · alphanum_fraction 0.720588
- Nonzero quality signals: num_words 21, num_chars 136, mean_word_length 4.47619, frac_words_unique 0.571429, frac_chars_whitespace 0.191176, size_file_byte 136, num_lines 11, num_chars_line_max 29, num_chars_line_mean 12.363636, frac_chars_alphabet 0.854545, cate_ast 1, frac_lines_func_ratio 0.25, cate_var_zero false, frac_lines_pass 0.125, frac_lines_import 0.25, frac_lines_simplefunc 0.125, score_lines_no_logic 0.625; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1
- effective 0 · hits 5
**Row 5**

- hexsha `6063f7fd8de4dfb10486579a5850fc07ac1891ee` · size 102 · ext py · lang Python
- path `utils.py` @ `lbesnard/subimporter` (head `66affbca2acdb3c25e70dac23290b5e7b956c2d7`), licenses ["MIT"] throughout
- max_stars: count null, events null – null
- max_issues: count 1, events 2021-05-05T02:06:23.000Z – 2021-05-06T00:42:53.000Z
- max_forks: count 1, events 2021-05-05T01:56:07.000Z – 2021-05-05T01:56:07.000Z

Content (indentation restored):

```python
def stringifySong(song):
    return f"<'{song['title']}' by '{song['artist']}' in '{song['album']}'>"
```

- avg_line_length 34 · max_line_length 76 · alphanum_fraction 0.578431
- Nonzero quality signals: num_words 13, num_chars 102, mean_word_length 4.538462, frac_words_unique 0.769231, frac_chars_whitespace 0.117647, size_file_byte 102, num_lines 3, num_chars_line_max 76, num_chars_line_mean 34, frac_chars_alphabet 0.655556, frac_chars_string_length 0.607843, cate_ast 1, frac_lines_func_ratio 0.5, cate_var_zero false, frac_lines_simplefunc 0.5, score_lines_no_logic 1; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, null, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0
- effective 0 · hits 5
**Row 6**

- hexsha `606fa44df2b3928dca9a1f9a1a195390a91a5ba6` · size 6,698 · ext py · lang Python
- path `tests/processing_components/test_image_iterators.py` @ `cnwangfeng/algorithm-reference-library` (head `9605eb01652fbfcb9ff003cc12b44c84093b7fb1`), licenses ["Apache-2.0"] throughout
- max_stars: count 22, events 2016-12-14T11:20:07.000Z – 2021-08-13T15:23:41.000Z
- max_issues: count 30, events 2017-06-27T09:15:38.000Z – 2020-09-11T18:16:37.000Z
- max_forks: count 20, events 2017-07-02T03:45:49.000Z – 2019-12-11T17:19:01.000Z

Content (indentation restored from the flattened preview):

```python
"""Unit tests for image iteration
"""
import logging
import unittest

import numpy

from data_models.polarisation import PolarisationFrame
from processing_components.image.iterators import image_raster_iter, image_channel_iter, image_null_iter
from processing_components.image.operations import create_empty_image_like
from processing_components.simulation.testing_support import create_test_image

log = logging.getLogger(__name__)


class TestImageIterators(unittest.TestCase):
    def test_raster(self):
        m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
        assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
        for nraster in [1, 2, 4, 8, 9]:
            m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
            for patch in image_raster_iter(m31model, facets=nraster):
                assert patch.data.shape[3] == (m31model.data.shape[3] // nraster), \
                    "Number of pixels in each patch: %d not as expected: %d" % (patch.data.shape[3],
                                                                                (m31model.data.shape[3] // nraster))
                assert patch.data.shape[2] == (m31model.data.shape[2] // nraster), \
                    "Number of pixels in each patch: %d not as expected: %d" % (patch.data.shape[2],
                                                                                (m31model.data.shape[2] // nraster))
                patch.data *= 2.0
            diff = m31model.data - 2.0 * m31original.data
            assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster
            assert numpy.max(numpy.abs(diff)) == 0.0, "Raster set failed for %d" % nraster

    def test_raster_exception(self):
        m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
        assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
        for nraster, overlap in [(-1, -1), (-1, 0), (2, 128), (1e6, 127)]:
            with self.assertRaises(AssertionError) as context:
                m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
                for patch in image_raster_iter(m31model, facets=nraster, overlap=overlap):
                    patch.data *= 2.0

    def test_raster_overlap(self):
        m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
        assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
        flat = create_empty_image_like(m31original)
        for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
            m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
            for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap),
                                         image_raster_iter(flat, facets=nraster, overlap=overlap)):
                patch.data *= 2.0
                flat_patch.data[...] += 1.0
            assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster

    def test_raster_overlap_linear(self):
        m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
        assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
        flat = create_empty_image_like(m31original)
        for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
            m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
            for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap,
                                                           taper='linear'),
                                         image_raster_iter(flat, facets=nraster, overlap=overlap)):
                patch.data *= 2.0
                flat_patch.data[...] += 1.0
            assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster

    def test_raster_overlap_quadratic(self):
        m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
        assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
        flat = create_empty_image_like(m31original)
        for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
            m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
            for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap,
                                                           taper='quadratic'),
                                         image_raster_iter(flat, facets=nraster, overlap=overlap)):
                patch.data *= 2.0
                flat_patch.data[...] += 1.0
            assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster

    def test_raster_overlap_tukey(self):
        m31original = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
        assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
        flat = create_empty_image_like(m31original)
        for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8), (16, 4), (9, 5)]:
            m31model = create_test_image(polarisation_frame=PolarisationFrame('stokesI'))
            for patch, flat_patch in zip(image_raster_iter(m31model, facets=nraster, overlap=overlap,
                                                           taper='tukey'),
                                         image_raster_iter(flat, facets=nraster, overlap=overlap)):
                patch.data *= 2.0
                flat_patch.data[...] += 1.0
            assert numpy.max(numpy.abs(m31model.data)), "Raster is empty for %d" % nraster

    def test_channelise(self):
        m31cube = create_test_image(polarisation_frame=PolarisationFrame('stokesI'),
                                    frequency=numpy.linspace(1e8, 1.1e8, 128))
        for subimages in [128, 16, 8, 2, 1]:
            for slab in image_channel_iter(m31cube, subimages=subimages):
                assert slab.data.shape[0] == 128 // subimages

    def test_null(self):
        m31cube = create_test_image(polarisation_frame=PolarisationFrame('stokesI'),
                                    frequency=numpy.linspace(1e8, 1.1e8, 128))
        for i, im in enumerate(image_null_iter(m31cube)):
            assert i < 1, "Null iterator returns more than one value"


if __name__ == '__main__':
    unittest.main()
```

- avg_line_length 49.985075 · max_line_length 116 · alphanum_fraction 0.591371
- Nonzero quality signals: num_words 760, num_chars 6,698, mean_word_length 5.040789, frac_words_unique 0.136842, frac_chars_top_2grams 0.039154, frac_chars_top_3grams 0.058731, frac_chars_top_4grams 0.098669, frac_chars_dupe_5grams 0.783868, frac_chars_dupe_6grams 0.772383, frac_chars_dupe_7grams 0.766119, frac_chars_dupe_8grams 0.766119, frac_chars_dupe_9grams 0.759593, frac_chars_dupe_10grams 0.728791, frac_chars_digital 0.049459, frac_chars_whitespace 0.296656, size_file_byte 6,698, num_lines 133, num_chars_line_max 117, num_chars_line_mean 50.360902, frac_chars_alphabet 0.763744, frac_chars_comments 0.004479, frac_lines_dupe_lines 0.55914, frac_chars_string_length 0.076738, frac_lines_assert 0.182796, cate_ast 1, frac_lines_func_ratio 0.086022, cate_var_zero false, frac_lines_import 0.075269, score_lines_no_logic 0.172043; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective 0 · hits 5
**Row 7**

- hexsha `609c0b0efb59d40f1ab4f4aa98dc75bbea0cab6a` · size 23 · ext py · lang Python
- path `py.py` @ `avr8082/Hadoop` (head `64b2036e752ac01b9e2256e20b659b1b56a274c9`), licenses ["Apache-2.0"] throughout
- max_stars, max_issues, max_forks: count null, events null – null

Content (the entire file; note this is C-style code stored with a `.py` extension):

```python
printf("Hello world")
```

- avg_line_length 11.5 · max_line_length 22 · alphanum_fraction 0.695652
- Nonzero quality signals: num_words 3, num_chars 23, mean_word_length 5.333333, frac_words_unique 1, frac_chars_whitespace 0.130435, size_file_byte 23, num_lines 1, num_chars_line_max 23, num_chars_line_mean 23, frac_chars_alphabet 0.8, frac_chars_string_length 0.478261, cate_ast 1, cate_var_zero true, frac_lines_print 1; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1
- effective 0 · hits 5
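Rows like row 7 illustrate why the quality signals exist: the file is syntactically valid Python (a bare call expression), so an AST check alone does not reject it, but the size- and word-count signals do. A sketch of a filter over these rows — every threshold below is illustrative, not taken from the dataset:

```python
# Sketch: keep only rows that parse and clear minimal size thresholds.
import ast

def keep_python_row(row: dict) -> bool:
    try:
        ast.parse(row["content"])  # note: printf("Hello world") still parses
    except SyntaxError:
        return False
    return (
        row["qsc_code_num_lines_quality_signal"] >= 3       # drop one-liners
        and row["qsc_code_num_words_quality_signal"] >= 5   # drop near-empty files
        and row["qsc_code_cate_autogen_quality_signal"] == 0  # drop generated code
    )
```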
**Row 8**

- hexsha `609c58346f33ce4d4cdeec7d9b46908849047083` · size 21,070 · ext py · lang Python
- path `build/python-env/lib/python2.7/site-packages/elasticsearch/client/xpack/ml.py` @ `imiMoisesEducation/beatcookie-discbot` (head `59c8be23346d8d2fc1777a2b08856df88e2ae5c2`), licenses ["Apache-2.0"] throughout
- max_stars: count 1, events 2021-05-11T12:09:58.000Z – 2021-05-11T12:09:58.000Z
- max_issues: count null, events null – null
- max_forks: count 2, events 2020-01-13T17:51:02.000Z – 2020-07-24T17:50:44.000Z

Content (a vendored elasticsearch-py client module; indentation restored):

```python
from elasticsearch.client.utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH


class MlClient(NamespacedClient):
    @query_params('from_', 'size')
    def get_filters(self, filter_id=None, params=None):
        """
        :arg filter_id: The ID of the filter to fetch
        :arg from_: skips a number of filters
        :arg size: specifies a max number of filters to get
        """
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'filters', filter_id), params=params)

    @query_params()
    def get_datafeeds(self, datafeed_id=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html>`_
        :arg datafeed_id: The ID of the datafeeds to fetch
        """
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'datafeeds', datafeed_id), params=params)

    @query_params()
    def get_datafeed_stats(self, datafeed_id=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html>`_
        :arg datafeed_id: The ID of the datafeeds stats to fetch
        """
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'datafeeds', datafeed_id, '_stats'), params=params)

    @query_params('anomaly_score', 'desc', 'end', 'exclude_interim', 'expand',
                  'from_', 'size', 'sort', 'start')
    def get_buckets(self, job_id, timestamp=None, body=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html>`_
        :arg job_id: ID of the job to get bucket results from
        :arg timestamp: The timestamp of the desired single bucket result
        :arg body: Bucket selection details if not provided in URI
        :arg anomaly_score: Filter for the most anomalous buckets
        :arg desc: Set the sort direction
        :arg end: End time filter for buckets
        :arg exclude_interim: Exclude interim results
        :arg expand: Include anomaly records
        :arg from_: skips a number of buckets
        :arg size: specifies a max number of buckets to get
        :arg sort: Sort buckets by a particular field
        :arg start: Start time filter for buckets
        """
        if job_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'job_id'.")
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, 'results', 'buckets', timestamp),
            params=params, body=body)

    @query_params('reset_end', 'reset_start')
    def post_data(self, job_id, body, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html>`_
        :arg job_id: The name of the job receiving the data
        :arg body: The data to process
        :arg reset_end: Optional parameter to specify the end of the bucket
            resetting range
        :arg reset_start: Optional parameter to specify the start of the bucket
            resetting range
        """
        for param in (job_id, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, '_data'), params=params,
            body=self._bulk_body(body))

    @query_params('force', 'timeout')
    def stop_datafeed(self, datafeed_id, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html>`_
        :arg datafeed_id: The ID of the datafeed to stop
        :arg force: True if the datafeed should be forcefully stopped.
        :arg timeout: Controls the time to wait until a datafeed has stopped.
            Default to 20 seconds
        """
        if datafeed_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
        return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
            'datafeeds', datafeed_id, '_stop'), params=params)

    @query_params()
    def get_jobs(self, job_id=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html>`_
        :arg job_id: The ID of the jobs to fetch
        """
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id), params=params)

    @query_params()
    def delete_expired_data(self, params=None):
        """
        """
        return self.transport.perform_request('DELETE',
            '/_xpack/ml/_delete_expired_data', params=params)

    @query_params()
    def put_job(self, job_id, body, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html>`_
        :arg job_id: The ID of the job to create
        :arg body: The job
        """
        for param in (job_id, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request('PUT', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id), params=params, body=body)

    @query_params()
    def validate_detector(self, body, params=None):
        """
        :arg body: The detector
        """
        if body in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'body'.")
        return self.transport.perform_request('POST',
            '/_xpack/ml/anomaly_detectors/_validate/detector', params=params,
            body=body)

    @query_params('end', 'start', 'timeout')
    def start_datafeed(self, datafeed_id, body=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html>`_
        :arg datafeed_id: The ID of the datafeed to start
        :arg body: The start datafeed parameters
        :arg end: The end time when the datafeed should stop. When not set, the
            datafeed continues in real time
        :arg start: The start time from where the datafeed should begin
        :arg timeout: Controls the time to wait until a datafeed has started.
            Default to 20 seconds
        """
        if datafeed_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
        return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
            'datafeeds', datafeed_id, '_start'), params=params, body=body)

    @query_params('desc', 'end', 'exclude_interim', 'from_', 'record_score',
                  'size', 'sort', 'start')
    def get_records(self, job_id, body=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html>`_
        :arg job_id: None
        :arg body: Record selection criteria
        :arg desc: Set the sort direction
        :arg end: End time filter for records
        :arg exclude_interim: Exclude interim results
        :arg from_: skips a number of records
        :arg record_score:
        :arg size: specifies a max number of records to get
        :arg sort: Sort records by a particular field
        :arg start: Start time filter for records
        """
        if job_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'job_id'.")
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, 'results', 'records'), params=params,
            body=body)

    @query_params()
    def update_job(self, job_id, body, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html>`_
        :arg job_id: The ID of the job to create
        :arg body: The job update settings
        """
        for param in (job_id, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, '_update'), params=params, body=body)

    @query_params()
    def put_filter(self, filter_id, body, params=None):
        """
        :arg filter_id: The ID of the filter to create
        :arg body: The filter details
        """
        for param in (filter_id, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request('PUT', _make_path('_xpack', 'ml',
            'filters', filter_id), params=params, body=body)

    @query_params()
    def update_datafeed(self, datafeed_id, body, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html>`_
        :arg datafeed_id: The ID of the datafeed to update
        :arg body: The datafeed update settings
        """
        for param in (datafeed_id, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
            'datafeeds', datafeed_id, '_update'), params=params, body=body)

    @query_params()
    def preview_datafeed(self, datafeed_id, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html>`_
        :arg datafeed_id: The ID of the datafeed to preview
        """
        if datafeed_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'datafeeds', datafeed_id, '_preview'), params=params)

    @query_params('advance_time', 'calc_interim', 'end', 'skip_time', 'start')
    def flush_job(self, job_id, body=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html>`_
        :arg job_id: The name of the job to flush
        :arg body: Flush parameters
        :arg advance_time: Advances time to the given value generating results
            and updating the model for the advanced interval
        :arg calc_interim: Calculates interim results for the most recent bucket
            or all buckets within the latency period
        :arg end: When used in conjunction with calc_interim, specifies the
            range of buckets on which to calculate interim results
        :arg skip_time: Skips time to the given value without generating results
            or updating the model for the skipped interval
        :arg start: When used in conjunction with calc_interim, specifies the
            range of buckets on which to calculate interim results
        """
        if job_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'job_id'.")
        return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, '_flush'), params=params, body=body)

    @query_params('force', 'timeout')
    def close_job(self, job_id, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html>`_
        :arg job_id: The name of the job to close
        :arg force: True if the job should be forcefully closed
        :arg timeout: Controls the time to wait until a job has closed. Default
            to 30 minutes
        """
        if job_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'job_id'.")
        return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, '_close'), params=params)

    @query_params()
    def open_job(self, job_id, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html>`_
        :arg job_id: The ID of the job to open
        """
        if job_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'job_id'.")
        return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, '_open'), params=params)

    @query_params('force')
    def delete_job(self, job_id, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_
        :arg job_id: The ID of the job to delete
        :arg force: True if the job should be forcefully deleted
        """
        if job_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'job_id'.")
        return self.transport.perform_request('DELETE', _make_path('_xpack',
            'ml', 'anomaly_detectors', job_id), params=params)

    @query_params()
    def update_model_snapshot(self, job_id, snapshot_id, body, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html>`_
        :arg job_id: The ID of the job to fetch
        :arg snapshot_id: The ID of the snapshot to update
        :arg body: The model snapshot properties to update
        """
        for param in (job_id, snapshot_id, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, 'model_snapshots', snapshot_id,
            '_update'), params=params, body=body)

    @query_params()
    def delete_filter(self, filter_id, params=None):
        """
        :arg filter_id: The ID of the filter to delete
        """
        if filter_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'filter_id'.")
        return self.transport.perform_request('DELETE', _make_path('_xpack',
            'ml', 'filters', filter_id), params=params)

    @query_params()
    def validate(self, body, params=None):
        """
        :arg body: The job config
        """
        if body in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'body'.")
        return self.transport.perform_request('POST',
            '/_xpack/ml/anomaly_detectors/_validate', params=params, body=body)

    @query_params('from_', 'size')
    def get_categories(self, job_id, category_id=None, body=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html>`_
        :arg job_id: The name of the job
        :arg category_id: The identifier of the category definition of interest
        :arg body: Category selection details if not provided in URI
        :arg from_: skips a number of categories
        :arg size: specifies a max number of categories to get
        """
        if job_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'job_id'.")
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, 'results', 'categories', category_id),
            params=params, body=body)

    @query_params('desc', 'end', 'exclude_interim', 'from_', 'influencer_score',
                  'size', 'sort', 'start')
    def get_influencers(self, job_id, body=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html>`_
        :arg job_id: None
        :arg body: Influencer selection criteria
        :arg desc: whether the results should be sorted in decending order
        :arg end: end timestamp for the requested influencers
        :arg exclude_interim: Exclude interim results
        :arg from_: skips a number of influencers
        :arg influencer_score: influencer score threshold for the requested
            influencers
        :arg size: specifies a max number of influencers to get
        :arg sort: sort field for the requested influencers
        :arg start: start timestamp for the requested influencers
        """
        if job_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'job_id'.")
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, 'results', 'influencers'),
            params=params, body=body)

    @query_params()
    def put_datafeed(self, datafeed_id, body, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html>`_
        :arg datafeed_id: The ID of the datafeed to create
        :arg body: The datafeed config
        """
        for param in (datafeed_id, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request('PUT', _make_path('_xpack', 'ml',
            'datafeeds', datafeed_id), params=params, body=body)

    @query_params('force')
    def delete_datafeed(self, datafeed_id, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html>`_
        :arg datafeed_id: The ID of the datafeed to delete
        :arg force: True if the datafeed should be forcefully deleted
        """
        if datafeed_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
        return self.transport.perform_request('DELETE', _make_path('_xpack',
            'ml', 'datafeeds', datafeed_id), params=params)

    @query_params()
    def get_job_stats(self, job_id=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html>`_
        :arg job_id: The ID of the jobs stats to fetch
        """
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, '_stats'), params=params)

    @query_params('delete_intervening_results')
    def revert_model_snapshot(self, job_id, snapshot_id, body=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html>`_
        :arg job_id: The ID of the job to fetch
        :arg snapshot_id: The ID of the snapshot to revert to
        :arg body: Reversion options
        :arg delete_intervening_results: Should we reset the results back to the
            time of the snapshot?
        """
        for param in (job_id, snapshot_id):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request('POST', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, 'model_snapshots', snapshot_id,
            '_revert'), params=params, body=body)

    @query_params('desc', 'end', 'from_', 'size', 'sort', 'start')
    def get_model_snapshots(self, job_id, snapshot_id=None, body=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html>`_
        :arg job_id: The ID of the job to fetch
        :arg snapshot_id: The ID of the snapshot to fetch
        :arg body: Model snapshot selection criteria
        :arg desc: True if the results should be sorted in descending order
        :arg end: The filter 'end' query parameter
        :arg from_: Skips a number of documents
        :arg size: The default number of documents returned in queries as a
            string.
        :arg sort: Name of the field to sort on
        :arg start: The filter 'start' query parameter
        """
        if job_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'job_id'.")
        return self.transport.perform_request('GET', _make_path('_xpack', 'ml',
            'anomaly_detectors', job_id, 'model_snapshots', snapshot_id),
            params=params, body=body)

    @query_params()
    def delete_model_snapshot(self, job_id, snapshot_id, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html>`_
        :arg job_id: The ID of the job to fetch
        :arg snapshot_id: The ID of the snapshot to delete
        """
        for param in (job_id, snapshot_id):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request('DELETE', _make_path('_xpack',
            'ml', 'anomaly_detectors', job_id, 'model_snapshots', snapshot_id),
            params=params)
```

- avg_line_length 43 · max_line_length 102 · alphanum_fraction 0.64205
- Nonzero quality signals: num_words 2,760, num_chars 21,070, mean_word_length 4.724638, frac_words_unique 0.072826, frac_chars_top_2grams 0.028758, frac_chars_top_3grams 0.045169, frac_chars_top_4grams 0.06181, frac_chars_dupe_5grams 0.815567, frac_chars_dupe_6grams 0.769018, frac_chars_dupe_7grams 0.731979, frac_chars_dupe_8grams 0.696933, frac_chars_dupe_9grams 0.671166, frac_chars_dupe_10grams 0.628604, frac_chars_digital 0.00038, frac_chars_whitespace 0.250451, size_file_byte 21,070, num_lines 489, num_chars_line_max 103, num_chars_line_mean 43.087935, frac_chars_alphabet 0.825302, frac_chars_comments 0.366066, frac_lines_dupe_lines 0.59596, frac_chars_string_length 0.224154, frac_chars_long_word_length 0.012103, cate_ast 1, frac_lines_func_ratio 0.156566, cate_var_zero false, frac_lines_pass 0.126263, frac_lines_import 0.005051, score_lines_no_logic 0.323232; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0
- effective 0 · hits 5
**Row 9**

- hexsha `609f2b3abd12396a9fb221b6c6ef204f6a133c95` · size 218 · ext py · lang Python
- path `python_packages_static/flopy/mf6/__init__.py` @ `usgs/neversink_workflow` (head `acd61435b8553e38d4a903c8cd7a3afc612446f9`), licenses ["CC0-1.0"] throughout
- max_stars: count 351, events 2015-01-03T15:18:48.000Z – 2022-03-31T09:46:43.000Z
- max_issues: count 1,256, events 2015-01-15T21:10:42.000Z – 2022-03-31T22:43:06.000Z
- max_forks: count 553, events 2015-01-31T22:46:48.000Z – 2022-03-31T17:43:35.000Z

Content:

```python
# imports
from . import coordinates
from . import data
from .modflow import *
from . import utils
from .data import mfdatascalar, mfdatalist, mfdataarray
from .mfmodel import MFModel
from .mfbase import ExtFileAction
```

- avg_line_length 21.8 · max_line_length 55 · alphanum_fraction 0.798165
- Nonzero quality signals: num_words 27, num_chars 218, mean_word_length 6.444444, frac_words_unique 0.481481, frac_chars_top_2grams 0.172414, frac_chars_whitespace 0.151376, size_file_byte 218, num_lines 9, num_chars_line_max 56, num_chars_line_mean 24.222222, frac_chars_alphabet 0.940541, frac_chars_comments 0.03211, cate_ast 1, cate_var_zero true, frac_lines_import 1, score_lines_no_logic 1; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective 0 · hits 5
**Row 10**

- hexsha `60a620c9bc97c103d049f3c1d9096836751f9133` · size 161 · ext py · lang Python
- path `survos/core/__init__.py` @ `paskino/SuRVoS` (head `e01e784442e2e9f724826cdb70f3a50c034c6455`), licenses ["Apache-2.0"] throughout
- max_stars: count 22, events 2016-09-30T08:04:42.000Z – 2022-03-05T07:24:18.000Z
- max_issues: count 81, events 2016-11-21T15:32:14.000Z – 2022-02-20T00:22:27.000Z
- max_forks: count 6, events 2018-11-22T10:19:59.000Z – 2022-02-04T06:15:48.000Z

Content:

```python
from .launcher import Launcher
from .model import DataModel
from .layers import LayerManager
from .labels import LabelManager
from .singleton import Singleton
```

- avg_line_length 20.125 · max_line_length 32 · alphanum_fraction 0.832298
- Nonzero quality signals: num_words 20, num_chars 161, mean_word_length 6.7, frac_words_unique 0.5, frac_chars_whitespace 0.136646, size_file_byte 161, num_lines 7, num_chars_line_max 33, num_chars_line_mean 23, frac_chars_alphabet 0.964029, cate_ast 1, cate_var_zero true, frac_lines_import 1, score_lines_no_logic 1; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective 0 · hits 5
**Row 11**

- hexsha `60ae0a74f0e5e8766035e68de7d7b1a1a948d0fa` · size 321 · ext py · lang Python
- max_stars: `colbert/parameters.py` @ `techthiyanes/ColBERT` (head `6493193b98d95595f15cfc375fed2f0b24df4f83`), licenses ["MIT"], count 421, events 2020-06-03T05:30:00.000Z – 2022-03-31T13:10:42.000Z
- max_issues: `colbert/parameters.py` @ `xrr233/ColBERT` (head `88a5ecd8aa7dca70d0d52ab51422cb06c843fb4e`), licenses ["MIT"], count 87, events 2020-08-07T10:07:56.000Z – 2022-03-30T03:49:16.000Z
- max_forks: `colbert/parameters.py` @ `xrr233/ColBERT` (same head), licenses ["MIT"], count 111, events 2020-06-28T03:02:14.000Z – 2022-03-15T05:56:24.000Z

Content:

```python
import torch

DEVICE = torch.device("cuda")

SAVED_CHECKPOINTS = [32*1000, 100*1000, 150*1000, 200*1000, 300*1000, 400*1000]
SAVED_CHECKPOINTS += [10*1000, 20*1000, 30*1000, 40*1000, 50*1000, 60*1000, 70*1000, 80*1000, 90*1000]
SAVED_CHECKPOINTS += [25*1000, 50*1000, 75*1000]

SAVED_CHECKPOINTS = set(SAVED_CHECKPOINTS)
```

- avg_line_length 32.1 · max_line_length 102 · alphanum_fraction 0.725857
- Nonzero quality signals: num_words 53, num_chars 321, mean_word_length 4.301887, frac_words_unique 0.471698, frac_chars_top_2grams 0.350877, frac_chars_top_3grams 0.263158, frac_chars_digital 0.395105, frac_chars_whitespace 0.109034, size_file_byte 321, num_lines 9, num_chars_line_max 103, num_chars_line_mean 35.666667, frac_chars_alphabet 0.402098, frac_chars_string_length 0.012461, cate_ast 1, cate_var_zero false, frac_lines_import 0.166667, score_lines_no_logic 0.166667; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective 0 · hits 5
**Row 12**

- hexsha `719810055bee113941d00e469e5cff1dcf6bfa92` · size 114 · ext py · lang Python
- max_stars: `app/services/__init__.py` @ `zeroday0619/XenXenXenSe` (head `5af079e5edde3a6e4a1f5868052480d7b140d87c`), licenses ["MIT"], count 1, events 2021-04-23T08:56:05.000Z – 2021-04-23T08:56:05.000Z
- max_issues and max_forks: `app/services/__init__.py` @ `Alex4386/XenXenXenSe` (head `c60e50f26a7c3b306ee3cbb140b3ad7f39c21d93`), licenses ["MIT"], count null, events null – null

Content:

```python
from app.services.console import Console
from app.services.server import Server

__main__ = ["server", "console"]
```

- avg_line_length 22.8 · max_line_length 40 · alphanum_fraction 0.780702
- Nonzero quality signals: num_words 15, num_chars 114, mean_word_length 5.666667, frac_words_unique 0.466667, frac_chars_top_2grams 0.164706, frac_chars_top_3grams 0.352941, frac_chars_whitespace 0.114035, size_file_byte 114, num_lines 4, num_chars_line_max 41, num_chars_line_mean 28.5, frac_chars_alphabet 0.841584, frac_chars_string_length 0.114035, cate_ast 1, cate_var_zero false, frac_lines_import 0.666667, score_lines_no_logic 0.666667; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0
- effective 0 · hits 5
**Row 13**

- hexsha `71d467c1cb4d72b8c1bd64020a221b9b3545fb65` · size 34 · ext py · lang Python
- max_stars: `python/testData/quickFixes/PyRenameElementQuickFixTest/renameAwaitClassInPy36_after.py` @ `jnthn/intellij-community` (head `8fa7c8a3ace62400c838e0d5926a7be106aa8557`), licenses ["Apache-2.0"], count 2, events 2019-04-28T07:48:50.000Z – 2020-12-11T14:18:08.000Z
- max_issues: same path @ `Cyril-lamirand/intellij-community` (head `60ab6c61b82fc761dd68363eca7d9d69663cfa39`), licenses ["Apache-2.0"], count 173, events 2018-07-05T13:59:39.000Z – 2018-08-09T01:12:03.000Z
- max_forks: same path and repo as max_issues, count 2, events 2020-03-15T08:57:37.000Z – 2020-04-07T04:48:14.000Z

Content:

```python
class A_NEW_NAME(object):
    pass
```

- avg_line_length 17 · max_line_length 25 · alphanum_fraction 0.735294
- Nonzero quality signals: num_words 6, num_chars 34, mean_word_length 3.833333, frac_words_unique 1, frac_chars_whitespace 0.176471, size_file_byte 34, num_lines 2, num_chars_line_max 26, num_chars_line_mean 17, frac_chars_alphabet 0.821429, cate_ast 1, cate_var_zero true, frac_lines_pass 0.5, score_lines_no_logic 0.5; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0
- effective 0 · hits 5
**Row 14**

- hexsha `71d8ae81fc5cc4e5cfdae9050c0caf054c81bfb5` · size 48 · ext py · lang Python
- path `GermanOK/run.py` @ `romainledru/GermanOK` (head `77bc86de0eabbd3d7413382a288fea286d608540`), licenses ["MIT"] throughout
- max_stars, max_issues, max_forks: count null, events null – null

Content:

```python
from Pages import *

app = App()
app.mainloop()
```

- avg_line_length 9.6 · max_line_length 19 · alphanum_fraction 0.666667
- Nonzero quality signals: num_words 7, num_chars 48, mean_word_length 4.571429, frac_words_unique 0.714286, frac_chars_top_2grams 0.375, frac_chars_whitespace 0.1875, size_file_byte 48, num_lines 4, num_chars_line_max 20, num_chars_line_mean 12, frac_chars_alphabet 0.820513, cate_ast 1, cate_var_zero false, frac_lines_import 0.333333, score_lines_no_logic 0.333333; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
- effective 0 · hits 5
**Row 15**

- hexsha `e07c7e8ff8aa0c1088ab724943f3572b8b2fff02` · size 68 · ext py · lang Python
- path `simulation/sensors/__init__.py` @ `salinsiim/petssa-simulation` (head `8f0f128d462831f86664bb8d246f2c7b659a0b8d`), licenses ["MIT"] throughout
- max_stars, max_issues, max_forks: count null, events null – null

Content (the entire file):

```python
from sensors.sensors import sense_characteristics, sense_pedestrians
```

- avg_line_length 68 · max_line_length 68 · alphanum_fraction 0.911765
- Nonzero quality signals: num_words 8, num_chars 68, mean_word_length 7.5, frac_words_unique 0.75, frac_chars_whitespace 0.058824, size_file_byte 68, num_lines 1, num_chars_line_max 68, num_chars_line_mean 68, frac_chars_alphabet 0.9375, cate_ast 1, cate_var_zero true, frac_lines_import 1, score_lines_no_logic 1; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective 0 · hits 5
**Row 16**

- hexsha `e07ee60ec4a6fab177a6c8363ef9dc2508bf69c5` · size 91 · ext py · lang Python
- path `src/helloworld/__main__.py` @ `paulproteus/briefcase-toga-button-app-with-hacks` (head `61ec41b154204bb4a7a59f55374193dd4f9ca377`), licenses ["BSD-3-Clause"] throughout
- max_stars: count 2, events 2020-05-01T23:41:55.000Z – 2020-07-01T00:26:19.000Z
- max_issues and max_forks: count null, events null – null

Content (indentation restored):

```python
from helloworld.app import main

if True or __name__ == '__main__':
    main().main_loop()
```

- avg_line_length 18.2 · max_line_length 34 · alphanum_fraction 0.703297
- Nonzero quality signals: num_words 13, num_chars 91, mean_word_length 4.230769, frac_words_unique 0.769231, frac_chars_top_2grams 0.290909, frac_chars_whitespace 0.175824, size_file_byte 91, num_lines 4, num_chars_line_max 35, num_chars_line_mean 22.75, frac_chars_alphabet 0.733333, frac_chars_string_length 0.087912, cate_ast 1, cate_var_zero true, frac_lines_import 0.333333, score_lines_no_logic 0.333333; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
- effective 0 · hits 5
**Row 17**

- hexsha `e0bae7400e763d4fa86d93ab435117f871afbd18` · size 49 · ext py · lang Python
- path `rhea/build/toolflow/xilinx/__init__.py` @ `meetps/rhea` (head `f8a9a08fb5e14c5c4488ef68a2dff4d18222c2c0`), licenses ["MIT"] throughout
- max_stars: count 1, events 2022-03-16T23:56:09.000Z – 2022-03-16T23:56:09.000Z
- max_issues and max_forks: count null, events null – null

Content:

```python
from .ise import ISE
from .vivado import Vivado
```

- avg_line_length 12.25 · max_line_length 26 · alphanum_fraction 0.77551
- Nonzero quality signals: num_words 8, num_chars 49, mean_word_length 4.75, frac_words_unique 0.5, frac_chars_whitespace 0.183673, size_file_byte 49, num_lines 3, num_chars_line_max 27, num_chars_line_mean 16.333333, frac_chars_alphabet 0.95, cate_ast 1, cate_var_zero true, frac_lines_import 1, score_lines_no_logic 1; all other signals are 0.
- Raw `qsc_*` duplicate columns (schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective 0 · hits 5
1ce2efac56c23c6a39d717edb12824108fd3d153
| 35,293
|
py
|
Python
|
muse_for_anything/api/v1_api/taxonomy_items.py
|
baireutherjonas/muse-for-anything
|
a625b4fc6468d74fa12886dc465d5694eed86e04
|
[
"MIT"
] | null | null | null |
muse_for_anything/api/v1_api/taxonomy_items.py
|
baireutherjonas/muse-for-anything
|
a625b4fc6468d74fa12886dc465d5694eed86e04
|
[
"MIT"
] | 1
|
2021-11-14T18:55:44.000Z
|
2021-11-14T18:55:44.000Z
|
muse_for_anything/api/v1_api/taxonomy_items.py
|
baireutherjonas/muse-for-anything
|
a625b4fc6468d74fa12886dc465d5694eed86e04
|
[
"MIT"
] | 1
|
2021-09-08T13:49:52.000Z
|
2021-09-08T13:49:52.000Z
|
"""Module containing the taxonomy items API endpoints of the v1 API."""
from datetime import datetime
from sqlalchemy.sql.schema import Sequence
from muse_for_anything.db.models.taxonomies import (
Taxonomy,
TaxonomyItem,
TaxonomyItemRelation,
TaxonomyItemVersion,
)
from marshmallow.utils import INCLUDE
from flask_babel import gettext
from muse_for_anything.api.util import template_url_for
from typing import Any, Callable, Dict, List, Optional, Union, cast
from flask.helpers import url_for
from flask.views import MethodView
from sqlalchemy.sql.expression import asc, desc, literal
from sqlalchemy.orm.query import Query
from sqlalchemy.orm import selectinload
from flask_smorest import abort
from http import HTTPStatus
from .root import API_V1
from ..base_models import (
ApiLink,
ApiResponse,
ChangedApiObject,
ChangedApiObjectSchema,
CursorPage,
CursorPageArgumentsSchema,
CursorPageSchema,
DynamicApiResponseSchema,
NewApiObject,
NewApiObjectSchema,
)
from ...db.db import DB
from ...db.pagination import get_page_info
from ...db.models.namespace import Namespace
from ...db.models.ontology_objects import OntologyObjectType, OntologyObjectTypeVersion
from .models.ontology import (
TaxonomyItemRelationPostSchema,
TaxonomyItemRelationSchema,
TaxonomyItemSchema,
TaxonomySchema,
)
from .namespace_helpers import (
query_params_to_api_key,
)
from .taxonomy_helpers import (
action_links_for_taxonomy_item,
action_links_for_taxonomy_item_relation,
create_action_link_for_taxonomy_item_relation_page,
nav_links_for_taxonomy_item,
nav_links_for_taxonomy_item_relation,
taxonomy_item_relation_to_api_link,
taxonomy_item_relation_to_api_response,
taxonomy_item_relation_to_taxonomy_item_relation_data,
taxonomy_item_to_api_link,
taxonomy_item_to_api_response,
taxonomy_item_to_taxonomy_item_data,
taxonomy_to_api_response,
taxonomy_to_items_links,
taxonomy_to_taxonomy_data,
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/"
)
class TaxonomyItemView(MethodView):
"""Endpoint for a single taxonomy item."""
def _check_path_params(self, namespace: str, taxonomy: str, taxonomy_item: str):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
def _get_taxonomy_item(
self, namespace: str, taxonomy: str, taxonomy_item: str
) -> TaxonomyItem:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
found_taxonomy_item: Optional[TaxonomyItem] = (
TaxonomyItem.query.options(selectinload(TaxonomyItem.current_ancestors))
.filter(
TaxonomyItem.id == taxonomy_item_id,
TaxonomyItem.taxonomy_id == taxonomy_id,
)
.first()
)
if (
found_taxonomy_item is None
or found_taxonomy_item.taxonomy.namespace_id != namespace_id
):
abort(HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item not found."))
return found_taxonomy_item # is not None because abort raises exception
def _check_if_taxonomy_modifiable(self, taxonomy: Taxonomy):
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
def _check_if_modifiable(self, taxonomy_item: TaxonomyItem):
self._check_if_taxonomy_modifiable(taxonomy=taxonomy_item.taxonomy)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemSchema()))
def get(self, namespace: str, taxonomy: str, taxonomy_item: str, **kwargs: Any):
"""Get a single taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
embedded: List[ApiResponse] = []
for relation in found_taxonomy_item.current_ancestors:
embedded.append(taxonomy_item_to_api_response(relation.taxonomy_item_source))
for relation in found_taxonomy_item.current_related:
embedded.append(taxonomy_item_relation_to_api_response(relation))
embedded.append(taxonomy_item_to_api_response(relation.taxonomy_item_target))
return ApiResponse(
links=[
ApiLink(
href=url_for(
"api-v1.NamespacesView",
_external=True,
**{"item-count": 50},
sort="name",
),
rel=("first", "page", "collection", "nav"),
resource_type="ont-namespace",
schema=url_for(
"api-v1.ApiSchemaView", schema_id="Namespace", _external=True
),
),
*nav_links_for_taxonomy_item(found_taxonomy_item),
*action_links_for_taxonomy_item(found_taxonomy_item),
],
embedded=embedded,
data=taxonomy_item_to_taxonomy_item_data(found_taxonomy_item),
)
@API_V1.arguments(TaxonomyItemSchema())
@API_V1.response(DynamicApiResponseSchema(NewApiObjectSchema()))
def put(self, data, namespace: str, taxonomy: str, taxonomy_item: str):
"""Update a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_modifiable(found_taxonomy_item)
taxonomy_item_version = TaxonomyItemVersion(
taxonomy_item=found_taxonomy_item,
version=found_taxonomy_item.current_version.version + 1,
name=data["name"],
description=data.get("description", ""),
sort_key=data.get("sort_key", 10),
)
found_taxonomy_item.current_version = taxonomy_item_version
DB.session.add(found_taxonomy_item)
DB.session.add(taxonomy_item_version)
DB.session.commit()
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
return ApiResponse(
links=[taxonomy_item_link],
embedded=[taxonomy_item_data],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"update",
"put",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
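# Hedged usage sketch, kept in comments because `app` and the blueprint's
# URL prefix are assumptions, not part of this module: updating item 2 via
# the PUT handler above creates a new TaxonomyItemVersion rather than
# mutating the stored one, so a follow-up GET reflects the bumped version.
#
#   with app.test_client() as client:
#       client.put(
#           "/namespaces/1/taxonomies/1/items/2/",
#           json={"name": "renamed", "description": "", "sort_key": 10},
#       )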
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def post(self, namespace: str, taxonomy: str, taxonomy_item: str): # restore action
"""Restore a deleted taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_taxonomy_modifiable(found_taxonomy_item.taxonomy)
changed_links: List[ApiLink] = []
embedded: List[ApiResponse] = []
# only actually restore when not already restored
if found_taxonomy_item.deleted_on is not None:
# restore taxonomy item
deleted_timestamp = found_taxonomy_item.deleted_on
found_taxonomy_item.deleted_on = None
# also restore relations
ancestors: Sequence[TaxonomyItemRelation] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.taxonomy_item_target_id == found_taxonomy_item.id,
TaxonomyItemRelation.deleted_on == deleted_timestamp,
).all()
ancestor_ids = set()
relation: TaxonomyItemRelation
for relation in ancestors:
if relation.taxonomy_item_source.deleted_on is not None:
continue # do not restore relations to deleted items
ancestor_ids.add(relation.taxonomy_item_source_id)
relation.deleted_on = None
DB.session.add(relation)
def produces_circle(relation: TaxonomyItemRelation) -> bool:
if relation.taxonomy_item_target_id in ancestor_ids:
return True
for rel in relation.taxonomy_item_target.current_related:
if produces_circle(rel):
return True
return False
children: Sequence[TaxonomyItemRelation] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.taxonomy_item_source_id == found_taxonomy_item.id,
TaxonomyItemRelation.deleted_on == deleted_timestamp,
).all()
for relation in children:
if relation.taxonomy_item_target.deleted_on is not None:
continue # do not restore relations to deleted items
if produces_circle(relation):
continue
relation.deleted_on = None
DB.session.add(relation)
DB.session.add(found_taxonomy_item)
DB.session.commit()
# add changed items to be embedded into the response
for relation in found_taxonomy_item.current_ancestors:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_source)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_source)
)
for relation in found_taxonomy_item.current_related:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_target)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_target)
)
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_link = taxonomy_to_taxonomy_data(found_taxonomy_item.taxonomy).self
taxonomy_data = taxonomy_to_api_response(found_taxonomy_item.taxonomy)
return ApiResponse(
links=[taxonomy_item_link, taxonomy_link, *changed_links],
embedded=[taxonomy_item_data, taxonomy_data, *embedded],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"restore",
"post",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def delete(self, namespace: str, taxonomy: str, taxonomy_item: str): # delete action
"""Delete a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_taxonomy_modifiable(found_taxonomy_item.taxonomy)
changed_links: List[ApiLink] = []
embedded: List[ApiResponse] = []
# only actually delete when not already deleted
if found_taxonomy_item.deleted_on is None:
# delete taxonomy item
deleted_timestamp = datetime.utcnow()
found_taxonomy_item.deleted_on = deleted_timestamp
# also delete incoming and outgoing relations to remove them
# from relations of existing items
ancestors = found_taxonomy_item.current_ancestors
for relation in found_taxonomy_item.current_ancestors:
relation.deleted_on = deleted_timestamp
DB.session.add(relation)
related = found_taxonomy_item.current_related
for relation in found_taxonomy_item.current_related:
relation.deleted_on = deleted_timestamp
DB.session.add(relation)
DB.session.add(found_taxonomy_item)
DB.session.commit()
# add changed items to be embedded into the response
for relation in ancestors:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_source)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_source)
)
for relation in related:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_target)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_target)
)
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_link = taxonomy_to_taxonomy_data(found_taxonomy_item.taxonomy).self
taxonomy_data = taxonomy_to_api_response(found_taxonomy_item.taxonomy)
return ApiResponse(
links=[taxonomy_item_link, taxonomy_link, *changed_links],
embedded=[taxonomy_item_data, taxonomy_data, *embedded],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"delete",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/relations/"
)
class TaxonomyItemRelationsView(MethodView):
"""Endpoint for manipulating taxonomy item relations."""
def _check_path_params(self, namespace: str, taxonomy: str, taxonomy_item: str):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
def _get_taxonomy_item(
self, namespace: str, taxonomy: str, taxonomy_item: str
) -> TaxonomyItem:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
found_taxonomy_item: Optional[TaxonomyItem] = TaxonomyItem.query.filter(
TaxonomyItem.id == taxonomy_item_id,
TaxonomyItem.taxonomy_id == taxonomy_id,
).first()
if (
found_taxonomy_item is None
or found_taxonomy_item.taxonomy.namespace_id != namespace_id
):
abort(HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item not found."))
return found_taxonomy_item # is not None because abort raises exception
def _check_if_modifiable(self, taxonomy_item: TaxonomyItem):
taxonomy = taxonomy_item.taxonomy
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
# cannot modify deleted taxonomy!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy item!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
def _check_item_circle(
self,
item_target: TaxonomyItem,
item_source: TaxonomyItem,
original_target: Optional[TaxonomyItem] = None,
):
"""Check for a path from target to source which would form a circular dependency. Abort if such a path is found!"""
if original_target is None:
original_target = item_target
relation: TaxonomyItemRelation
for relation in item_target.current_related:
if relation.taxonomy_item_target.deleted_on is not None:
continue # exclude deleted items as targets
if relation.taxonomy_item_target_id == item_source.id:
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Cannot add a relation from %(target)s to %(source)s as it would create a circle!",
target=original_target.name,
source=item_source.name,
),
)
else:
self._check_item_circle(
item_target=relation.taxonomy_item_target,
item_source=item_source,
original_target=original_target,
)
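# A minimal iterative sketch of the check above, kept in comments because it
# is an assumption-labeled alternative rather than project code. A
# breadth-first walk over `current_related` answers the same question, "is
# there a path target -> ... -> source?", without risking Python's recursion
# limit on very deep taxonomies:
#
#   from collections import deque
#
#   def has_path_to_source(item_target, item_source):
#       queue, seen = deque([item_target]), set()
#       while queue:
#           node = queue.popleft()
#           if node.id in seen:
#               continue  # shared ancestors: expand each node once
#           seen.add(node.id)
#           for relation in node.current_related:
#               target = relation.taxonomy_item_target
#               if target.deleted_on is not None:
#                   continue  # deleted targets excluded, as in the method above
#               if target.id == item_source.id:
#                   return True  # the new relation would close a circle
#               queue.append(target)
#       return False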
@API_V1.arguments(TaxonomyItemRelationPostSchema())
@API_V1.response(DynamicApiResponseSchema(NewApiObjectSchema()))
def post(
self,
data: Dict[str, str],
namespace: str,
taxonomy: str,
taxonomy_item: str,
):
"""Create a new relation to a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
if namespace != data["namespace_id"] or taxonomy != data["taxonomy_id"]:
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"Cannot create a relation to a taxonomy item of a different taxonomy!"
),
)
found_taxonomy_item = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_modifiable(found_taxonomy_item)
found_taxonomy_item_target = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=data["taxonomy_item_id"]
)
self._check_item_circle(found_taxonomy_item_target, found_taxonomy_item)
relation = TaxonomyItemRelation(
taxonomy_item_source=found_taxonomy_item,
taxonomy_item_target=found_taxonomy_item_target,
)
DB.session.add(relation)
DB.session.commit()
taxonomy_item_relation_link = (
taxonomy_item_relation_to_taxonomy_item_relation_data(relation).self
)
taxonomy_item_relation_data = taxonomy_item_relation_to_api_response(relation)
taxonomy_item_source_link = taxonomy_item_to_api_link(found_taxonomy_item)
taxonomy_item_source_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_item_target_link = taxonomy_item_to_api_link(found_taxonomy_item_target)
taxonomy_item_target_data = taxonomy_item_to_api_response(
found_taxonomy_item_target
)
self_link = create_action_link_for_taxonomy_item_relation_page(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self_link.rel = (*self_link.rel, "ont-taxonomy-item-relation")
self_link.resource_type = "new"
return ApiResponse(
links=[
taxonomy_item_relation_link,
taxonomy_item_source_link,
taxonomy_item_target_link,
],
embedded=[
taxonomy_item_relation_data,
taxonomy_item_source_data,
taxonomy_item_target_data,
],
data=NewApiObject(
self=self_link,
new=taxonomy_item_relation_link,
),
)
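# Hedged usage sketch, kept in comments because `app` is an assumption (the
# blueprint's URL prefix is not shown here): creating a relation from item 2
# to item 3 of the same taxonomy posts the target id in the request body and
# receives a NewApiObject pointing at the created relation.
#
#   with app.test_client() as client:
#       response = client.post(
#           "/namespaces/1/taxonomies/1/items/2/relations/",
#           json={"namespace_id": "1", "taxonomy_id": "1", "taxonomy_item_id": "3"},
#       )
#       assert response.status_code == 200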
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/relations/<string:relation>/"
)
class TaxonomyItemRelationView(MethodView):
"""Endpoint for removing taxonomy item relations."""
def _check_path_params(
self, namespace: str, taxonomy: str, taxonomy_item: str, relation: str
):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
if not relation or not relation.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"The requested taxonomy item relation id has the wrong format!"
),
)
def _get_taxonomy_item_relation(
self, namespace: str, taxonomy: str, taxonomy_item: str, relation: str
) -> TaxonomyItemRelation:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
relation_id = int(relation)
found_taxonomy_item_relation: Optional[
TaxonomyItemRelation
] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.id == relation_id,
TaxonomyItemRelation.taxonomy_item_source_id == taxonomy_item_id,
).first()
if (
found_taxonomy_item_relation is None
or found_taxonomy_item_relation.taxonomy_item_source.taxonomy_id
!= taxonomy_id
or found_taxonomy_item_relation.taxonomy_item_source.taxonomy.namespace_id
!= namespace_id
):
abort(
HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item relation not found.")
)
return found_taxonomy_item_relation # is not None because abort raises exception
def _check_if_modifiable(self, relation: TaxonomyItemRelation):
taxonomy_item = relation.taxonomy_item_source
taxonomy = taxonomy_item.taxonomy
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
# cannot modify deleted taxonomy!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy item!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
if relation.deleted_on is not None:
# cannot modify deleted item relation!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item relation is marked as deleted and cannot be modified further."
),
)
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemRelationSchema()))
def get(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
relation: str,
**kwargs: Any
):
"""Get a single relation."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
found_relation = self._get_taxonomy_item_relation(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
return ApiResponse(
links=(
*nav_links_for_taxonomy_item_relation(found_relation),
*action_links_for_taxonomy_item_relation(found_relation),
),
data=taxonomy_item_relation_to_taxonomy_item_relation_data(found_relation),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def delete(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
relation: str,
**kwargs: Any
):
"""Delete an existing relation."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
found_relation = self._get_taxonomy_item_relation(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
self._check_if_modifiable(found_relation)
# only actually delete when not already deleted
if found_relation.deleted_on is None:
# delete taxonomy item relation
found_relation.deleted_on = datetime.utcnow()
DB.session.add(found_relation)
DB.session.commit()
relation_link = taxonomy_item_relation_to_taxonomy_item_relation_data(
found_relation
).self
relation_data = taxonomy_item_relation_to_api_response(found_relation)
source_item_link = taxonomy_item_to_api_link(found_relation.taxonomy_item_source)
source_item_data = taxonomy_item_to_api_response(
found_relation.taxonomy_item_source
)
target_item_link = taxonomy_item_to_api_link(found_relation.taxonomy_item_target)
target_item_data = taxonomy_item_to_api_response(
found_relation.taxonomy_item_target
)
return ApiResponse(
links=[relation_link, source_item_link, target_item_link],
embedded=[relation_data, source_item_data, target_item_data],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemRelationView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
_external=True,
),
rel=(
"delete",
"ont-taxonomy-item-relation",
),
resource_type="changed",
),
changed=relation_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/versions/"
)
class TaxonomyItemVersionsView(MethodView):
"""Endpoint for all versions of a taxonomy item."""
def get(self, namespace: str, taxonomy: str, taxonomy_item: str, **kwargs: Any):
"""TODO."""
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/versions/<string:version>/"
)
class TaxonomyItemVersionView(MethodView):
"""Endpoint for a single version of a taxonomy item."""
def _check_path_params(
self, namespace: str, taxonomy: str, taxonomy_item: str, version: str
):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
if not version or not version.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"The requested taxonomy item version has the wrong format!"
),
)
def _get_taxonomy_item_version(
self, namespace: str, taxonomy: str, taxonomy_item: str, version: str
) -> TaxonomyItemVersion:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
version_nr = int(version)
found_taxonomy_item_version: Optional[
TaxonomyItemVersion
] = TaxonomyItemVersion.query.filter(
TaxonomyItemVersion.version == version_nr,
TaxonomyItemVersion.taxonomy_item_id == taxonomy_item_id,
).first()
if (
found_taxonomy_item_version is None
or found_taxonomy_item_version.taxonomy_item.taxonomy_id != taxonomy_id
or found_taxonomy_item_version.taxonomy_item.taxonomy.namespace_id
!= namespace_id
):
abort(
HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item version not found.")
)
return found_taxonomy_item_version # is not None because abort raises exception
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemSchema()))
def get(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
version: str,
**kwargs: Any
):
"""Get a single taxonomy item version."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
version=version,
)
found_taxonomy_item_version = self._get_taxonomy_item_version(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
version=version,
)
return ApiResponse(
links=[
ApiLink(
href=url_for(
"api-v1.NamespacesView",
_external=True,
**{"item-count": 50},
sort="name",
),
rel=("first", "page", "collection", "nav"),
resource_type="ont-namespace",
schema=url_for(
"api-v1.ApiSchemaView", schema_id="Namespace", _external=True
),
),
*nav_links_for_taxonomy_item_version(found_taxonomy_item_version),
*action_links_for_taxonomy_item_version(found_taxonomy_item_version),
],
data=taxonomy_item_to_taxonomy_item_data(found_taxonomy_item_version),
)
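# Hedged end-to-end sketch in comments (the `app` object and any URL prefix
# are assumptions): fetching version 1 of item 2 exercises the
# TaxonomyItemVersionView above.
#
#   with app.test_client() as client:
#       response = client.get("/namespaces/1/taxonomies/1/items/2/versions/1/")
#       assert response.status_code == 200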
| 39.43352
| 123
| 0.606041
| 3,478
| 35,293
| 5.840713
| 0.065555
| 0.206754
| 0.061928
| 0.029536
| 0.807276
| 0.754701
| 0.713301
| 0.684257
| 0.638131
| 0.60318
| 0
| 0.001344
| 0.325419
| 35,293
| 894
| 124
| 39.477629
| 0.851899
| 0.049217
| 0
| 0.597187
| 0
| 0.006394
| 0.079322
| 0.021288
| 0
| 0
| 0
| 0.001119
| 0
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.084399
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1cf1510ac46bda476c715d01c64fd6ef223f7da4
| 10,434
|
py
|
Python
|
ami/flowchart/library/Display.py
|
chuckie82/ami
|
7adb72c709afe4c1af53ef7f0d2b0e3639c63bf3
|
[
"BSD-3-Clause-LBNL"
] | 6
|
2018-05-31T21:37:15.000Z
|
2022-01-24T15:22:46.000Z
|
ami/flowchart/library/Display.py
|
chuckie82/ami
|
7adb72c709afe4c1af53ef7f0d2b0e3639c63bf3
|
[
"BSD-3-Clause-LBNL"
] | 68
|
2019-06-06T21:00:49.000Z
|
2022-03-14T22:35:29.000Z
|
ami/flowchart/library/Display.py
|
chuckie82/ami
|
7adb72c709afe4c1af53ef7f0d2b0e3639c63bf3
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2020-12-13T01:53:05.000Z
|
2021-07-19T04:56:51.000Z
|
from ami.flowchart.library.DisplayWidgets import ScalarWidget, ScatterWidget, WaveformWidget, \
ImageWidget, ObjectWidget, LineWidget, TimeWidget, HistogramWidget, \
Histogram2DWidget
from ami.flowchart.library.common import CtrlNode
from amitypes import Array1d, Array2d
from typing import Any
import ami.graph_nodes as gn
class ScalarViewer(CtrlNode):
"""
ScalarViewer displays the value of a scalar.
"""
nodeName = "ScalarViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"In": {"io": "in", "ttype": float}},
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ScalarWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ScalarWidget', 'terms': terms, 'topics': topics}
class WaveformViewer(CtrlNode):
"""
WaveformViewer displays 1D arrays.
"""
nodeName = "WaveformViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, WaveformWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'WaveformWidget', 'terms': terms, 'topics': topics}
class ImageViewer(CtrlNode):
"""
ImageViewer displays 2D arrays.
"""
nodeName = "ImageViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Array2d}}, viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ImageWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ImageWidget', 'terms': terms, 'topics': topics}
class ObjectViewer(CtrlNode):
"""
ObjectViewer displays string representation of a python object.
"""
nodeName = "ObjectViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Any}}, viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ObjectWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ObjectWidget', 'terms': terms, 'topics': topics}
class Histogram(CtrlNode):
"""
Histogram plots a histogram created from Binning.
"""
nodeName = "Histogram"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"Bins": {"io": "in", "ttype": Array1d},
"Counts": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, HistogramWidget, **kwargs)
def addInput(self, **args):
self.addTerminal(name="Bins", io='in', ttype=Array1d, **args)
self.addTerminal(name="Counts", io='in', ttype=Array1d, **args)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'HistogramWidget', 'terms': terms, 'topics': topics}
class Histogram2D(CtrlNode):
"""
Histogram2D plots a 2d histogram created from Binning2D.
"""
nodeName = "Histogram2D"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"XBins": {"io": "in", "ttype": Array1d},
"YBins": {"io": "in", "ttype": Array1d},
"Counts": {"io": "in", "ttype": Array2d}},
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, Histogram2DWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'Histogram2DWidget', 'terms': terms, 'topics': topics}
class ScatterPlot(CtrlNode):
"""
Scatter Plot collects two scalars and plots them against each other.
"""
nodeName = "ScatterPlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 100, 'min': 1}),
('Unique', 'check')]
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": float},
"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ScatterWidget, **kwargs)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def addInput(self, **args):
self.addTerminal(name="X", io='in', ttype=float, **args)
self.addTerminal(name="Y", io='in', ttype=float, **args)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
nodes = [gn.RollingBuffer(name=self.name()+"_buffer",
N=self.values['Num Points'], unique=self.values['Unique'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation",
inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a),
**kwargs)]
return nodes
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ScatterWidget', 'terms': terms, 'topics': topics}
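# The Map stage built in to_operation above exists to "un-zip" the rolling
# buffer: the buffer emits a list of (x, y) samples and `zip(*a)` transposes
# that into one x-series and one y-series for the scatter widget. Plain
# stdlib illustration:
#
#   >>> buffered = [(0, 1.0), (1, 1.5), (2, 0.7)]
#   >>> xs, ys = zip(*buffered)
#   >>> xs
#   (0, 1, 2)
#   >>> ys
#   (1.0, 1.5, 0.7)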
class ScalarPlot(CtrlNode):
"""
Scalar Plot collects scalars and plots them.
"""
nodeName = "ScalarPlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 100, 'min': 1})]
def __init__(self, name):
super().__init__(name, terminals={"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def addInput(self, **args):
self.addTerminal(name="Y", io='in', ttype=float, **args)
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, WaveformWidget, **kwargs)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
if len(inputs.values()) > 1:
node = [gn.RollingBuffer(name=self.name()+"_buffer", N=self.values['Num Points'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation", inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a), **kwargs)]
else:
node = gn.RollingBuffer(name=self.name(), N=self.values['Num Points'],
inputs=inputs, outputs=outputs, **kwargs)
return node
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'WaveformWidget', 'terms': terms, 'topics': topics}
class LinePlot(CtrlNode):
"""
Line Plot plots arrays.
"""
nodeName = "LinePlot"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": Array1d},
"Y": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, LineWidget, **kwargs)
def addInput(self, **args):
group = self.nextGroupName()
self.addTerminal(name="X", io='in', ttype=Array1d, group=group, **args)
self.addTerminal(name="Y", io='in', ttype=Array1d, group=group, **args)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'LineWidget', 'terms': terms, 'topics': topics}
class TimePlot(CtrlNode):
"""
Plot a number against time of day.
"""
nodeName = "TimePlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 1000, 'min': 1})]
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": float},
"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, TimeWidget, **kwargs)
def addInput(self, **args):
self.addTerminal(name="X", io='in', ttype=float, **args)
self.addTerminal(name="Y", io='in', ttype=float, **args)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
nodes = [gn.RollingBuffer(name=self.name()+"_buffer", N=self.values['Num Points'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation", inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a), **kwargs)]
return nodes
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'TimeWidget', 'terms': terms, 'topics': topics}
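# Hedged illustration of how the buffered plots compile (attribute names on
# the returned graph nodes are assumptions about ami.graph_nodes; 'Num
# Points' and 'Unique' come from the uiTemplate defaults):
#
#   plot = ScatterPlot("scatter0")
#   ops = plot.to_operation(inputs={"X": "x_src", "Y": "y_src"}, outputs=[])
#   # ops[0]: RollingBuffer named "scatter0_buffer", feeding
#   # ops[1]: Map named "scatter0_operation" with outputs
#   #         ["scatter0.X", "scatter0.Y"]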
| 33.986971
| 96
| 0.57236
| 1,077
| 10,434
| 5.431755
| 0.128134
| 0.05641
| 0.038462
| 0.061538
| 0.75812
| 0.706325
| 0.700342
| 0.689573
| 0.648889
| 0.578462
| 0
| 0.005167
| 0.276596
| 10,434
| 306
| 97
| 34.098039
| 0.769873
| 0.043607
| 0
| 0.588235
| 0
| 0
| 0.080383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.256684
| false
| 0
| 0.026738
| 0.160428
| 0.620321
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
e80a42577d50ff4b5707bc38cca297d3bcb73ab4
| 170
|
py
|
Python
|
vilmedic/scorers/NLG/__init__.py
|
jbdel/vilmedic
|
17d462a540a2632811cc2a78edd2861800a33b07
|
[
"MIT"
] | 15
|
2021-07-24T10:41:07.000Z
|
2022-03-27T14:40:47.000Z
|
vilmedic/scorers/NLG/__init__.py
|
jbdel/vilmedic
|
17d462a540a2632811cc2a78edd2861800a33b07
|
[
"MIT"
] | null | null | null |
vilmedic/scorers/NLG/__init__.py
|
jbdel/vilmedic
|
17d462a540a2632811cc2a78edd2861800a33b07
|
[
"MIT"
] | 2
|
2022-02-22T17:37:22.000Z
|
2022-03-20T12:55:40.000Z
|
from .rouge import ROUGEScorer
from .bleu.bleu import BLEUScorer
from .meteor.meteor import METEORScorer
from .cider.cider import Cider
from .ciderd.ciderd import CiderD
| 28.333333
| 39
| 0.829412
| 24
| 170
| 5.875
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 170
| 5
| 40
| 34
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e811dc5802ea397bf5ec6753cbdbdf5b70c81146
| 151
|
py
|
Python
|
ebmeta/actions/version.py
|
bkidwell/ebmeta-old
|
2279ddd14235ea31b27f0eaa7e9bb26cb43d4133
|
[
"0BSD"
] | 1
|
2021-01-05T10:24:13.000Z
|
2021-01-05T10:24:13.000Z
|
ebmeta/actions/version.py
|
bkidwell/ebmeta-old
|
2279ddd14235ea31b27f0eaa7e9bb26cb43d4133
|
[
"0BSD"
] | null | null | null |
ebmeta/actions/version.py
|
bkidwell/ebmeta-old
|
2279ddd14235ea31b27f0eaa7e9bb26cb43d4133
|
[
"0BSD"
] | null | null | null |
"""Print ebmeta version number."""
import sys
import ebmeta
def run():
print "{} {}".format(ebmeta.PROGRAM_NAME, ebmeta.VERSION)
sys.exit(0)
| 16.777778
| 61
| 0.668874
| 20
| 151
| 5
| 0.65
| 0.26
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007937
| 0.165563
| 151
| 8
| 62
| 18.875
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0.042735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.4
| null | null | 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
08fa086cfe8def53819d28aebd9cf2fb43f1e8d2
| 45
|
py
|
Python
|
geomstats/geometry/stratified/__init__.py
|
shubhamtalbar96/geomstats
|
9c17ccede7e3f0fddf31487c59227c677216a2b9
|
[
"MIT"
] | null | null | null |
geomstats/geometry/stratified/__init__.py
|
shubhamtalbar96/geomstats
|
9c17ccede7e3f0fddf31487c59227c677216a2b9
|
[
"MIT"
] | null | null | null |
geomstats/geometry/stratified/__init__.py
|
shubhamtalbar96/geomstats
|
9c17ccede7e3f0fddf31487c59227c677216a2b9
|
[
"MIT"
] | null | null | null |
"""The Stratified Space Geometry Package."""
| 22.5
| 44
| 0.733333
| 5
| 45
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 1
| 45
| 45
| 0.825
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1c30c09f1bd3070f07f121e14a73ab704dad99b4
| 106
|
py
|
Python
|
achievements/admin.py
|
peterkrauz/rpg-achievements-django
|
c65ec12237b2bee9f12d259fedd5f18934ff6c96
|
[
"Apache-2.0"
] | 1
|
2021-08-31T10:52:55.000Z
|
2021-08-31T10:52:55.000Z
|
achievements/admin.py
|
peterkrauz/rpg-achievements-django
|
c65ec12237b2bee9f12d259fedd5f18934ff6c96
|
[
"Apache-2.0"
] | null | null | null |
achievements/admin.py
|
peterkrauz/rpg-achievements-django
|
c65ec12237b2bee9f12d259fedd5f18934ff6c96
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from achievements import models
admin.site.register(models.Achievement)
| 21.2
| 39
| 0.849057
| 14
| 106
| 6.428571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09434
| 106
| 4
| 40
| 26.5
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1c5786ec0bae08a5ef1c18dbc1ab79a0a17bfc34
| 105
|
py
|
Python
|
10/01/03/2.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | null | null | null |
10/01/03/2.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | 46
|
2017-06-30T22:19:07.000Z
|
2017-07-31T22:51:31.000Z
|
10/01/03/2.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | null | null | null |
class MyClass:
def __repr__(self): return self.__class__.__name__ + '()'
print(MyClass().__repr__())
| 26.25
| 61
| 0.704762
| 12
| 105
| 4.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12381
| 105
| 3
| 62
| 35
| 0.630435
| 0
| 0
| 0
| 0
| 0
| 0.019048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
1c57a86a468018b2042fa4b09d8dfca249bb7498
| 9,562
|
py
|
Python
|
tests/tasks/core/test_core.py
|
andykawabata/prefect
|
a11061c19847beeea26616ccaf4b404ad939676b
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-09-28T16:24:02.000Z
|
2020-10-08T17:08:19.000Z
|
tests/tasks/core/test_core.py
|
andykawabata/prefect
|
a11061c19847beeea26616ccaf4b404ad939676b
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-06-28T20:52:27.000Z
|
2022-02-27T13:04:42.000Z
|
tests/tasks/core/test_core.py
|
yalaudah/prefect
|
2f7f92c39a4575119c3268b0415841c6aca5df60
|
[
"Apache-2.0"
] | 1
|
2020-05-04T13:22:11.000Z
|
2020-05-04T13:22:11.000Z
|
import pytest
from prefect.core import Edge, Flow, Parameter, Task
from prefect.tasks.core import collections
from prefect.tasks.core.constants import Constant
from prefect.tasks.core.function import FunctionTask
class IdentityTask(Task):
def run(self, x):
return x
class TestConstant:
def test_constant_task_returns_its_value(self):
x = Constant("x")
assert x.run() == "x"
y = Constant(100)
assert y.run() == 100
def test_automatic_create_constant_task(self):
with Flow(name="test") as flow:
t = Task()
t.set_dependencies(upstream_tasks=[4])
assert len(flow.tasks) == 2
assert any(isinstance(t, Constant) for t in flow.tasks)
class TestFunctionTask:
def test_function_task_requires_callable(self):
with pytest.raises(TypeError):
FunctionTask(fn=1)
def test_function_task_takes_name_from_callable(self):
def my_fn():
pass
f = FunctionTask(fn=my_fn)
assert f.name == "my_fn"
def test_function_task_takes_name_from_arg_if_provided(self):
def my_fn():
pass
f = FunctionTask(fn=my_fn, name="test")
assert f.name == "test"
def test_function_task_docstring(self):
def my_fn():
"""An example docstring."""
pass
# Original docstring available on class
assert "FunctionTask" in FunctionTask.__doc__
# Wrapped function is docstring on instance
f = FunctionTask(fn=my_fn)
assert f.__doc__ == my_fn.__doc__
# Except when no docstring on wrapped function
f = FunctionTask(fn=lambda x: x + 1)
assert "FunctionTask" in f.__doc__
def test_function_task_sets__wrapped__(self):
def my_fn():
"""An example function"""
pass
t = FunctionTask(fn=my_fn)
assert t.__wrapped__ == my_fn
assert not hasattr(FunctionTask, "__wrapped__")
class TestCollections:
def test_list_returns_a_list(self):
l = collections.List()
with Flow(name="test") as f:
l.bind(1, 2)
assert f.run().result[l].result == [1, 2]
def test_list_binds_varargs(self):
t1 = Task()
t2 = Task()
l = collections.List()
with Flow(name="test") as f:
l.bind(t1, t2)
assert set([t1, t2, l]) == f.tasks
assert Edge(t1, l, key="arg_1") in f.edges
assert Edge(t2, l, key="arg_2") in f.edges
def test_tuple_returns_a_tuple(self):
l = collections.Tuple()
with Flow(name="test") as f:
l.bind(1, 2)
assert f.run().result[l].result == (1, 2)
def test_tuple_binds_varargs(self):
t1 = Task()
t2 = Task()
l = collections.Tuple()
with Flow(name="test") as f:
l.bind(t1, t2)
assert set([t1, t2, l]) == f.tasks
assert Edge(t1, l, key="arg_1") in f.edges
assert Edge(t2, l, key="arg_2") in f.edges
def test_set_returns_a_set(self):
l = collections.Set()
with Flow(name="test") as f:
l.bind(1, 2)
assert f.run().result[l].result == set([1, 2])
def test_set_binds_varargs(self):
t1 = Task()
t2 = Task()
l = collections.Set()
with Flow(name="test") as f:
l.bind(t1, t2)
assert set([t1, t2, l]) == f.tasks
assert Edge(t1, l, key="arg_1") in f.edges
assert Edge(t2, l, key="arg_2") in f.edges
def test_dict_returns_a_dict(self):
l = collections.Dict()
with Flow(name="test") as f:
l.bind(keys=["a", "b"], values=[1, 2])
assert f.run().result[l].result == dict(a=1, b=2)
def test_dict_handles_non_string_keys(self):
l = collections.Dict()
with Flow(name="test") as f:
l.bind(keys=[None, 55], values=[1, 2])
assert f.run().result[l].result == {None: 1, 55: 2}
def test_dict_raises_for_differing_length_key_value_pairs(self):
l = collections.Dict()
with Flow(name="test") as f:
l.bind(keys=["a"], values=[1, 2])
state = f.run()
assert state.result[l].is_failed()
assert isinstance(state.result[l].result, ValueError)
def test_list_automatically_applied_to_callargs(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
with Flow(name="test") as f:
identity.bind(x=[x, y])
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.List) for t in f.tasks) == 1
assert state.result[identity].result == [1, 2]
def test_list_automatically_applied_to_callargs_imperative(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
f = Flow(name="test")
f.add_task(identity)
identity.bind(x=[x, y], flow=f)
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.List) for t in f.tasks) == 1
assert state.result[identity].result == [1, 2]
def test_tuple_automatically_applied_to_callargs(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
with Flow(name="test") as f:
identity.bind(x=(x, y))
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.Tuple) for t in f.tasks) == 1
assert state.result[identity].result == (1, 2)
def test_tuple_automatically_applied_to_callargs_imperative(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
f = Flow(name="test")
f.add_task(identity)
identity.bind(x=(x, y), flow=f)
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.Tuple) for t in f.tasks) == 1
assert state.result[identity].result == (1, 2)
def test_set_automatically_applied_to_callargs(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
with Flow(name="test") as f:
identity.bind(x=set([x, y]))
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.Set) for t in f.tasks) == 1
assert state.result[identity].result == set([1, 2])
def test_set_automatically_applied_to_callargs_imperative(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
f = Flow(name="test")
f.add_task(identity)
identity.bind(x=set([x, y]), flow=f)
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.Set) for t in f.tasks) == 1
assert state.result[identity].result == set([1, 2])
def test_dict_automatically_applied_to_callargs(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
with Flow(name="test") as f:
identity.bind(x=dict(a=x, b=y))
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 5 # 2 params, identity, Dict, List of dict values
assert sum(isinstance(t, collections.Dict) for t in f.tasks) == 1
assert state.result[identity].result == dict(a=1, b=2)
def test_dict_automatically_applied_to_callargs_imperative(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
f = Flow(name="test")
f.add_task(identity)
identity.bind(x=dict(a=x, b=y), flow=f)
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 5 # 2 params, identity, Dict, List of dict values
assert sum(isinstance(t, collections.Dict) for t in f.tasks) == 1
assert state.result[identity].result == dict(a=1, b=2)
def test_nested_collection_automatically_applied_to_callargs(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
with Flow(name="test") as f:
identity.bind(x=dict(a=[x, dict(y=y)], b=(y, set([x]))))
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 10
assert state.result[identity].result == dict(a=[1, dict(y=2)], b=(2, set([1])))
def test_nested_collection_automatically_applied_to_callargs_imperative(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
f = Flow(name="test")
f.add_task(identity)
identity.bind(x=dict(a=[x, dict(y=y)], b=(y, set([x]))), flow=f)
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 10
assert state.result[identity].result == dict(a=[1, dict(y=2)], b=(2, set([1])))
def test_list_maintains_sort_order_for_more_than_10_items(self):
# https://github.com/PrefectHQ/prefect/issues/2451
l = collections.List()
with Flow(name="test") as f:
l.bind(*list(range(15)))
assert f.run().result[l].result == list(range(15))
def test_tuple_maintains_sort_order_for_more_than_10_items(self):
# https://github.com/PrefectHQ/prefect/issues/2451
t = collections.Tuple()
with Flow(name="test") as f:
t.bind(*list(range(15)))
assert f.run().result[t].result == tuple(range(15))
| 33.787986
| 87
| 0.587743
| 1,353
| 9,562
| 4.006652
| 0.099039
| 0.036156
| 0.0487
| 0.050175
| 0.767017
| 0.75927
| 0.747833
| 0.724405
| 0.70024
| 0.667958
| 0
| 0.021861
| 0.272851
| 9,562
| 282
| 88
| 33.907801
| 0.757802
| 0.037335
| 0
| 0.61086
| 0
| 0
| 0.020788
| 0
| 0
| 0
| 0
| 0
| 0.257919
| 1
| 0.149321
| false
| 0.0181
| 0.022624
| 0.004525
| 0.19457
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
98ca5c7bd9f6d4e14adea6a5004535831845ac15
| 6,763
|
py
|
Python
|
pokemon/pokemon_tests/test_serializers.py
|
pessman/pokemon_utils
|
cbe06ebe323cb38a35846274d812bdbe8d0ae8ca
|
[
"MIT"
] | 1
|
2019-03-11T04:12:50.000Z
|
2019-03-11T04:12:50.000Z
|
pokemon/pokemon_tests/test_serializers.py
|
pessman/pokemon_utils
|
cbe06ebe323cb38a35846274d812bdbe8d0ae8ca
|
[
"MIT"
] | null | null | null |
pokemon/pokemon_tests/test_serializers.py
|
pessman/pokemon_utils
|
cbe06ebe323cb38a35846274d812bdbe8d0ae8ca
|
[
"MIT"
] | 2
|
2019-03-13T03:17:29.000Z
|
2019-04-04T20:06:50.000Z
|
import pytest
from django.test import TestCase
from rest_framework import serializers as drf_serializers
from pokemon import models, serializers
@pytest.mark.django_db
class StatsSerializer(TestCase):
"""
Test Module for StatsSerializer
"""
def setUp(self):
models.Nature.objects.create(
name="Adamant",
positive="attack",
negative="special_attack"
)
self.valid_base_stats = {
"hit_points": 108,
"attack": 130,
"defense": 95,
"special_attack": 80,
"special_defense": 85,
"speed": 102
}
self.valid_ivs = {
"hit_points": 24,
"attack": 12,
"defense": 30,
"special_attack": 16,
"special_defense": 23,
"speed": 5
}
self.invalid_ivs_high = {
"hit_points": 33,
"attack": 12,
"defense": 30,
"special_attack": 16,
"special_defense": 23,
"speed": 5
}
self.invalid_ivs_low = {
"hit_points": -1,
"attack": 12,
"defense": 30,
"special_attack": 16,
"special_defense": 23,
"speed": 5
}
self.valid_evs = {
"hit_points": 74,
"attack": 190,
"defense": 91,
"special_attack": 48,
"special_defense": 84,
"speed": 23
}
self.invalid_evs_high_individual = {
"hit_points": 0,
"attack": 300,
"defense": 0,
"special_attack": 0,
"special_defense": 0,
"speed": 0
}
self.invalid_evs_high_total = {
"hit_points": 74,
"attack": 190,
"defense": 91,
"special_attack": 48,
"special_defense": 84,
"speed": 100
}
self.invalid_evs_low_individual = {
"hit_points": 0,
"attack": -10,
"defense": 0,
"special_attack": 0,
"special_defense": 0,
"speed": 0
}
self.valid_level = 78
self.invalid_level_high = 110
self.invalid_level_low = 0
self.valid_nature = "adamant"
self.invalid_nature = "thisisntanature"
def test_stats_serializer(self):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
stats = serializer.get_stats()
self.assertEqual(stats["hit_points"], 289)
self.assertEqual(stats["attack"], 278)
self.assertEqual(stats["defense"], 193)
self.assertEqual(stats["special_attack"], 135)
self.assertEqual(stats["special_defense"], 171)
self.assertEqual(stats["speed"], 171)
def test_invalid_nature(self):
with pytest.raises(drf_serializers.ValidationError) as exc:
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.invalid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_level_high(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.invalid_level_high,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_level_low(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.invalid_level_low,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_ivs_low(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.invalid_ivs_low,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_ivs_high(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.invalid_ivs_high,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_evs_high_total(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.invalid_evs_high_total,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_evs_high_individual(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.invalid_evs_high_individual,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_evs_low_individual(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.invalid_evs_low_individual,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
| 32.990244
| 67
| 0.546799
| 660
| 6,763
| 5.325758
| 0.134848
| 0.107539
| 0.036984
| 0.051209
| 0.76074
| 0.728307
| 0.728307
| 0.714367
| 0.714367
| 0.714367
| 0
| 0.025699
| 0.349845
| 6,763
| 204
| 68
| 33.151961
| 0.773709
| 0.004584
| 0
| 0.59887
| 0
| 0
| 0.119881
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 1
| 0.056497
| false
| 0
| 0.022599
| 0
| 0.084746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
98e9db17617d3ce2f8dbdda50ebfbe93ce11f25b
| 10,064
|
py
|
Python
|
models/pointnet2_sem_seg_msg_haptic.py
|
yufeiwang63/Pointnet_Pointnet2_pytorch
|
f9078a71b973c13ae7ffa897e142dc7b1e8e88be
|
[
"MIT"
] | null | null | null |
models/pointnet2_sem_seg_msg_haptic.py
|
yufeiwang63/Pointnet_Pointnet2_pytorch
|
f9078a71b973c13ae7ffa897e142dc7b1e8e88be
|
[
"MIT"
] | null | null | null |
models/pointnet2_sem_seg_msg_haptic.py
|
yufeiwang63/Pointnet_Pointnet2_pytorch
|
f9078a71b973c13ae7ffa897e142dc7b1e8e88be
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
from haptic.Pointnet_Pointnet2_pytorch.models.pointnet2_utils import PointNetSetAbstractionMsg,PointNetFeaturePropagation
class get_shared_model(nn.Module):
def __init__(self, use_batch_norm, num_classes, num_input_channel=7):
super(get_shared_model, self).__init__()
self.sa1 = PointNetSetAbstractionMsg(1024, [0.05, 0.1], [16, 32], num_input_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(256, [0.1, 0.2], [16, 32], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(64, [0.2, 0.4], [16, 32], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.sa4 = PointNetSetAbstractionMsg(16, [0.4, 0.8], [16, 32], 256+256, [[256, 256, 512], [256, 384, 512]], use_batch_norm=use_batch_norm)
self.fp4 = PointNetFeaturePropagation(512+512+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
self.conv1 = nn.Conv1d(128, 128, 1)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(128)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_classes, 1)
# for normal prediction
self.conv_normal = nn.Conv1d(128, 3, 1)
# for force prediction
self.conv_force = nn.Conv1d(128, 1, 1)
self.use_batch_norm = use_batch_norm
def forward(self, xyz):
l0_points = xyz
l0_xyz = xyz[:,:3,:]
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
if self.use_batch_norm:
x = self.drop1(F.relu(self.bn1(self.conv1(l0_points))))
else:
x = F.relu(self.conv1(l0_points))
contact = self.conv2(x)
normal = self.conv_normal(x)
normal = F.normalize(normal, dim=1)
force = self.conv_force(x)
# this is not needed with BCElogit loss
# x = F.log_softmax(x, dim=1)
contact = contact.permute(0, 2, 1)
normal = normal.permute(0, 2, 1)
force = force.permute(0, 2, 1)
return (contact, normal, force), l4_points
class get_model(nn.Module):
def __init__(self, use_batch_norm, num_out_channel, num_in_channel=7, target='contact',
radius_list=[[0.05, 0.1], [0.1, 0.2], [0.2, 0.4], [0.4, 0.8]],
npoint_list=[1024, 256, 64, 16],
sample_point_1_list=[16, 16, 16, 16],
sample_point_2_list=[32, 32, 32, 32],
layer=4,
downsample=True,
dropout=True,
track_running_stats=True,
mlp1_size=[16, 16, 32],
mlp2_size=[32, 32, 64],
interpolation_mlp_size=[128, 128, 128]
):
print("using layer: ", layer)
super(get_model, self).__init__()
self.layer = layer
if self.layer == 4:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(npoint_list[2], radius_list[2], [sample_point_1_list[2], sample_point_2_list[2]], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.sa4 = PointNetSetAbstractionMsg(npoint_list[3], radius_list[3], [sample_point_1_list[3], sample_point_2_list[3]], 256+256, [[256, 256, 512], [256, 384, 512]], use_batch_norm=use_batch_norm)
self.fp4 = PointNetFeaturePropagation(512+512+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 3:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(npoint_list[2], radius_list[2], [sample_point_1_list[2], sample_point_2_list[2]], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 2:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+128+128, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 1:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [mlp1_size, mlp2_size], use_batch_norm=use_batch_norm,
downsample=downsample, track_running_stats=track_running_stats)
self.fp1 = PointNetFeaturePropagation(mlp1_size[-1] + mlp2_size[-1], interpolation_mlp_size, use_batch_norm=use_batch_norm, track_running_stats=track_running_stats)
self.drop_out = dropout
self.conv1 = nn.Conv1d(128, 128, 1)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(128, track_running_stats=track_running_stats)
if self.drop_out:
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_out_channel, 1)
self.use_batch_norm = use_batch_norm
self.target = target
def forward(self, xyz):
l0_points = xyz
l0_xyz = xyz[:,:3,:]
if self.layer == 4:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 3:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 2:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 1:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
if self.use_batch_norm:
if self.drop_out:
x = self.drop1(F.relu(self.bn1(self.conv1(l0_points))))
else:
x = F.relu(self.bn1(self.conv1(l0_points)))
else:
x = F.relu(self.conv1(l0_points))
x = self.conv2(x)
        # log_softmax is not needed with BCEWithLogitsLoss, which applies the
        # sigmoid internally
        # x = F.log_softmax(x, dim=1)
if self.target == 'normal':
x = F.normalize(x, dim=1)
x = x.permute(0, 2, 1)
# return x, l4_points
return x, None
class get_loss_original(nn.Module):
def __init__(self):
super(get_loss_original, self).__init__()
def forward(self, pred, target, trans_feat, weight):
total_loss = F.nll_loss(pred, target, weight=weight)
return total_loss
class get_loss(nn.Module):
def __init__(self):
super(get_loss, self).__init__()
self.loss = nn.BCEWithLogitsLoss()
def forward(self, pred, target, trans_feat, weight):
total_loss = self.loss(pred, target)
return total_loss
if __name__ == '__main__':
    import torch

    model = get_model(13)
    xyz = torch.rand(6, 9, 2048)  # (batch, channels, num_points)
    print(model(xyz))
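A minimal extension of the smoke test above into a dummy loss computation, assuming default constructor settings for get_model; the 0/1 target tensor is a stand-in for real per-point labels:

import torch

model = get_model(13)
criterion = get_loss()                            # wraps nn.BCEWithLogitsLoss
points = torch.rand(6, 9, 2048)                   # (batch, channels, num_points)
pred, _ = model(points)                           # raw logits, (batch, num_points, 13)
target = torch.randint(0, 2, pred.shape).float()  # dummy binary labels
loss = criterion(pred, target, None, None)        # trans_feat and weight are unused
loss.backward()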
| 54.695652
| 207
| 0.64527
| 1,505
| 10,064
| 4.003987
| 0.089701
| 0.08762
| 0.13143
| 0.074676
| 0.76701
| 0.761699
| 0.756057
| 0.733488
| 0.713575
| 0.713575
| 0
| 0.116216
| 0.227941
| 10,064
| 184
| 208
| 54.695652
| 0.659331
| 0.019277
| 0
| 0.562092
| 0
| 0
| 0.003447
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052288
| false
| 0
| 0.026144
| 0
| 0.130719
| 0.006536
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
98eaf0ff524a7491427b7b19f617c3c6aaefc6a4
| 100
|
py
|
Python
|
backend/src/notifications/admin.py
|
YujithIsura/request-management
|
3c683274881ef7798779e03a24042034edcd941c
|
[
"MIT"
] | 3
|
2021-11-21T20:46:00.000Z
|
2021-12-02T14:47:18.000Z
|
notification/admin.py
|
lautarianoo/django_social_network
|
ec83af7267f830a2463cb591138dae1a088f9a4e
|
[
"BSD-3-Clause"
] | 169
|
2020-04-09T08:39:25.000Z
|
2021-09-03T01:07:01.000Z
|
notification/admin.py
|
lautarianoo/django_social_network
|
ec83af7267f830a2463cb591138dae1a088f9a4e
|
[
"BSD-3-Clause"
] | 13
|
2020-04-05T20:53:11.000Z
|
2022-02-28T14:52:17.000Z
|
from django.contrib import admin
from .models import Notification
admin.site.register(Notification)
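For completeness, a sketch of the ModelAdmin-based alternative to the bare register call above; list_display is restricted to "id" because the Notification model's fields are not shown here:

from django.contrib import admin
from .models import Notification

@admin.register(Notification)              # equivalent to admin.site.register(Notification)
class NotificationAdmin(admin.ModelAdmin):
    list_display = ("id",)                 # only "id" is guaranteed to exist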
| 25
| 33
| 0.85
| 13
| 100
| 6.538462
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09
| 100
| 4
| 33
| 25
| 0.934066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c711129f24117223c3e97558213be4cfb18083e6
| 38
|
py
|
Python
|
scripts/flow_tests/__init__.py
|
rombie/contrail-test
|
a68c71d6f282142501a7e2e889bbb232fdd82dc3
|
[
"Apache-2.0"
] | 5
|
2020-09-29T00:36:57.000Z
|
2022-02-16T06:51:32.000Z
|
serial_scripts/system_test/flow_tests/__init__.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | 27
|
2019-11-02T02:18:34.000Z
|
2022-02-24T18:49:08.000Z
|
serial_scripts/system_test/flow_tests/__init__.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | 20
|
2019-11-28T16:02:25.000Z
|
2022-01-06T05:56:58.000Z
|
"""FLOW RELATED SYSTEM TEST CASES."""
| 19
| 37
| 0.684211
| 5
| 38
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 1
| 38
| 38
| 0.787879
| 0.815789
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c748ba40f4f42a2340be17f0209db3df304f6bd7
| 196
|
py
|
Python
|
plugins/core/player_manager_plugin/__init__.py
|
StarryPy/StarryPy-Historic
|
b9dbd552b8c4631a5a8e9dda98b7ba447eca59da
|
[
"WTFPL"
] | 38
|
2015-02-12T11:57:59.000Z
|
2018-11-15T16:03:45.000Z
|
plugins/core/player_manager_plugin/__init__.py
|
StarryPy/StarryPy-Historic
|
b9dbd552b8c4631a5a8e9dda98b7ba447eca59da
|
[
"WTFPL"
] | 68
|
2015-02-05T23:29:47.000Z
|
2017-12-27T08:26:25.000Z
|
plugins/core/player_manager_plugin/__init__.py
|
StarryPy/StarryPy-Historic
|
b9dbd552b8c4631a5a8e9dda98b7ba447eca59da
|
[
"WTFPL"
] | 21
|
2015-02-06T18:58:21.000Z
|
2017-12-24T20:08:59.000Z
|
from plugins.core.player_manager_plugin.plugin import PlayerManagerPlugin
from plugins.core.player_manager_plugin.manager import (
Banned,
UserLevels,
permissions,
PlayerManager
)
| 24.5
| 73
| 0.795918
| 21
| 196
| 7.238095
| 0.571429
| 0.144737
| 0.197368
| 0.276316
| 0.447368
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147959
| 196
| 7
| 74
| 28
| 0.91018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.285714
| 0
| 0.285714
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c790959983852e5ff5dc7391f5d9c3bf229bac12
| 435
|
py
|
Python
|
hci/command/commands/le_apcf_commands/apcf_service_data.py
|
cc4728/python-hci
|
d988f69c55972af445ec3ba04fd4cd1199593d10
|
[
"MIT"
] | 3
|
2021-12-16T14:32:45.000Z
|
2022-01-25T03:10:48.000Z
|
hci/command/commands/le_apcf_commands/apcf_service_data.py
|
cc4728/python-hci
|
d988f69c55972af445ec3ba04fd4cd1199593d10
|
[
"MIT"
] | null | null | null |
hci/command/commands/le_apcf_commands/apcf_service_data.py
|
cc4728/python-hci
|
d988f69c55972af445ec3ba04fd4cd1199593d10
|
[
"MIT"
] | 1
|
2022-01-25T03:10:50.000Z
|
2022-01-25T03:10:50.000Z
|
from ..le_apcf_command_pkt import LE_APCF_Command
from struct import pack, unpack
from enum import IntEnum
"""
This pare base on spec <<Android BT HCI Requirement for BLE feature>> v0.52
Advertisement Package Content filter
"""
class APCF_Service_Data(LE_APCF_Command):
def __init__(self):
# TODO generate cmd
super().__init__()
    def __str__(self):
        # TODO: append the service-data fields once the command is generated
        return super().__str__()
| 25.588235
| 75
| 0.698851
| 59
| 435
| 4.728814
| 0.728814
| 0.064516
| 0.139785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008451
| 0.183908
| 435
| 17
| 76
| 25.588235
| 0.777465
| 0.03908
| 0
| 0
| 0
| 0
| 0.006734
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 1
| 0.25
| false
| 0
| 0.375
| 0.125
| 0.875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
c799f39a2d11cd8cf47042ccb70ce866c8193b11
| 191
|
py
|
Python
|
dss/dss_capi_gr/__init__.py
|
dss-extensions/dss_python
|
f6c4440a14287d06f1bd10180484b349f764ba7e
|
[
"BSD-3-Clause"
] | 24
|
2019-03-07T20:24:24.000Z
|
2022-03-23T17:58:00.000Z
|
dss/dss_capi_gr/__init__.py
|
dss-extensions/dss_python
|
f6c4440a14287d06f1bd10180484b349f764ba7e
|
[
"BSD-3-Clause"
] | 32
|
2019-02-14T03:46:31.000Z
|
2022-03-23T00:01:28.000Z
|
dss/dss_capi_ir/__init__.py
|
PMeira/dss_python
|
2dbc72ed875108d3f98d21cb0a488bab6b0d7f4c
|
[
"BSD-3-Clause"
] | 5
|
2019-02-19T04:54:49.000Z
|
2022-03-23T10:40:51.000Z
|
'''
A compatibility layer for DSS C-API that mimics the official OpenDSS COM interface.
Copyright (c) 2016-2019 Paulo Meira
'''
from __future__ import absolute_import
from .IDSS import IDSS
| 23.875
| 83
| 0.78534
| 29
| 191
| 5
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049383
| 0.151832
| 191
| 7
| 84
| 27.285714
| 0.845679
| 0.628272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c79bb693d6ca4d67f78e8585c83eae0b233a16e3
| 76
|
py
|
Python
|
hydrocarbon_problem/env/__init__.py
|
lollcat/Aspen-RL
|
0abefb9e7def7762e829ac4d621519d9d01592c0
|
[
"MIT"
] | 1
|
2021-12-09T04:27:33.000Z
|
2021-12-09T04:27:33.000Z
|
hydrocarbon_problem/env/__init__.py
|
lollcat/Aspen-RL
|
0abefb9e7def7762e829ac4d621519d9d01592c0
|
[
"MIT"
] | 2
|
2021-12-09T08:47:12.000Z
|
2022-03-25T16:07:56.000Z
|
hydrocarbon_problem/env/__init__.py
|
lollcat/Aspen-RL
|
0abefb9e7def7762e829ac4d621519d9d01592c0
|
[
"MIT"
] | 1
|
2022-03-23T13:53:54.000Z
|
2022-03-23T13:53:54.000Z
|
from hydrocarbon_problem.env.types_ import Observation, Done, Stream, Column
| 76
| 76
| 0.855263
| 10
| 76
| 6.3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 76
| 1
| 76
| 76
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c7ae90de0db880bd9c87e6ef499b2ab425e89a1b
| 19
|
py
|
Python
|
todo/task/__init__.py
|
BenMcLean981/flask-todo
|
9827f4993c7d4af0c42ed2a891f2eb56227f1644
|
[
"MIT"
] | null | null | null |
todo/task/__init__.py
|
BenMcLean981/flask-todo
|
9827f4993c7d4af0c42ed2a891f2eb56227f1644
|
[
"MIT"
] | null | null | null |
todo/task/__init__.py
|
BenMcLean981/flask-todo
|
9827f4993c7d4af0c42ed2a891f2eb56227f1644
|
[
"MIT"
] | null | null | null |
"""Todo module."""
| 9.5
| 18
| 0.526316
| 2
| 19
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 19
| 1
| 19
| 19
| 0.588235
| 0.631579
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1be2bb16aca1a3770cbb4668f10786667f95971a
| 63
|
py
|
Python
|
src/vilbert/datasets/__init__.py
|
NoOneUST/COMP5212
|
171b564f08841e426545f58e3b52870c0e090586
|
[
"MIT"
] | 3
|
2020-04-05T06:50:46.000Z
|
2020-04-05T08:20:33.000Z
|
src/vilbert/datasets/__init__.py
|
NoOneUST/COMP5212Project
|
171b564f08841e426545f58e3b52870c0e090586
|
[
"MIT"
] | 2
|
2021-05-21T16:24:54.000Z
|
2022-02-10T01:21:54.000Z
|
src/vilbert/datasets/__init__.py
|
NoOneUST/COMP5212Project
|
171b564f08841e426545f58e3b52870c0e090586
|
[
"MIT"
] | 1
|
2020-06-15T16:22:20.000Z
|
2020-06-15T16:22:20.000Z
|
from .visual_entailment_dataset import VisualEntailmentDataset
| 31.5
| 62
| 0.920635
| 6
| 63
| 9.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063492
| 63
| 1
| 63
| 63
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4001312cef0d9f28268935ec40cf1f39b54d853e
| 131
|
py
|
Python
|
onadata/libs/utils/audit.py
|
ubpd/kobocat
|
45906e07e8f05c30e3e26bab5570a8ab1ee264db
|
[
"BSD-2-Clause"
] | null | null | null |
onadata/libs/utils/audit.py
|
ubpd/kobocat
|
45906e07e8f05c30e3e26bab5570a8ab1ee264db
|
[
"BSD-2-Clause"
] | null | null | null |
onadata/libs/utils/audit.py
|
ubpd/kobocat
|
45906e07e8f05c30e3e26bab5570a8ab1ee264db
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals, print_function, division, absolute_import
HOME_ACCESSED = "home-accessed"
| 32.75
| 82
| 0.824427
| 17
| 131
| 5.882353
| 0.823529
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0.099237
| 131
| 3
| 83
| 43.666667
| 0.838983
| 0.099237
| 0
| 0
| 0
| 0
| 0.112069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
402ba89b6c4bbf8923f29b3e69bf5634d07e5b15
| 98
|
py
|
Python
|
Python/module.py
|
minjibyeongho/KOSA-Pytorch
|
80d71a8c579d645bea4c3352c9babdf232a8630e
|
[
"MIT"
] | 2
|
2021-05-25T08:52:07.000Z
|
2021-08-13T23:49:42.000Z
|
Python/module.py
|
minjibyeongho/KOSA-Pytorch
|
80d71a8c579d645bea4c3352c9babdf232a8630e
|
[
"MIT"
] | null | null | null |
Python/module.py
|
minjibyeongho/KOSA-Pytorch
|
80d71a8c579d645bea4c3352c9babdf232a8630e
|
[
"MIT"
] | 2
|
2021-05-24T00:49:45.000Z
|
2021-06-11T01:30:12.000Z
|
#module.py
def hello():
print("Hello!")
#if __name__=="__main__":
# print(__name__)
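A sketch of what the commented-out guard is protecting against: when module.py is imported rather than run directly, __name__ is the module name, so guarded code would not execute:

# hypothetical interactive session
import module
module.hello()            # prints: Hello!
print(module.__name__)    # prints: module (not __main__)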
| 14
| 26
| 0.581633
| 11
| 98
| 4.090909
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22449
| 98
| 7
| 27
| 14
| 0.592105
| 0.530612
| 0
| 0
| 0
| 0
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
407b22ddf13dab3659fb801ada3a7cb31608cf9a
| 200
|
py
|
Python
|
PDA/extra_assignments/10.6. Dicts_ Countries and cities/solution/main.py
|
EMbeDS-education/StatsAndComputing20212022
|
971e418882b206a1b5606d15d222cef1a5a04834
|
[
"MIT"
] | 2
|
2022-02-24T09:35:15.000Z
|
2022-03-14T20:34:33.000Z
|
PDA/extra_assignments/10.6. Dicts_ Countries and cities/solution/main.py
|
GeorgiosArg/StatsAndComputing20212022
|
798d39af6aa5ef5eef49d5d6f43191351e8a49f3
|
[
"MIT"
] | null | null | null |
PDA/extra_assignments/10.6. Dicts_ Countries and cities/solution/main.py
|
GeorgiosArg/StatsAndComputing20212022
|
798d39af6aa5ef5eef49d5d6f43191351e8a49f3
|
[
"MIT"
] | 2
|
2022-03-15T21:40:35.000Z
|
2022-03-26T14:51:31.000Z
|
city_country = {}
for _ in range(int(input())):
country, *cities = input().split()
for city in cities:
city_country[city] = country
for _ in range(int(input())):
print(city_country[input()])
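A worked example of the expected I/O, assuming the input format implied by the code (a line count, then lines of "Country City City ...", then a query count and one city per line):

# Input:               # Output:
# 2
# Italy Rome Milan
# France Paris
# 2
# Rome                 Italy
# Paris                France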
| 28.571429
| 36
| 0.665
| 28
| 200
| 4.571429
| 0.357143
| 0.34375
| 0.21875
| 0.25
| 0.453125
| 0.453125
| 0.453125
| 0
| 0
| 0
| 0
| 0
| 0.16
| 200
| 7
| 37
| 28.571429
| 0.761905
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4086e6c92cd0f6bf0670ff63d76bbec71943f194
| 162
|
py
|
Python
|
20-Blog_Clone_Project/blog_project_Practice/blog/admin.py
|
andy2167565/Django-Bootcamp-Practice
|
f08d2866382db96060450d4dbd1ffaca7243f623
|
[
"MIT"
] | null | null | null |
20-Blog_Clone_Project/blog_project_Practice/blog/admin.py
|
andy2167565/Django-Bootcamp-Practice
|
f08d2866382db96060450d4dbd1ffaca7243f623
|
[
"MIT"
] | null | null | null |
20-Blog_Clone_Project/blog_project_Practice/blog/admin.py
|
andy2167565/Django-Bootcamp-Practice
|
f08d2866382db96060450d4dbd1ffaca7243f623
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from blog.models import Post, Comment
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
| 23.142857
| 38
| 0.777778
| 23
| 162
| 5.478261
| 0.565217
| 0.142857
| 0.269841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141975
| 162
| 6
| 39
| 27
| 0.906475
| 0.160494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
40cc65a33578c41b6882d9360507c431c3bb4a45
| 74
|
py
|
Python
|
flasky/auth/forms/__init__.py
|
by46/fasky
|
c6941972b57284c2167dfacf022f981939249256
|
[
"MIT"
] | null | null | null |
flasky/auth/forms/__init__.py
|
by46/fasky
|
c6941972b57284c2167dfacf022f981939249256
|
[
"MIT"
] | null | null | null |
flasky/auth/forms/__init__.py
|
by46/fasky
|
c6941972b57284c2167dfacf022f981939249256
|
[
"MIT"
] | null | null | null |
from .login import LoginForm
from .registration import RegistrationForm
| 24.666667
| 43
| 0.837838
| 8
| 74
| 7.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 74
| 2
| 44
| 37
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
40ef2f9956caa7a12ca34a8e2817ab06584f9a11
| 3,110
|
py
|
Python
|
wisdem/test/test_optimization_drivers/test_dakota_driver.py
|
johnjasa/WISDEM
|
a4571e71cb5b9869c81790f8abb1bb7fba8fdb02
|
[
"Apache-2.0"
] | 81
|
2015-01-19T18:17:31.000Z
|
2022-03-17T07:14:43.000Z
|
wisdem/test/test_optimization_drivers/test_dakota_driver.py
|
johnjasa/WISDEM
|
a4571e71cb5b9869c81790f8abb1bb7fba8fdb02
|
[
"Apache-2.0"
] | 159
|
2015-02-05T01:54:52.000Z
|
2022-03-30T22:44:39.000Z
|
wisdem/test/test_optimization_drivers/test_dakota_driver.py
|
johnjasa/WISDEM
|
a4571e71cb5b9869c81790f8abb1bb7fba8fdb02
|
[
"Apache-2.0"
] | 70
|
2015-01-02T15:22:39.000Z
|
2022-02-11T00:33:07.000Z
|
import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal
from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer
try:
import dakota
except ImportError:
dakota = None
@unittest.skipIf(dakota is None, "only run if Dakota is installed.")
class TestDakotaOptimization(unittest.TestCase):
def test_2D_opt_max_iterations(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.5)
def test_2D_opt_EGO(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"initial_samples": 5, "method": "efficient_global", "seed": 123456}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.999996864)
def test_two_variables(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]]), "z": [1.0, 2.0]}
desvars = {"x": np.array([0.0, 0.25]), "z": 1.5}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_two_variable as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 1.0)
def test_constraint(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y", "con"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_low_model as model"
output_scalers = [1.0, 1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 0.5)
assert_near_equal(np.min(np.array(results["con"])), 0.0)
if __name__ == "__main__":
unittest.main()
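A sketch of invoking a single case from the command line; the module name is assumed from the file path above, and Dakota plus the weis testbed models must be importable:

#   python -m unittest test_dakota_driver.TestDakotaOptimization.test_2D_opt_max_iterations
# Without Dakota on the path, @unittest.skipIf skips the whole class instead of failing it.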
| 40.921053
| 112
| 0.630868
| 410
| 3,110
| 4.573171
| 0.217073
| 0.0224
| 0.0144
| 0.0384
| 0.752533
| 0.752533
| 0.752533
| 0.738133
| 0.72
| 0.7088
| 0
| 0.041874
| 0.224437
| 3,110
| 75
| 113
| 41.466667
| 0.735489
| 0
| 0
| 0.508772
| 0
| 0
| 0.206919
| 0.081713
| 0
| 0
| 0
| 0
| 0.105263
| 1
| 0.070175
| false
| 0
| 0.175439
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
40f93ae054bebaa285f8c2f48242d86d8297b31f
| 8,460
|
py
|
Python
|
python/ht/nodes/styles/styles.py
|
Hengle/Houdini-Toolbox
|
a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3
|
[
"MIT"
] | 136
|
2015-01-03T04:03:23.000Z
|
2022-02-07T11:08:57.000Z
|
python/ht/nodes/styles/styles.py
|
Hengle/Houdini-Toolbox
|
a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3
|
[
"MIT"
] | 11
|
2017-02-09T20:05:04.000Z
|
2021-01-24T22:25:59.000Z
|
python/ht/nodes/styles/styles.py
|
Hengle/Houdini-Toolbox
|
a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3
|
[
"MIT"
] | 26
|
2015-08-18T12:11:02.000Z
|
2020-12-19T01:53:31.000Z
|
"""Classes representing color entries and mappings."""
# =============================================================================
# IMPORTS
# =============================================================================
from __future__ import annotations
# Standard Library
import re
from typing import TYPE_CHECKING, Optional, Tuple
if TYPE_CHECKING:
import hou
# =============================================================================
# CLASSES
# =============================================================================
class StyleConstant:
"""This class represents a named constant style.
:param name: The constant's name.
:param color: The constant's color.
:param color_type: The color type.
:param shape: The constant's shape.
:param file_path: The path to the definition file.
:return:
"""
def __init__(
self,
name: str,
color: hou.Color,
color_type: str,
shape: Optional[str] = None,
file_path: Optional[str] = None,
):
self._color = color
self._color_type = color_type
self._shape = shape
self._file_path = file_path
self._name = name
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
def __eq__(self, other):
if not isinstance(other, StyleConstant):
return NotImplemented
# For our purposes we only care if the names match.
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __ne__(self, other):
if not isinstance(other, StyleConstant):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return "<StyleConstant {} ({})>".format(self.name, self.color)
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def color(self) -> hou.Color:
"""The mapped color."""
return self._color
# -------------------------------------------------------------------------
@property
def color_type(self) -> str:
"""The mapped color type."""
return self._color_type
# -------------------------------------------------------------------------
@property
def file_path(self) -> Optional[str]:
"""Path the definition was from."""
return self._file_path
# -------------------------------------------------------------------------
@property
def name(self) -> str:
"""The name the color is mapped to."""
return self._name
# -------------------------------------------------------------------------
@property
def shape(self) -> Optional[str]:
"""The mapped shape."""
return self._shape
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def apply_to_node(self, node: hou.Node):
"""Apply styling to a node.
:param node: Node to apply to
:return:
"""
if self.color is not None:
node.setColor(self.color)
if self.shape is not None:
node.setUserData("nodeshape", self.shape)
class StyleRule:
"""This class represents a color application bound to a name.
:param name: The rule's name.
:param color: The rule's color.
:param color_type: The rule's color type.
:param shape: The rule's shape.
:param file_path: The path to the definition file.
:return:
"""
def __init__(
self,
name: str,
color: hou.Color,
color_type: str,
shape: Optional[str] = None,
file_path: Optional[str] = None,
):
self._color = color
self._color_type = color_type
self._shape = shape
self._file_path = file_path
self._name = name
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
def __eq__(self, other):
if not isinstance(other, StyleRule):
return NotImplemented
# For our purposes we only care if the names match.
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __ne__(self, other):
if not isinstance(other, StyleRule):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return "<StyleRule {} ({})>".format(self.name, self.color)
def __str__(self):
value = self._get_typed_color_value()
        # strip trailing zeros (and the dot, when all decimals are zero)
        components = [re.sub("\\.*0+$", "", "{:0.3f}".format(val)) for val in value]
return "(" + ", ".join(components) + ")"
# -------------------------------------------------------------------------
# NON-PUBLIC METHODS
# -------------------------------------------------------------------------
    def _get_typed_color_value(self) -> Tuple[float, ...]:
"""Get the appropriately typed color values.
:return: The color value in the correct type.
"""
to_func = getattr(self.color, self.color_type.lower())
return to_func()
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def color(self) -> hou.Color:
"""The mapped color."""
return self._color
@property
def color_type(self) -> str:
"""The mapped color type."""
return self._color_type
@property
def shape(self) -> Optional[str]:
"""The mapped shape name."""
return self._shape
@property
def file_path(self) -> Optional[str]:
"""Path the definition was from."""
return self._file_path
@property
def name(self) -> str:
"""The name the style is mapped to."""
return self._name
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def apply_to_node(self, node: hou.Node):
"""Apply styling to a node.
:param node: Node to apply to
:return:
"""
if self.color is not None:
node.setColor(self.color)
if self.shape is not None:
node.setUserData("nodeshape", self.shape)
class ConstantRule:
"""This class represents a style application bound to a named constant.
:param name: The rule's name.
:param constant_name: The constant name.
:param file_path: The path to the definition file.
:return:
"""
def __init__(self, name: str, constant_name: str, file_path: Optional[str] = None):
self._constant_name = constant_name
self._file_path = file_path
self._name = name
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
def __eq__(self, other):
if not isinstance(other, ConstantRule):
return NotImplemented
# For our purposes we only care if the names match.
return self.name == other.name
def __hash__(self):
return hash((self.constant_name, self.name))
def __ne__(self, other):
if not isinstance(other, ConstantRule):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return "<ConstantRule {} ({})>".format(self.name, self.constant_name)
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def constant_name(self) -> str:
"""The mapped constant."""
return self._constant_name
@property
def file_path(self) -> Optional[str]:
"""Path the definition was from."""
return self._file_path
@property
def name(self) -> str:
"""The name the style is mapped to."""
return self._name
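A small sketch of the name-based equality these classes define; color=None stands in for a hou.Color, which is only available inside Houdini:

a = StyleConstant("geo", color=None, color_type="rgb")
b = StyleConstant("geo", color=None, color_type="hsv")
assert a == b              # __eq__ compares names only
assert len({a, b}) == 1    # __hash__ agrees, so sets de-duplicate by name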
| 28.389262
| 87
| 0.450473
| 776
| 8,460
| 4.703608
| 0.126289
| 0.039452
| 0.019726
| 0.023014
| 0.744658
| 0.712877
| 0.686301
| 0.672055
| 0.672055
| 0.631781
| 0
| 0.000467
| 0.241253
| 8,460
| 297
| 88
| 28.484848
| 0.568157
| 0.419031
| 0
| 0.823077
| 0
| 0
| 0.021631
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.246154
| false
| 0
| 0.030769
| 0.046154
| 0.553846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
9085eea801b451acd44298bd5d756b5655efe26d
| 138
|
py
|
Python
|
edit/core/optimizer/__init__.py
|
tpoisonooo/basicVSR_mge
|
53df836a7dcc075083ef7c9ff7cabea69fec3192
|
[
"Apache-2.0"
] | 28
|
2021-03-23T09:00:33.000Z
|
2022-03-10T03:55:00.000Z
|
edit/core/optimizer/__init__.py
|
tpoisonooo/basicVSR_mge
|
53df836a7dcc075083ef7c9ff7cabea69fec3192
|
[
"Apache-2.0"
] | 2
|
2021-04-17T20:08:55.000Z
|
2022-02-01T17:48:55.000Z
|
edit/core/optimizer/__init__.py
|
tpoisonooo/basicVSR_mge
|
53df836a7dcc075083ef7c9ff7cabea69fec3192
|
[
"Apache-2.0"
] | 5
|
2021-05-19T07:35:56.000Z
|
2022-01-13T02:11:50.000Z
|
from .builder import build_optimizers, MGE_OPTIMIZERS, build_gradmanagers
from .default_constructor import DefaultOptimizerConstructor
| 23
| 73
| 0.876812
| 14
| 138
| 8.357143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094203
| 138
| 5
| 74
| 27.6
| 0.936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9093d4d8bd3bc3c9e386b961c6079deedbc45036
| 204
|
py
|
Python
|
python_code/cutils/viz/__init__.py
|
IBM/oct-glaucoma-vf-estimate
|
ea79352547f33fe05ee532ab9faad6a5e4811a76
|
[
"Apache-2.0"
] | null | null | null |
python_code/cutils/viz/__init__.py
|
IBM/oct-glaucoma-vf-estimate
|
ea79352547f33fe05ee532ab9faad6a5e4811a76
|
[
"Apache-2.0"
] | null | null | null |
python_code/cutils/viz/__init__.py
|
IBM/oct-glaucoma-vf-estimate
|
ea79352547f33fe05ee532ab9faad6a5e4811a76
|
[
"Apache-2.0"
] | null | null | null |
from .vizutils import viz_overlaymask, display_side2side, display_side2sidev1, stack_patches, figure2image, get_heatmap, visualize_probmaps
from .vizutils import get_heatmap_multiple, figure2image_save
| 68
| 140
| 0.872549
| 24
| 204
| 7.041667
| 0.708333
| 0.142012
| 0.213018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026738
| 0.083333
| 204
| 3
| 141
| 68
| 0.877005
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
90cd458888a31c41557f6a303abf3a9a1b516bae
| 40
|
py
|
Python
|
quicken/_internal/__init__.py
|
chrahunt/quicken
|
2dd00a5f024d7b114b211aad8a2618ec8f101956
|
[
"MIT"
] | 3
|
2019-11-12T17:56:08.000Z
|
2022-03-12T03:43:10.000Z
|
quicken/_internal/__init__.py
|
chrahunt/quicken
|
2dd00a5f024d7b114b211aad8a2618ec8f101956
|
[
"MIT"
] | 47
|
2018-12-10T04:08:58.000Z
|
2022-03-20T14:54:36.000Z
|
quicken/_internal/__init__.py
|
chrahunt/quicken
|
2dd00a5f024d7b114b211aad8a2618ec8f101956
|
[
"MIT"
] | 1
|
2019-11-12T17:55:17.000Z
|
2019-11-12T17:55:17.000Z
|
class QuickenError(Exception):
pass
| 13.333333
| 30
| 0.75
| 4
| 40
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 40
| 2
| 31
| 20
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
90ee00867dbf308646030430e4e8f7dca424dfc1
| 44
|
py
|
Python
|
CustomExceptions.py
|
DouglasHSS/NeuralNetworks
|
739df65866e48a792c151974df528d4afb31d19d
|
[
"MIT"
] | null | null | null |
CustomExceptions.py
|
DouglasHSS/NeuralNetworks
|
739df65866e48a792c151974df528d4afb31d19d
|
[
"MIT"
] | null | null | null |
CustomExceptions.py
|
DouglasHSS/NeuralNetworks
|
739df65866e48a792c151974df528d4afb31d19d
|
[
"MIT"
] | null | null | null |
class PerceptronError(Exception):
pass
| 11
| 33
| 0.75
| 4
| 44
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 3
| 34
| 14.666667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
90f9cab42c98867e4c26010b699fc6f4bbfe103f
| 167
|
py
|
Python
|
deallocate/params.py
|
jefferycwc/tacker-example-plugin
|
641d2acebca3b95c7d2d635769b6f0f2d84051b2
|
[
"Apache-2.0"
] | null | null | null |
deallocate/params.py
|
jefferycwc/tacker-example-plugin
|
641d2acebca3b95c7d2d635769b6f0f2d84051b2
|
[
"Apache-2.0"
] | null | null | null |
deallocate/params.py
|
jefferycwc/tacker-example-plugin
|
641d2acebca3b95c7d2d635769b6f0f2d84051b2
|
[
"Apache-2.0"
] | 1
|
2022-01-19T01:35:43.000Z
|
2022-01-19T01:35:43.000Z
|
OS_MA_NFVO_IP = '192.168.1.197'
OS_USER_DOMAIN_NAME = 'Default'
OS_USERNAME = 'admin'
OS_PASSWORD = '0000'
OS_PROJECT_DOMAIN_NAME = 'Default'
OS_PROJECT_NAME = 'admin'
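A hypothetical illustration of how such a parameter module is typically consumed; SomeNfvoClient and its keyword names are placeholders, not part of this repository:

# from deallocate import params
# client = SomeNfvoClient(host=params.OS_MA_NFVO_IP,
#                         username=params.OS_USERNAME,
#                         password=params.OS_PASSWORD,
#                         project=params.OS_PROJECT_NAME)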
| 27.833333
| 34
| 0.772455
| 28
| 167
| 4.142857
| 0.607143
| 0.172414
| 0.293103
| 0.327586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093333
| 0.101796
| 167
| 6
| 35
| 27.833333
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0.244048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
90faa5d3c27957f6280791f2da201e228021ab56
| 10,255
|
py
|
Python
|
RFEM/Loads/solidSetLoad.py
|
DavidNaizheZhou/RFEM_Python_Client
|
a5f7790b67de3423907ce10c0aa513c0a1aca47b
|
[
"MIT"
] | 16
|
2021-10-13T21:00:11.000Z
|
2022-03-21T11:12:09.000Z
|
RFEM/Loads/solidSetLoad.py
|
DavidNaizheZhou/RFEM_Python_Client
|
a5f7790b67de3423907ce10c0aa513c0a1aca47b
|
[
"MIT"
] | 49
|
2021-10-19T13:18:51.000Z
|
2022-03-30T08:20:17.000Z
|
RFEM/Loads/solidSetLoad.py
|
DavidNaizheZhou/RFEM_Python_Client
|
a5f7790b67de3423907ce10c0aa513c0a1aca47b
|
[
"MIT"
] | 7
|
2021-10-13T06:06:24.000Z
|
2022-03-29T17:48:39.000Z
|
from RFEM.initModel import Model, clearAtributes, ConvertToDlString
from RFEM.enums import SolidSetLoadType, SolidSetLoadDistribution, SolidSetLoadDirection
class SolidSetLoad():
def __init__(self,
no: int =1,
load_case_no: int = 1,
solid_sets_no: str= '1',
load_type = SolidSetLoadType.LOAD_TYPE_FORCE,
load_distribution = SolidSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_direction = SolidSetLoadDirection.LOAD_DIRECTION_GLOBAL_Z_OR_USER_DEFINED_W_TRUE,
magnitude: float = 0,
comment: str = '',
params: dict = {}):
# Client model | Solid Load
clientObject = Model.clientModel.factory.create('ns0:solid_set_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Assigned Solid No.
clientObject.solid_sets = ConvertToDlString(solid_sets_no)
# Load Type
clientObject.load_type = load_type.name
# Load Distribution
clientObject.load_distribution = load_distribution.name
# Load Direction
clientObject.load_direction = load_direction.name
# Load Magnitude
clientObject.uniform_magnitude = magnitude
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Solid Load to client model
Model.clientModel.service.set_solid_set_load(load_case_no, clientObject)
def Force(self,
no: int =1,
load_case_no: int = 1,
solid_sets_no: str= '1',
load_direction = SolidSetLoadDirection.LOAD_DIRECTION_GLOBAL_Z_OR_USER_DEFINED_W_TRUE,
magnitude: float = 0,
comment: str = '',
params: dict = {}):
# Client model | Solid Load
clientObject = Model.clientModel.factory.create('ns0:solid_set_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Assigned Solid No.
clientObject.solid_sets = ConvertToDlString(solid_sets_no)
# Load Type
clientObject.load_type = SolidSetLoadType.LOAD_TYPE_FORCE.name
# Load Distribution
clientObject.load_distribution = SolidSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM.name
# Load Direction
clientObject.load_direction = load_direction.name
# Load Magnitude
clientObject.uniform_magnitude = magnitude
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Solid Load to client model
Model.clientModel.service.set_solid_set_load(load_case_no, clientObject)
def Temperature(self,
no: int = 1,
load_case_no: int = 1,
solid_sets_no: str= '1',
load_distribution = SolidSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_parameter = None,
comment: str = '',
params: dict = {}):
'''
load_parameter:
LOAD_DISTRIBUTION_UNIFORM: load_parameter = magnitude
LOAD_DISTRIBUTION_LINEAR_IN_X: load_parameter = [magnitude_1, magnitude_2, node_1, node_2]
LOAD_DISTRIBUTION_LINEAR_IN_Y: load_parameter = [magnitude_1, magnitude_2, node_1, node_2]
LOAD_DISTRIBUTION_LINEAR_IN_Z: load_parameter = [magnitude_1, magnitude_2, node_1, node_2]
params:
{''}
'''
# Client model | Solid Load
clientObject = Model.clientModel.factory.create('ns0:solid_set_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Assigned Solid No.
clientObject.solid_sets = ConvertToDlString(solid_sets_no)
# Load Type
clientObject.load_type = SolidSetLoadType.LOAD_TYPE_TEMPERATURE.name
# Load Distribution
if load_distribution.name == "LOAD_DISTRIBUTION_UNIFORM":
clientObject.uniform_magnitude = load_parameter
else:
clientObject.magnitude_1 = load_parameter[0]
clientObject.magnitude_2 = load_parameter[1]
clientObject.node_1 = load_parameter[2]
clientObject.node_2 = load_parameter[3]
clientObject.load_distribution = load_distribution.name
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Solid Load to client model
Model.clientModel.service.set_solid_set_load(load_case_no, clientObject)
def Strain(self,
no: int = 1,
load_case_no: int = 1,
solid_sets_no: str= '1',
load_distribution = SolidSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_parameter = None,
comment: str = '',
params: dict = {}):
'''
load_parameter:
LOAD_DISTRIBUTION_UNIFORM: load_parameter = [strain_uniform_magnitude_x, strain_uniform_magnitude_y, strain_uniform_magnitude_z]
LOAD_DISTRIBUTION_LINEAR_IN_X: load_parameter = [strain_magnitude_x1, strain_magnitude_y1, strain_magnitude_z1, strain_magnitude_x2, strain_magnitude_y2, strain_magnitude_z2, node_1, node_2]
LOAD_DISTRIBUTION_LINEAR_IN_Y: load_parameter = [strain_magnitude_x1, strain_magnitude_y1, strain_magnitude_z1, strain_magnitude_x2, strain_magnitude_y2, strain_magnitude_z2, node_1, node_2]
LOAD_DISTRIBUTION_LINEAR_IN_Z: load_parameter = [strain_magnitude_x1, strain_magnitude_y1, strain_magnitude_z1, strain_magnitude_x2, strain_magnitude_y2, strain_magnitude_z2, node_1, node_2]
params:
{''}
'''
# Client model | Solid Load
clientObject = Model.clientModel.factory.create('ns0:solid_set_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Assigned Solid No.
clientObject.solid_sets = ConvertToDlString(solid_sets_no)
# Load Type
clientObject.load_type = SolidSetLoadType.LOAD_TYPE_STRAIN.name
# Load Distribution
if load_distribution.name == "LOAD_DISTRIBUTION_UNIFORM":
clientObject.strain_uniform_magnitude_x = load_parameter[0]
clientObject.strain_uniform_magnitude_y = load_parameter[1]
clientObject.strain_uniform_magnitude_z = load_parameter[2]
else:
clientObject.strain_magnitude_x1 = load_parameter[0]
clientObject.strain_magnitude_y1 = load_parameter[1]
clientObject.strain_magnitude_z1 = load_parameter[2]
clientObject.strain_magnitude_x2 = load_parameter[3]
clientObject.strain_magnitude_y2 = load_parameter[4]
clientObject.strain_magnitude_z2 = load_parameter[5]
clientObject.node_1 = load_parameter[6]
clientObject.node_2 = load_parameter[7]
clientObject.load_distribution = load_distribution.name
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Solid Load to client model
Model.clientModel.service.set_solid_set_load(load_case_no, clientObject)
def Motion(self,
no: int = 1,
load_case_no: int = 1,
solid_sets_no: str= '1',
load_parameter = None,
comment: str = '',
params: dict = {}):
'''
load_parameter:
load_parameter = [angular_velocity, angular_acceleration, axis_definition_p1_x, axis_definition_p1_y, axis_definition_p1_z, axis_definition_p2_x, axis_definition_p2_y, axis_definition_p2_z]
params:
{''}
'''
# Client model | Solid Load
clientObject = Model.clientModel.factory.create('ns0:solid_set_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Assigned Solid No.
clientObject.solid_sets = ConvertToDlString(solid_sets_no)
# Load Type
clientObject.load_type = SolidSetLoadType.LOAD_TYPE_ROTARY_MOTION.name
# Velocity
clientObject.angular_velocity = load_parameter[0]
# Acceleration
clientObject.angular_acceleration = load_parameter[1]
# Axis Definition
clientObject.axis_definition_p1_x = load_parameter[2]
clientObject.axis_definition_p1_y = load_parameter[3]
clientObject.axis_definition_p1_z = load_parameter[4]
clientObject.axis_definition_p2_x = load_parameter[5]
clientObject.axis_definition_p2_y = load_parameter[6]
clientObject.axis_definition_p2_z = load_parameter[7]
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Solid Load to client model
Model.clientModel.service.set_solid_set_load(load_case_no, clientObject)
#def Buoyancy():
# print('The function Buoyancy() is not implemented yet.')
#def Gas():
# print('The function Gas() is not implemented yet.')
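A minimal call sketch, assuming an RFEM model is already open and connected via RFEM.initModel.Model; the magnitude value is an arbitrary example:

# creates a uniform force load on solid set '1' in load case 1
SolidSetLoad(no=1, load_case_no=1, solid_sets_no='1',
             load_type=SolidSetLoadType.LOAD_TYPE_FORCE,
             magnitude=5000.0)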
| 36.756272
| 202
| 0.648757
| 1,096
| 10,255
| 5.748175
| 0.096715
| 0.080476
| 0.031746
| 0.034921
| 0.799683
| 0.753333
| 0.722063
| 0.704127
| 0.704127
| 0.704127
| 0
| 0.014305
| 0.284252
| 10,255
| 278
| 203
| 36.888489
| 0.844005
| 0.266602
| 0
| 0.7
| 0
| 0
| 0.019939
| 0.006876
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.015385
| 0
| 0.061538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
291c77c6ee2c7b622d64d133d7665a508bb40300
| 106
|
py
|
Python
|
main/models/__init__.py
|
prajnamort/LambdaOJ2
|
5afc7ceb6022caa244f66032a19ebac14c4448da
|
[
"MIT"
] | 2
|
2017-09-26T07:25:11.000Z
|
2021-11-24T04:19:40.000Z
|
main/models/__init__.py
|
prajnamort/LambdaOJ2
|
5afc7ceb6022caa244f66032a19ebac14c4448da
|
[
"MIT"
] | 50
|
2017-03-31T19:54:21.000Z
|
2022-03-11T23:14:22.000Z
|
main/models/__init__.py
|
prajnamort/LambdaOJ2
|
5afc7ceb6022caa244f66032a19ebac14c4448da
|
[
"MIT"
] | 7
|
2017-03-26T07:07:17.000Z
|
2019-12-05T01:05:41.000Z
|
from .user import User, MultiUserUpload
from .problem import Problem, TestData
from .submit import Submit
| 26.5
| 39
| 0.820755
| 14
| 106
| 6.214286
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 106
| 3
| 40
| 35.333333
| 0.945652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
293ac2ae42d575f893f18bae2751d93e4e138ae8
| 75
|
py
|
Python
|
PP4E-Examples-1.4/Examples/PP4E/System/Environment/echoenv.py
|
AngelLiang/PP4E
|
3a7f63b366e1e4700b4d2524884696999a87ba9d
|
[
"MIT"
] | null | null | null |
PP4E-Examples-1.4/Examples/PP4E/System/Environment/echoenv.py
|
AngelLiang/PP4E
|
3a7f63b366e1e4700b4d2524884696999a87ba9d
|
[
"MIT"
] | null | null | null |
PP4E-Examples-1.4/Examples/PP4E/System/Environment/echoenv.py
|
AngelLiang/PP4E
|
3a7f63b366e1e4700b4d2524884696999a87ba9d
|
[
"MIT"
] | null | null | null |
import os
print('echoenv...', end=' ')
print('Hello,', os.environ['USER'])
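A more defensive variant, assuming USER may be unset (for instance on Windows, where USERNAME is the conventional name):

import os

user = os.environ.get('USER') or os.environ.get('USERNAME', 'anonymous')
print('echoenv...', end=' ')
print('Hello,', user)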
| 18.75
| 35
| 0.613333
| 10
| 75
| 4.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093333
| 75
| 3
| 36
| 25
| 0.676471
| 0
| 0
| 0
| 0
| 0
| 0.28
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
2940e9042fa0fc027376618fe6d76d1057e9e9bd
| 37,124
|
py
|
Python
|
pyPLANES/pw/pw_classes.py
|
matael/pyPLANES
|
7f591090446303884c9a3d049e42233efae0b7f4
|
[
"MIT"
] | null | null | null |
pyPLANES/pw/pw_classes.py
|
matael/pyPLANES
|
7f591090446303884c9a3d049e42233efae0b7f4
|
[
"MIT"
] | null | null | null |
pyPLANES/pw/pw_classes.py
|
matael/pyPLANES
|
7f591090446303884c9a3d049e42233efae0b7f4
|
[
"MIT"
] | 1
|
2020-12-15T16:24:08.000Z
|
2020-12-15T16:24:08.000Z
|
#! /usr/bin/env python
# -*- coding:utf8 -*-
#
# pw_classes.py
#
# This file is part of pyplanes, a software distributed under the MIT license.
# For any question, please contact one of the authors cited below.
#
# Copyright (c) 2020
# Olivier Dazel <[email protected]>
# Mathieu Gaborit <[email protected]>
# Peter Göransson <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
from mediapack import from_yaml
from mediapack import Air, PEM, EqFluidJCA
from pyPLANES.utils.io import initialisation_out_files_plain
from pyPLANES.core.calculus import PwCalculus
from pyPLANES.core.multilayer import MultiLayer
from pyPLANES.pw.pw_layers import FluidLayer
from pyPLANES.pw.pw_interfaces import FluidFluidInterface, RigidBacking
Air = Air()
# def initialise_PW_solver(L, b):
# nb_PW = 0
# dofs = []
# for _layer in L:
# if _layer.medium.MODEL == "fluid":
# dofs.append(nb_PW+np.arange(2))
# nb_PW += 2
# elif _layer.medium.MODEL == "pem":
# dofs.append(nb_PW+np.arange(6))
# nb_PW += 6
# elif _layer.medium.MODEL == "elastic":
# dofs.append(nb_PW+np.arange(4))
# nb_PW += 4
# interface = []
# for i_l, _layer in enumerate(L[:-1]):
# interface.append((L[i_l].medium.MODEL, L[i_l+1].medium.MODEL))
# return nb_PW, interface, dofs
class PwProblem(PwCalculus, MultiLayer):
"""
Plane Wave Problem
"""
def __init__(self, **kwargs):
PwCalculus.__init__(self, **kwargs)
        termination = kwargs.get("termination", "rigid")
        self.method = kwargs.get("method", "global")
MultiLayer.__init__(self, **kwargs)
self.kx, self.ky, self.k = None, None, None
self.shift_plot = kwargs.get("shift_pw", 0.)
self.plot = kwargs.get("plot_results", [False]*6)
self.result = {}
self.outfiles_directory = False
if self.method == "global":
self.layers.insert(0,FluidLayer(Air,1.e-2))
if self.layers[1].medium.MEDIUM_TYPE == "fluid":
self.interfaces.append(FluidFluidInterface(self.layers[0],self.layers[1]))
self.nb_PW = 0
for _layer in self.layers:
if _layer.medium.MODEL == "fluid":
_layer.dofs = self.nb_PW+np.arange(2)
self.nb_PW += 2
elif _layer.medium.MODEL == "pem":
_layer.dofs = self.nb_PW+np.arange(6)
self.nb_PW += 6
elif _layer.medium.MODEL == "elastic":
_layer.dofs = self.nb_PW+np.arange(4)
self.nb_PW += 4
def update_frequency(self, f):
PwCalculus.update_frequency(self, f)
MultiLayer.update_frequency(self, f, self.k, self.kx)
def create_linear_system(self, f):
self.A = np.zeros((self.nb_PW-1, self.nb_PW), dtype=complex)
i_eq = 0
# Loop on the interfaces
for _int in self.interfaces:
if self.method == "global":
i_eq = _int.update_M_global(self.A, i_eq)
# for i_inter, _inter in enumerate(self.interfaces):
# if _inter[0] == "fluid":
# if _inter[1] == "fluid":
# i_eq = self.interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_fluid_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_fluid_elastic(i_eq, i_inter, Layers, dofs, M)
# elif _inter[0] == "pem":
# if _inter[1] == "fluid":
# i_eq = self.interface_pem_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_pem_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_pem_elastic(i_eq, i_inter, Layers, dofs, M)
# elif _inter[0] == "elastic":
# if _inter[1] == "fluid":
# i_eq = self.interface_elastic_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_elastic_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_elastic_elastic(i_eq, i_inter, Layers, dofs, M)
# if self.backing == backing.rigid:
# if Layers[-1].medium.MODEL == "fluid":
# i_eq = self.interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1] )
# elif Layers[-1].medium.MODEL == "pem":
# i_eq = self.interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1])
# elif Layers[-1].medium.MODEL == "elastic":
# i_eq = self.interface_elastic_rigid(M, i_eq, Layers[-1], dofs[-1])
# elif self.backing == "transmission":
# i_eq = self.semi_infinite_medium(M, i_eq, Layers[-1], dofs[-1] )
self.F = -self.A[:, 0]*np.exp(1j*self.ky*self.layers[0].d) # - is for transposition, exponential term is for the phase shift
self.A = np.delete(self.A, 0, axis=1)
# print(self.A)
X = LA.solve(self.A, self.F)
# print(X)
# R_pyPLANES_PW = X[0]
# if self.backing == "transmission":
# T_pyPLANES_PW = X[-2]
# else:
# T_pyPLANES_PW = 0.
# X = np.delete(X, 0)
# del(dofs[0])
# for i, _ld in enumerate(dofs):
# dofs[i] -= 2
# if self.plot:
# self.plot_sol_PW(X, dofs)
# out["R"] = R_pyPLANES_PW
# out["T"] = T_pyPLANES_PW
# return out
# class Solver_PW(PwCalculus):
# def __init__(self, **kwargs):
# PwCalculus.__init__(self, **kwargs)
# ml = kwargs.get("ml")
# termination = kwargs.get("termination")
# self.layers = []
# for _l in ml:
# if _l[0] == "Air":
# mat = Air
# else:
# mat = from_yaml(_l[0]+".yaml")
# d = _l[1]
# self.layers.append(Layer(mat,d))
# if termination in ["trans", "transmission","Transmission"]:
# self.backing = "Transmission"
# else:
# self.backing = backing.rigid
# self.kx, self.ky, self.k = None, None, None
# self.shift_plot = kwargs.get("shift_pw", 0.)
# self.plot = kwargs.get("plot_results", [False]*6)
# self.result = {}
# self.outfiles_directory = False
# initialisation_out_files_plain(self)
# def write_out_files(self, out):
# self.out_file.write("{:.12e}\t".format(self.current_frequency))
# abs = 1-np.abs(out["R"])**2
# self.out_file.write("{:.12e}\t".format(abs))
# self.out_file.write("\n")
# def interface_fluid_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_rigid(self, M, ieq, L, d):
# SV, k_y = fluid_SV(self.kx, self.k, L.medium.K)
# M[ieq, d[0]] = SV[0, 0]*np.exp(-1j*k_y*L.thickness)
# M[ieq, d[1]] = SV[0, 1]
# ieq += 1
# return ieq
# def semi_infinite_medium(self, M, ieq, L, d):
# M[ieq, d[1]] = 1.
# ieq += 1
# return ieq
# def interface_pem_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium, self.kx)
# for _i in range(6):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+0][4]] = SV_1[_i, 4]
# M[ieq, d[iinter+0][5]] = SV_1[_i, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[_i, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[_i, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[4, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[4, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[4, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[4, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[4, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[4, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = SV_2[3, 2]
# M[ieq, d[iinter+1][3]] = SV_2[3, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[3, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[3, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = -SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[0, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = SV_2[1, 2]
# M[ieq, d[iinter+1][3]] = SV_2[1, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[1, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[1, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = (SV_2[3, 0]-SV_2[4, 0])
# M[ieq, d[iinter+1][1]] = (SV_2[3, 1]-SV_2[4, 1])
# M[ieq, d[iinter+1][2]] = (SV_2[3, 2]-SV_2[4, 2])
# M[ieq, d[iinter+1][3]] = (SV_2[3, 3]-SV_2[4, 3])*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = (SV_2[3, 4]-SV_2[4, 4])*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = (SV_2[3, 5]-SV_2[4, 5])*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[3, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[3, 3]
# M[ieq, d[iinter+1][0]] = SV_2[5, 0]
# M[ieq, d[iinter+1][1]] = SV_2[5, 1]
# M[ieq, d[iinter+1][2]] = SV_2[5, 2]
# M[ieq, d[iinter+1][3]] = SV_2[5, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[5, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[5, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_pem_elastic(self, ieq, iinter, L, d, M):
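# # PEM/elastic coupling: mirror image of interface_elastic_pem, with the
# # porous layer now on the upstream side of the interface.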
# SV_1, k_y_1 = PEM_SV(L[iinter].medium,self.kx)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# M[ieq, d[iinter+0][4]] = SV_1[0, 4]
# M[ieq, d[iinter+0][5]] = SV_1[0, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[1, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[1, 3]
# M[ieq, d[iinter+0][4]] = SV_1[1, 4]
# M[ieq, d[iinter+0][5]] = SV_1[1, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[2, 3]
# M[ieq, d[iinter+0][4]] = SV_1[2, 4]
# M[ieq, d[iinter+0][5]] = SV_1[2, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = (SV_1[3, 0]-SV_1[4, 0])*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = (SV_1[3, 1]-SV_1[4, 1])*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = (SV_1[3, 2]-SV_1[4, 2])*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = (SV_1[3, 3]-SV_1[4, 3])
# M[ieq, d[iinter+0][4]] = (SV_1[3, 4]-SV_1[4, 4])
# M[ieq, d[iinter+0][5]] = (SV_1[3, 5]-SV_1[4, 5])
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[5, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[5, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[5, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[5, 3]
# M[ieq, d[iinter+0][4]] = SV_1[5, 4]
# M[ieq, d[iinter+0][5]] = SV_1[5, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[3, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[3, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_elastic(self, ieq, iinter, L, d, M):
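# # Elastic/elastic coupling: the full state vector
# # {sigma_xy, u_y, sigma_yy, u_x} is continuous across the interface.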
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega)
# for _i in range(4):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium, self.kx, self.omega)
# # Continuity of u_y
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# # sigma_yy = -p
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# # sigma_xy = 0
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_pem_fluid(self, ieq, iinter, L, d, M):
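# # PEM/fluid coupling: mirror image of interface_fluid_pem (continuity of
# # u_y^t and p, vanishing in-vacuo stresses sigma_xy_hat and sigma_yy_hat).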
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+0][4]] = -SV_1[2, 4]
# M[ieq, d[iinter+0][5]] = -SV_1[2, 5]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[4, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[4, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[4, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = -SV_1[4, 3]
# M[ieq, d[iinter+0][4]] = -SV_1[4, 4]
# M[ieq, d[iinter+0][5]] = -SV_1[4, 5]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# M[ieq, d[iinter+0][4]] = SV_1[0, 4]
# M[ieq, d[iinter+0][5]] = SV_1[0, 5]
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[3, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[3, 3]
# M[ieq, d[iinter+0][4]] = SV_1[3, 4]
# M[ieq, d[iinter+0][5]] = SV_1[3, 5]
# ieq += 1
# return ieq
# def interface_elastic_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium, self.kx, self.omega)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# # Continuity of u_y
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# # sigma_yy = -p
# M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# # sigma_xy = 0
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# ieq += 1
# return ieq
# def interface_elastic_rigid(self, M, ieq, L, d):
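# # Rigid backing behind an elastic layer: both displacement components,
# # u_y (row 1) and u_x (row 3), must vanish on the wall.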
# SV, k_y = elastic_SV(L.medium,self.kx, self.omega)
# M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[1, 2]
# M[ieq, d[3]] = SV[1, 3]
# ieq += 1
# M[ieq, d[0]] = SV[3, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[3, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[3, 2]
# M[ieq, d[3]] = SV[3, 3]
# ieq += 1
# return ieq
# def interface_pem_rigid(self, M, ieq, L, d):
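# # Rigid backing behind a PEM layer: u_y^s (row 1), u_y^t (row 2) and
# # u_x^s (row 5) must all vanish on the wall.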
# SV, k_y = PEM_SV(L.medium, self.kx)
# M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[1, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[1, 3]
# M[ieq, d[4]] = SV[1, 4]
# M[ieq, d[5]] = SV[1, 5]
# ieq += 1
# M[ieq, d[0]] = SV[2, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[2, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[2, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[2, 3]
# M[ieq, d[4]] = SV[2, 4]
# M[ieq, d[5]] = SV[2, 5]
# ieq += 1
# M[ieq, d[0]] = SV[5, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[5, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[5, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[5, 3]
# M[ieq, d[4]] = SV[5, 4]
# M[ieq, d[5]] = SV[5, 5]
# ieq += 1
# return ieq
# def plot_sol_PW(self, X, dofs):
# x_start = self.shift_plot
# for _l, _layer in enumerate(self.layers):
# x_f = np.linspace(0, _layer.thickness,200)
# x_b = x_f-_layer.thickness
# if _layer.medium.MODEL == "fluid":
# SV, k_y = fluid_SV(self.kx, self.k, _layer.medium.K)
# pr = SV[1, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]]
# pr += SV[1, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]]
# ut = SV[0, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]]
# ut += SV[0, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]]
# if self.plot[2]:
# plt.figure(2)
# plt.plot(x_start+x_f, np.abs(pr), 'r')
# plt.plot(x_start+x_f, np.imag(pr), 'm')
# plt.title("Pressure")
# # plt.figure(5)
# # plt.plot(x_start+x_f,np.abs(ut),'b')
# # plt.plot(x_start+x_f,np.imag(ut),'k')
# if _layer.medium.MODEL == "pem":
# SV, k_y = PEM_SV(_layer.medium, self.kx)
# ux, uy, pr, ut = [np.zeros_like(x_f, dtype=complex) for _ in range(4)]
# for i_dim in range(3):
# # PEM_SV rows: 5 -> u_x^s, 1 -> u_y^s, 4 -> p, 2 -> u_y^t
# ux += SV[5, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ux += SV[5, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# uy += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# uy += SV[1, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# pr += SV[4, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# pr += SV[4, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# ut += SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ut += SV[2, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# if self.plot[0]:
# plt.figure(0)
# plt.plot(x_start+x_f, np.abs(ux), 'r')
# plt.plot(x_start+x_f, np.imag(ux), 'm')
# plt.title("Solid displacement along x")
# if self.plot[1]:
# plt.figure(1)
# plt.plot(x_start+x_f, np.abs(uy), 'r')
# plt.plot(x_start+x_f, np.imag(uy), 'm')
# plt.title("Solid displacement along y")
# if self.plot[2]:
# plt.figure(2)
# plt.plot(x_start+x_f, np.abs(pr), 'r')
# plt.plot(x_start+x_f, np.imag(pr), 'm')
# plt.title("Pressure")
# if _layer.medium.MODEL == "elastic":
# SV, k_y = elastic_SV(_layer.medium, self.kx, self.omega)
# ux, uy, pr, sig = [np.zeros_like(x_f, dtype=complex) for _ in range(4)]
# for i_dim in range(2):
# # elastic_SV rows: 3 -> u_x, 1 -> u_y, 2 -> sigma_yy, 0 -> sigma_xy
# ux += SV[3, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ux += SV[3, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# uy += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# uy += SV[1, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# pr -= SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# pr -= SV[2, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# sig -= SV[0, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# sig -= SV[0, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# if self.plot[0]:
# plt.figure(0)
# plt.plot(x_start+x_f, np.abs(ux), 'r')
# plt.plot(x_start+x_f, np.imag(ux), 'm')
# plt.title("Solid displacement along x")
# if self.plot[1]:
# plt.figure(1)
# plt.plot(x_start+x_f, np.abs(uy), 'r')
# plt.plot(x_start+x_f, np.imag(uy), 'm')
# plt.title("Solid displacement along y")
# # if self.plot[2]:
# # plt.figure(2)
# # plt.plot(x_start+x_f, np.abs(pr), 'r')
# # plt.plot(x_start+x_f, np.imag(pr), 'm')
# # plt.title("Sigma_yy")
# # if self.plot[2]:
# # plt.figure(3)
# # plt.plot(x_start+x_f, np.abs(sig), 'r')
# # plt.plot(x_start+x_f, np.imag(sig), 'm')
# # plt.title("Sigma_xy")
# x_start += _layer.thickness
# def PEM_SV(mat,ky):
# r''' S={0: \hat{\sigma}_{xy}, 1: u_y^s, 2: u_y^t, 3: \hat{\sigma}_{yy}, 4: p, 5: u_x^s}'''
# kx_1 = np.sqrt(mat.delta_1**2-ky**2)
# kx_2 = np.sqrt(mat.delta_2**2-ky**2)
# kx_3 = np.sqrt(mat.delta_3**2-ky**2)
# kx = np.array([kx_1, kx_2, kx_3])
# delta = np.array([mat.delta_1, mat.delta_2, mat.delta_3])
# alpha_1 = -1j*mat.A_hat*mat.delta_1**2-1j*2*mat.N*kx[0]**2
# alpha_2 = -1j*mat.A_hat*mat.delta_2**2-1j*2*mat.N*kx[1]**2
# alpha_3 = -2*1j*mat.N*kx[2]*ky
# SV = np.zeros((6,6), dtype=complex)
# SV[0:6, 0] = np.array([-2*1j*mat.N*kx[0]*ky, kx[0], mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky])
# SV[0:6, 3] = np.array([ 2*1j*mat.N*kx[0]*ky,-kx[0],-mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky])
# SV[0:6, 1] = np.array([-2*1j*mat.N*kx[1]*ky, kx[1], mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky])
# SV[0:6, 4] = np.array([ 2*1j*mat.N*kx[1]*ky,-kx[1],-mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky])
# SV[0:6, 2] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, alpha_3, 0., -kx[2]])
# SV[0:6, 5] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, -alpha_3, 0., kx[2]])
# return SV, kx
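# # Minimal sanity check for PEM_SV, assuming a hypothetical `mat` with the
# # delta_1..delta_3, N, A_hat, K_eq_til and mu_1..mu_3 attributes used
# # above: for a propagating trace (ky smaller than each delta_i) the
# # returned normal wavenumbers satisfy kx_i**2 + ky**2 == delta_i**2.
# # SV, kx = PEM_SV(mat, ky)
# # deltas = np.array([mat.delta_1, mat.delta_2, mat.delta_3])
# # assert np.allclose(kx**2 + ky**2, deltas**2)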
# def elastic_SV(mat,ky, omega):
# r''' S={0: \sigma_{xy}, 1: u_y, 2: \sigma_{yy}, 3: u_x}'''
# P_mat = mat.lambda_ + 2.*mat.mu
# delta_p = omega*np.sqrt(mat.rho/P_mat)
# delta_s = omega*np.sqrt(mat.rho/mat.mu)
# kx_p = np.sqrt(delta_p**2-ky**2)
# kx_s = np.sqrt(delta_s**2-ky**2)
# kx = np.array([kx_p, kx_s])
# alpha_p = -1j*mat.lambda_*delta_p**2 - 2j*mat.mu*kx[0]**2
# alpha_s = 2j*mat.mu*kx[1]*ky
# SV = np.zeros((4, 4), dtype=complex)
# SV[0:4, 0] = np.array([-2.*1j*mat.mu*kx[0]*ky, kx[0], alpha_p, ky])
# SV[0:4, 2] = np.array([ 2.*1j*mat.mu*kx[0]*ky, -kx[0], alpha_p, ky])
# SV[0:4, 1] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky,-alpha_s, -kx[1]])
# SV[0:4, 3] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky, alpha_s, kx[1]])
# return SV, kx
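# # Hedged usage sketch for elastic_SV; the steel-like parameters are
# # illustrative stand-ins, not values from the original module.
# # from types import SimpleNamespace
# # steel = SimpleNamespace(lambda_=1.15e11, mu=7.9e10, rho=7850.)
# # SV, kx = elastic_SV(steel, ky=1.0, omega=2*np.pi*1000.)
# # assert SV.shape == (4, 4) and kx.shape == (2,)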
# def fluid_SV(kx, k, K):
# ''' S={0: u_y, 1: p}'''
# ky = np.sqrt(k**2-kx**2)
# SV = np.zeros((2, 2), dtype=complex)
# SV[0, 0:2] = np.array([ky/(1j*K*k**2), -ky/(1j*K*k**2)])
# SV[1, 0:2] = np.array([1, 1])
# return SV, ky
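# # Note: ky = np.sqrt(k**2 - kx**2) yields nan for real arguments once
# # kx > k; to handle evanescent traces the operands would need to be
# # complex first, e.g. ky = np.sqrt(complex(k)**2 - kx**2).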
# def resolution_PW_imposed_displacement(S, p):
# # print("k={}".format(p.k))
# Layers = S.layers.copy()
# n, interfaces, dofs = initialise_PW_solver(Layers, S.backing)
# M = np.zeros((n, n), dtype=complex)
# i_eq = 0
# # Loop on the layers
# for i_inter, _inter in enumerate(interfaces):
# if _inter[0] == "fluid":
# if _inter[1] == "fluid":
# i_eq = interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M, p)
# if _inter[1] == "pem":
# i_eq = interface_fluid_pem(i_eq, i_inter, Layers, dofs, M, p)
# elif _inter[0] == "pem":
# if _inter[1] == "fluid":
# i_eq = interface_pem_fluid(i_eq, i_inter, Layers, dofs, M, p)
# if _inter[1] == "pem":
# i_eq = interface_pem_pem(i_eq, i_inter, Layers, dofs, M, p)
# if S.backing == backing.rigid:
# if Layers[-1].medium.MODEL == "fluid":
# i_eq = interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1], p)
# elif Layers[-1].medium.MODEL == "pem":
# i_eq = interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1], p)
# if Layers[0].medium.MODEL == "fluid":
# F = np.zeros(n, dtype=complex)
# SV, k_y = fluid_SV(p.kx, p.k, Layers[0].medium.K)
# M[i_eq, dofs[0][0]] = SV[0, 0]
# M[i_eq, dofs[0][1]] = SV[0, 1]*np.exp(-1j*k_y*Layers[0].thickness)
# F[i_eq] = 1.
# elif Layers[0].medium.MODEL == "pem":
# SV, k_y = PEM_SV(Layers[0].medium, p.kx)
# M[i_eq, dofs[0][0]] = SV[2, 0]
# M[i_eq, dofs[0][1]] = SV[2, 1]
# M[i_eq, dofs[0][2]] = SV[2, 2]
# M[i_eq, dofs[0][3]] = SV[2, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[2, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[2, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# F = np.zeros(n, dtype=complex)
# F[i_eq] = 1.
# i_eq +=1
# M[i_eq, dofs[0][0]] = SV[0, 0]
# M[i_eq, dofs[0][1]] = SV[0, 1]
# M[i_eq, dofs[0][2]] = SV[0, 2]
# M[i_eq, dofs[0][3]] = SV[0, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[0, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[0, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# i_eq += 1
# M[i_eq, dofs[0][0]] = SV[3, 0]
# M[i_eq, dofs[0][1]] = SV[3, 1]
# M[i_eq, dofs[0][2]] = SV[3, 2]
# M[i_eq, dofs[0][3]] = SV[3, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[3, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[3, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# X = LA.solve(M, F)
# # print("|R pyPLANES_PW| = {}".format(np.abs(X[0])))
# print("R pyPLANES_PW = {}".format(X[0]))
# plot_sol_PW(S, X, dofs, p)
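# # Hedged driver sketch for resolution_PW_imposed_displacement. `Solver`,
# # `Layer`, `Param`, `air` and `foam` are hypothetical placeholders; the
# # attribute names (S.layers, S.backing, p.kx, p.k) and the
# # initialise_PW_solver / interface_* helpers are assumed to be defined
# # elsewhere in the original module.
# # S = Solver(layers=[Layer(air, 5e-2), Layer(foam, 2e-2)], backing=backing.rigid)
# # p = Param(k=2*np.pi*1000./340., kx=0.)
# # resolution_PW_imposed_displacement(S, p)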
| 48.911726
| 132
| 0.502721
| 7,051
| 37,124
| 2.48759
| 0.0373
| 0.056784
| 0.06984
| 0.135462
| 0.792588
| 0.769384
| 0.747434
| 0.726853
| 0.690935
| 0.674629
| 0
| 0.079647
| 0.268128
| 37,124
| 758
| 133
| 48.976253
| 0.565918
| 0.891607
| 0
| 0.040816
| 0
| 0
| 0.025411
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.204082
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2960f549fc004cf3590c25e915c7395ebd3b5e4d
| 79
|
py
|
Python
|
Geometry/VeryForwardGeometry/python/dd4hep/geometryRPFromDD_2021_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 2
|
2020-10-26T18:40:32.000Z
|
2021-04-10T16:33:25.000Z
|
Geometry/VeryForwardGeometry/python/dd4hep/geometryRPFromDD_2021_cfi.py
|
gartung/cmssw
|
3072dde3ce94dcd1791d778988198a44cde02162
|
[
"Apache-2.0"
] | 25
|
2016-06-24T20:55:32.000Z
|
2022-02-01T19:24:45.000Z
|
Geometry/VeryForwardGeometry/python/dd4hep/geometryRPFromDD_2021_cfi.py
|
gartung/cmssw
|
3072dde3ce94dcd1791d778988198a44cde02162
|
[
"Apache-2.0"
] | 8
|
2016-03-25T07:17:43.000Z
|
2021-07-08T17:11:21.000Z
|
from Geometry.VeryForwardGeometry.dd4hep.v5.geometryRPFromDD_2021_cfi import *
| 39.5
| 78
| 0.886076
| 9
| 79
| 7.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.050633
| 79
| 1
| 79
| 79
| 0.826667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
464ea27cbe788bd3f30824ac8262b6f8546e28e0
| 40
|
py
|
Python
|
scraper.py
|
souravkaranjai/python-webscraper
|
b4a76846d80e724059eb7cb9abcd5ec13125258a
|
[
"MIT"
] | null | null | null |
scraper.py
|
souravkaranjai/python-webscraper
|
b4a76846d80e724059eb7cb9abcd5ec13125258a
|
[
"MIT"
] | null | null | null |
scraper.py
|
souravkaranjai/python-webscraper
|
b4a76846d80e724059eb7cb9abcd5ec13125258a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
print('Hello world')
| 13.333333
| 20
| 0.7
| 6
| 40
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.075
| 40
| 3
| 20
| 13.333333
| 0.72973
| 0.425
| 0
| 0
| 0
| 0
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
46674d12a75c726caab7f069ff51c1295884c1f4
| 67
|
py
|
Python
|
backend/views/__init__.py
|
chriamue/flask-unchained-react-spa
|
610e099f3ece508f4c8a62d3704e4cc49f869194
|
[
"MIT"
] | 5
|
2018-10-15T15:33:32.000Z
|
2021-01-13T23:03:48.000Z
|
backend/views/__init__.py
|
chriamue/flask-unchained-react-spa
|
610e099f3ece508f4c8a62d3704e4cc49f869194
|
[
"MIT"
] | 18
|
2019-12-10T22:11:27.000Z
|
2021-12-13T20:42:58.000Z
|
backend/views/__init__.py
|
chriamue/flask-unchained-react-spa
|
610e099f3ece508f4c8a62d3704e4cc49f869194
|
[
"MIT"
] | 4
|
2018-10-15T15:59:25.000Z
|
2020-04-11T17:48:35.000Z
|
from .contact_submission_resource import ContactSubmissionResource
| 33.5
| 66
| 0.925373
| 6
| 67
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 67
| 1
| 67
| 67
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
46a0c78276633a2a5a223df91b47b5f7924ae094
| 66
|
py
|
Python
|
packaging/pack1/andrew_mod1.py
|
AndreiHondrari/python_exploration
|
cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d
|
[
"MIT"
] | 3
|
2019-05-04T12:19:09.000Z
|
2019-08-30T07:12:31.000Z
|
packaging/pack1/build/lib/mymod1.py
|
AndreiHondrari/python_exploration
|
cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d
|
[
"MIT"
] | null | null | null |
packaging/pack1/build/lib/mymod1.py
|
AndreiHondrari/python_exploration
|
cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d
|
[
"MIT"
] | null | null | null |
def something() -> None:
print("Andrew says: `something`.")
| 13.2
| 38
| 0.606061
| 7
| 66
| 5.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19697
| 66
| 4
| 39
| 16.5
| 0.754717
| 0
| 0
| 0
| 0
| 0
| 0.390625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
46a1c447600050372f1c46ddc6ed6f7e8c87b183
| 117
|
py
|
Python
|
app/api/v2/views/blacklist.py
|
MaggieChege/STORE-MANAGER-API-V2
|
d8b2c7312304df627369721e8e1821cf724431d7
|
[
"MIT"
] | null | null | null |
app/api/v2/views/blacklist.py
|
MaggieChege/STORE-MANAGER-API-V2
|
d8b2c7312304df627369721e8e1821cf724431d7
|
[
"MIT"
] | null | null | null |
app/api/v2/views/blacklist.py
|
MaggieChege/STORE-MANAGER-API-V2
|
d8b2c7312304df627369721e8e1821cf724431d7
|
[
"MIT"
] | null | null | null |
blacklist=set()
def get_blacklist():
return blacklist
def add_to_blacklist(jti):
return blacklist.add(jti)
| 14.625
| 29
| 0.735043
| 16
| 117
| 5.1875
| 0.5
| 0.361446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162393
| 117
| 7
| 30
| 16.714286
| 0.846939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.4
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
46b3fea476ee5e207c6461dc2f22693adf1376cd
| 94
|
py
|
Python
|
python/tako/client/__init__.py
|
vyomkeshj/tako
|
d0906df5cdc0023ee955ad34d9eb4696b5ecec5e
|
[
"MIT"
] | null | null | null |
python/tako/client/__init__.py
|
vyomkeshj/tako
|
d0906df5cdc0023ee955ad34d9eb4696b5ecec5e
|
[
"MIT"
] | null | null | null |
python/tako/client/__init__.py
|
vyomkeshj/tako
|
d0906df5cdc0023ee955ad34d9eb4696b5ecec5e
|
[
"MIT"
] | null | null | null |
from .exception import TakoException, TaskFailed # noqa
from .session import connect # noqa
| 31.333333
| 56
| 0.787234
| 11
| 94
| 6.727273
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159574
| 94
| 2
| 57
| 47
| 0.936709
| 0.095745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d3c36036476de94ac751c017398b3c5474c873f2
| 51
|
py
|
Python
|
io_almacen/channel/__init__.py
|
xyla-io/io_almacen
|
76725391b496fe3f778d013fc680ae80637eb74b
|
[
"MIT"
] | null | null | null |
io_almacen/channel/__init__.py
|
xyla-io/io_almacen
|
76725391b496fe3f778d013fc680ae80637eb74b
|
[
"MIT"
] | null | null | null |
io_almacen/channel/__init__.py
|
xyla-io/io_almacen
|
76725391b496fe3f778d013fc680ae80637eb74b
|
[
"MIT"
] | null | null | null |
from .channel_io import Channel, channel_entity_url
| 51
| 51
| 0.882353
| 8
| 51
| 5.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 51
| 1
| 51
| 51
| 0.893617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d3c9a9f08cb2ab991b3fa5be8156332e24b37380
| 52
|
py
|
Python
|
config/paths.py
|
fusic-com/flask-todo
|
909ce22132ed081feca02e2fb255afa08b59611d
|
[
"MIT"
] | 34
|
2015-01-08T07:11:54.000Z
|
2021-08-28T23:55:25.000Z
|
config/paths.py
|
spacecode-live/flask-todo
|
909ce22132ed081feca02e2fb255afa08b59611d
|
[
"MIT"
] | null | null | null |
config/paths.py
|
spacecode-live/flask-todo
|
909ce22132ed081feca02e2fb255afa08b59611d
|
[
"MIT"
] | 13
|
2015-02-10T09:48:53.000Z
|
2021-03-02T15:23:21.000Z
|
from settings import VAR_DIR
CACHE=VAR_DIR/'cache'
| 13
| 28
| 0.807692
| 9
| 52
| 4.444444
| 0.666667
| 0.3
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 52
| 3
| 29
| 17.333333
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d3ce35364812f96b726436b7cd0cab140d019f97
| 956
|
py
|
Python
|
e2e_test.py
|
bartossh/hebbian_mirror
|
2d080ae7a707845e0922894e5cee2ad7b0119e8f
|
[
"MIT"
] | 2
|
2019-11-15T09:10:19.000Z
|
2019-12-26T15:05:16.000Z
|
e2e_test.py
|
bartOssh/hebbian_mirror
|
2d080ae7a707845e0922894e5cee2ad7b0119e8f
|
[
"MIT"
] | 1
|
2019-11-07T11:06:09.000Z
|
2019-11-07T11:06:09.000Z
|
e2e_test.py
|
bartOssh/hebbian_mirror
|
2d080ae7a707845e0922894e5cee2ad7b0119e8f
|
[
"MIT"
] | null | null | null |
import requests
num_of_iter = 2
with open('./assets/test.jpg', 'rb') as f:
    data = f.read()
for i in range(0, num_of_iter):
res = requests.get(
url='http://0.0.0.0:8000/recognition/object/boxes_names'
)
print("\n RESPONSE GET boxes names for test number {}: \n {}"
.format(i, res.__dict__))
res = requests.post(url='http://0.0.0.0:8000/recognition/object/boxes',
data=data,
headers={'Content-Type': 'application/octet-stream'})
print("\n RESPONSE POST to boxes, test num {} \n Sending buffer length: {},\n Received {}"
.format(i, len(data), res.__dict__))
res = requests.post(url='http://0.0.0.0:8000/recognition/object/image',
data=data,
headers={'Content-Type': 'application/octet-stream'})
print("\n RESPONSE POST to image, test num {} \n Sending buffer length: {},\n Received {}"
.format(i, len(data), res))
| 43.454545
| 94
| 0.58159
| 130
| 956
| 4.176923
| 0.361538
| 0.033149
| 0.033149
| 0.049724
| 0.71639
| 0.71639
| 0.71639
| 0.71639
| 0.71639
| 0.71639
| 0
| 0.036212
| 0.248954
| 956
| 21
| 95
| 45.52381
| 0.720056
| 0
| 0
| 0.210526
| 0
| 0
| 0.466527
| 0.050209
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0.157895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
31118c5b5246a2ec094961b6d1e7c75e1bcdc0c9
| 279
|
py
|
Python
|
KaratAPP/models.py
|
MHuiG/Karat-Django-Backend
|
8887417bb3eee302a1639e247957539479d2ef67
|
[
"MIT"
] | null | null | null |
KaratAPP/models.py
|
MHuiG/Karat-Django-Backend
|
8887417bb3eee302a1639e247957539479d2ef67
|
[
"MIT"
] | null | null | null |
KaratAPP/models.py
|
MHuiG/Karat-Django-Backend
|
8887417bb3eee302a1639e247957539479d2ef67
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
##########################################################################
# Vote
class Vote(models.Model):
data=models.CharField(max_length=255)
##########################################################################
| 31
| 74
| 0.351254
| 20
| 279
| 4.85
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011583
| 0.071685
| 279
| 9
| 75
| 31
| 0.362934
| 0.09319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
312cb34d34abecdfef42214150394d17f2b7b90e
| 118
|
py
|
Python
|
Practica 1 E4.py
|
pardo13/python
|
3d15c9a0414a240588da4d24184f63370b736d55
|
[
"MIT"
] | null | null | null |
Practica 1 E4.py
|
pardo13/python
|
3d15c9a0414a240588da4d24184f63370b736d55
|
[
"MIT"
] | null | null | null |
Practica 1 E4.py
|
pardo13/python
|
3d15c9a0414a240588da4d24184f63370b736d55
|
[
"MIT"
] | null | null | null |
A=int(input("dame int"))
B=int(input("dame int"))
if(A>B):
print("A es mayor")
else:
print("B es mayor")
| 14.75
| 24
| 0.559322
| 22
| 118
| 3
| 0.454545
| 0.242424
| 0.363636
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211864
| 118
| 7
| 25
| 16.857143
| 0.709677
| 0
| 0
| 0
| 0
| 0
| 0.305085
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
313105ee1f0beaa4963e8ca27411e52ee4288019
| 130
|
py
|
Python
|
app/dists/admin.py
|
ariashahverdi/Backend
|
ea8976f1eec4e75eba895f467d157f0f1345b2b7
|
[
"MIT"
] | null | null | null |
app/dists/admin.py
|
ariashahverdi/Backend
|
ea8976f1eec4e75eba895f467d157f0f1345b2b7
|
[
"MIT"
] | null | null | null |
app/dists/admin.py
|
ariashahverdi/Backend
|
ea8976f1eec4e75eba895f467d157f0f1345b2b7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Distribution
admin.site.register(Distribution)
# Register your models here.
| 21.666667
| 33
| 0.823077
| 17
| 130
| 6.294118
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 130
| 5
| 34
| 26
| 0.930435
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
313fe3ae0a54054320169a34676d7ed8d2ac4692
| 203
|
py
|
Python
|
workoutlog/workout/admin.py
|
michaelrodgers/itc172_final
|
b71f25a5cbffab00b06c60c8816f339d169d9dc1
|
[
"Apache-2.0"
] | null | null | null |
workoutlog/workout/admin.py
|
michaelrodgers/itc172_final
|
b71f25a5cbffab00b06c60c8816f339d169d9dc1
|
[
"Apache-2.0"
] | null | null | null |
workoutlog/workout/admin.py
|
michaelrodgers/itc172_final
|
b71f25a5cbffab00b06c60c8816f339d169d9dc1
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Target, Exercise, Workout
# Register your models here.
admin.site.register(Target)
admin.site.register(Exercise)
admin.site.register(Workout)
| 25.375
| 46
| 0.783251
| 27
| 203
| 5.888889
| 0.481481
| 0.169811
| 0.320755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128079
| 203
| 7
| 47
| 29
| 0.898305
| 0.128079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
31a27b0c36981ab92aff36160266dec12ad84cdb
| 5,238
|
py
|
Python
|
test/test_dot.py
|
croqaz/dot
|
b57f3c68dfa1ac5a7afb9f83af6035c34e342c83
|
[
"MIT"
] | null | null | null |
test/test_dot.py
|
croqaz/dot
|
b57f3c68dfa1ac5a7afb9f83af6035c34e342c83
|
[
"MIT"
] | null | null | null |
test/test_dot.py
|
croqaz/dot
|
b57f3c68dfa1ac5a7afb9f83af6035c34e342c83
|
[
"MIT"
] | null | null | null |
import pytest
from prop import strict_get
from prop import get as dot_get
class A:
def __init__(self, val):
self.val = val
def test_dot_get_list():
assert dot_get(['asd'], '0') == dot_get(['asd'], ['0']) == 'asd'
data = {'nested': [0, False, 'foo']}
assert dot_get(data, 'nested.0') == 0
assert dot_get(data, 'nested.1') is False
assert dot_get(data, 'nested.2') == 'foo'
assert dot_get(data, ['nested', '0']) == 0
assert dot_get(data, ['nested', '1']) is False
assert dot_get(data, ['nested', b'1']) is False
assert dot_get(data, ('nested', '2')) == 'foo'
assert dot_get(data, ('nested', b'2')) == 'foo'
assert dot_get(data, ['nested', 1]) is False
assert dot_get(data, ('nested', 2)) == 'foo'
# inexistent
assert dot_get(data, 'nested.9') is None
assert dot_get(data, 'nested.9', 'default') == 'default'
assert dot_get(data, ('nested', 9)) is None
assert dot_get(data, ['nested', '9']) is None
assert dot_get(data, ['nested', b'9']) is None
assert dot_get(data, ['nested', 9], 'default') == 'default'
assert dot_get(data, ('nested', '9'), 'default') == 'default'
assert dot_get(data, ('nested', b'9'), 'default') == 'default'
def test_dot_get_dict():
data = {'a': 'a', 'nested': {'x': 'y', 'int': 0, 'null': None}}
assert dot_get(data, 'a') == 'a'
assert dot_get(data, 'nested.x') == 'y'
assert dot_get(data, 'nested.int') == 0
assert dot_get(data, 'nested.null') is None
assert dot_get(data, ('nested', 'x')) == 'y'
assert dot_get(data, ['nested', 'int']) == 0
assert dot_get(data, ['nested', 'null']) is None
# inexistent
assert dot_get(data, 'nope') is None
assert dot_get(data, 'nested.9') is None
assert dot_get(data, 'nope', 'default') == 'default'
assert dot_get(data, ['nope']) is None
assert dot_get(data, ['nope'], 'default') == 'default'
assert dot_get(data, ('nested', 9)) is None
def test_str_dot_get_obj():
a = A(1)
assert dot_get(a, 'val') == 1
assert dot_get(a, 'nope') is None
assert dot_get(a, 'nope', 'default') == 'default'
a = A([0, False, 'foo'])
assert dot_get(a, 'val.0') == 0
assert dot_get(a, 'val.1') is False
assert dot_get(a, 'val.2') == 'foo'
assert dot_get(a, 'nope') is None
assert dot_get(a, 'nope', 'default') == 'default'
def test_dot_get_mixed():
data = {
'nested': {
1: '1',
'x': 'y',
None: 'null',
},
'list': [[[None, True, 9]]],
b'byte': b'this',
}
assert dot_get(data, 'list.0.0.1') is True
assert dot_get(data, 'list.0.0.2') == 9
assert dot_get(data, ('list', 0, 0, 1)) is True
assert dot_get(data, ['list', 0, 0, 2]) == 9
# String paths can only access string keys, so this won't work:
# assert dot_get(data, 'nested.1') == '1'
# assert dot_get(data, 'nested.None') == 'null'
# But this works:
assert dot_get(data, [b'byte']) == b'this'
assert dot_get(data, ['nested', 1]) == '1'
assert dot_get(data, ['nested', None]) == 'null'
a = A(data)
assert dot_get(a, 'val.nested.x') == 'y'
assert dot_get(a, 'val.list.0.0.1') is True
assert dot_get(a, ['val', 'list', 0, 0, 1]) is True
assert dot_get(a, ('val', 'list', 0, 0, 2)) == 9
def test_circular_refs():
c = A(1)
b = A(c)
a = A(b)
assert dot_get(c, 'val') == 1
assert dot_get(b, 'val') is c
assert dot_get(a, 'val') is b
assert dot_get(a, 'val.val.val') == 1
assert dot_get(a, ['val', 'val', 'val']) == 1
# Create cyclic ref
c.val = a
assert dot_get(c, 'val') == a
assert dot_get(c, 'val.val.val.val') == a
assert dot_get(c, ['val', 'val', 'val', 'val']) == a
def test_str_dot_strict_get():
data = {
'1': 1,
'a': A(7),
'nested': {
'x': 'y',
'int': 0,
'null': None,
},
'list': [[[None, True, 9]]],
}
assert strict_get(data, '1') == 1
assert strict_get(data, 'a.val') == 7
assert strict_get(data, 'nested.x') == 'y'
assert strict_get(data, 'nested.int') == 0
assert strict_get(data, 'nested.null') is None
assert strict_get(data, 'list.0.0.1') is True
assert strict_get(data, 'list.0.0.-1') == 9
with pytest.raises(KeyError):
assert strict_get(data, 'nope') is None
with pytest.raises(IndexError):
assert strict_get(data, 'list.9') is None
def test_str_dot_set_mix():
data = {
'a': 'a',
'nested': {
'x': 'x',
'int': 0,
'list': ['y', 'n'],
},
}
assert strict_get(data, 'nested.x') == 'x'
assert strict_get(data, 'nested.list.0') == 'y'
nested = dot_get(data, 'nested')
nested['x'] = 'yyy'
li = strict_get(data, 'nested.list')
li.insert(0, 'z')
assert strict_get(data, 'nested.x') == 'yyy'
assert strict_get(data, 'nested.list.0') == 'z'
def test_crappy_path():
with pytest.raises(TypeError):
assert dot_get(['asd'], True)
with pytest.raises(TypeError):
assert dot_get(['asd'], None)
with pytest.raises(TypeError):
assert dot_get(['asd'], 0)
| 27.714286
| 68
| 0.550592
| 794
| 5,238
| 3.492443
| 0.095718
| 0.153624
| 0.276956
| 0.230797
| 0.807068
| 0.715471
| 0.629282
| 0.570501
| 0.485395
| 0.476019
| 0
| 0.023457
| 0.251241
| 5,238
| 188
| 69
| 27.861702
| 0.68358
| 0.038755
| 0
| 0.171642
| 0
| 0
| 0.155361
| 0
| 0
| 0
| 0
| 0
| 0.559701
| 1
| 0.067164
| false
| 0
| 0.022388
| 0
| 0.097015
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
31b23312e6643e95278a2225ec84f190096c74fe
| 69
|
py
|
Python
|
src/python_import/C/cc.py
|
matiastang/matias-python
|
b7785217e5d386c01198305751ecd562259ea2b7
|
[
"MIT"
] | null | null | null |
src/python_import/C/cc.py
|
matiastang/matias-python
|
b7785217e5d386c01198305751ecd562259ea2b7
|
[
"MIT"
] | null | null | null |
src/python_import/C/cc.py
|
matiastang/matias-python
|
b7785217e5d386c01198305751ecd562259ea2b7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
#coding=utf-8
def cc_debug():
print(__name__)
| 13.8
| 19
| 0.681159
| 11
| 69
| 3.818182
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.130435
| 69
| 5
| 19
| 13.8
| 0.666667
| 0.42029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
31dd6e6741a804d90f5239811383ca0cdca9f19d
| 12,218
|
py
|
Python
|
tensornetwork/backends/backend_test.py
|
ashoknar/TensorNetwork
|
82636b75a0c53b5447c84d9a4e85226fe0e6f43a
|
[
"Apache-2.0"
] | null | null | null |
tensornetwork/backends/backend_test.py
|
ashoknar/TensorNetwork
|
82636b75a0c53b5447c84d9a4e85226fe0e6f43a
|
[
"Apache-2.0"
] | null | null | null |
tensornetwork/backends/backend_test.py
|
ashoknar/TensorNetwork
|
82636b75a0c53b5447c84d9a4e85226fe0e6f43a
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for graphmode_tensornetwork."""
import builtins
import sys
import pytest
import numpy as np
from tensornetwork import connect, contract, Node
from tensornetwork.backends.base_backend import BaseBackend
from tensornetwork.backends import backend_factory
def clean_tensornetwork_modules():
for mod in list(sys.modules.keys()):
if mod.startswith('tensornetwork'):
sys.modules.pop(mod, None)
@pytest.fixture(autouse=True)
def clean_backend_import():
#never do this outside testing
clean_tensornetwork_modules()
yield # use as teardown
clean_tensornetwork_modules()
@pytest.fixture
def no_backend_dependency(monkeypatch):
import_orig = builtins.__import__
# pylint: disable=redefined-builtin
def mocked_import(name, globals, locals, fromlist, level):
if name in ['torch', 'tensorflow', 'jax']:
raise ImportError()
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import)
# Nuke the cache.
backend_factory._INSTANTIATED_BACKENDS = dict()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_pytorch_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.pytorch.pytorch_backend import PyTorchBackend
PyTorchBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_tensorflow_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.tensorflow.tensorflow_backend \
import TensorFlowBackend
TensorFlowBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_jax_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.jax.jax_backend import JaxBackend
JaxBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_config_backend_missing_can_import_config():
#not sure why config is imported here?
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensornetwork.config
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_import_tensornetwork_without_backends():
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
#pylint: disable=reimported
import tensornetwork
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.pytorch.pytorch_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.tensorflow.tensorflow_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.jax.jax_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.numpy.numpy_backend
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_numpy_network_without_backends():
#pylint: disable=import-outside-toplevel
#pylint: disable=reimported
#pylint: disable=unused-variable
import tensornetwork
a = Node(np.ones((10,)), backend="numpy")
b = Node(np.ones((10,)), backend="numpy")
edge = connect(a[0], b[0])
final_node = contract(edge)
assert final_node.tensor == np.array(10.)
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_network_without_backends_raises_error():
#pylint: disable=import-outside-toplevel
#pylint: disable=reimported
#pylint: disable=unused-variable
import tensornetwork
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="jax")
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="tensorflow")
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="pytorch")
def test_base_backend_name():
backend = BaseBackend()
assert backend.name == "base backend"
def test_base_backend_tensordot_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.tensordot(np.ones((2, 2)), np.ones((2, 2)), axes=[[0], [0]])
def test_base_backend_reshape_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.reshape(np.ones((2, 2)), (4, 1))
def test_base_backend_transpose_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.transpose(np.ones((2, 2)), [0, 1])
def test_base_backend_slice_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.slice(np.ones((2, 2)), (0, 1), (1, 1))
def test_base_backend_svd_decomposition_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.svd_decomposition(np.ones((2, 2)), 0)
def test_base_backend_qr_decomposition_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.qr_decomposition(np.ones((2, 2)), 0)
def test_base_backend_rq_decomposition_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.rq_decomposition(np.ones((2, 2)), 0)
def test_base_backend_shape_concat_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_concat([np.ones((2, 2)), np.ones((2, 2))], 0)
def test_base_backend_shape_tensor_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_tensor(np.ones((2, 2)))
def test_base_backend_shape_tuple_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_tuple(np.ones((2, 2)))
def test_base_backend_shape_prod_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_prod(np.ones((2, 2)))
def test_base_backend_sqrt_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sqrt(np.ones((2, 2)))
def test_base_backend_diag_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.diag(np.ones((2, 2)))
def test_base_backend_convert_to_tensor_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.convert_to_tensor(np.ones((2, 2)))
def test_base_backend_trace_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.trace(np.ones((2, 2)))
def test_base_backend_outer_product_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.outer_product(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_einsum_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.einsum("ii", np.ones((2, 2)))
def test_base_backend_norm_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.norm(np.ones((2, 2)))
def test_base_backend_eye_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eye(2, dtype=np.float64)
def test_base_backend_ones_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.ones((2, 2), dtype=np.float64)
def test_base_backend_zeros_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.zeros((2, 2), dtype=np.float64)
def test_base_backend_randn_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.randn((2, 2))
def test_base_backend_random_uniform_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.random_uniform((2, 2))
def test_base_backend_conj_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.conj(np.ones((2, 2)))
def test_base_backend_eigh_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigh(np.ones((2, 2)))
def test_base_backend_eigs_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigs(np.ones((2, 2)))
def test_base_backend_eigsh_lanczos_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigsh_lanczos(lambda x: x, np.ones((2)))
def test_base_backend_addition_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.addition(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_subtraction_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.subtraction(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_multiply_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.multiply(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_divide_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.divide(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_index_update_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.index_update(np.ones((2, 2)), np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_inv_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.inv(np.ones((2, 2)))
def test_base_backend_sin_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sin(np.ones((2, 2)))
def test_base_backend_cos_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.cos(np.ones((2, 2)))
def test_base_backend_exp_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.exp(np.ones((2, 2)))
def test_base_backend_log_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.log(np.ones((2, 2)))
def test_base_backend_expm_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.expm(np.ones((2, 2)))
def test_base_backend_sparse_shape_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sparse_shape(np.ones((2, 2)))
def test_base_backend_broadcast_right_multiplication_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.broadcast_right_multiplication(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_broadcast_left_multiplication_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.broadcast_left_multiplication(np.ones((2, 2)), np.ones((2, 2)))
def test_backend_instantiation(backend):
backend1 = backend_factory.get_backend(backend)
backend2 = backend_factory.get_backend(backend)
assert backend1 is backend2
| 29.8
| 77
| 0.761581
| 1,511
| 12,218
| 5.933157
| 0.114494
| 0.062465
| 0.099944
| 0.043726
| 0.804127
| 0.768767
| 0.75382
| 0.75382
| 0.696821
| 0.359621
| 0
| 0.013056
| 0.12236
| 12,218
| 409
| 78
| 29.872861
| 0.822997
| 0.126698
| 0
| 0.463035
| 0
| 0
| 0.021844
| 0.013841
| 0
| 0
| 0
| 0
| 0.011673
| 1
| 0.210117
| false
| 0
| 0.194553
| 0
| 0.40856
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9ecff0d2def72853bb2077007cb31a53e1e71834
| 231
|
py
|
Python
|
recipe/app.py
|
Udayan-Coding/examples
|
720515bf614f4edd08c734cc5a708d8a2618522d
|
[
"MIT"
] | 1
|
2021-01-04T17:17:14.000Z
|
2021-01-04T17:17:14.000Z
|
recipe/app.py
|
Udayan-Coding/examples
|
720515bf614f4edd08c734cc5a708d8a2618522d
|
[
"MIT"
] | null | null | null |
recipe/app.py
|
Udayan-Coding/examples
|
720515bf614f4edd08c734cc5a708d8a2618522d
|
[
"MIT"
] | 1
|
2021-01-31T11:10:44.000Z
|
2021-01-31T11:10:44.000Z
|
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def hello():
return render_template("index.html", name="WORLD!")
@app.route("/about")
def about():
return render_template("about.html")
| 19.25
| 53
| 0.709957
| 31
| 231
| 5.064516
| 0.516129
| 0.267516
| 0.254777
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116883
| 231
| 11
| 54
| 21
| 0.769608
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0.25
| 0.625
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b47f33a5bfd7dd5f1e09089984f041a42647c888
| 177
|
py
|
Python
|
atendimento/admin.py
|
alantinoco/django-crmsmart
|
f8bd3404e0dfdf4a2976ec8bbdaee27a012f9981
|
[
"MIT"
] | null | null | null |
atendimento/admin.py
|
alantinoco/django-crmsmart
|
f8bd3404e0dfdf4a2976ec8bbdaee27a012f9981
|
[
"MIT"
] | null | null | null |
atendimento/admin.py
|
alantinoco/django-crmsmart
|
f8bd3404e0dfdf4a2976ec8bbdaee27a012f9981
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Contato, Venda, FormaPagamento
admin.site.register(Contato)
admin.site.register(Venda)
admin.site.register(FormaPagamento)
| 25.285714
| 50
| 0.830508
| 23
| 177
| 6.391304
| 0.478261
| 0.183673
| 0.346939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079096
| 177
| 6
| 51
| 29.5
| 0.90184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b48c5e302c25178ab826b1d7d13350ce7b179b8d
| 184
|
py
|
Python
|
dvc/dependency/ssh.py
|
yfarjoun/dvc
|
eaca7dc80c765dd3a8dbe4c8fb3b206656bbc5e2
|
[
"Apache-2.0"
] | 2
|
2021-09-22T15:31:46.000Z
|
2021-11-17T10:40:07.000Z
|
dvc/dependency/ssh.py
|
yfarjoun/dvc
|
eaca7dc80c765dd3a8dbe4c8fb3b206656bbc5e2
|
[
"Apache-2.0"
] | null | null | null |
dvc/dependency/ssh.py
|
yfarjoun/dvc
|
eaca7dc80c765dd3a8dbe4c8fb3b206656bbc5e2
|
[
"Apache-2.0"
] | 1
|
2019-09-02T00:29:40.000Z
|
2019-09-02T00:29:40.000Z
|
from __future__ import unicode_literals
from dvc.output.ssh import OutputSSH
from dvc.dependency.base import DependencyBase
class DependencySSH(DependencyBase, OutputSSH):
pass
| 20.444444
| 47
| 0.831522
| 22
| 184
| 6.727273
| 0.681818
| 0.094595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 184
| 8
| 48
| 23
| 0.919255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
c30749f6e672c3d0997217dae6e0ef97c37975d8
| 631
|
py
|
Python
|
scripts/tests/snapshots/snap_keywords_test.py
|
Duroktar/Wolf
|
c192d5c27eb2098e440f7726eb1bff40ed004db5
|
[
"Apache-2.0"
] | 105
|
2018-02-07T22:07:47.000Z
|
2022-03-31T18:16:47.000Z
|
scripts/tests/snapshots/snap_keywords_test.py
|
Duroktar/Wolf
|
c192d5c27eb2098e440f7726eb1bff40ed004db5
|
[
"Apache-2.0"
] | 57
|
2018-02-07T23:07:41.000Z
|
2021-11-21T17:14:06.000Z
|
scripts/tests/snapshots/snap_keywords_test.py
|
Duroktar/Wolf
|
c192d5c27eb2098e440f7726eb1bff40ed004db5
|
[
"Apache-2.0"
] | 10
|
2018-02-24T23:44:51.000Z
|
2022-03-02T07:52:27.000Z
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_keywords 1'] = '[{"lineno": 7, "source": [" a\\n"], "value": "1"}, {"lineno": 7, "source": [" a\\n"], "value": "2"}, {"lineno": 7, "source": [" a\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "0"}, {"lineno": 13, "source": [" i\\n"], "value": "1"}, {"lineno": 13, "source": [" i\\n"], "value": "2"}, {"lineno": 13, "source": [" i\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "4"}]'
| 57.363636
| 462
| 0.505547
| 80
| 631
| 3.9125
| 0.3875
| 0.153355
| 0.223642
| 0.239617
| 0.539936
| 0.539936
| 0.313099
| 0.178914
| 0.178914
| 0
| 0
| 0.047801
| 0.171157
| 631
| 10
| 463
| 63.1
| 0.550669
| 0.098257
| 0
| 0
| 0
| 0.25
| 0.784452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c309cc940b59cd3830a59d4a46d48907f9c3e32d
| 515
|
py
|
Python
|
go_server_app/views.py
|
benjaminaaron/simple-go-server
|
0ebe6756f72f896fd014d060252c27c2907e7ae8
|
[
"MIT"
] | 1
|
2017-11-29T22:39:05.000Z
|
2017-11-29T22:39:05.000Z
|
go_server_app/views.py
|
benjaminaaron/simple-go-server
|
0ebe6756f72f896fd014d060252c27c2907e7ae8
|
[
"MIT"
] | 1
|
2017-11-09T18:41:41.000Z
|
2017-11-09T19:14:08.000Z
|
go_server_app/views.py
|
benjaminaaron/simple-go-server
|
0ebe6756f72f896fd014d060252c27c2907e7ae8
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from .models import GameMeta


def index(request):
    return render(request, 'go_server_app/index.html')


def dashboard(request):
    return render(request, 'go_server_app/dashboard.html', {'games_list': GameMeta.objects.all()})


def game(request, game_id):
    game_meta = GameMeta.objects.get(game_id=game_id)
    return render(request, 'go_server_app/game.html', {'game_meta': game_meta})


def terminal(request):
    return render(request, 'go_server_app/terminal.html')
| 24.52381
| 98
| 0.747573
| 73
| 515
| 5.068493
| 0.342466
| 0.12973
| 0.205405
| 0.227027
| 0.381081
| 0.381081
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0.126214
| 515
| 20
| 99
| 25.75
| 0.822222
| 0
| 0
| 0
| 0
| 0
| 0.234951
| 0.198058
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0.181818
| 0.272727
| 0.909091
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
c326bebf1fd8cf9fedf46e490c5cf11624fd3c7e
| 6,950
|
py
|
Python
|
sam-app/tests/unit/test_apns.py
|
mgacy/Adequate-Backend
|
7f62f692a3fff53f825e597289515bffadb8f25c
|
[
"MIT"
] | 1
|
2021-06-03T07:27:18.000Z
|
2021-06-03T07:27:18.000Z
|
sam-app/tests/unit/test_apns.py
|
mgacy/Adequate-Backend
|
7f62f692a3fff53f825e597289515bffadb8f25c
|
[
"MIT"
] | 3
|
2021-04-06T18:36:02.000Z
|
2021-06-16T04:22:27.000Z
|
sam-app/tests/unit/test_apns.py
|
mgacy/Adequate-Backend
|
7f62f692a3fff53f825e597289515bffadb8f25c
|
[
"MIT"
] | null | null | null |
import unittest

from .mocks import BotoSessionMock
from push_notification import apns


class APNSTestCase(unittest.TestCase):

    def_apns_category = 'MGDailyDealCategory'

    # def setUp(self):

    # def tearDown(self):

    # push_notification

    # push_background

    # make_new_deal_message

    # make_delta_message

    def test_make_delta_comment_1(self):
        deal_id = 'a6k5A000000kP9LQAU'
        delta_type = 'commentCount'
        delta_value = 5
        message = {
            'id': deal_id,
            'delta_type': delta_type,
            'delta_value': delta_value
        }
        expected = (
            '{"aps": {"content-available": 1}, '
            '"deal-id": "a6k5A000000kP9LQAU", '
            '"delta-type": "commentCount", '
            '"delta-value": 5}'
        )
        result = apns.make_delta_message(message)
        self.assertEqual(result, expected)

    def test_make_delta_status_1(self):
        deal_id = 'a6k5A000000kP9LQAU'
        delta_type = 'launchStatus'
        delta_value = 'launch'
        message = {
            'id': deal_id,
            'delta_type': delta_type,
            'delta_value': delta_value
        }
        expected = (
            '{"aps": {"content-available": 1}, '
            '"deal-id": "a6k5A000000kP9LQAU", '
            '"delta-type": "launchStatus", '
            '"delta-value": "launch"}'
        )
        result = apns.make_delta_message(message)
        self.assertEqual(result, expected)

    # publish_message

    def test_publish_delta_status_prod(self):
        message = (
            '{"aps": {"content-available": 1}, '
            '"deal-id": "a6k5A000000kP9LQAU", '
            '"delta-type": "launchStatus", '
            '"delta-value": "launch"}'
        )
        # deal_id = 'a6k5A000000kP9LQAU'
        # delta_type = 'launchStatus'
        # delta_value = 'launch'
        # message = (
        #     '{"aps": {"content-available": 1}, '
        #     f'"deal-id": "{deal_id}", '
        #     f'"delta-type": "{delta_type}", '
        #     f'"delta-value": "{delta_value}"'
        #     '}'
        # )
        session = BotoSessionMock()
        default_message = 'default message'
        apns_server = 'prod'
        apns.publish_message(session,
                             topic_arn='fake_topic_arn',
                             apns_server=apns_server,
                             apns_message=message,
                             default_message=default_message)
        expected = (
            '{'
            '"default": "default message", '
            '"APNS": "{'
            '\\"aps\\": {'
            '\\"content-available\\": 1'
            '}, '
            '\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
            '\\"delta-type\\": \\"launchStatus\\", '
            '\\"delta-value\\": \\"launch\\"'
            '}"'
            '}'
        )
        result = session.client.message
        self.assertEqual(result, expected)

    def test_publish_delta_status_dev(self):
        message = (
            '{"aps": {"content-available": 1}, '
            '"deal-id": "a6k5A000000kP9LQAU", '
            '"delta-type": "launchStatus", '
            '"delta-value": "launch"}'
        )
        session = BotoSessionMock()
        default_message = 'default message'
        apns_server = 'dev'
        apns.publish_message(session,
                             topic_arn='fake_topic_arn',
                             apns_server=apns_server,
                             apns_message=message,
                             default_message=default_message)
        expected = (
            '{'
            '"default": "default message", '
            '"APNS_SANDBOX": "{'
            '\\"aps\\": {'
            '\\"content-available\\": 1'
            '}, '
            '\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
            '\\"delta-type\\": \\"launchStatus\\", '
            '\\"delta-value\\": \\"launch\\"'
            '}"'
            '}'
        )
        result = session.client.message
        self.assertEqual(result, expected)

    def test_publish_delta_status_both(self):
        message = (
            '{"aps": {"content-available": 1}, '
            '"deal-id": "a6k5A000000kP9LQAU", '
            '"delta-type": "launchStatus", '
            '"delta-value": "launch"}'
        )
        session = BotoSessionMock()
        default_message = 'default message'
        apns_server = 'both'
        apns.publish_message(session,
                             topic_arn='fake_topic_arn',
                             apns_server=apns_server,
                             apns_message=message,
                             default_message=default_message)
        expected = (
            '{'
            '"default": "default message", '
            '"APNS": "{'
            '\\"aps\\": {'
            '\\"content-available\\": 1'
            '}, '
            '\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
            '\\"delta-type\\": \\"launchStatus\\", '
            '\\"delta-value\\": \\"launch\\"'
            '}", '
            '"APNS_SANDBOX": "{'
            '\\"aps\\": {'
            '\\"content-available\\": 1'
            '}, '
            '\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
            '\\"delta-type\\": \\"launchStatus\\", '
            '\\"delta-value\\": \\"launch\\"'
            '}"'
            '}'
        )
        result = session.client.message
        self.assertEqual(result, expected)

    def test_publish_invalid_server(self):
        session = BotoSessionMock()
        topic_arn = 'fake_topic_arn'
        apns_server = 'meh'
        apns_message = '{"aps": {"content-available": 1}'
        default_message = 'default message'
        self.assertRaises(
            ValueError, apns.publish_message, session, topic_arn, apns_server, apns_message, default_message)

    # _make_background_notification

    def test_make_background_notification_no_additional(self):
        additional = None
        expected = {
            'aps': {
                'content-available': 1
            }
        }
        result = apns._make_background_notification(additional)
        self.assertEqual(result, expected)

    def test_make_background_notification_with_additional(self):
        deal_id = 'a6k5A000000kP9LQAU'
        delta_type = 'commentCount'
        delta_value = 5
        additional = {
            'id': deal_id,
            'delta_type': delta_type,
            'delta_value': delta_value
        }
        expected = {
            'aps': {
                'content-available': 1
            },
            'id': deal_id,
            'delta_type': delta_type,
            'delta_value': delta_value
        }
        result = apns._make_background_notification(additional)
        self.assertDictEqual(result, expected)

    # _make_notification

    # def test_make_notification_1(self):

    # raise_for_status
| 29.079498
| 109
| 0.489784
| 550
| 6,950
| 5.925455
| 0.118182
| 0.063516
| 0.095735
| 0.11568
| 0.8027
| 0.750844
| 0.740718
| 0.684566
| 0.666155
| 0.659098
| 0
| 0.031001
| 0.368777
| 6,950
| 238
| 110
| 29.201681
| 0.711876
| 0.069496
| 0
| 0.745665
| 0
| 0
| 0.275097
| 0.066253
| 0
| 0
| 0
| 0
| 0.046243
| 1
| 0.046243
| false
| 0
| 0.017341
| 0
| 0.075145
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c32fe65d24a5f464b2f3a2a3ac48a2c68f408fd3
| 1,418
|
py
|
Python
|
Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py
|
LCS2-IIITD/summarization_bias
|
d66846bb7657439347f4714f2672350447474c5a
|
[
"MIT"
] | 1
|
2020-11-11T19:48:10.000Z
|
2020-11-11T19:48:10.000Z
|
Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py
|
LCS2-IIITD/summarization_bias
|
d66846bb7657439347f4714f2672350447474c5a
|
[
"MIT"
] | null | null | null |
Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py
|
LCS2-IIITD/summarization_bias
|
d66846bb7657439347f4714f2672350447474c5a
|
[
"MIT"
] | null | null | null |
"""
=========== What is Matter Parameters ===================
"""
#tups = [(125.0, 1.0), (125.0, 1.5), (125.0, 2.0), (125.0, 2.5), (125.0, 3.0), (150.0, 1.0), (150.0, 1.5), (150.0, 2.0), (150.0, 2.5), (150.0, 3.0), (175.0, 1.0), (175.0, 1.5), (175.0, 2.0), (175.0, 2.5), (175.0, 3.0), (200.0, 1.0), (200.0, 1.5), (200.0, 2.0), (200.0, 2.5), (200.0, 3.0), (225.0, 1.0), (225.0, 1.5), (225.0, 2.0), (225.0, 2.5), (225.0, 3.0), (250.0, 1.0), (250.0, 1.5), (250.0, 2.0), (250.0, 2.5), (250.0, 3.0)]
"""
=========== DUC Data ==========
"""
#tups = [(64.0, 1.0), (64.0, 1.5), (64.0, 2.0), (64.0, 2.5), (70.0, 1.0), (70.0, 1.5), (70.0, 2.0), (70.0, 2.5), (76.0, 1.0), (76.0, 1.5), (76.0, 2.0), (76.0, 2.5), (82.0, 1.0), (82.0, 1.5), (82.0, 2.0), (82.0, 2.5), (88.0, 1.0), (88.0, 1.5), (88.0, 2.0), (88.0, 2.5), (96.0, 1.0), (96.0, 1.5), (96.0, 2.0), (96.0, 2.5), (100.0, 1.0), (100.0, 1.5), (100.0, 2.0), (100.0, 2.5)]
#b = [1.0,1.5,2.0,2.5,3.0]
# alpha should be from [10,40]
#a = range(len(segpool)+10,len(segpool)+60,10)
#tups = list(itertools.product(a,b))
#print "Alll combinations ", tups
#tups = [(125, 1.0), (125, 1.5), (125, 2.0), (125, 2.5), (125, 3.0), (135, 1.0), (135, 1.5), (135, 2.0), (135, 2.5), (135, 3.0), (145, 1.0), (145, 1.5), (145, 2.0), (145, 2.5), (145, 3.0), (155, 1.0), (155, 1.5), (155, 2.0), (155, 2.5), (155, 3.0), (165, 1.0), (165, 1.5), (165, 2.0), (165, 2.5), (165, 3.0)]
#thresholds = [83]
| 78.777778
| 428
| 0.43512
| 354
| 1,418
| 1.742938
| 0.138418
| 0.08752
| 0.068071
| 0.019449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.390224
| 0.148801
| 1,418
| 18
| 429
| 78.777778
| 0.120961
| 0.952045
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c3561322c8fe83a3cce278173951cb1c3bdb4ed4
| 284
|
py
|
Python
|
imdb/utils.py
|
rinkurajole/imdb_sanic_app
|
502852b911eb2cfdc5dfcdb4fba585b91e2ce7c6
|
[
"BSD-3-Clause"
] | null | null | null |
imdb/utils.py
|
rinkurajole/imdb_sanic_app
|
502852b911eb2cfdc5dfcdb4fba585b91e2ce7c6
|
[
"BSD-3-Clause"
] | null | null | null |
imdb/utils.py
|
rinkurajole/imdb_sanic_app
|
502852b911eb2cfdc5dfcdb4fba585b91e2ce7c6
|
[
"BSD-3-Clause"
] | null | null | null |
import bcrypt

# Module-level salt generated once at import time; reused as the default
# salt for generate_hash.
salt = bcrypt.gensalt()


def generate_hash(passwd, salt=salt):
    return str(bcrypt.hashpw(passwd, salt))


def match_password(req_pwd, db_pwd):
    # The stored hash was saved via str(bytes), so strip the "b'...'"
    # wrapper before re-encoding; bcrypt.hashpw then reads the salt back
    # out of the stored hash for the comparison.
    db_pwd = db_pwd.replace('b\'', '').replace('\'', '').encode('utf-8')
    return db_pwd == bcrypt.hashpw(req_pwd, db_pwd)
| 23.666667
| 70
| 0.683099
| 43
| 284
| 4.302326
| 0.488372
| 0.135135
| 0.172973
| 0.118919
| 0.097297
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004065
| 0.133803
| 284
| 11
| 71
| 25.818182
| 0.747967
| 0
| 0
| 0
| 1
| 0
| 0.06338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.428571
| 0.142857
| 0.142857
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
c36a18741da6b1e9a7e803a47b014cff09f34cfc
| 310
|
py
|
Python
|
inf_classif_analysis/descriptive_analysis.py
|
Marco-Ametrano/myocardal_infarction_class
|
d2fb9d4d6643d0b836ffdb94a32911eb4d68c390
|
[
"MIT"
] | null | null | null |
inf_classif_analysis/descriptive_analysis.py
|
Marco-Ametrano/myocardal_infarction_class
|
d2fb9d4d6643d0b836ffdb94a32911eb4d68c390
|
[
"MIT"
] | null | null | null |
inf_classif_analysis/descriptive_analysis.py
|
Marco-Ametrano/myocardal_infarction_class
|
d2fb9d4d6643d0b836ffdb94a32911eb4d68c390
|
[
"MIT"
] | null | null | null |
#AFTER PREPROCESSING AND TARGETS DEFINITION
newdataset.describe()
LET_IS.value_counts()
LET_IS.value_counts().plot(kind='bar', color='c')
Y_unica.value_counts()
Y_unica.value_counts().plot(kind='bar', color='c')
ZSN.value_counts().plot(kind='bar', color='c')
Survive.value_counts().plot(kind='bar', color='c')
| 34.444444
| 50
| 0.748387
| 49
| 310
| 4.530612
| 0.408163
| 0.297297
| 0.27027
| 0.342342
| 0.504505
| 0.504505
| 0.504505
| 0
| 0
| 0
| 0
| 0
| 0.051613
| 310
| 8
| 51
| 38.75
| 0.755102
| 0.135484
| 0
| 0
| 0
| 0
| 0.059925
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6f22dd259e43cf8dd03f6e436b63e23ee3c3c16a
| 133
|
py
|
Python
|
mycelium/__init__.py
|
suet-lee/mycelium
|
db83cd3ab00697f28b2def2cebcdef52698fdd92
|
[
"Apache-2.0"
] | 6
|
2021-05-23T17:36:02.000Z
|
2022-01-21T20:34:17.000Z
|
mycelium/__init__.py
|
suet-lee/mycelium
|
db83cd3ab00697f28b2def2cebcdef52698fdd92
|
[
"Apache-2.0"
] | null | null | null |
mycelium/__init__.py
|
suet-lee/mycelium
|
db83cd3ab00697f28b2def2cebcdef52698fdd92
|
[
"Apache-2.0"
] | 1
|
2021-06-17T20:35:10.000Z
|
2021-06-17T20:35:10.000Z
|
from .switch import EKFSwitch, RelaySwitch, InitialModeSwitch
from .camera_t265 import CameraT265
from .camera_d435 import CameraD435
| 44.333333
| 61
| 0.864662
| 16
| 133
| 7.0625
| 0.6875
| 0.176991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.097744
| 133
| 3
| 62
| 44.333333
| 0.841667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6f5df725ff569b1c32118a15233cd3613598d3f9
| 95
|
py
|
Python
|
todo/admin.py
|
haidoro/TODO_lesson
|
fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b
|
[
"CECILL-B"
] | null | null | null |
todo/admin.py
|
haidoro/TODO_lesson
|
fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b
|
[
"CECILL-B"
] | null | null | null |
todo/admin.py
|
haidoro/TODO_lesson
|
fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b
|
[
"CECILL-B"
] | null | null | null |
from django.contrib import admin
from .models import TodoModel
admin.site.register(TodoModel)
| 19
| 32
| 0.831579
| 13
| 95
| 6.076923
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 95
| 4
| 33
| 23.75
| 0.929412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6f75fde6361af1d1bfaca77b15e701086bf2e3b2
| 13,684
|
py
|
Python
|
src/ensemble_nn/agent_nn.py
|
AbhinavGopal/ts_tutorial
|
147ff28dc507172774693f225071f8e244e5994e
|
[
"MIT"
] | 290
|
2017-12-29T01:55:21.000Z
|
2022-03-28T10:00:32.000Z
|
src/ensemble_nn/agent_nn.py
|
AbhinavGopal/ts_tutorial
|
147ff28dc507172774693f225071f8e244e5994e
|
[
"MIT"
] | 3
|
2018-08-02T11:45:51.000Z
|
2020-09-24T14:34:58.000Z
|
src/ensemble_nn/agent_nn.py
|
AbhinavGopal/ts_tutorial
|
147ff28dc507172774693f225071f8e244e5994e
|
[
"MIT"
] | 76
|
2018-01-17T06:19:51.000Z
|
2021-11-10T06:18:20.000Z
|
"""Agents for neural net bandit problems.
We implement three main types of agent:
- epsilon-greedy (fixed epsilon, annealing epsilon)
- dropout (arXiv:1506.02142)
- ensemble sampling
All code is specialized to the setting of 2-layer fully connected MLPs.
"""
import numpy as np
import numpy.random as rd
from base.agent import Agent
from ensemble_nn.env_nn import TwoLayerNNBandit
class TwoLayerNNEpsilonGreedy(Agent):
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
epsilon_param=0.0,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Epsilon-greedy agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
epsilon_param: fixed epsilon choice.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.W1 = 1e-2 * rd.randn(hidden_dim, input_dim) # initialize weights
self.W2 = 1e-2 * rd.randn(hidden_dim)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.epsilon_param = epsilon_param
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps # number of gradient steps we
# take during each time period
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.reward_hist = np.zeros(self.T)
def _model_forward(self, input_actions):
"""Neural network forward pass.
Args:
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1, axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
out = np.sum(relu_out * self.W2, axis=1)
cache = (input_actions, affine_out, relu_out)
return out, cache
def _model_backward(self, out, cache, y):
"""Neural network backward pass (for backpropagation).
Args:
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
drelu_out = dout[:, np.newaxis] * self.W2
mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def _update_model(self, t):
"""Update the model by taking a few gradient steps."""
for i in range(self.num_gradient_steps):
# sample minibatch
batch_ind = rd.randint(t + 1, size=self.batch_size)
action_batch = self.action_hist[batch_ind]
reward_batch = self.reward_hist[batch_ind]
out, cache = self._model_forward(action_batch)
dW1, dW2 = self._model_backward(out, cache, reward_batch)
dW1 /= self.batch_size
dW2 /= self.batch_size
dW1 += 2 / (self.prior_var * (t + 1)) * self.W1
dW2 += 2 / (self.prior_var * (t + 1)) * self.W2
self.W1 -= self.lr * dW1
self.W2 -= self.lr * dW2
def update_observation(self, observation, action, reward):
"""Learn from observations."""
t = observation
self.action_hist[t] = self.actions[action]
self.reward_hist[t] = reward
self._update_model(t)
self.lr *= self.lr_decay
def pick_action(self, observation):
"""Fixed epsilon-greedy action selection."""
u = rd.rand()
if u < self.epsilon_param:
action = rd.randint(self.num_actions)
else:
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNEpsilonGreedyAnnealing(TwoLayerNNEpsilonGreedy):
"""Epsilon-greedy with an annealing epsilon:
epsilon = self.epsilon_param / (self.epsilon_param + t)
"""
def pick_action(self, observation):
"""Overload pick_action to dynamically recalculate epsilon-greedy."""
t = observation
epsilon = self.epsilon_param / (self.epsilon_param + t)
u = rd.rand()
if u < epsilon:
action = rd.randint(self.num_actions)
else:
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNDropout(TwoLayerNNEpsilonGreedy):
"""Dropout is used to represent model uncertainty.
ICML paper suggests this is Bayesian uncertainty: arXiv:1506.02142.
Follow up work suggests that this is flawed: TODO(iosband) add link.
"""
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
drop_prob=0.5,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Dropout agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
drop_prob: probability of randomly zero-ing out weight component.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.W1 = 1e-2 * rd.randn(hidden_dim, input_dim)
self.W2 = 1e-2 * rd.randn(hidden_dim)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.p = drop_prob
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.reward_hist = np.zeros(self.T)
def _model_forward(self, input_actions):
"""Neural network forward pass.
Note that dropout remains "on" so that forward pass is stochastic.
Args:
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1, axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
dropout_mask = rd.rand(*relu_out.shape) > self.p
dropout_out = relu_out * dropout_mask
out = np.sum(dropout_out * self.W2, axis=1)
cache = (input_actions, affine_out, relu_out, dropout_mask, dropout_out)
return out, cache
def _model_backward(self, out, cache, y):
"""Neural network backward pass (for backpropagation).
Args:
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out, dropout_mask, dropout_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
ddropout_out = dout[:, np.newaxis] * self.W2
drelu_out = ddropout_out * dropout_mask
relu_mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = relu_mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def pick_action(self, observation):
"""Select the greedy action according to the output of a stochastic
forward pass."""
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNEnsembleSampling(Agent):
"""An ensemble sampling agent maintains an ensemble of neural nets, each
fitted to a perturbed prior and perturbed observations."""
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
num_models=10,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Ensemble sampling agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
num_models: Number of ensemble models to train.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.M = num_models
# initialize models by sampling perturbed prior means
self.W1_model_prior = np.sqrt(prior_var) * rd.randn(self.M, hidden_dim,
input_dim)
self.W2_model_prior = np.sqrt(prior_var) * rd.randn(self.M, hidden_dim)
self.W1 = np.copy(self.W1_model_prior)
self.W2 = np.copy(self.W2_model_prior)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.model_reward_hist = np.zeros((self.M, self.T))
def _model_forward(self, m, input_actions):
"""Neural network forward pass for single model of ensemble.
Args:
m: index of which network to evaluate.
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1[m], axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
out = np.sum(relu_out * self.W2[m], axis=1)
cache = (input_actions, affine_out, relu_out)
return out, cache
def _model_backward(self, m, out, cache, y):
"""Neural network backward pass (for backpropagation) for single network.
Args:
m: index of which network to evaluate.
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
drelu_out = dout[:, np.newaxis] * self.W2[m]
mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def _update_model(self, m, t):
"""Apply SGD to model m."""
for i in range(self.num_gradient_steps):
# sample minibatch
batch_ind = rd.randint(t + 1, size=self.batch_size)
action_batch = self.action_hist[batch_ind]
reward_batch = self.model_reward_hist[m][batch_ind]
out, cache = self._model_forward(m, action_batch)
dW1, dW2 = self._model_backward(m, out, cache, reward_batch)
dW1 /= self.batch_size
dW2 /= self.batch_size
dW1 += 2 / (self.prior_var * (t + 1)) * (
self.W1[m] - self.W1_model_prior[m])
dW2 += 2 / (self.prior_var * (t + 1)) * (
self.W2[m] - self.W2_model_prior[m])
self.W1[m] -= self.lr * dW1
self.W2[m] -= self.lr * dW2
return
def update_observation(self, observation, action, reward):
"""Learn from observations, shared across all models.
However, perturb the reward independently for each model and then update.
"""
t = observation
self.action_hist[t] = self.actions[action]
for m in range(self.M):
m_noise = np.sqrt(self.noise_var) * rd.randn()
self.model_reward_hist[m, t] = reward + m_noise
self._update_model(m, t)
self.lr *= self.lr_decay
def pick_action(self, observation):
"""Select action via ensemble sampling.
Choose active network uniformly at random, then act greedily wrt that model.
"""
m = rd.randint(self.M)
model_out, _ = self._model_forward(m, self.actions)
action = np.argmax(model_out)
return action
| 33.621622
| 80
| 0.656168
| 1,885
| 13,684
| 4.564987
| 0.131034
| 0.021964
| 0.026031
| 0.010459
| 0.769088
| 0.74817
| 0.732714
| 0.717374
| 0.700174
| 0.673097
| 0
| 0.01528
| 0.249123
| 13,684
| 406
| 81
| 33.704434
| 0.82219
| 0.341274
| 0
| 0.690141
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002463
| 0
| 1
| 0.079812
| false
| 0
| 0.018779
| 0
| 0.169014
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
489dcb5eb95e27bdfa01e5e5808a8eedc54c5b9e
| 140
|
py
|
Python
|
src/scrapers/models/__init__.py
|
jskroodsma/helpradar
|
d9a2198db30995e790ab4f1611e15b85540cd3f8
|
[
"MIT"
] | null | null | null |
src/scrapers/models/__init__.py
|
jskroodsma/helpradar
|
d9a2198db30995e790ab4f1611e15b85540cd3f8
|
[
"MIT"
] | null | null | null |
src/scrapers/models/__init__.py
|
jskroodsma/helpradar
|
d9a2198db30995e790ab4f1611e15b85540cd3f8
|
[
"MIT"
] | null | null | null |
from .database import Db
from .initiatives import InitiativeBase, Platform, ImportBatch, InitiativeImport, BatchImportState, InitiativeGroup
| 70
| 115
| 0.864286
| 13
| 140
| 9.307692
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 140
| 2
| 115
| 70
| 0.945313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
489e5789fc9bdd522af9556ca44141058ccb8f59
| 27
|
py
|
Python
|
python/testData/completion/relativeImport/pkg/main.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/relativeImport/pkg/main.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/relativeImport/pkg/main.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from .string import <caret>
| 27
| 27
| 0.777778
| 4
| 27
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
48bb529c5d5a0817b3c6e3353e857c62a73b8a16
| 91
|
py
|
Python
|
run.py
|
ellotecnologia/galadriel
|
16b592818d8beb8407805e43f2f881975b245d94
|
[
"MIT"
] | null | null | null |
run.py
|
ellotecnologia/galadriel
|
16b592818d8beb8407805e43f2f881975b245d94
|
[
"MIT"
] | null | null | null |
run.py
|
ellotecnologia/galadriel
|
16b592818d8beb8407805e43f2f881975b245d94
|
[
"MIT"
] | null | null | null |
from app.app import create_app
from config import BaseConfig
app = create_app(BaseConfig)
| 18.2
| 30
| 0.824176
| 14
| 91
| 5.214286
| 0.428571
| 0.246575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131868
| 91
| 4
| 31
| 22.75
| 0.924051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
48bc7c9db7dabf6628ee230ef0c1f45b6794af0d
| 2,146
|
py
|
Python
|
api/routefinder.py
|
shingkid/DrWatson-ToTheRescue_SCDFXIBM
|
009d2b4599b276ea760dbd888718a25332893075
|
[
"MIT"
] | 1
|
2020-06-12T10:24:31.000Z
|
2020-06-12T10:24:31.000Z
|
api/routefinder.py
|
yankai364/Dr-Watson
|
22bd885d028e118fa5abf5a9d0ea373b7020ca1d
|
[
"MIT"
] | 3
|
2020-09-24T15:36:33.000Z
|
2022-02-10T02:32:42.000Z
|
api/routefinder.py
|
shingkid/DrWatson-ToTheRescue_SCDFXIBM
|
009d2b4599b276ea760dbd888718a25332893075
|
[
"MIT"
] | 1
|
2020-06-14T10:09:58.000Z
|
2020-06-14T10:09:58.000Z
|
import csv
import pandas as pd
import numpy as np
import networkx as nx


class RouteFinder():
    def __init__(self):
        G = nx.Graph()
        with open('data/node_pairs.csv') as csvfile:
            readCSV = csv.reader(csvfile, delimiter=',')
            for row in readCSV:
                # add edges
                G.add_edge(row[0], row[1])
        self.G = G

    def reset_graph(self):
        G = nx.Graph()
        with open('data/node_pairs.csv') as csvfile:
            readCSV = csv.reader(csvfile, delimiter=',')
            for row in readCSV:
                # add edges
                G.add_edge(row[0], row[1])
        self.G = G

    def remove_node(self, nodes):
        self.G.remove_nodes_from(nodes)

    def optimal_route(self, source, target):
        return nx.shortest_path(self.G, source, target)

    def optimal_entry_route(self, target):
        exits = ['Exit_4', 'Exit_3', 'Exit_2', 'Exit_1']
        optimal_route = []
        shortest_path_length = 0
        for exit in exits:
            try:
                curr_path = nx.shortest_path(self.G, exit, target)
                curr_length = len(curr_path)
                if shortest_path_length == 0 or curr_length < shortest_path_length:
                    optimal_route = curr_path
                    shortest_path_length = curr_length
            except:
                msg = 'No paths found'
        if shortest_path_length == 0:
            return msg
        return optimal_route

    def optimal_exit_route(self, source):
        exits = ['Exit_1', 'Exit_2', 'Exit_3', 'Exit_4']
        optimal_route = []
        shortest_path_length = 0
        for exit in exits:
            try:
                curr_path = nx.shortest_path(self.G, source, exit)
                curr_length = len(curr_path)
                if shortest_path_length == 0 or curr_length < shortest_path_length:
                    optimal_route = curr_path
                    shortest_path_length = curr_length
            except:
                msg = 'No paths found'
        if shortest_path_length == 0:
            return msg
        return optimal_route
| 24.11236
| 83
| 0.547996
| 262
| 2,146
| 4.236641
| 0.232824
| 0.140541
| 0.162162
| 0.102703
| 0.728829
| 0.728829
| 0.700901
| 0.700901
| 0.700901
| 0.700901
| 0
| 0.013216
| 0.365331
| 2,146
| 88
| 84
| 24.386364
| 0.801762
| 0.008854
| 0
| 0.690909
| 0
| 0
| 0.054614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109091
| false
| 0
| 0.072727
| 0.018182
| 0.290909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d28c4ad642d7e25e12003d4150c60dd4429d8299
| 50
|
py
|
Python
|
genrl/deep/agents/sac/__init__.py
|
ajaysub110/JigglypuffRL
|
083fd26d05b7eac018e6db7d32c4be4587461766
|
[
"MIT"
] | null | null | null |
genrl/deep/agents/sac/__init__.py
|
ajaysub110/JigglypuffRL
|
083fd26d05b7eac018e6db7d32c4be4587461766
|
[
"MIT"
] | null | null | null |
genrl/deep/agents/sac/__init__.py
|
ajaysub110/JigglypuffRL
|
083fd26d05b7eac018e6db7d32c4be4587461766
|
[
"MIT"
] | null | null | null |
from genrl.deep.agents.sac.sac import SAC # noqa
| 25
| 49
| 0.76
| 9
| 50
| 4.222222
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14
| 50
| 1
| 50
| 50
| 0.883721
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d2f1e1f4951c3e0fd8684c1a41e6225fa4a4907c
| 100
|
py
|
Python
|
COVIDSafepassage/passsystem/apps.py
|
VICS-CORE/safepassage_server
|
58bc04dbfa55430c0218567211e5259de77518ae
|
[
"MIT"
] | null | null | null |
COVIDSafepassage/passsystem/apps.py
|
VICS-CORE/safepassage_server
|
58bc04dbfa55430c0218567211e5259de77518ae
|
[
"MIT"
] | 8
|
2020-04-25T09:42:25.000Z
|
2022-03-12T00:23:32.000Z
|
COVIDSafepassage/passsystem/apps.py
|
VICS-CORE/safepassage_server
|
58bc04dbfa55430c0218567211e5259de77518ae
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig


class PasssystemConfig(AppConfig):
    name = 'passsystem'
| 16.666667
| 35
| 0.73
| 10
| 100
| 7.3
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 100
| 5
| 36
| 20
| 0.9125
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.666667
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
96065ad383494de22a076bf5a911760ad23ad0e8
| 87
|
py
|
Python
|
pyvecorg/__main__.py
|
torsava/pyvec.org
|
809812395e4bffdb0522a52c6a7f7468ffc7ccd6
|
[
"MIT"
] | 3
|
2016-09-08T09:28:02.000Z
|
2019-08-25T11:56:26.000Z
|
pyvecorg/__main__.py
|
torsava/pyvec.org
|
809812395e4bffdb0522a52c6a7f7468ffc7ccd6
|
[
"MIT"
] | 97
|
2016-08-20T17:11:34.000Z
|
2022-03-29T07:52:13.000Z
|
pyvecorg/__main__.py
|
torsava/pyvec.org
|
809812395e4bffdb0522a52c6a7f7468ffc7ccd6
|
[
"MIT"
] | 7
|
2016-11-26T20:38:29.000Z
|
2021-08-20T11:11:47.000Z
|
from elsa import cli
from pyvecorg import app
cli(app, base_url='http://pyvec.org')
| 12.428571
| 37
| 0.735632
| 15
| 87
| 4.2
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149425
| 87
| 6
| 38
| 14.5
| 0.851351
| 0
| 0
| 0
| 0
| 0
| 0.183908
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
961fc04d55a2472f650b925e3c30b289d25af832
| 123
|
py
|
Python
|
model-server/config.py
|
campos537/deep-fashion-system
|
1de31dd6260cc967e1832cff63ae7e537a3a4e9d
|
[
"Unlicense"
] | 1
|
2021-04-06T00:43:26.000Z
|
2021-04-06T00:43:26.000Z
|
model-server/config.py
|
campos537/deep-fashion-system
|
1de31dd6260cc967e1832cff63ae7e537a3a4e9d
|
[
"Unlicense"
] | null | null | null |
model-server/config.py
|
campos537/deep-fashion-system
|
1de31dd6260cc967e1832cff63ae7e537a3a4e9d
|
[
"Unlicense"
] | null | null | null |
import json


def Config(config_path):
    with open(config_path) as config_file:
        return json.load(config_file)
| 20.5
| 42
| 0.707317
| 18
| 123
| 4.611111
| 0.611111
| 0.240964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219512
| 123
| 6
| 43
| 20.5
| 0.864583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
824eb389c2a7eca319848d5d0b764477a524317f
| 544
|
py
|
Python
|
ibmsecurity/isam/base/overview.py
|
zone-zero/ibmsecurity
|
7d3e38104b67e1b267e18a44845cb756a5302c3d
|
[
"Apache-2.0"
] | 46
|
2017-03-21T21:08:59.000Z
|
2022-02-20T22:03:46.000Z
|
ibmsecurity/isam/base/overview.py
|
zone-zero/ibmsecurity
|
7d3e38104b67e1b267e18a44845cb756a5302c3d
|
[
"Apache-2.0"
] | 201
|
2017-03-21T21:25:52.000Z
|
2022-03-30T21:38:20.000Z
|
ibmsecurity/isam/base/overview.py
|
zone-zero/ibmsecurity
|
7d3e38104b67e1b267e18a44845cb756a5302c3d
|
[
"Apache-2.0"
] | 91
|
2017-03-22T16:25:36.000Z
|
2022-02-04T04:36:29.000Z
|
def get(isamAppliance, check_mode=False, force=False):
    """
    Retrieve an overview of updates and licensing information
    """
    return isamAppliance.invoke_get("Retrieve an overview of updates and licensing information",
                                    "/updates/overview")


def get_licensing_info(isamAppliance, check_mode=False, force=False):
    """
    Retrieve the licensing information
    """
    return isamAppliance.invoke_get("Retrieve the licensing information",
                                    "/lum/is_licensed")
| 36.266667
| 96
| 0.647059
| 55
| 544
| 6.272727
| 0.4
| 0.231884
| 0.127536
| 0.156522
| 0.771014
| 0.771014
| 0.771014
| 0.289855
| 0
| 0
| 0
| 0
| 0.268382
| 544
| 14
| 97
| 38.857143
| 0.866834
| 0.169118
| 0
| 0
| 0
| 0
| 0.294537
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
82763f4b601df981afd52e2acd04c501b896a5f2
| 168
|
py
|
Python
|
apps/tracking/admin.py
|
Codeidea/budget-tracker
|
e07e8d6bb49b0a3de428942a57f090912c191d3e
|
[
"MIT"
] | null | null | null |
apps/tracking/admin.py
|
Codeidea/budget-tracker
|
e07e8d6bb49b0a3de428942a57f090912c191d3e
|
[
"MIT"
] | null | null | null |
apps/tracking/admin.py
|
Codeidea/budget-tracker
|
e07e8d6bb49b0a3de428942a57f090912c191d3e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import LogCategory, BudgetLog
# Register your models here.
admin.site.register(LogCategory)
admin.site.register(BudgetLog)
| 33.6
| 42
| 0.833333
| 22
| 168
| 6.363636
| 0.545455
| 0.128571
| 0.242857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 168
| 5
| 43
| 33.6
| 0.915033
| 0.154762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
82dad9c48cf2ee5a8b767bdd94a5e6cdf8574098
| 116
|
py
|
Python
|
asset/admin.py
|
shoaibsaikat/Django-Office-Management-BackEnd
|
bb8ec201e4d414c16f5bac1907a2641d80c5970a
|
[
"Apache-2.0"
] | null | null | null |
asset/admin.py
|
shoaibsaikat/Django-Office-Management-BackEnd
|
bb8ec201e4d414c16f5bac1907a2641d80c5970a
|
[
"Apache-2.0"
] | null | null | null |
asset/admin.py
|
shoaibsaikat/Django-Office-Management-BackEnd
|
bb8ec201e4d414c16f5bac1907a2641d80c5970a
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Asset
# Register your models here.
admin.site.register(Asset)
| 19.333333
| 32
| 0.801724
| 17
| 116
| 5.470588
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12931
| 116
| 6
| 33
| 19.333333
| 0.920792
| 0.224138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7d57683f060246ecdbe9fa25924715de937635d2
| 67
|
py
|
Python
|
dexp/processing/remove_beads/__init__.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 16
|
2021-04-21T14:09:19.000Z
|
2022-03-22T02:30:59.000Z
|
dexp/processing/remove_beads/__init__.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 28
|
2021-04-15T17:43:08.000Z
|
2022-03-29T16:08:35.000Z
|
dexp/processing/remove_beads/__init__.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 3
|
2022-02-08T17:41:30.000Z
|
2022-03-18T15:32:27.000Z
|
from dexp.processing.remove_beads.beadsremover import BeadsRemover
| 33.5
| 66
| 0.895522
| 8
| 67
| 7.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 67
| 1
| 67
| 67
| 0.936508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7dcf866c0422d8f7d07418dae857b071849168bc
| 51
|
py
|
Python
|
m3o_plugin/postcode.py
|
JustIceQAQ/play_m3o_in_python
|
140b1f07cb574d1f0a2890503ae9e73ce3907f2b
|
[
"MIT"
] | null | null | null |
m3o_plugin/postcode.py
|
JustIceQAQ/play_m3o_in_python
|
140b1f07cb574d1f0a2890503ae9e73ce3907f2b
|
[
"MIT"
] | null | null | null |
m3o_plugin/postcode.py
|
JustIceQAQ/play_m3o_in_python
|
140b1f07cb574d1f0a2890503ae9e73ce3907f2b
|
[
"MIT"
] | null | null | null |
# TODO Postcode: https://m3o.com/postcode/overview
| 25.5
| 50
| 0.764706
| 7
| 51
| 5.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.078431
| 51
| 1
| 51
| 51
| 0.808511
| 0.941176
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7dd643437e0865cafce1491b350b4e99be342f2c
| 27
|
py
|
Python
|
tests/tests.py
|
cjapp/tkinter_simpleEncodeDecode
|
15520d73c51bb1a6a316414b2e8fb50b7be8f942
|
[
"MIT"
] | null | null | null |
tests/tests.py
|
cjapp/tkinter_simpleEncodeDecode
|
15520d73c51bb1a6a316414b2e8fb50b7be8f942
|
[
"MIT"
] | null | null | null |
tests/tests.py
|
cjapp/tkinter_simpleEncodeDecode
|
15520d73c51bb1a6a316414b2e8fb50b7be8f942
|
[
"MIT"
] | null | null | null |
from .context import main
| 9
| 25
| 0.777778
| 4
| 27
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 2
| 26
| 13.5
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7dd7abdb00a4ee3724c7dfc992569e2f8f38d9dd
| 23,149
|
py
|
Python
|
ofa/tutorial/imagenet_eval_helper.py
|
johsnows/once-for-all
|
fac2a6388e70873666b848a316aa58c7b2e17031
|
[
"Apache-2.0"
] | null | null | null |
ofa/tutorial/imagenet_eval_helper.py
|
johsnows/once-for-all
|
fac2a6388e70873666b848a316aa58c7b2e17031
|
[
"Apache-2.0"
] | null | null | null |
ofa/tutorial/imagenet_eval_helper.py
|
johsnows/once-for-all
|
fac2a6388e70873666b848a316aa58c7b2e17031
|
[
"Apache-2.0"
] | null | null | null |
import os.path as osp
import numpy as np
import math
from tqdm import tqdm
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils.data
from torchvision import transforms, datasets
from ofa.utils import AverageMeter, accuracy
from ofa.model_zoo import ofa_specialized
from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics
import copy
import random
def evaluate_ofa_resnet_subnet(ofa_net, path, net_config, data_loader, batch_size, device='cuda:0'):
assert 'w' in net_config and 'd' in net_config and 'e' in net_config
assert len(net_config['w']) == 6 and len(net_config['e']) == 18 and len(net_config['d']) == 5
ofa_net.set_active_subnet(w=net_config['w'], d=net_config['d'], e=net_config['e'])
subnet = ofa_net.get_active_subnet().to(device)
calib_bn(subnet, path, 224, batch_size)
top1 = validate(subnet, path, 224, data_loader, batch_size, device)
return top1
def evaluate_ofa_resnet_ensemble_subnet(ofa_net, path, net_config1, net_config2, data_loader, batch_size, device='cuda:0'):
assert 'w' in net_config1 and 'd' in net_config1 and 'e' in net_config1
assert len(net_config1['w']) == 6 and len(net_config1['e']) == 18 and len(net_config1['d']) == 5
ofa_net.set_active_subnet(w=net_config1['w'], d=net_config1['d'], e=net_config1['e'])
subnet1 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet1, path, 224, batch_size)
ofa_net.set_active_subnet(w=net_config2['w'], d=net_config2['d'], e=net_config2['e'])
subnet2 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet2, path, 224, batch_size)
# assert net_config2['r'][0]==net_config1['r'][0]
subnets = []
subnets.append(subnet2)
subnets.append(subnet1)
top1 = ensemble_validate(subnets, path, 224, data_loader, batch_size, device)
return top1
def evaluate_ofa_subnet(ofa_net, path, net_config, data_loader, batch_size, device='cuda:0'):
assert 'ks' in net_config and 'd' in net_config and 'e' in net_config
assert len(net_config['ks']) == 20 and len(net_config['e']) == 20 and len(net_config['d']) == 5
ofa_net.set_active_subnet(ks=net_config['ks'], d=net_config['d'], e=net_config['e'])
subnet = ofa_net.get_active_subnet().to(device)
calib_bn(subnet, path, net_config['r'][0], batch_size)
top1 = validate(subnet, path, net_config['r'][0], data_loader, batch_size, device)
return top1
def evaluate_ofa_ensemble_subnet(ofa_net, path, net_config1, net_config2, data_loader, batch_size, device='cuda:0'):
assert 'ks' in net_config1 and 'd' in net_config1 and 'e' in net_config1
assert len(net_config1['ks']) == 20 and len(net_config1['e']) == 20 and len(net_config1['d']) == 5
ofa_net.set_active_subnet(ks=net_config1['ks'], d=net_config1['d'], e=net_config1['e'])
subnet1 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet1, path, net_config1['r'][0], batch_size)
ofa_net.set_active_subnet(ks=net_config2['ks'], d=net_config2['d'], e=net_config2['e'])
subnet2 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet2, path, net_config2['r'][0], batch_size)
assert net_config2['r'][0]==net_config1['r'][0]
subnets = []
subnets.append(subnet2)
subnets.append(subnet1)
top1 = ensemble_validate(subnets, path, net_config2['r'][0], data_loader, batch_size, device)
return top1
def calib_bn(net, path, image_size, batch_size, num_images=2000):
# print('Creating dataloader for resetting BN running statistics...')
dataset = datasets.ImageFolder(
osp.join(
path,
'train'),
transforms.Compose([
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=32. / 255., saturation=0.5),
transforms.ToTensor(),
transforms.Normalize(
mean=[
0.485,
0.456,
0.406],
std=[
0.229,
0.224,
0.225]
),
])
)
chosen_indexes = np.random.choice(list(range(len(dataset))), num_images)
sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sub_sampler,
batch_size=batch_size,
num_workers=16,
pin_memory=True,
drop_last=False,
)
# print('Resetting BN running statistics (this may take 10-20 seconds)...')
set_running_statistics(net, data_loader)
def ensemble_validate(nets, path, image_size, data_loader, batch_size=100, device='cuda:0'):
if 'cuda' in device:
print('use cuda')
for net in nets:
net = torch.nn.DataParallel(net).to(device)
else:
for net in nets:
net = net.to(device)
data_loader.dataset.transform = transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().to(device)
for net in nets:
net.eval()
net = net.to(device)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader), desc='Validate') as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
# compute output
n = len(nets)
output = 0
for i, net in enumerate(nets):
if i == 0:
output =net(images)
else:
output+=net(images)
output = output/n
loss = criterion(output, labels)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'top5': top5.avg,
'img_size': images.size(2),
})
t.update(1)
print('Results: loss=%.5f,\t top1=%.3f,\t top5=%.1f' % (losses.avg, top1.avg, top5.avg))
return top1.avg
def validate(net, path, image_size, data_loader, batch_size=100, device='cuda:0'):
if 'cuda' in device:
net = torch.nn.DataParallel(net).to(device)
else:
net = net.to(device)
data_loader.dataset.transform = transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().to(device)
net.eval()
net = net.to(device)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader), desc='Validate') as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
# compute output
output = net(images)
loss = criterion(output, labels)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'top5': top5.avg,
'img_size': images.size(2),
})
t.update(1)
print('Results: loss=%.5f,\t top1=%.1f,\t top5=%.1f' % (losses.avg, top1.avg, top5.avg))
return top1.avg
def evaluate_ofa_specialized(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
def select_platform_name():
valid_platform_name = [
'pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops'
]
print("Please select a hardware platform from ('pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops')!\n")
while True:
platform_name = input()
platform_name = platform_name.lower()
if platform_name in valid_platform_name:
return platform_name
print("Platform name is invalid! Please select in ('pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops')!\n")
def select_netid(platform_name):
platform_efficiency_map = {
'pixel1': {
143: 'pixel1_lat@[email protected]_finetune@75',
132: 'pixel1_lat@[email protected]_finetune@75',
79: 'pixel1_lat@[email protected]_finetune@75',
58: 'pixel1_lat@[email protected]_finetune@75',
40: 'pixel1_lat@[email protected]_finetune@25',
28: 'pixel1_lat@[email protected]_finetune@25',
20: 'pixel1_lat@[email protected]_finetune@25',
},
'pixel2': {
62: 'pixel2_lat@[email protected]_finetune@25',
50: 'pixel2_lat@[email protected]_finetune@25',
35: 'pixel2_lat@[email protected]_finetune@25',
25: 'pixel2_lat@[email protected]_finetune@25',
},
'note10': {
64: 'note10_lat@[email protected]_finetune@75',
50: 'note10_lat@[email protected]_finetune@75',
41: 'note10_lat@[email protected]_finetune@75',
30: 'note10_lat@[email protected]_finetune@75',
22: 'note10_lat@[email protected]_finetune@25',
16: 'note10_lat@[email protected]_finetune@25',
11: 'note10_lat@[email protected]_finetune@25',
8: 'note10_lat@[email protected]_finetune@25',
},
'note8': {
65: 'note8_lat@[email protected]_finetune@25',
49: 'note8_lat@[email protected]_finetune@25',
31: 'note8_lat@[email protected]_finetune@25',
22: 'note8_lat@[email protected]_finetune@25',
},
's7edge': {
88: 's7edge_lat@[email protected]_finetune@25',
58: 's7edge_lat@[email protected]_finetune@25',
41: 's7edge_lat@[email protected]_finetune@25',
29: 's7edge_lat@[email protected]_finetune@25',
},
'lg-g8': {
24: 'LG-G8_lat@[email protected]_finetune@25',
16: 'LG-G8_lat@[email protected]_finetune@25',
11: 'LG-G8_lat@[email protected]_finetune@25',
8: 'LG-G8_lat@[email protected]_finetune@25',
},
'1080ti': {
27: '1080ti_gpu64@[email protected]_finetune@25',
22: '1080ti_gpu64@[email protected]_finetune@25',
15: '1080ti_gpu64@[email protected]_finetune@25',
12: '1080ti_gpu64@[email protected]_finetune@25',
},
'v100': {
11: 'v100_gpu64@[email protected]_finetune@25',
9: 'v100_gpu64@[email protected]_finetune@25',
6: 'v100_gpu64@[email protected]_finetune@25',
5: 'v100_gpu64@[email protected]_finetune@25',
},
'tx2': {
96: 'tx2_gpu16@[email protected]_finetune@25',
80: 'tx2_gpu16@[email protected]_finetune@25',
47: 'tx2_gpu16@[email protected]_finetune@25',
35: 'tx2_gpu16@[email protected]_finetune@25',
},
'cpu': {
17: 'cpu_lat@[email protected]_finetune@25',
15: 'cpu_lat@[email protected]_finetune@25',
11: 'cpu_lat@[email protected]_finetune@25',
10: 'cpu_lat@[email protected]_finetune@25',
},
'flops': {
595: 'flops@[email protected]_finetune@75',
482: 'flops@[email protected]_finetune@75',
389: 'flops@[email protected]_finetune@75',
}
}
sub_efficiency_map = platform_efficiency_map[platform_name]
if not platform_name == 'flops':
print("Now, please specify a latency constraint for model specialization among", sorted(list(sub_efficiency_map.keys())), 'ms. (Please just input the number.) \n')
else:
print("Now, please specify a FLOPs constraint for model specialization among", sorted(list(sub_efficiency_map.keys())), 'MFLOPs. (Please just input the number.) \n')
while True:
efficiency_constraint = input()
if not efficiency_constraint.isdigit():
print('Sorry, please input an integer! \n')
continue
efficiency_constraint = int(efficiency_constraint)
if not efficiency_constraint in sub_efficiency_map.keys():
print('Sorry, please choose a value from: ', sorted(list(sub_efficiency_map.keys())), '.\n')
continue
return sub_efficiency_map[efficiency_constraint]
    if not ensemble:
        platform_name = select_platform_name()
        net_id = select_netid(platform_name)
        net, image_size = ofa_specialized(net_id=net_id, pretrained=True)
        validate(net, path, image_size, data_loader, batch_size, device)
    else:
        nets = []
        for i in range(2):
            print('Select model {}:'.format(i))
            platform_name = select_platform_name()
            net_id = select_netid(platform_name)
            net, image_size = ofa_specialized(net_id=net_id, pretrained=True)
            nets.append(net)
        ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
    return net_id
net_id = ['pixel1_lat@143ms_top1@80.1_finetune@75', 'pixel1_lat@132ms_top1@79.8_finetune@75',
          'pixel1_lat@79ms_top1@78.7_finetune@75', 'pixel1_lat@58ms_top1@76.9_finetune@75',
          'pixel1_lat@40ms_top1@74.9_finetune@25', 'pixel1_lat@28ms_top1@73.3_finetune@25',
          'pixel1_lat@20ms_top1@71.4_finetune@25', 'pixel2_lat@62ms_top1@75.8_finetune@25',
          'pixel2_lat@50ms_top1@74.7_finetune@25', 'pixel2_lat@35ms_top1@73.4_finetune@25',
          'pixel2_lat@25ms_top1@71.5_finetune@25', 'note10_lat@64ms_top1@80.2_finetune@75',
          'note10_lat@50ms_top1@79.7_finetune@75', 'note10_lat@41ms_top1@79.3_finetune@75',
          'note10_lat@16ms_top1@75.5_finetune@25', 'note10_lat@11ms_top1@73.6_finetune@25',
          'note10_lat@8ms_top1@71.4_finetune@25', 'note8_lat@65ms_top1@76.1_finetune@25',
          'note8_lat@49ms_top1@74.9_finetune@25', 'note8_lat@31ms_top1@72.8_finetune@25',
          'note8_lat@22ms_top1@70.4_finetune@25', 's7edge_lat@88ms_top1@76.3_finetune@25',
          's7edge_lat@58ms_top1@74.7_finetune@25', 's7edge_lat@41ms_top1@73.1_finetune@25',
          's7edge_lat@29ms_top1@70.5_finetune@25', 'LG-G8_lat@24ms_top1@76.4_finetune@25',
          'LG-G8_lat@16ms_top1@74.7_finetune@25', 'LG-G8_lat@11ms_top1@73.0_finetune@25',
          'LG-G8_lat@8ms_top1@71.1_finetune@25', '1080ti_gpu64@27ms_top1@76.4_finetune@25',
          '1080ti_gpu64@22ms_top1@75.3_finetune@25', '1080ti_gpu64@15ms_top1@73.8_finetune@25',
          '1080ti_gpu64@12ms_top1@72.6_finetune@25', 'v100_gpu64@11ms_top1@76.1_finetune@25',
          'v100_gpu64@9ms_top1@75.3_finetune@25', 'v100_gpu64@6ms_top1@73.0_finetune@25',
          'v100_gpu64@5ms_top1@71.6_finetune@25', 'tx2_gpu16@96ms_top1@75.8_finetune@25',
          'tx2_gpu16@80ms_top1@75.4_finetune@25', 'tx2_gpu16@47ms_top1@72.9_finetune@25',
          'tx2_gpu16@35ms_top1@70.3_finetune@25', 'cpu_lat@17ms_top1@75.7_finetune@25',
          'cpu_lat@15ms_top1@74.6_finetune@25', 'cpu_lat@11ms_top1@72.0_finetune@25',
          'cpu_lat@10ms_top1@71.1_finetune@25', 'flops@595M_top1@80.0_finetune@75',
          'flops@482M_top1@79.6_finetune@75', 'flops@389M_top1@79.1_finetune@75']
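
# Editor's sketch (not part of the original script): every specialized-net id
# embeds its ImageNet top-1 accuracy after the 'top1@' token, e.g.
# 'pixel1_lat@79ms_top1@78.7_finetune@75' -> 78.7. The evaluate_* functions
# below recover it by scanning for the '.' character; a hypothetical helper
# that does the same thing more directly:
def top1_from_id(nid):
    """Return the top-1 accuracy encoded in an OFA specialized-net id."""
    return float(nid.split('top1@')[1].split('_')[0])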
def evaluate_ofa_space(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
    # Sort the candidate ids by the top-1 accuracy embedded in each id string
    # (the four characters around the decimal point, e.g. '80.1').
    net_acc = []
    for nid in net_id:
        acc = ""
        for j in range(2, len(nid)):
            if nid[j] == '.':
                acc = nid[j - 2:j + 2]
        net_acc.append(acc)
    order = np.argsort(np.array(net_acc))
    new_net_id = copy.deepcopy(net_id)
    for i, sortid in enumerate(order):
        new_net_id[i] = net_id[sortid]
    print('new_net_id', new_net_id)
    # Exhaustive pairwise search: evaluate every unordered pair (j, i) with j < i.
    n = len(net_id)
    best_acc = 0
    space = []
    best_team = []
    for i in range(1, n):
        for j in range(i):
            team = [j, i]
            nets = []
            net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
            nets.append(net)
            net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
            nets.append(net)
            acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
            if acc > best_acc:
                best_acc = acc
                best_team = team
        print('space {} best_acc {}'.format(i + 1, best_acc))
        space.append(best_acc)
    print('space: {}'.format(space))
    return new_net_id[best_team[0]], new_net_id[best_team[1]]
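
# Editor's note: with the 48 ids above, the exhaustive search in
# evaluate_ofa_space runs ensemble_validate once per unordered pair,
# i.e. 48 * 47 / 2 = 1128 full validation passes, so a complete sweep
# is expensive on a single GPU.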
def evaluate_ofa_best_acc_team(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
    # Sort the candidate ids by the top-1 accuracy embedded in each id string.
    net_acc = []
    for nid in net_id:
        acc = ""
        for j in range(2, len(nid)):
            if nid[j] == '.':
                acc = nid[j - 2:j + 2]
        net_acc.append(acc)
    order = np.argsort(np.array(net_acc))
    new_net_id = copy.deepcopy(net_id)
    for i, sortid in enumerate(order):
        new_net_id[i] = net_id[sortid]
    print('new_net_id', new_net_id)
    n = len(net_id)
    best_acc = 0
    space = []
    best_team = []
    i = n - 1  # the single most accurate network
    for j in range(18, n):
        team = [j, i]
        nets = []
        net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
        nets.append(net)
        net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
        nets.append(net)
        acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
        print('net i: {} net j: {} acc: {}'.format(new_net_id[i], new_net_id[j], acc))
        if acc > best_acc:
            best_acc = acc
            best_team = team
        print('space {} best_acc {}'.format(i + 1, best_acc))
        space.append(best_acc)
    print('space: {}'.format(space))
    return new_net_id[best_team[0]], new_net_id[best_team[1]]
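
# Editor's note: this greedy variant fixes i = n - 1 (the most accurate single
# model) and pairs it only with the stronger half of the pool (j >= 18). When
# j reaches n - 1 the "ensemble" is the same model twice; that behavior is kept
# as in the original, but range(18, n - 1) may be what was intended.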
def evaluate_ofa_random_sample(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
    # Sort the candidate ids by the top-1 accuracy embedded in each id string.
    net_acc = []
    for nid in net_id:
        acc = ""
        for j in range(2, len(nid)):
            if nid[j] == '.':
                acc = nid[j - 2:j + 2]
        net_acc.append(acc)
    order = np.argsort(np.array(net_acc))
    new_net_id = copy.deepcopy(net_id)
    for i, sortid in enumerate(order):
        new_net_id[i] = net_id[sortid]
    print('new_net_id', new_net_id)
    n = len(net_id)
    best_acc = 0
    acc_list = []
    space = []
    best_team = []
    for _ in range(20):
        nets = []
        i = random.randint(0, n - 1)
        j = (i + random.randint(1, n - 1)) % n  # random offset guarantees j != i
        print('i: {} j: {}'.format(i, j))
        team = [j, i]
        net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
        nets.append(net)
        net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
        nets.append(net)
        acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
        print('net i: {} net j: {} acc: {}'.format(new_net_id[i], new_net_id[j], acc))
        acc_list.append(acc)
        if acc > best_acc:
            best_acc = acc
            best_team = team
    avg_acc = np.mean(acc_list)
    std_acc = np.std(acc_list, ddof=1)
    var_acc = np.var(acc_list)
    print('avg {} std {} var {}'.format(avg_acc, std_acc, var_acc))
    print('best_random_team {} best_acc {}'.format(best_team, best_acc))
    space.append(best_acc)
    print('space: {}'.format(space))
    return new_net_id[best_team[0]], new_net_id[best_team[1]]
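
# Hypothetical usage sketch (names such as build_imagenet_val_loader are
# placeholders, not part of this script or the OFA API):
#
# if __name__ == '__main__':
#     data_loader = build_imagenet_val_loader(batch_size=100)
#     a, b = evaluate_ofa_random_sample('/path/to/imagenet', data_loader)
#     print('best pair:', a, b)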
sort_net_id = ['tx2_gpu16@35ms_top1@70.3_finetune@25', 'note8_lat@22ms_top1@70.4_finetune@25', 's7edge_lat@29ms_top1@70.5_finetune@25',
               'cpu_lat@10ms_top1@71.1_finetune@25', 'LG-G8_lat@8ms_top1@71.1_finetune@25', 'pixel1_lat@20ms_top1@71.4_finetune@25',
               'note10_lat@8ms_top1@71.4_finetune@25', 'pixel2_lat@25ms_top1@71.5_finetune@25', 'v100_gpu64@5ms_top1@71.6_finetune@25',
               'cpu_lat@11ms_top1@72.0_finetune@25', '1080ti_gpu64@12ms_top1@72.6_finetune@25', 'note8_lat@31ms_top1@72.8_finetune@25',
               'tx2_gpu16@47ms_top1@72.9_finetune@25', 'v100_gpu64@6ms_top1@73.0_finetune@25', 'LG-G8_lat@11ms_top1@73.0_finetune@25',
               's7edge_lat@41ms_top1@73.1_finetune@25', 'pixel1_lat@28ms_top1@73.3_finetune@25', 'pixel2_lat@35ms_top1@73.4_finetune@25',
               'note10_lat@11ms_top1@73.6_finetune@25', '1080ti_gpu64@15ms_top1@73.8_finetune@25', 'cpu_lat@15ms_top1@74.6_finetune@25',
               's7edge_lat@58ms_top1@74.7_finetune@25', 'LG-G8_lat@16ms_top1@74.7_finetune@25', 'pixel2_lat@50ms_top1@74.7_finetune@25',
               'note8_lat@49ms_top1@74.9_finetune@25', 'pixel1_lat@40ms_top1@74.9_finetune@25', '1080ti_gpu64@22ms_top1@75.3_finetune@25',
               'v100_gpu64@9ms_top1@75.3_finetune@25', 'tx2_gpu16@80ms_top1@75.4_finetune@25', 'note10_lat@16ms_top1@75.5_finetune@25',
               'cpu_lat@17ms_top1@75.7_finetune@25', 'tx2_gpu16@96ms_top1@75.8_finetune@25', 'pixel2_lat@62ms_top1@75.8_finetune@25',
               'v100_gpu64@11ms_top1@76.1_finetune@25', 'note8_lat@65ms_top1@76.1_finetune@25', 's7edge_lat@88ms_top1@76.3_finetune@25',
               '1080ti_gpu64@27ms_top1@76.4_finetune@25', 'LG-G8_lat@24ms_top1@76.4_finetune@25', 'pixel1_lat@58ms_top1@76.9_finetune@75',
               'pixel1_lat@79ms_top1@78.7_finetune@75', 'flops@389M_top1@79.1_finetune@75', 'note10_lat@41ms_top1@79.3_finetune@75',
               'flops@482M_top1@79.6_finetune@75', 'note10_lat@50ms_top1@79.7_finetune@75', 'pixel1_lat@132ms_top1@79.8_finetune@75',
               'flops@595M_top1@80.0_finetune@75', 'pixel1_lat@143ms_top1@80.1_finetune@75', 'note10_lat@64ms_top1@80.2_finetune@75']
| 44.093333
| 177
| 0.607154
| 3,335
| 23,149
| 3.958921
| 0.098351
| 0.087101
| 0.015754
| 0.027342
| 0.819511
| 0.803227
| 0.789669
| 0.775733
| 0.678179
| 0.638113
| 0
| 0.111265
| 0.253791
| 23,149
| 524
| 178
| 44.177481
| 0.653062
| 0.012312
| 0
| 0.462555
| 0
| 0.004405
| 0.283952
| 0.229305
| 0
| 0
| 0
| 0
| 0.019824
| 1
| 0.028634
| false
| 0
| 0.028634
| 0
| 0.0837
| 0.050661
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
81642e5d95ded6a23159027c35921f4b03706531
| 136
| py
| Python
| 3_gabor/model/gabor_rf/maprf/invlink.py
| mackelab/IdentifyMechanisticModels_2020
| b93c90ec6156ae5f8afee6aaac7317373e9caf5e
| ["MIT"]
| 3
| 2020-10-23T02:53:11.000Z
| 2021-03-12T11:04:37.000Z
| 3_gabor/model/gabor_rf/maprf/invlink.py
| mackelab/IdentifyMechanisticModels_2020
| b93c90ec6156ae5f8afee6aaac7317373e9caf5e
| ["MIT"]
| null
| null
| null
| 3_gabor/model/gabor_rf/maprf/invlink.py
| mackelab/IdentifyMechanisticModels_2020
| b93c90ec6156ae5f8afee6aaac7317373e9caf5e
| ["MIT"]
| 1
| 2021-07-28T08:38:05.000Z
| 2021-07-28T08:38:05.000Z
import theano.tensor as tt

def explin(x):
    return tt.where(x >= 0, 1 + x, tt.exp(x))

def log_exp1p(x):
    return tt.log1p(tt.exp(x))
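
# Editor's note (added comment): explin is a piecewise inverse link, linear
# (1 + x) for x >= 0 and exp(x) below zero, so it is positive everywhere and
# matches value and slope at x = 0; log_exp1p is the softplus log(1 + exp(x)).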
| 12.363636
| 42
| 0.639706
| 28
| 136
| 3.071429
| 0.571429
| 0.162791
| 0.209302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036036
| 0.183824
| 136
| 10
| 43
| 13.6
| 0.738739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 5
81a7268b47b548089b30e84d12ff883fa4b80a6d
| 58
| py
| Python
| http_shadow/__init__.py
| abador/http-shadow
| 040935b0715f983714f38005f8ae97c255dae3e0
| ["MIT"]
| null
| null
| null
| http_shadow/__init__.py
| abador/http-shadow
| 040935b0715f983714f38005f8ae97c255dae3e0
| ["MIT"]
| null
| null
| null
| http_shadow/__init__.py
| abador/http-shadow
| 040935b0715f983714f38005f8ae97c255dae3e0
| ["MIT"]
| 2
| 2018-09-27T15:20:35.000Z
| 2020-10-02T08:38:31.000Z
from .backend import Backend
from .thread import HttpPool
| 19.333333
| 28
| 0.827586
| 8
| 58
| 6
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 2
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 5
81b43298bda18b704f77ed56a530bc20370af1bf
| 126
| py
| Python
| projects/PanopticFCN_cityscapes/panopticfcn/__init__.py
| fatihyildiz-cs/detectron2
| 700b1e6685ca95a60e27cb961f363a2ca7f30d3c
| ["Apache-2.0"]
| 166
| 2020-12-01T18:34:47.000Z
| 2021-03-27T04:20:15.000Z
| panopticfcn/__init__.py
| ywcmaike/PanopticFCN
| 9201b06d871df128547ce36b80f6caceb105465d
| ["Apache-2.0"]
| 28
| 2021-05-20T08:59:05.000Z
| 2022-03-18T13:17:35.000Z
| panopticfcn/__init__.py
| ywcmaike/PanopticFCN
| 9201b06d871df128547ce36b80f6caceb105465d
| ["Apache-2.0"]
| 33
| 2021-05-23T14:09:19.000Z
| 2022-03-30T14:27:55.000Z
from .config import add_panopticfcn_config
from .panoptic_seg import PanopticFCN
from .build_solver import build_lr_scheduler
| 31.5
| 44
| 0.880952
| 18
| 126
| 5.833333
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 126
| 3
| 45
| 42
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 5
81ca610dec0f1e1d5519b0914515a58eb09c500b
| 55
| py
| Python
| arkfbp/flow/__init__.py
| arkfbp/arkfbp-py
| 2444736462e8b4f09ae1ffe56779d9f515deb39f
| ["MIT"]
| 2
| 2020-09-11T09:26:43.000Z
| 2020-12-17T07:32:38.000Z
| arkfbp/flow/__init__.py
| arkfbp/arkfbp-py
| 2444736462e8b4f09ae1ffe56779d9f515deb39f
| ["MIT"]
| 4
| 2020-12-02T03:42:38.000Z
| 2020-12-14T07:56:06.000Z
| arkfbp/flow/__init__.py
| arkfbp/arkfbp-py
| 2444736462e8b4f09ae1ffe56779d9f515deb39f
| ["MIT"]
| 2
| 2020-12-08T01:11:54.000Z
| 2021-01-25T04:29:15.000Z
from .base import Flow
from .view_flow import ViewFlow
| 18.333333
| 31
| 0.818182
| 9
| 55
| 4.888889
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145455
| 55
| 2
| 32
| 27.5
| 0.93617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 5
81d3e9a297bdf6007923e315c9b06917f0723c4c
| 216
| py
| Python
| auxein/fitness/__init__.py
| auxein/auxein
| 5388cb572b65aecc282f915515c35dc3b987154c
| ["Apache-2.0"]
| 1
| 2019-05-08T14:53:27.000Z
| 2019-05-08T14:53:27.000Z
| auxein/fitness/__init__.py
| auxein/auxein
| 5388cb572b65aecc282f915515c35dc3b987154c
| ["Apache-2.0"]
| 2
| 2020-08-26T09:16:47.000Z
| 2020-10-30T16:47:03.000Z
| auxein/fitness/__init__.py
| auxein/auxein
| 5388cb572b65aecc282f915515c35dc3b987154c
| ["Apache-2.0"]
| null
| null
| null
# flake8: noqa
from .core import Fitness
from .kernel_based import GlobalMinimum
from .observation_based import ObservationBasedFitness, MultipleLinearRegression, SimplePolynomialRegression
| 43.2
| 134
| 0.87963
| 19
| 216
| 9.894737
| 0.684211
| 0.117021
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005051
| 0.083333
| 216
| 5
| 134
| 43.2
| 0.944444
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 5
c4a31e4a9faadb779ad5e3539b89e160045375e9
| 108
| py
| Python
| lmctl/project/mutate/base.py
| manojn97/lmctl
| 844925cb414722351efac90cb97f10c1185eef7a
| ["Apache-2.0"]
| 3
| 2021-07-19T09:46:01.000Z
| 2022-03-07T13:51:25.000Z
| lmctl/project/mutate/base.py
| manojn97/lmctl
| 844925cb414722351efac90cb97f10c1185eef7a
| ["Apache-2.0"]
| 43
| 2019-08-27T12:36:29.000Z
| 2020-08-27T14:50:40.000Z
| lmctl/project/mutate/base.py
| manojn97/lmctl
| 844925cb414722351efac90cb97f10c1185eef7a
| ["Apache-2.0"]
| 7
| 2020-09-22T20:32:17.000Z
| 2022-03-29T12:25:51.000Z
import abc

class Mutator(abc.ABC):
    def apply(self, original_content):
        return original_content
| 15.428571
| 38
| 0.712963
| 14
| 108
| 5.357143
| 0.714286
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212963
| 108
| 6
| 39
| 18
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 5