hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fbe96376f6c7e8ea5a7177b454718260bda00d58
| 112
|
py
|
Python
|
api/base/views/__init__.py
|
simpsonw/atmosphere
|
3a5203ef0b563de3a0e8c8c8715df88186532d7a
|
[
"BSD-3-Clause"
] | 197
|
2016-12-08T02:33:32.000Z
|
2022-03-23T14:27:47.000Z
|
api/base/views/__init__.py
|
simpsonw/atmosphere
|
3a5203ef0b563de3a0e8c8c8715df88186532d7a
|
[
"BSD-3-Clause"
] | 385
|
2017-01-03T22:51:46.000Z
|
2020-12-16T16:20:42.000Z
|
api/base/views/__init__.py
|
benlazarine/atmosphere
|
38fad8e4002e510e8b4294f2bb5bc75e8e1817fa
|
[
"BSD-3-Clause"
] | 50
|
2016-12-08T08:32:25.000Z
|
2021-12-10T00:21:39.000Z
|
from .version import VersionViewSet, DeployVersionViewSet
__all__ = ["VersionViewSet", "DeployVersionViewSet"]
| 28
| 57
| 0.821429
| 8
| 112
| 11
| 0.75
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 112
| 3
| 58
| 37.333333
| 0.862745
| 0
| 0
| 0
| 0
| 0
| 0.303571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
220d1b0d3abc6c0db8d6bd13778e65f09dbb4290
| 231
|
py
|
Python
|
src/notifications/tests.py
|
kullo/webconfig
|
470839ed77fda11634d4e14a89bb5e7894aa707d
|
[
"BSD-3-Clause"
] | null | null | null |
src/notifications/tests.py
|
kullo/webconfig
|
470839ed77fda11634d4e14a89bb5e7894aa707d
|
[
"BSD-3-Clause"
] | null | null | null |
src/notifications/tests.py
|
kullo/webconfig
|
470839ed77fda11634d4e14a89bb5e7894aa707d
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2015–2020 Kullo GmbH
#
# This source code is licensed under the 3-clause BSD license. See LICENSE.txt
# in the root directory of this source tree for details.
from django.test import TestCase
# Create your tests here.
| 28.875
| 78
| 0.774892
| 39
| 231
| 4.615385
| 0.897436
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04712
| 0.17316
| 231
| 7
| 79
| 33
| 0.890052
| 0.805195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
225c724f4896f9bddbbf401bf1a3929af43df247
| 94
|
py
|
Python
|
enthought/endo/docerror.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/endo/docerror.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/endo/docerror.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from etsdevtools.endo.docerror import *
| 23.5
| 39
| 0.840426
| 12
| 94
| 6.166667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117021
| 94
| 3
| 40
| 31.333333
| 0.891566
| 0.12766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
227c213f9c9f02d257d21830222edf425fe68721
| 781
|
py
|
Python
|
carl/envs/mario/mario_game.py
|
automl/genRL
|
b7382fec9006d7da768ad7252194c6c5f1b2bbd7
|
[
"Apache-2.0"
] | 27
|
2021-09-13T21:50:10.000Z
|
2022-03-30T15:35:38.000Z
|
carl/envs/mario/mario_game.py
|
automl/genRL
|
b7382fec9006d7da768ad7252194c6c5f1b2bbd7
|
[
"Apache-2.0"
] | 35
|
2021-09-15T07:20:29.000Z
|
2022-03-02T15:14:31.000Z
|
carl/envs/mario/mario_game.py
|
automl/genRL
|
b7382fec9006d7da768ad7252194c6c5f1b2bbd7
|
[
"Apache-2.0"
] | 2
|
2022-01-13T11:13:12.000Z
|
2022-03-14T06:11:13.000Z
|
from abc import ABC, abstractmethod
class MarioGame(ABC):
@abstractmethod
def getPort(self) -> int:
pass
@abstractmethod
def initGame(self):
pass
@abstractmethod
def stepGame(self, left: bool, right: bool, down: bool, speed: bool, jump: bool):
pass
@abstractmethod
def resetGame(self, level: str, timer: int, mario_state: int, inertia: float):
pass
@abstractmethod
def computeObservationRGB(self):
pass
@abstractmethod
def computeReward(self) -> float:
pass
@abstractmethod
def computeDone(self) -> bool:
pass
@abstractmethod
def getCompletionPercentage(self) -> float:
pass
@abstractmethod
def getFrameSize(self) -> int:
pass
| 19.525
| 85
| 0.62484
| 77
| 781
| 6.324675
| 0.415584
| 0.314168
| 0.344969
| 0.160164
| 0.123203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.288092
| 781
| 39
| 86
| 20.025641
| 0.875899
| 0
| 0
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.310345
| false
| 0.310345
| 0.034483
| 0
| 0.37931
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
97db509debe2b8503920910c68f09fde1efdca62
| 6,072
|
py
|
Python
|
colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.models.rgb.transfer_functions.\
panasonic_vlog` module.
"""
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (
log_encoding_VLog,
log_decoding_VLog,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'TestLogEncoding_VLog',
'TestLogDecoding_VLog',
]
class TestLogEncoding_VLog(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition unit tests methods.
"""
def test_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition.
"""
self.assertAlmostEqual(log_encoding_VLog(0.0), 0.125, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18), 0.423311448760136, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18, 12), 0.423311448760136, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18, 10, False), 0.421287228403675, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18, 10, False, False),
0.409009628526078,
places=7)
self.assertAlmostEqual(
log_encoding_VLog(1.0), 0.599117700158146, places=7)
def test_n_dimensional_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition n-dimensional arrays support.
"""
L_in = 0.18
V_out = log_encoding_VLog(L_in)
L_in = np.tile(L_in, 6)
V_out = np.tile(V_out, 6)
np.testing.assert_almost_equal(
log_encoding_VLog(L_in), V_out, decimal=7)
L_in = np.reshape(L_in, (2, 3))
V_out = np.reshape(V_out, (2, 3))
np.testing.assert_almost_equal(
log_encoding_VLog(L_in), V_out, decimal=7)
L_in = np.reshape(L_in, (2, 3, 1))
V_out = np.reshape(V_out, (2, 3, 1))
np.testing.assert_almost_equal(
log_encoding_VLog(L_in), V_out, decimal=7)
def test_domain_range_scale_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition domain and range scale support.
"""
L_in = 0.18
V_out = log_encoding_VLog(L_in)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_encoding_VLog(L_in * factor),
V_out * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition nan support.
"""
log_encoding_VLog(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLogDecoding_VLog(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition unit tests methods.
"""
def test_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition.
"""
self.assertAlmostEqual(log_decoding_VLog(0.125), 0.0, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.423311448760136), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.423311448760136, 12), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.421287228403675, 10, False), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.409009628526078, 10, False, False),
0.18,
places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.599117700158146), 1.0, places=7)
def test_n_dimensional_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition n-dimensional arrays support.
"""
V_out = 0.423311448760136
L_in = log_decoding_VLog(V_out)
V_out = np.tile(V_out, 6)
L_in = np.tile(L_in, 6)
np.testing.assert_almost_equal(
log_decoding_VLog(V_out), L_in, decimal=7)
V_out = np.reshape(V_out, (2, 3))
L_in = np.reshape(L_in, (2, 3))
np.testing.assert_almost_equal(
log_decoding_VLog(V_out), L_in, decimal=7)
V_out = np.reshape(V_out, (2, 3, 1))
L_in = np.reshape(L_in, (2, 3, 1))
np.testing.assert_almost_equal(
log_decoding_VLog(V_out), L_in, decimal=7)
def test_domain_range_scale_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition domain and range scale support.
"""
V_out = 0.423311448760136
L_in = log_decoding_VLog(V_out)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_decoding_VLog(V_out * factor),
L_in * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition nan support.
"""
log_decoding_VLog(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
if __name__ == '__main__':
unittest.main()
| 31.138462
| 78
| 0.640316
| 795
| 6,072
| 4.56478
| 0.134591
| 0.021494
| 0.095068
| 0.076054
| 0.825296
| 0.809314
| 0.779002
| 0.714522
| 0.67925
| 0.635161
| 0
| 0.072958
| 0.246047
| 6,072
| 194
| 79
| 31.298969
| 0.719747
| 0.208498
| 0
| 0.533333
| 0
| 0
| 0.05502
| 0.007891
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.07619
| false
| 0
| 0.038095
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
97f20ba0590c9d144a0c17683ec4a0a88ea21ea6
| 46
|
py
|
Python
|
ainnovation_dcim/workflow/__init__.py
|
ltxwanzl/ainnovation_dcim
|
b065489e2aa69729c0fd5142cf75d8caa7788b31
|
[
"Apache-2.0"
] | null | null | null |
ainnovation_dcim/workflow/__init__.py
|
ltxwanzl/ainnovation_dcim
|
b065489e2aa69729c0fd5142cf75d8caa7788b31
|
[
"Apache-2.0"
] | null | null | null |
ainnovation_dcim/workflow/__init__.py
|
ltxwanzl/ainnovation_dcim
|
b065489e2aa69729c0fd5142cf75d8caa7788b31
|
[
"Apache-2.0"
] | null | null | null |
# default_app_config = '.apps.WorkflowConfig'
| 23
| 45
| 0.782609
| 5
| 46
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.809524
| 0.934783
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3f28d1e2f76100adc00945a0759d254a0a1638b4
| 20
|
py
|
Python
|
RDS/circle3_central_services/research_manager/src/api/User/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 10
|
2020-06-24T08:22:24.000Z
|
2022-01-13T16:17:36.000Z
|
RDS/circle3_central_services/research_manager/src/api/User/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 78
|
2020-01-23T14:32:06.000Z
|
2022-03-07T14:11:16.000Z
|
RDS/circle3_central_services/research_manager/src/api/User/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 1
|
2020-06-24T08:33:48.000Z
|
2020-06-24T08:33:48.000Z
|
from .user import *
| 20
| 20
| 0.7
| 3
| 20
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
58c6e236acba1419c8019e6e9d0019c26bbbfc7f
| 3,977
|
py
|
Python
|
tests/bs3/test_block_fields.py
|
rpkilby/django-template-forms
|
5099d87d661a6a313df49fa484afd94f145e65bc
|
[
"BSD-3-Clause"
] | 1
|
2021-01-29T11:53:32.000Z
|
2021-01-29T11:53:32.000Z
|
tests/bs3/test_block_fields.py
|
rpkilby/django-template-forms
|
5099d87d661a6a313df49fa484afd94f145e65bc
|
[
"BSD-3-Clause"
] | 5
|
2017-11-29T11:01:56.000Z
|
2018-02-05T23:34:08.000Z
|
tests/bs3/test_block_fields.py
|
rpkilby/django-template-forms
|
5099d87d661a6a313df49fa484afd94f145e65bc
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
from django.test import TestCase
from template_forms import bs3
def startswith_a(value):
if value.startswith('a'):
return value
raise forms.ValidationError('Value must start with "a".')
def not_now(value):
if value:
raise forms.ValidationError('I cannot let you do that right now.')
class StandardFieldTests(TestCase):
class Form(bs3.BlockForm, forms.Form):
field = forms.CharField(required=False, validators=[startswith_a], help_text='Example text.', )
def get_attrs(self, bf):
return {
'name': bf.html_name,
'id': bf.auto_id,
'label': bf.label,
}
def test_field(self):
form = self.Form()
field = form['field']
template = """
<div class="form-group">
<label for="{id}" class="control-label">{label}:</label>
<input id="{id}" name="{name}" type="text" class="form-control">
<small class="help-block">Example text.</small>
</div>
"""
self.assertHTMLEqual(
template.format(**self.get_attrs(field)),
form.render_field(field, field.errors)
)
def test_field_bound(self):
form = self.Form({'field': 'a value'})
field = form['field']
template = """
<div class="form-group">
<label for="{id}" class="control-label">{label}:</label>
<input id="{id}" name="{name}" type="text" class="form-control" value="a value">
<small class="help-block">Example text.</small>
</div>
"""
self.assertHTMLEqual(
template.format(**self.get_attrs(field)),
form.render_field(field, field.errors)
)
def test_field_error(self):
form = self.Form({'field': 'error'})
field = form['field']
template = """
<div class="form-group has-error">
<label for="{id}" class="control-label">{label}:</label>
<input id="{id}" name="{name}" type="text" class="form-control has-error" value="error">
<small class="help-block">Value must start with "a".</small>
<small class="help-block">Example text.</small>
</div>
"""
self.assertHTMLEqual(
template.format(**self.get_attrs(field)),
form.render_field(field, field.errors)
)
class CheckboxFieldTests(TestCase):
class Form(bs3.BlockForm, forms.Form):
field = forms.BooleanField(required=False, validators=[not_now], help_text='Example text.')
def get_attrs(self, bf):
return {
'name': bf.html_name,
'id': bf.auto_id,
'label': bf.label,
}
def test_field(self):
form = self.Form()
field = form['field']
template = """
<div class="form-group">
<div class="checkbox">
<label>
<input id="{id}" name="{name}" type="checkbox"> {label}
</label>
</div>
<small class="help-block">Example text.</small>
</div>
"""
self.assertHTMLEqual(
template.format(**self.get_attrs(field)),
form.render_field(field, field.errors)
)
def test_field_error(self):
form = self.Form({'field': 'on'})
field = form['field']
template = """
<div class="form-group has-error">
<div class="checkbox">
<label>
<input id="{id}" name="{name}" type="checkbox" checked> {label}
</label>
</div>
<small class="help-block">I cannot let you do that right now.</small>
<small class="help-block">Example text.</small>
</div>
"""
self.assertHTMLEqual(
template.format(**self.get_attrs(field)),
form.render_field(field, field.errors)
)
| 30.829457
| 103
| 0.538094
| 439
| 3,977
| 4.8041
| 0.159453
| 0.051209
| 0.046468
| 0.063063
| 0.789948
| 0.779991
| 0.779991
| 0.758653
| 0.733049
| 0.68753
| 0
| 0.001089
| 0.307518
| 3,977
| 128
| 104
| 31.070313
| 0.764706
| 0
| 0
| 0.698113
| 0
| 0.028302
| 0.447183
| 0.078219
| 0
| 0
| 0
| 0
| 0.04717
| 1
| 0.084906
| false
| 0
| 0.028302
| 0.018868
| 0.179245
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
58cc1d434d0ca910c890148d1eb3817d02e4f5af
| 278
|
py
|
Python
|
210125/homework_re_3.py
|
shadowsmain/pyton-adv
|
9562097b2d34c1b286c13cf0930fa06079532a67
|
[
"MIT"
] | null | null | null |
210125/homework_re_3.py
|
shadowsmain/pyton-adv
|
9562097b2d34c1b286c13cf0930fa06079532a67
|
[
"MIT"
] | null | null | null |
210125/homework_re_3.py
|
shadowsmain/pyton-adv
|
9562097b2d34c1b286c13cf0930fa06079532a67
|
[
"MIT"
] | null | null | null |
import re
RE_NUMBER_VALIDATOR = re.compile(r'^\d+[.,]\d+$')
def number_is_valid(number):
return RE_NUMBER_VALIDATOR.match(number)
assert number_is_valid('1.32')
assert number_is_valid('1,32')
assert not number_is_valid('asdasd1234')
assert not number_is_valid('22,a44')
| 21.384615
| 49
| 0.755396
| 46
| 278
| 4.26087
| 0.413043
| 0.204082
| 0.331633
| 0.193878
| 0.44898
| 0.255102
| 0.255102
| 0
| 0
| 0
| 0
| 0.055777
| 0.097122
| 278
| 13
| 50
| 21.384615
| 0.7251
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.125
| false
| 0
| 0.125
| 0.125
| 0.375
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
450109704aaa9e57ec8952a08e13c1c362e0340c
| 21
|
py
|
Python
|
test.py
|
AlanFnz/profiles-rest-api
|
c606999f86235ed74fd98421bd02bc598d5a5463
|
[
"MIT"
] | null | null | null |
test.py
|
AlanFnz/profiles-rest-api
|
c606999f86235ed74fd98421bd02bc598d5a5463
|
[
"MIT"
] | null | null | null |
test.py
|
AlanFnz/profiles-rest-api
|
c606999f86235ed74fd98421bd02bc598d5a5463
|
[
"MIT"
] | null | null | null |
print('Test script')
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 21
| 1
| 21
| 21
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0.52381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
451235b4dc66c44ae6da7b46c7877673b9a0d562
| 8,175
|
py
|
Python
|
tests/test_compare.py
|
mys-lang/mys
|
070431fdedd7a6bf537f3a30583cd44f644cdbf4
|
[
"MIT"
] | 59
|
2021-01-06T14:21:40.000Z
|
2022-02-22T21:49:39.000Z
|
tests/test_compare.py
|
mys-lang/mys
|
070431fdedd7a6bf537f3a30583cd44f644cdbf4
|
[
"MIT"
] | 31
|
2021-01-05T00:32:36.000Z
|
2022-02-23T13:34:33.000Z
|
tests/test_compare.py
|
mys-lang/mys
|
070431fdedd7a6bf537f3a30583cd44f644cdbf4
|
[
"MIT"
] | 7
|
2021-01-03T11:53:03.000Z
|
2022-02-22T17:49:42.000Z
|
from .utils import TestCase
from .utils import build_and_test_module
from .utils import transpile_source
class Test(TestCase):
def test_compare(self):
with self.assertRaises(SystemExit):
build_and_test_module('compare')
def test_assert_between(self):
self.assert_transpile_raises(
'def foo():\n'
' a = 2\n'
' assert 1 <= a < 3\n',
' File "", line 3\n'
" assert 1 <= a < 3\n"
' ^\n'
"CompileError: can only compare two values\n")
def test_between(self):
self.assert_transpile_raises(
'def foo():\n'
' a = 2\n'
' print(1 <= a < 3)\n',
' File "", line 3\n'
" print(1 <= a < 3)\n"
' ^\n'
"CompileError: can only compare two values\n")
def test_i64_and_bool(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 1 == True',
' File "", line 2\n'
' return 1 == True\n'
' ^\n'
"CompileError: cannot convert 'i64/i32/i16/i8/u64/u32/u16/u8' "
"to 'bool'\n")
def test_mix_of_literals_and_known_types_1(self):
source = transpile_source('def foo():\n'
' k: u64 = 1\n'
' v: i64 = 1\n'
' if 0xffffffffffffffff == k:\n'
' pass\n'
' print(v)\n')
self.assert_in('18446744073709551615ull', source)
def test_wrong_types_1(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 1 == [""]\n',
' File "", line 2\n'
' return 1 == [""]\n'
' ^\n'
"CompileError: cannot convert 'i64/i32/i16/i8/u64/u32/u16/u8' to "
"'[string]'\n")
def test_wrong_types_2(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return [""] in 1\n',
' File "", line 2\n'
' return [""] in 1\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_3(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return [""] not in 1\n',
' File "", line 2\n'
' return [""] not in 1\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_4(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 2.0 == 1\n',
' File "", line 2\n'
' return 2.0 == 1\n'
' ^\n'
"CompileError: cannot convert 'f64/f32' to "
"'i64/i32/i16/i8/u64/u32/u16/u8'\n")
def test_wrong_types_5(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 1.0 == [""]\n',
' File "", line 2\n'
' return 1.0 == [""]\n'
' ^\n'
"CompileError: cannot convert 'f64/f32' to '[string]'\n")
def test_wrong_types_6(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return a in [""]\n',
' File "", line 2\n'
' return a in [""]\n'
' ^\n'
"CompileError: types 'i32' and 'string' differs\n")
def test_wrong_types_7(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return a in a\n',
' File "", line 2\n'
' return a in a\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_8(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return 1 in a\n',
' File "", line 2\n'
' return 1 in a\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_9(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return "" == a\n',
' File "", line 2\n'
' return "" == a\n'
' ^\n'
"CompileError: types 'string' and 'i32' differs\n")
def test_wrong_types_10(self):
self.assert_transpile_raises(
'def foo():\n'
' print(1 is None)\n',
' File "", line 2\n'
' print(1 is None)\n'
' ^\n'
"CompileError: 'i64' cannot be None\n")
def test_wrong_types_11(self):
self.assert_transpile_raises(
'def foo():\n'
' print(1.0 is None)\n',
' File "", line 2\n'
' print(1.0 is None)\n'
' ^\n'
"CompileError: 'f64' cannot be None\n")
def test_wrong_types_12(self):
self.assert_transpile_raises(
'def foo(a: i32):\n'
' print(a is None)\n',
' File "", line 2\n'
' print(a is None)\n'
' ^\n'
"CompileError: 'i32' cannot be None\n")
def test_wrong_types_13(self):
self.assert_transpile_raises(
'def foo(a: i32):\n'
' print(None is a)\n',
' File "", line 2\n'
' print(None is a)\n'
' ^\n'
"CompileError: 'i32' cannot be None\n")
def test_wrong_types_14(self):
self.assert_transpile_raises(
'def foo():\n'
' print(True is None)\n',
' File "", line 2\n'
' print(True is None)\n'
' ^\n'
"CompileError: 'bool' cannot be None\n")
def test_wrong_types_15(self):
self.assert_transpile_raises(
'def foo(a: bool):\n'
' print(None is a)\n',
' File "", line 2\n'
' print(None is a)\n'
' ^\n'
"CompileError: 'bool' cannot be None\n")
def test_wrong_types_16(self):
self.assert_transpile_raises(
'def foo(a: bool):\n'
' print(a is not 1)\n',
' File "", line 2\n'
' print(a is not 1)\n'
' ^\n'
"CompileError: cannot convert 'bool' to "
"'i64/i32/i16/i8/u64/u32/u16/u8'\n")
def test_wrong_types_17(self):
self.assert_transpile_raises(
'def foo():\n'
' print(None in [1, 5])\n',
' File "", line 2\n'
' print(None in [1, 5])\n'
' ^\n'
"CompileError: 'i64' cannot be None\n")
def test_wrong_types_18(self):
self.assert_transpile_raises(
'def foo():\n'
' print(None == "")\n',
' File "", line 2\n'
' print(None == "")\n'
' ^\n'
"CompileError: use 'is' and 'is not' to compare to None\n")
def test_wrong_types_20(self):
self.assert_transpile_raises(
'def foo():\n'
' if (1, ("", True)) == (1, ("", 1)):\n'
' pass\n',
# ToDo: Marker in wrong place.
' File "", line 2\n'
' if (1, ("", True)) == (1, ("", 1)):\n'
' ^\n'
"CompileError: cannot convert 'bool' to "
"'i64/i32/i16/i8/u64/u32/u16/u8'\n")
def test_bare_compare(self):
self.assert_transpile_raises(
'def foo():\n'
' 1 == 2\n',
' File "", line 2\n'
' 1 == 2\n'
' ^\n'
"CompileError: bare comparision\n")
| 34.493671
| 78
| 0.416147
| 916
| 8,175
| 3.574236
| 0.10262
| 0.053451
| 0.098351
| 0.161576
| 0.818571
| 0.810324
| 0.765119
| 0.707392
| 0.59102
| 0.564447
| 0
| 0.052423
| 0.444648
| 8,175
| 236
| 79
| 34.639831
| 0.668722
| 0.003425
| 0
| 0.565217
| 0
| 0.009662
| 0.449355
| 0.022591
| 0
| 0
| 0.00221
| 0.004237
| 0.135266
| 1
| 0.120773
| false
| 0.009662
| 0.014493
| 0
| 0.140097
| 0.101449
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
451838fb8b3acc8747399824b9d60c1c29d67e5c
| 3,416
|
py
|
Python
|
test_kmethods.py
|
quinlan-lab/kmertools
|
93e90919c26e2fc899a905b77748857404389e13
|
[
"MIT"
] | 1
|
2020-08-25T01:35:38.000Z
|
2020-08-25T01:35:38.000Z
|
test_kmethods.py
|
quinlan-lab/kmertools
|
93e90919c26e2fc899a905b77748857404389e13
|
[
"MIT"
] | null | null | null |
test_kmethods.py
|
quinlan-lab/kmertools
|
93e90919c26e2fc899a905b77748857404389e13
|
[
"MIT"
] | 1
|
2021-07-13T23:21:56.000Z
|
2021-07-13T23:21:56.000Z
|
from unittest import TestCase
from eskedit.kmethods import *
class Test(TestCase):
    """Smoke tests for the ``eskedit.kmethods`` helpers.

    NOTE(review): apart from the first two tests, every method below is a
    placeholder that only runs ``assertTrue(True)`` — they document the
    intended test surface but verify nothing yet.
    """

    def test_generate_kmers(self):
        # 4**i k-mers exist over the 4-letter DNA alphabet for length i.
        print('Testing %s' % 'test_generate_kmers')
        for i in range(1, 8):
            self.assertEqual(len(generate_kmers(i)), 4 ** i)

    def test_gen_random_sequence(self):
        # The generated sequence must have exactly the requested length.
        print('Testing %s' % 'test_gen_random_sequence')
        self.assertTrue(len(gen_random_sequence(7)) == 7)
        self.assertTrue(True)

    def test_ref_genome_as_string(self):
        # Placeholder — no assertions implemented yet.
        print('Testing %s' % 'test_ref_genome_as_string')
        self.assertTrue(True)

    def test_complement(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_complement")
        self.assertTrue(True)

    def test_get_complementary_sequence(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_get_complementary_sequence")
        self.assertTrue(True)

    def test_is_quality_snv(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_is_quality_snv")
        self.assertTrue(True)

    def test_is_quality_nonsingleton(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_is_quality_nonsingleton")
        self.assertTrue(True)

    def test_is_quality_singleton(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_is_quality_singleton")
        self.assertTrue(True)

    def test_is_singleton_snv(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_is_singleton_snv")
        self.assertTrue(True)

    def test_complete_sequence(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_complete_sequence")
        self.assertTrue(True)

    def test_kmer_search(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_kmer_search")
        self.assertTrue(True)

    def test_get_vcf_info_fields(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_get_vcf_info_fields")
        self.assertTrue(True)

    def test_get_kmer_count(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_get_kmer_count")
        self.assertTrue(True)

    def test_merge_transitions_ddc(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_merge_transitions_ddc")
        self.assertTrue(True)

    def test_merge_positions_dd(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_merge_positions_dd")
        self.assertTrue(True)

    def test_combine_df_kmer_indices(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_combine_df_kmer_indices")
        self.assertTrue(True)

    def test_clean_counts_df(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_clean_counts_df")
        self.assertTrue(True)

    def test_file_len(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_file_len")
        self.assertTrue(True)

    def test_get_counts_from_file(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_get_counts_from_file")
        self.assertTrue(True)

    def test_get_counts_dict(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_get_counts_dict")
        self.assertTrue(True)

    def test_count_regional_variants(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_count_regional_variants")
        self.assertTrue(True)

    def test_count_regional_af(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_count_regional_af")
        self.assertTrue(True)

    def test_query_bed_region(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_query_bed_region")
        self.assertTrue(True)

    def test_check_bed_regions(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_check_bed_regions")
        self.assertTrue(True)

    def test_is_dash(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_is_dash")
        self.assertTrue(True)

    def test_check_clinvar(self):
        # Placeholder — no assertions implemented yet.
        print("Testing %s" % "test_check_clinvar")
        self.assertTrue(True)
| 30.5
| 63
| 0.660422
| 438
| 3,416
| 4.805936
| 0.1621
| 0.086461
| 0.197625
| 0.209976
| 0.860808
| 0.587173
| 0.234204
| 0
| 0
| 0
| 0
| 0.00189
| 0.225703
| 3,416
| 111
| 64
| 30.774775
| 0.793951
| 0
| 0
| 0.301205
| 1
| 0
| 0.241803
| 0.115632
| 0
| 0
| 0
| 0
| 0.325301
| 1
| 0.313253
| false
| 0
| 0.024096
| 0
| 0.349398
| 0.313253
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
45266515995c4fa2eef2c47f14074dcb92d42fdb
| 687
|
py
|
Python
|
cracking_the_coding_interview_qs/8.7-8.8/get_all_permutations_of_string_test.py
|
angelusualle/algorithms
|
86286a49db2a755bc57330cb455bcbd8241ea6be
|
[
"Apache-2.0"
] | null | null | null |
cracking_the_coding_interview_qs/8.7-8.8/get_all_permutations_of_string_test.py
|
angelusualle/algorithms
|
86286a49db2a755bc57330cb455bcbd8241ea6be
|
[
"Apache-2.0"
] | null | null | null |
cracking_the_coding_interview_qs/8.7-8.8/get_all_permutations_of_string_test.py
|
angelusualle/algorithms
|
86286a49db2a755bc57330cb455bcbd8241ea6be
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from get_all_permutations_of_string import get_all_permutations_of_string, get_all_permutations_of_string_with_dups
class Test_Case_Get_All_Permutations_Of_String(unittest.TestCase):
    """Tests for the string-permutation solutions (CtCI 8.7 / 8.8)."""

    def test_get_all_permutations_of_string(self):
        # All 3! = 6 permutations of a string with distinct characters,
        # in the exact order the implementation emits them.
        self.assertListEqual(get_all_permutations_of_string("tea"), ['tea', 'eta', 'ate', 'tae', 'eat', 'aet'])

    def test_get_all_permutations_of_string_with_dups(self):
        # Duplicate characters must not produce duplicate permutations.
        self.assertListEqual(get_all_permutations_of_string_with_dups("aaa"), ['aaa'])
        # "teat" has 4!/2! = 12 distinct permutations.
        self.assertListEqual(get_all_permutations_of_string_with_dups("teat"), ['ttea', 'ttae', 'teta', 'teat', 'tate', 'taet', 'etta', 'etat', 'eatt', 'atte', 'atet', 'aett'])
| 76.333333
| 176
| 0.764192
| 96
| 687
| 4.96875
| 0.375
| 0.113208
| 0.339623
| 0.377358
| 0.72327
| 0.559748
| 0.559748
| 0.333333
| 0.222222
| 0
| 0
| 0
| 0.097525
| 687
| 9
| 176
| 76.333333
| 0.769355
| 0
| 0
| 0
| 0
| 0
| 0.114826
| 0
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
4544050d8eb06081ede4910d4218a872c580f338
| 35
|
py
|
Python
|
src/mock/webrepl.py
|
hipek/esp32-heating-control
|
4ee511118eb9a208e92298bdcf9c9242368c1806
|
[
"MIT"
] | null | null | null |
src/mock/webrepl.py
|
hipek/esp32-heating-control
|
4ee511118eb9a208e92298bdcf9c9242368c1806
|
[
"MIT"
] | null | null | null |
src/mock/webrepl.py
|
hipek/esp32-heating-control
|
4ee511118eb9a208e92298bdcf9c9242368c1806
|
[
"MIT"
] | null | null | null |
def start(password=None):
    """No-op stub standing in for the device WebREPL ``start`` call.

    Accepts the same optional *password* argument as the real
    implementation and deliberately does nothing, so code importing
    this mock runs outside the device.
    """
    return None
| 11.666667
| 25
| 0.685714
| 5
| 35
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 35
| 2
| 26
| 17.5
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 1
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
18a22f9ecd12b8cd2ba070dcb05f2e55ef3f8d64
| 86
|
py
|
Python
|
mne/datasets/kiloword/__init__.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 1,953
|
2015-01-17T20:33:46.000Z
|
2022-03-30T04:36:34.000Z
|
mne/datasets/kiloword/__init__.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 8,490
|
2015-01-01T13:04:18.000Z
|
2022-03-31T23:02:08.000Z
|
mne/datasets/kiloword/__init__.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 1,130
|
2015-01-08T22:39:27.000Z
|
2022-03-30T21:44:26.000Z
|
"""MNE visual_92_categories dataset."""
from .kiloword import data_path, get_version
| 21.5
| 44
| 0.790698
| 12
| 86
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025974
| 0.104651
| 86
| 3
| 45
| 28.666667
| 0.805195
| 0.383721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
18b20197ca16f4d94391b3685611593c8849a3d6
| 23,599
|
py
|
Python
|
cogs/management.py
|
xthecoolboy/MizaBOT
|
fb8a449bde29fdf1d32b5a597e48e6b3463dd867
|
[
"MIT"
] | null | null | null |
cogs/management.py
|
xthecoolboy/MizaBOT
|
fb8a449bde29fdf1d32b5a597e48e6b3463dd867
|
[
"MIT"
] | null | null | null |
cogs/management.py
|
xthecoolboy/MizaBOT
|
fb8a449bde29fdf1d32b5a597e48e6b3463dd867
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
import asyncio
from datetime import datetime, timedelta
import psutil
# Bot related commands
class Management(commands.Cog):
    """Bot related commands. Might require some mod powers in your server"""

    def __init__(self, bot):
        self.bot = bot
        self.color = 0xf49242  # embed color shared by every command in this cog

    # NOTE: the three functions below take no `self` on purpose — they run at
    # class-definition time to build discord.py command checks.
    def isAuthorized(): # for decorators
        async def predicate(ctx):
            return ctx.bot.isAuthorized(ctx)
        return commands.check(predicate)

    def isMod(): # for decorators
        async def predicate(ctx):
            return ctx.bot.isMod(ctx)
        return commands.check(predicate)

    def isAuthorizedSpecial(): # for decorators
        async def predicate(ctx):
            # debug server always passes; the "(You)" server additionally requires mod rights
            return (ctx.bot.isDebugServer(ctx) or (ctx.bot.isYouServer(ctx) and ctx.bot.isMod(ctx)))
        return commands.check(predicate)

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isMod()
    @commands.cooldown(1, 3, commands.BucketType.guild)
    async def setPrefix(self, ctx, prefix_string : str):
        """Set the prefix used on your server (Mod Only)"""
        if len(prefix_string) == 0: return
        # NOTE(review): `id` shadows the builtin throughout this cog — kept as-is.
        id = str(ctx.guild.id)
        if prefix_string == '$':
            # '$' is the default prefix: drop the per-guild override instead of storing it
            if id in self.bot.prefixes:
                self.bot.prefixes.pop(id)
                self.bot.savePending = True
        else:
            self.bot.prefixes[id] = prefix_string
            self.bot.savePending = True
        await ctx.send(embed=self.bot.buildEmbed(title=ctx.guild.name, description="Server Prefix changed to `{}`".format(prefix_string), color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['bug', 'report', 'bug_report'])
    @commands.cooldown(1, 10, commands.BucketType.guild)
    async def bugReport(self, ctx, *, terms : str):
        """Send a bug report (or your love confessions) to the author"""
        if len(terms) == 0:
            return
        # forward the report to the 'debug' channel with the reporter's identity
        await self.bot.send('debug', embed=self.bot.buildEmbed(title="Bug Report", description=terms, footer="{} ▫️ User ID: {}".format(ctx.author.name, ctx.author.id), thumbnail=ctx.author.avatar_url, color=self.color))
        await ctx.message.add_reaction('✅') # white check mark

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isAuthorized()
    async def joined(self, ctx, member : discord.Member):
        """Says when a member joined."""
        await ctx.send(embed=self.bot.buildEmbed(title=ctx.guild.name, description="Joined at {0.joined_at}".format(member), thumbnail=member.avatar_url, color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['source'])
    @commands.cooldown(1, 20, commands.BucketType.guild)
    async def github(self, ctx):
        """Post the bot.py file running right now"""
        await ctx.send(embed=self.bot.buildEmbed(title=self.bot.description.splitlines()[0], description="Code source at https://github.com/MizaGBF/MizaBOT", thumbnail=ctx.guild.me.avatar_url, color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isMod()
    async def delST(self, ctx):
        """Delete the ST setting of this server (Mod Only)"""
        id = str(ctx.guild.id)
        if id in self.bot.st:
            self.bot.st.pop(id)
            self.bot.savePending = True
            await ctx.message.add_reaction('✅') # white check mark
        else:
            await ctx.send(embed=self.bot.buildEmbed(title=ctx.guild.name, description="No ST set on this server\nI can't delete.", thumbnail=ctx.guild.icon_url, color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isMod()
    async def setST(self, ctx, st1 : int, st2 : int):
        """Set the two ST of this server (Mod Only)"""
        # both values are hours of the day, so they must lie in [0, 23]
        if st1 < 0 or st1 >= 24 or st2 < 0 or st2 >= 24:
            await ctx.send(embed=self.bot.buildEmbed(title="Error", description="Values must be between 0 and 23 included", color=self.color))
            return
        self.bot.st[str(ctx.message.author.guild.id)] = [st1, st2]
        self.bot.savePending = True
        await ctx.message.add_reaction('✅') # white check mark

    @commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['banspark'])
    @isMod()
    async def banRoll(self, ctx, member: discord.Member):
        """Ban an user from the roll ranking (Mod Only)
        To avoid retards with fake numbers
        The ban is across all servers"""
        id = str(member.id)
        if id not in self.bot.spark[1]:
            # spark[1] holds the global (cross-server) ban list of user ids
            self.bot.spark[1].append(id)
            self.bot.savePending = True
            await ctx.send(embed=self.bot.buildEmbed(title="{} ▫️ {}".format(member.display_name, id), description="Banned from all roll rankings by {}".format(ctx.author.display_name), thumbnail=member.avatar_url, color=self.color, footer=ctx.guild.name))
            # mirror the ban notice to the debug channel for audit purposes
            await self.bot.send('debug', embed=self.bot.buildEmbed(title="{} ▫️ {}".format(member.display_name, id), description="Banned from all roll rankings by {}".format(ctx.author.display_name), thumbnail=member.avatar_url, color=self.color, footer=ctx.guild.name))
        else:
            await ctx.send(embed=self.bot.buildEmbed(title=member.display_name, description="Already banned", thumbnail=member.avatar_url, color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isAuthorizedSpecial()
    async def setGW(self, ctx, id : int, element : str, day : int, month : int, year : int):
        """Set the GW date ((You) Mod only)"""
        try:
            # stop the task
            self.bot.cancelTask('check_buff')
            self.bot.gw['state'] = False
            self.bot.gw['id'] = id
            self.bot.gw['ranking'] = ""
            self.bot.gw['element'] = element.lower()
            # build the calendar
            self.bot.gw['dates'] = {}
            # Preliminaries start at 19:00 UTC on the given calendar day
            self.bot.gw['dates']["Preliminaries"] = datetime.utcnow().replace(year=year, month=month, day=day, hour=19, minute=0, second=0, microsecond=0)
            self.bot.gw['dates']["Interlude"] = self.bot.gw['dates']["Preliminaries"] + timedelta(days=1, seconds=43200) # +36h
            self.bot.gw['dates']["Day 1"] = self.bot.gw['dates']["Interlude"] + timedelta(days=1) # +24h
            self.bot.gw['dates']["Day 2"] = self.bot.gw['dates']["Day 1"] + timedelta(days=1) # +24h
            self.bot.gw['dates']["Day 3"] = self.bot.gw['dates']["Day 2"] + timedelta(days=1) # +24h
            self.bot.gw['dates']["Day 4"] = self.bot.gw['dates']["Day 3"] + timedelta(days=1) # +24h
            self.bot.gw['dates']["Day 5"] = self.bot.gw['dates']["Day 4"] + timedelta(days=1) # +24h
            self.bot.gw['dates']["End"] = self.bot.gw['dates']["Day 5"] + timedelta(seconds=61200) # +17h
            # build the buff list for (you)
            # each entry: [fire time, flag1, flag2, warning?, double?] consumed by checkGWBuff
            # NOTE(review): entries stop at Day 4 — Day 5 has no buff entries; confirm intended.
            self.bot.gw['buffs'] = []
            # Prelims all
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=7200-300), True, True, True, True]) # warning, double
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=7200), True, True, False, True])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=43200-300), True, False, True, False]) # warning
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=43200), True, False, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=43200+3600-300), False, True, True, False]) # warning
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=43200+3600), False, True, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(days=1, seconds=10800-300), True, True, True, False]) # warning
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(days=1, seconds=10800), True, True, False, False])
            # Interlude
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"]-timedelta(seconds=300), True, False, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"], True, False, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"]+timedelta(seconds=3600-300), False, True, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"]+timedelta(seconds=3600), False, True, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"]+timedelta(seconds=54000-300), True, True, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"]+timedelta(seconds=54000), True, True, False, False])
            # Day 1
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"]-timedelta(seconds=300), True, False, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"], True, False, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"]+timedelta(seconds=3600-300), False, True, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"]+timedelta(seconds=3600), False, True, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"]+timedelta(seconds=54000-300), True, True, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"]+timedelta(seconds=54000), True, True, False, False])
            # Day 2
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"]-timedelta(seconds=300), True, False, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"], True, False, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"]+timedelta(seconds=3600-300), False, True, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"]+timedelta(seconds=3600), False, True, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"]+timedelta(seconds=54000-300), True, True, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"]+timedelta(seconds=54000), True, True, False, False])
            # Day 3
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"]-timedelta(seconds=300), True, False, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"], True, False, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"]+timedelta(seconds=3600-300), False, True, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"]+timedelta(seconds=3600), False, True, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"]+timedelta(seconds=54000-300), True, True, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"]+timedelta(seconds=54000), True, True, False, False])
            # Day 4
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"]-timedelta(seconds=300), True, False, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"], True, False, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"]+timedelta(seconds=3600-300), False, True, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"]+timedelta(seconds=3600), False, True, False, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"]+timedelta(seconds=54000-300), True, True, True, False])
            self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"]+timedelta(seconds=54000), True, True, False, False])
            # set the gw state to true
            self.bot.gw['state'] = True
            self.bot.savePending = True
            self.bot.runTask('check_buff', self.bot.get_cog('GuildWar').checkGWBuff)
            await ctx.send(embed=self.bot.buildEmbed(title="{} Guild War Mode".format(self.bot.getEmote('gw')), description="Set to : **{:%m/%d %H:%M}**".format(self.bot.gw['dates']["Preliminaries"]), color=self.color))
        except Exception as e:
            # on any failure, roll back to a clean disabled state
            self.bot.cancelTask('check_buff')
            self.bot.gw['dates'] = {}
            self.bot.gw['buffs'] = []
            self.bot.gw['state'] = False
            self.bot.savePending = True
            await ctx.send(embed=self.bot.buildEmbed(title="Error", description="An unexpected error occured", footer=str(e), color=self.color))
            await self.bot.sendError('setgw', str(e))

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isAuthorizedSpecial()
    async def disableGW(self, ctx):
        """Disable the GW mode ((You) Mod only)
        It doesn't delete the GW settings"""
        self.bot.cancelTask('check_buff')
        self.bot.gw['state'] = False
        self.bot.savePending = True
        await ctx.message.add_reaction('✅') # white check mark

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isAuthorizedSpecial()
    async def enableGW(self, ctx):
        """Enable the GW mode ((You) Mod only)"""
        if self.bot.gw['state'] == True:
            await ctx.send(embed=self.bot.buildEmbed(title="{} Guild War Mode".format(self.bot.getEmote('gw')), description="Already enabled", color=self.color))
        elif len(self.bot.gw['dates']) == 8:
            # a full calendar (8 entries, as built by setGW) must exist before enabling
            self.bot.gw['state'] = True
            self.bot.runTask('check_buff', self.bot.get_cog('GuildWar').checkGWBuff)
            self.bot.savePending = True
            await ctx.message.add_reaction('✅') # white check mark
        else:
            await ctx.send(embed=self.bot.buildEmbed(title="Error", description="No Guild War available in my memory", color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['skipGW'])
    @isAuthorizedSpecial()
    async def skipGWBuff(self, ctx):
        """The bot will skip the next GW buff call ((You) Mod only)"""
        if not self.bot.gw['skip']:
            self.bot.gw['skip'] = True
            self.bot.savePending = True
            await ctx.message.add_reaction('✅') # white check mark
        else:
            await ctx.send(embed=self.bot.buildEmbed(title="Error", description="I'm already skipping the next set of buffs", color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isAuthorizedSpecial()
    async def cancelSkipGWBuff(self, ctx):
        """Cancel the GW buff call skipping ((You) Mod only)"""
        if self.bot.gw['skip']:
            self.bot.gw['skip'] = False
            self.bot.savePending = True
            await ctx.message.add_reaction('✅') # white check mark
        else:
            await ctx.send(embed=self.bot.buildEmbed(title="Error", description="No buff skip is currently set", color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isMod()
    async def toggleFullBot(self, ctx):
        """Allow or not this channel to use all commands (Mod only)
        It disables game/obnoxious commands outside of the whitelisted channels"""
        gid = str(ctx.guild.id)
        cid = ctx.channel.id
        if gid not in self.bot.permitted:
            self.bot.permitted[gid] = []
        # if the channel is already whitelisted, remove it and stop
        for i in range(0, len(self.bot.permitted[gid])):
            if self.bot.permitted[gid][i] == cid:
                self.bot.permitted[gid].pop(i)
                self.bot.savePending = True
                try:
                    # best-effort: show the updated whitelist
                    await self.bot.callCommand(ctx, 'seeBotPermission', 'Management')
                except Exception as e:
                    pass
                await ctx.message.add_reaction('➖')
                return
        # otherwise add the channel to the whitelist
        self.bot.permitted[gid].append(cid)
        self.bot.savePending = True
        await ctx.message.add_reaction('➕')
        try:
            await self.bot.callCommand(ctx, 'seeBotPermission', 'Management')
        except Exception as e:
            pass

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isMod()
    async def allowBotEverywhere(self, ctx):
        """Allow full bot access in every channel (Mod only)"""
        gid = str(ctx.guild.id)
        if gid in self.bot.permitted:
            # absence of a guild entry means "everywhere allowed"
            self.bot.permitted.pop(gid)
            self.bot.savePending = True
            await ctx.send(embed=self.bot.buildEmbed(title="Commands are now sauthorized everywhere", thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))
        else:
            await ctx.send(embed=self.bot.buildEmbed(title="Commands are already sauthorized everywhere", thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isMod()
    async def seeBotPermission(self, ctx):
        """See all channels permitted to use all commands (Mod only)"""
        gid = str(ctx.guild.id)
        if gid in self.bot.permitted:
            msg = ""
            for c in ctx.guild.channels:
                if c.id in self.bot.permitted[gid]:
                    try:
                        msg += c.name + "\n"
                    except:
                        pass
            await ctx.send(embed=self.bot.buildEmbed(title="Channels permitted to use all commands", description=msg, thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))
        else:
            await ctx.send(embed=self.bot.buildEmbed(title="Commands are sauthorized everywhere", thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isMod()
    async def toggleBroadcast(self, ctx):
        """Allow or not this channel to use all commands (Mod only)
        It disables game/obnoxious commands outside of the whitelisted channels"""
        # same toggle pattern as toggleFullBot, but on the broadcast list
        gid = str(ctx.guild.id)
        cid = ctx.channel.id
        if gid not in self.bot.news:
            self.bot.news[gid] = []
        for i in range(0, len(self.bot.news[gid])):
            if self.bot.news[gid][i] == cid:
                self.bot.news[gid].pop(i)
                self.bot.savePending = True
                try:
                    await self.bot.callCommand(ctx, 'seeBroadcast', 'Management')
                except Exception as e:
                    pass
                await ctx.message.add_reaction('➖')
                return
        self.bot.news[gid].append(cid)
        self.bot.savePending = True
        await ctx.message.add_reaction('➕')
        try:
            await self.bot.callCommand(ctx, 'seeBroadcast', 'Management')
        except Exception as e:
            pass

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isMod()
    async def seeBroadcast(self, ctx):
        """See all channels news to use all commands (Mod only)"""
        gid = str(ctx.guild.id)
        if gid in self.bot.news:
            msg = ""
            for c in ctx.guild.channels:
                if c.id in self.bot.news[gid]:
                    try:
                        msg += c.name + "\n"
                    except:
                        pass
            await ctx.send(embed=self.bot.buildEmbed(title="Channels receiving broadcasts", description=msg, thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))
        else:
            await ctx.send(embed=self.bot.buildEmbed(title="No channels set to receive broadcasts", thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['mizabot'])
    @commands.cooldown(1, 10, commands.BucketType.guild)
    async def status(self, ctx):
        """Post the bot status"""
        await ctx.send(embed=self.bot.buildEmbed(title="{} ▫️ v{}".format(ctx.guild.me.display_name, self.bot.botversion), description="**Uptime**▫️{}\n**CPU**▫️{}%\n**Memory**▫️{}MB\n**Save Pending**▫️{}\n**Errors since boot**▫️{}\n**Tasks Count**▫️{}\n**Servers Count**▫️{}\n**Pending Servers**▫️{}\n**Cogs Loaded**▫️{}/{}\n**Twitter**▫️{}".format(self.bot.uptime(), self.bot.process.cpu_percent(), self.bot.process.memory_full_info().uss >> 20, self.bot.savePending, self.bot.errn, len(asyncio.all_tasks()), len(self.bot.guilds), len(self.bot.newserver['pending']), len(self.bot.cogs), self.bot.cogn, (self.bot.twitter_api is not None)), thumbnail=ctx.guild.me.avatar_url, color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @commands.cooldown(1, 10, commands.BucketType.guild)
    async def changelog(self, ctx):
        """Post the bot changelog"""
        msg = ""
        for c in self.bot.botchangelog:
            msg += "▫️ {}\n".format(c)
        if msg != "":
            await ctx.send(embed=self.bot.buildEmbed(title="{} ▫️ v{}".format(ctx.guild.me.display_name, self.bot.botversion), description="**Changelog**\n" + msg, thumbnail=ctx.guild.me.avatar_url, color=self.color))

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isMod()
    async def asar(self, ctx, *, role_name : str = ""):
        """Add a role to the list of self-assignable roles (Mod Only)"""
        if role_name == "":
            await ctx.message.add_reaction('❎') # negative check mark
            return
        # resolve the role by case-insensitive name match
        role = None
        for r in ctx.guild.roles:
            if role_name.lower() == r.name.lower():
                role = r
                break
        if role is None:
            await ctx.message.add_reaction('❎') # negative check mark
            return
        id = str(ctx.guild.id)
        if id not in self.bot.assignablerole:
            self.bot.assignablerole[id] = {}
        if role.name.lower() in self.bot.assignablerole[id]:
            # already self-assignable
            await ctx.message.add_reaction('❎') # negative check mark
            return
        self.bot.assignablerole[id][role.name.lower()] = role.id
        self.bot.savePending = True
        await ctx.message.add_reaction('✅') # white check mark

    @commands.command(no_pm=True, cooldown_after_parsing=True)
    @isMod()
    async def rsar(self, ctx, *, role_name : str = ""):
        """Remove a role from the list of self-assignable roles (Mod Only)"""
        if role_name == "":
            await ctx.message.add_reaction('❎') # negative check mark
            return
        role = None
        for r in ctx.guild.roles:
            if role_name.lower() == r.name.lower():
                role = r
                break
        if role is None:
            await ctx.message.add_reaction('❎') # negative check mark
            return
        id = str(ctx.guild.id)
        if id not in self.bot.assignablerole:
            self.bot.assignablerole[id] = {}
        if role.name.lower() not in self.bot.assignablerole[id]:
            # nothing to remove
            await ctx.message.add_reaction('❎') # negative check mark
            return
        self.bot.assignablerole[id].pop(role.name.lower())
        self.bot.savePending = True
        await ctx.message.add_reaction('✅') # white check mark
| 59.593434
| 695
| 0.604221
| 3,132
| 23,599
| 4.530651
| 0.10696
| 0.107047
| 0.069767
| 0.056237
| 0.785765
| 0.751868
| 0.737209
| 0.715927
| 0.693446
| 0.653559
| 0
| 0.017574
| 0.238061
| 23,599
| 396
| 696
| 59.593434
| 0.768033
| 0.023179
| 0
| 0.521084
| 0
| 0.003012
| 0.105015
| 0.008209
| 0
| 0
| 0.000377
| 0
| 0
| 0
| null | null | 0.018072
| 0.01506
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
18c2d8a09f275424cdb15f2a256534524b3fa369
| 59
|
py
|
Python
|
glue/admin.py
|
Valchris/AngularJS-Django-Template
|
10c90087984dcd9e6d29380eb4380824e65bcecf
|
[
"MIT"
] | 1
|
2015-07-29T04:28:26.000Z
|
2015-07-29T04:28:26.000Z
|
glue/admin.py
|
Valchris/AngularJS-Django-Template
|
10c90087984dcd9e6d29380eb4380824e65bcecf
|
[
"MIT"
] | null | null | null |
glue/admin.py
|
Valchris/AngularJS-Django-Template
|
10c90087984dcd9e6d29380eb4380824e65bcecf
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from glue.models import *
| 19.666667
| 32
| 0.813559
| 9
| 59
| 5.333333
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 59
| 2
| 33
| 29.5
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
18df2c4ff7c83fc2ff4c4df2ad5efb199366fdfd
| 82
|
wsgi
|
Python
|
jpmorgan.wsgi
|
mrukhlov/jpmorgan
|
ef8f49054772c3f07161f4eaf7c119019ce600e2
|
[
"Apache-2.0"
] | null | null | null |
jpmorgan.wsgi
|
mrukhlov/jpmorgan
|
ef8f49054772c3f07161f4eaf7c119019ce600e2
|
[
"Apache-2.0"
] | null | null | null |
jpmorgan.wsgi
|
mrukhlov/jpmorgan
|
ef8f49054772c3f07161f4eaf7c119019ce600e2
|
[
"Apache-2.0"
] | null | null | null |
import sys
# Make the project package importable when the WSGI server runs this file
# from outside the project directory.
sys.path.insert(0, '/srv/jpmorgan')
# WSGI convention: servers look up the module-level name `application`.
from app import app as application
| 20.5
| 35
| 0.768293
| 14
| 82
| 4.5
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013889
| 0.121951
| 82
| 4
| 36
| 20.5
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0.156627
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7a0036f8904ef04950506fa3bb65a2bb9ab285ce
| 159
|
py
|
Python
|
great_expectations/dataset/__init__.py
|
avanderm/great_expectations
|
e4619a890700a492441a7ed3cbb9e5abb0953268
|
[
"Apache-2.0"
] | 1
|
2021-01-10T18:00:06.000Z
|
2021-01-10T18:00:06.000Z
|
great_expectations/dataset/__init__.py
|
avanderm/great_expectations
|
e4619a890700a492441a7ed3cbb9e5abb0953268
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/dataset/__init__.py
|
avanderm/great_expectations
|
e4619a890700a492441a7ed3cbb9e5abb0953268
|
[
"Apache-2.0"
] | null | null | null |
from .base import Dataset
from .pandas_dataset import MetaPandasDataset, PandasDataset
from .sqlalchemy_dataset import MetaSqlAlchemyDataset, SqlAlchemyDataset
| 53
| 72
| 0.886792
| 16
| 159
| 8.6875
| 0.625
| 0.18705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081761
| 159
| 3
| 72
| 53
| 0.952055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e14841f80a1f905b5006c26969f6f10bf64c27b5
| 107
|
py
|
Python
|
Codefights/arcade/intro/level-2/6.Make-Array-Consecutive-2/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codefights/arcade/intro/level-2/6.Make-Array-Consecutive-2/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codefights/arcade/intro/level-2/6.Make-Array-Consecutive-2/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python3
def makeArrayConsecutive2(statues):
    """Return how many statues must be added so heights form a consecutive run.

    The answer is the size of the full range [min, max] minus the statues
    already present. An empty sequence needs no additions (the original
    raised ValueError on max() of an empty sequence).
    """
    if not statues:  # guard: max()/min() would raise on empty input
        return 0
    return (max(statues) - min(statues) + 1) - len(statues)
| 21.4
| 59
| 0.700935
| 12
| 107
| 6.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.158879
| 107
| 4
| 60
| 26.75
| 0.8
| 0.065421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
e15ca6e7927c7dfaebe88887cd584126de16a196
| 45
|
py
|
Python
|
mypo/sampler/__init__.py
|
sonesuke/my-portfolio
|
4fd19fdee8a0aa13194cab0df53c83218c5664e3
|
[
"MIT"
] | 2
|
2021-03-14T00:14:25.000Z
|
2021-09-04T16:26:02.000Z
|
mypo/sampler/__init__.py
|
sonesuke/my-portfolio
|
4fd19fdee8a0aa13194cab0df53c83218c5664e3
|
[
"MIT"
] | 104
|
2021-02-21T08:11:11.000Z
|
2021-09-26T03:02:27.000Z
|
mypo/sampler/__init__.py
|
sonesuke/mypo
|
4fd19fdee8a0aa13194cab0df53c83218c5664e3
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from .sampler import Sampler
| 11.25
| 28
| 0.755556
| 6
| 45
| 5.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.177778
| 45
| 3
| 29
| 15
| 0.891892
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e16731d27b2926a6e8972922d05bae1f6e5d75bb
| 240
|
py
|
Python
|
ltc/base/admin.py
|
v0devil/jltom
|
b302a39a187b8e1154c6deda636a4db8b30bb40b
|
[
"MIT"
] | 4
|
2016-12-30T13:26:59.000Z
|
2017-04-26T12:07:36.000Z
|
ltc/base/admin.py
|
v0devil/jltom
|
b302a39a187b8e1154c6deda636a4db8b30bb40b
|
[
"MIT"
] | null | null | null |
ltc/base/admin.py
|
v0devil/jltom
|
b302a39a187b8e1154c6deda636a4db8b30bb40b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from ltc.base.models import Project, Test, Configuration
# Register your models here.
admin.site.register(Project)
admin.site.register(Test)
admin.site.register(Configuration)
| 24
| 56
| 0.808333
| 33
| 240
| 5.878788
| 0.454545
| 0.139175
| 0.262887
| 0.226804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 240
| 9
| 57
| 26.666667
| 0.902326
| 0.220833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e16db721abb59e634b09680c4bdf3796a1a5328b
| 6,115
|
py
|
Python
|
VoiceAssistant/Project_Basic_struct/speakListen.py
|
TheRealMilesLee/Python
|
d145c848a7ba76e8e523e4fe06e2a0add7e2fae1
|
[
"MIT"
] | 1
|
2018-12-05T11:04:47.000Z
|
2018-12-05T11:04:47.000Z
|
VoiceAssistant/Project_Basic_struct/speakListen.py
|
MarkHooland/Python
|
d145c848a7ba76e8e523e4fe06e2a0add7e2fae1
|
[
"MIT"
] | null | null | null |
VoiceAssistant/Project_Basic_struct/speakListen.py
|
MarkHooland/Python
|
d145c848a7ba76e8e523e4fe06e2a0add7e2fae1
|
[
"MIT"
] | null | null | null |
import time
from colorama import Fore, Back, Style
import speech_recognition as sr
import os
import pyttsx3
import datetime
from rich.progress import Progress
python = pyttsx3.init("sapi5") # name of the engine is set as Python
voices = python.getProperty("voices")
#print(voices)
python.setProperty("voice", voices[1].id)
python.setProperty("rate", 140)
def speak(text):
"""[This function would speak aloud some text provided as parameter]
Args:
text ([str]): [It is the speech to be spoken]
"""
python.say(text)
python.runAndWait()
def greet(g):
"""Uses the datetime library to generate current time and then greets accordingly.
Args:
g (str): To decide whether to say hello or good bye
"""
if g == "start" or g == "s":
h = datetime.datetime.now().hour
text = ''
if h > 12 and h < 17:
text = "Hello ! Good Afternoon "
elif h < 12 and h > 0:
text = "Hello! Good Morning "
elif h >= 17 :
text = "Hello! Good Evening "
text += " I am Python, How may i help you ?"
speak(text)
elif g == "quit" or g == "end" or g == "over" or g == "e":
text = 'Thank you!. Good Bye ! '
speak(text)
def hear():
"""[It will process the speech of user using Google_Speech_Recognizer(recognize_google)]
Returns:
[str]: [Speech of user as a string in English(en - IN)]
"""
r = sr.Recognizer()
"""Reconizer is a class which has lot of functions related to Speech i/p and o/p.
"""
r.pause_threshold = 1 # a pause of more than 1 second will stop the microphone temporarily
r.energy_threshold = 300 # python by default sets it to 300. It is the minimum input energy to be considered.
r.dynamic_energy_threshold = True # pyhton now can dynamically change the threshold energy
with sr.Microphone() as source:
# read the audio data from the default microphone
print(Fore.RED + "\nListening...")
#time.sleep(0.5)
speech = r.record(source, duration = 9) # option
#speech = r.listen(source)
# convert speech to text
try:
#print("Recognizing...")
recognizing()
speech = r.recognize_google(speech)
print(speech + "\n")
except Exception as exception:
print(exception)
return "None"
return speech
def recognizing():
"""Uses the Rich library to print a simulates version of "recognizing" by printing a loading bar.
"""
with Progress() as pr:
rec = pr.add_task("[red]Recognizing...", total = 100)
while not pr.finished:
pr.update(rec, advance = 1.0)
time.sleep(0.01)
def long_hear(duration_time = 60):
"""[It will process the speech of user using Google_Speech_Recognizer(recognize_google)]
the difference between the hear() and long_hear() is that - the
hear() - records users voice for 9 seconds
long_hear() - will record user's voice for the time specified by user. By default, it records for 60 seconds.
Returns:
[str]: [Speech of user as a string in English(en - IN)]
"""
r = sr.Recognizer()
"""Reconizer is a class which has lot of functions related to Speech i/p and o/p.
"""
r.pause_threshold = 1 # a pause of more than 1 second will stop the microphone temporarily
r.energy_threshold = 300 # python by default sets it to 300. It is the minimum input energy to be considered.
r.dynamic_energy_threshold = True # pyhton now can dynamically change the threshold energy
with sr.Microphone() as source:
# read the audio data from the default microphone
print(Fore.RED + "\nListening...")
#time.sleep(0.5)
speech = r.record(source, duration = duration_time) # option
#speech = r.listen(source)
# convert speech to text
try:
print(Fore.RED +"Recognizing...")
#recognizing()
speech = r.recognize_google(speech)
#print(speech + "\n")
except Exception as exception:
print(exception)
return "None"
return speech
def short_hear(duration_time = 5):
"""[It will process the speech of user using Google_Speech_Recognizer(recognize_google)]
the difference between the hear() and long_hear() is that - the
hear() - records users voice for 9 seconds
long_hear - will record user's voice for the time specified by user. By default, it records for 60 seconds.
Returns:
[str]: [Speech of user as a string in English(en - IN)]
"""
r = sr.Recognizer()
"""Reconizer is a class which has lot of functions related to Speech i/p and o/p.
"""
r.pause_threshold = 1 # a pause of more than 1 second will stop the microphone temporarily
r.energy_threshold = 300 # python by default sets it to 300. It is the minimum input energy to be considered.
r.dynamic_energy_threshold = True # pyhton now can dynamically change the threshold energy
with sr.Microphone() as source:
# read the audio data from the default microphone
print(Fore.RED + "\nListening...")
#time.sleep(0.5)
speech = r.record(source, duration = duration_time) # option
#speech = r.listen(source)
# convert speech to text
try:
print(Fore.RED +"Recognizing...")
#recognizing()
speech = r.recognize_google(speech)
#print(speech + "\n")
except Exception as exception:
print(exception)
return "None"
return speech
if __name__ == '__main__':
# print("Enter your name")
# name = hear()
# speak("Hello " + name)
# greet("s")
# greet("e")
pass
#hear()
#recognizing()
| 35.970588
| 118
| 0.594113
| 791
| 6,115
| 4.537295
| 0.242731
| 0.017554
| 0.020061
| 0.013374
| 0.710783
| 0.701867
| 0.701867
| 0.701867
| 0.701867
| 0.701867
| 0
| 0.01522
| 0.312347
| 6,115
| 169
| 119
| 36.183432
| 0.838288
| 0.409648
| 0
| 0.506024
| 0
| 0
| 0.089944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072289
| false
| 0.012048
| 0.084337
| 0
| 0.228916
| 0.108434
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e170d7139c31119e1eb476ae084b331e0ed0a722
| 100
|
py
|
Python
|
lambdata_doinalangille/__init__.py
|
doinalangille/lambdata_doinalangille
|
f57e1f9f87615bc9d1d1cfada530a542ea4551a1
|
[
"MIT"
] | null | null | null |
lambdata_doinalangille/__init__.py
|
doinalangille/lambdata_doinalangille
|
f57e1f9f87615bc9d1d1cfada530a542ea4551a1
|
[
"MIT"
] | 3
|
2020-03-24T18:29:36.000Z
|
2021-02-02T22:42:20.000Z
|
lambdata_doinalangille/__init__.py
|
doinalangille/lambdata_doinalangille
|
f57e1f9f87615bc9d1d1cfada530a542ea4551a1
|
[
"MIT"
] | 1
|
2020-02-11T23:05:07.000Z
|
2020-02-11T23:05:07.000Z
|
"""
lambdata - a collection of Data Science helper functions
"""
import pandas as pd
import sklearn
| 16.666667
| 56
| 0.76
| 14
| 100
| 5.428571
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17
| 100
| 6
| 57
| 16.666667
| 0.915663
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e1a571d93e123889de55adde281c383678e87c9f
| 392
|
py
|
Python
|
bitmovin_api_sdk/encoding/encodings/muxings/mp3/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/encodings/muxings/mp3/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/encodings/muxings/mp3/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.mp3_api import Mp3Api
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.information.information_api import InformationApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.mp3_muxing_list_query_params import Mp3MuxingListQueryParams
| 78.4
| 113
| 0.903061
| 53
| 392
| 6.396226
| 0.358491
| 0.141593
| 0.176991
| 0.212389
| 0.548673
| 0.548673
| 0.548673
| 0.548673
| 0.283186
| 0
| 0
| 0.021277
| 0.040816
| 392
| 4
| 114
| 98
| 0.880319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e1ad6793329afb999758e7af4b085f4de8b95b33
| 93
|
py
|
Python
|
Configuration/StandardSequences/python/L1Reco_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Configuration/StandardSequences/python/L1Reco_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Configuration/StandardSequences/python/L1Reco_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from L1Trigger.Configuration.L1TReco_cff import *
| 18.6
| 49
| 0.83871
| 12
| 93
| 6.416667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024096
| 0.107527
| 93
| 4
| 50
| 23.25
| 0.903614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e1c3efdf6d1bcb608ddb86a4384fd1aed1e4458f
| 117
|
py
|
Python
|
hello_world.py
|
michaeljamieson/Python01
|
96777e5252aaf58e5b424dd5b39186b395d9d859
|
[
"Apache-2.0"
] | null | null | null |
hello_world.py
|
michaeljamieson/Python01
|
96777e5252aaf58e5b424dd5b39186b395d9d859
|
[
"Apache-2.0"
] | null | null | null |
hello_world.py
|
michaeljamieson/Python01
|
96777e5252aaf58e5b424dd5b39186b395d9d859
|
[
"Apache-2.0"
] | null | null | null |
print ('hello world')
print ('hey i did something')
print ('what happens if i do a ;');
print ('apparently nothing')
| 23.4
| 35
| 0.683761
| 18
| 117
| 4.444444
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162393
| 117
| 4
| 36
| 29.25
| 0.816327
| 0
| 0
| 0
| 0
| 0
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
bee859bef7a37ff661836407bce80f2d3470ddd9
| 27,023
|
py
|
Python
|
goldstone/tenants/tests_cloud.py
|
Solinea/goldstone-server
|
91b078ca9fed1b33f48dc79f4af5c9d1817a1bc5
|
[
"Apache-2.0"
] | 14
|
2015-05-18T22:11:11.000Z
|
2020-08-14T06:50:09.000Z
|
goldstone/tenants/tests_cloud.py
|
lexjacobs/goldstone-server
|
91b078ca9fed1b33f48dc79f4af5c9d1817a1bc5
|
[
"Apache-2.0"
] | 568
|
2015-05-17T01:26:36.000Z
|
2021-06-10T20:36:47.000Z
|
goldstone/tenants/tests_cloud.py
|
lexjacobs/goldstone-server
|
91b078ca9fed1b33f48dc79f4af5c9d1817a1bc5
|
[
"Apache-2.0"
] | 22
|
2015-05-25T20:16:06.000Z
|
2021-08-08T20:25:24.000Z
|
"""Unit tests for /tenants/<id>/cloud endpoints."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from django.contrib.auth import get_user_model
from rest_framework.status import HTTP_200_OK, HTTP_401_UNAUTHORIZED, \
HTTP_400_BAD_REQUEST, HTTP_201_CREATED, HTTP_403_FORBIDDEN, \
HTTP_204_NO_CONTENT
from goldstone.test_utils import Setup, create_and_login, \
AUTHORIZATION_PAYLOAD, CONTENT_BAD_TOKEN, CONTENT_NO_CREDENTIALS, \
check_response_without_uuid, TEST_USER_1, CONTENT_PERMISSION_DENIED, \
BAD_TOKEN, BAD_UUID
from .models import Tenant, Cloud
from .tests_tenants import TENANTS_ID_URL
# HTTP response content.
CONTENT_MISSING_OS_USERNAME = '"username":["This field is required."]'
CONTENT_MISSING_OS_NAME = '"tenant_name":["This field is required."]'
CONTENT_MISSING_OS_PASSWORD = '"password":["This field is required."]'
CONTENT_MISSING_OS_URL = '"auth_url":["This field is required."]'
# URLs used by this module.
TENANTS_ID_CLOUD_URL = TENANTS_ID_URL + "cloud/"
TENANTS_ID_CLOUD_ID_URL = TENANTS_ID_CLOUD_URL + "%s/"
class TenantsIdCloud(Setup):
"""Listing the OpenStack clouds of a tenant, and creating a new OpenStack
cloud in a tenant."""
def test_not_logged_in(self):
"""Getting the tenant clouds, or creating a tenant cloud, without being
logged in."""
# Make a tenant.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
# Try the GET and POST without an authorization token.
responses = \
[self.client.get(TENANTS_ID_CLOUD_URL % tenant.uuid),
self.client.post(TENANTS_ID_CLOUD_URL % tenant.uuid,
json.dumps({"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url":
"http://d.com"}),
content_type="application/json")]
for response in responses:
self.assertContains(response,
CONTENT_NO_CREDENTIALS,
status_code=HTTP_401_UNAUTHORIZED)
# Try the GET and POST with a bad authorization token.
responses = [
self.client.get(
TENANTS_ID_CLOUD_URL % tenant.uuid,
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
self.client.post(
TENANTS_ID_CLOUD_URL % tenant.uuid,
json.dumps({"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN)]
for response in responses:
self.assertContains(response,
CONTENT_BAD_TOKEN,
status_code=HTTP_401_UNAUTHORIZED)
def test_no_access(self):
"""Getting the tenant clouds, or creating a tenant cloud, without being
a tenant admin."""
# Make a tenant.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
# Create a normal user who's a member of the tenant, but *not* a
# tenant_admin
token = create_and_login()
user = get_user_model().objects.get(username=TEST_USER_1[0])
user.tenant = tenant
user.save()
# Try the GET and POST.
responses = [
self.client.get(
TENANTS_ID_CLOUD_URL % tenant.uuid,
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.post(
TENANTS_ID_CLOUD_URL % tenant.uuid,
json.dumps({"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)]
for response in responses:
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
def test_no_tenant(self):
"""Getting a tenant, or creating a cloud in a tenant, when the tenant
doesn't exist."""
# Create a Django admin user.
token = create_and_login(is_superuser=True)
# Make a tenant, then delete it.
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
tenant.delete()
# Try the GET and POST to a tenant that doesn't exist.
responses = [
self.client.get(
TENANTS_ID_CLOUD_URL % tenant.uuid,
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.post(
TENANTS_ID_CLOUD_URL % tenant.uuid,
json.dumps({"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)]
for response in responses:
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
def test_get(self):
"""List a tenant's clouds."""
# The clouds in this test.
TENANT_CLOUD = [{"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"},
{"tenant_name": "ee",
"username": "ffffffffuuuuu",
"password": "gah",
"auth_url": "http://route66.com"},
{"tenant_name": "YUNO",
"username": "YOLO",
"password": "ZOMG",
"auth_url": "http://lol.com"},
]
OTHER_CLOUD = [{"tenant_name": "lisa",
"username": "sad lisa lisa",
"password": "on the road",
"auth_url": "http://tofindout.com"},
{"tenant_name": "left",
"username": "right",
"password": "center",
"auth_url": "http://down.com"},
]
EXPECTED_RESULT = TENANT_CLOUD
# Make a tenant
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
# Create clouds in this tenant.
for entry in TENANT_CLOUD:
Cloud.objects.create(tenant=tenant, **entry)
# Create clouds that don't belong to the tenant.
tenant_2 = Tenant.objects.create(name='boris',
owner='John',
owner_contact='206.867.5309')
for entry in OTHER_CLOUD:
entry["tenant"] = tenant_2
Cloud.objects.create(**entry)
# Log in as the tenant_admin.
token = create_and_login(tenant=tenant)
# Get the tenant's cloud list and check the response. We do a partial
# check of the uuid key. It must exist, and its value must be a string
# that's >= 32 characters.
response = self.client.get(
TENANTS_ID_CLOUD_URL % tenant.uuid,
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
# pylint: disable=E1101
self.assertEqual(response.status_code, HTTP_200_OK)
response_content = json.loads(response.content)
for entry in response_content["results"]:
self.assertIsInstance(entry["uuid"], basestring)
self.assertGreaterEqual(len(entry["uuid"]), 32)
del entry["uuid"]
self.assertItemsEqual(response_content["results"], EXPECTED_RESULT)
def test_post(self):
"""Create an OpenStack cloud in a tenant."""
# The clouds in this test.
TENANT_CLOUD = [{"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"},
{"tenant_name": "ee",
"username": "ffffffffuuuuu",
"password": "gah",
"auth_url": "http://route66.com"},
]
# Make a tenant
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
# Create a user who's the tenant_admin of this tenant, and log him in.
token = create_and_login(tenant=tenant)
# Create OpenStack clouds in this tenant, and check the results.
for entry in TENANT_CLOUD:
response = self.client.post(
TENANTS_ID_CLOUD_URL % tenant.uuid,
json.dumps(entry),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_201_CREATED, entry)
class TenantsIdCloudId(Setup):
"""Retrieve a particular OpenStack cloud from a tenant, update an OpenStack
cloud in a tenant, and delete an OpenStack cloud from a tenant."""
def test_not_logged_in(self):
"""The client is not logged in."""
# Make a tenant, and put one OpenStack cloud in it.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant_name="ee",
username="ffffffffuuuuu",
password="gah",
auth_url="http://route66.com",
tenant=tenant)
# Try GET, PUT, and DELETE without an authorization token.
responses = [self.client.get(TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid)),
self.client.put(TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
json.dumps({"username": "fool"}),
content_type="application/json"),
self.client.delete(TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid)),
]
for response in responses:
self.assertContains(response,
CONTENT_NO_CREDENTIALS,
status_code=HTTP_401_UNAUTHORIZED)
# Try again with a bad authorization token.
responses = [
self.client.get(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
self.client.put(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
json.dumps({"username": "fool"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
self.client.delete(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
]
for response in responses:
self.assertContains(response,
CONTENT_BAD_TOKEN,
status_code=HTTP_401_UNAUTHORIZED)
def test_no_access(self):
"""The client isn't an authorized user."""
# Make a tenant, put an OpenStack cloud in it.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant_name="ee",
username="ffffffffuuuuu",
password="gah",
auth_url="http://route66.com",
tenant=tenant)
# Create a normal user who's a member of the tenant, but *not* a
# tenant_admin
token = create_and_login()
user = get_user_model().objects.get(username=TEST_USER_1[0])
user.tenant = tenant
user.save()
# Try GET, PUT, and DELETE.
responses = [
self.client.get(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.put(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
json.dumps({"username": "fool"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.delete(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
]
for response in responses:
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
# Ensure the cloud wasn't deleted.
self.assertEqual(Cloud.objects.count(), 1)
def test_no_tenant(self):
"""Getting a cloud, updating a cloud, or deleting a cloud, when the
tenant doesn't exist."""
# Make a tenant, put an OpenStack cloud in it.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant_name="ee",
username="ffffffffuuuuu",
password="gah",
auth_url="http://route66.com",
tenant=tenant)
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# Try GET, PUT, and DELETE to a nonexistent tenant.
responses = [
self.client.get(
TENANTS_ID_CLOUD_ID_URL % (BAD_UUID, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.put(
TENANTS_ID_CLOUD_ID_URL % (BAD_UUID, cloud.uuid),
json.dumps({"password": "fool"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.delete(
TENANTS_ID_CLOUD_ID_URL % (BAD_UUID, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
]
for response in responses:
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
def test_get_no_cloud(self):
"""Get an OpenStack cloud that does not exist from a tenant."""
# Make a tenant.
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# Try GETing a nonexisten cloud from this tenant.
response = self.client.get(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, BAD_UUID),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
def test_get(self):
"""Get a specific OpenStack cloud from a tenant."""
# The clouds in this test.
TENANT_CLOUD = [{"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"},
{"tenant_name": "ee",
"username": "ffffffffuuuuu",
"password": "gah",
"auth_url": "http://route66.com"},
]
# Make a tenant.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# For every test cloud...
for entry in TENANT_CLOUD:
# Make it.
cloud = Cloud.objects.create(tenant=tenant, **entry)
# Try GETting it.
response = self.client.get(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_200_OK, entry)
def test_put_no_cloud(self):
"""Update a non-existent OpenStack cloud of a tenant."""
# Make a tenant.
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# Try PUTing to a nonexistent OpenStack cloud in this tenant.
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, BAD_UUID),
json.dumps({"tenant_name": "fool"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
def test_put_bad_fields(self):
"""Update an OpenStack cloud with missing fields, unrecognized fields,
or a field that's not allowed to be changed by the tenant_admin."""
# The cloud in this test.
TENANT_CLOUD = {"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"}
# Make a tenant, put an OpenStack cloud in it.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant=tenant, **TENANT_CLOUD)
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# Try PUTing to the cloud with no fields.
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
for content in [CONTENT_MISSING_OS_USERNAME, CONTENT_MISSING_OS_NAME,
CONTENT_MISSING_OS_PASSWORD, CONTENT_MISSING_OS_URL]:
self.assertContains(response,
content,
status_code=HTTP_400_BAD_REQUEST)
# Try PUTing to the cloud with no change, and with a change to an
# unrecognized field.
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
json.dumps(TENANT_CLOUD),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_200_OK, TENANT_CLOUD)
bad_field = TENANT_CLOUD.copy()
bad_field["forkintheroad"] = "Traci"
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
json.dumps(bad_field),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_200_OK, TENANT_CLOUD)
# Try PUTing to a cloud on a field that's not allowed to be changed.
# The response should be the same as the "unrecognized field" case.
bad_field = TENANT_CLOUD.copy()
bad_field["uuid"] = BAD_UUID
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
json.dumps(bad_field),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_200_OK, TENANT_CLOUD)
def test_put(self):
"""Update an Openstack cloud in a tenant."""
# The cloud in this test.
TENANT_CLOUD = {"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"}
EXPECTED_RESPONSE = TENANT_CLOUD.copy()
EXPECTED_RESPONSE["password"] = "fffffffffuuuuuuu"
# Make a tenant, put an OpenStack cloud in it.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant=tenant, **TENANT_CLOUD)
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# Try PUTing to the cloud.
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
json.dumps(EXPECTED_RESPONSE),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_200_OK, EXPECTED_RESPONSE)
# Double-check that the Cloud row was updated.
self.assertEqual(Cloud.objects.count(), 1)
self.assertEqual(Cloud.objects.all()[0].password,
EXPECTED_RESPONSE["password"])
def test_delete_not_member(self):
"""Try deleting a cloud of another tenant."""
# The clouds in this test.
TENANT_CLOUD = [{"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"},
{"tenant_name": "ee",
"username": "ffffffffuuuuu",
"password": "gah",
"auth_url": "http://route66.com"},
]
# Make two tenant+cloud pairs
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
tenant_2 = Tenant.objects.create(name='tenant_2',
owner='John',
owner_contact='206.867.5309')
Cloud.objects.create(tenant=tenant, **TENANT_CLOUD[0])
cloud_2 = Cloud.objects.create(tenant=tenant_2, **TENANT_CLOUD[1])
# Create a tenant_admin of the first tenant.
token = create_and_login(tenant=tenant)
# Try DELETE on the second (other) tenant's cloud.
response = self.client.delete(
TENANTS_ID_CLOUD_ID_URL %
(tenant_2.uuid, cloud_2.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
# Ensure we have the right number of OpenStack clouds.
self.assertEqual(Cloud.objects.count(), 2)
def test_delete(self):
"""Delete an OpenStack cloud from a tenant."""
# The clouds in this test.
TENANT_CLOUD = [{"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"},
{"tenant_name": "ee",
"username": "ffffffffuuuuu",
"password": "gah",
"auth_url": "http://route66.com"},
]
# Make a tenant with two clouds.
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant=tenant, **TENANT_CLOUD[0])
cloud_2 = Cloud.objects.create(tenant=tenant, **TENANT_CLOUD[1])
# Create a tenant_admin.
token = create_and_login(tenant=tenant)
# DELETE one cloud, check, DELETE the other cloud, check.
response = self.client.delete(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud_2.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
self.assertContains(response, '', status_code=HTTP_204_NO_CONTENT)
# Ensure we have the right number of Clouds.
self.assertEqual(Cloud.objects.count(), 1)
self.assertEqual(Cloud.objects.all()[0].tenant_name,
TENANT_CLOUD[0]["tenant_name"])
response = self.client.delete(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
self.assertContains(response, '', status_code=HTTP_204_NO_CONTENT)
# Ensure we have the right number of Clouds.
self.assertEqual(Cloud.objects.count(), 0)
| 41.130898
| 79
| 0.531029
| 2,773
| 27,023
| 4.967905
| 0.097367
| 0.045296
| 0.037602
| 0.075203
| 0.754936
| 0.733014
| 0.717843
| 0.67545
| 0.660279
| 0.651931
| 0
| 0.018865
| 0.376198
| 27,023
| 656
| 80
| 41.193598
| 0.798363
| 0.160826
| 0
| 0.751163
| 0
| 0
| 0.096241
| 0
| 0
| 0
| 0
| 0
| 0.05814
| 1
| 0.034884
| false
| 0.065116
| 0.013953
| 0
| 0.053488
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
bef17e7d48e784a47058c04dd63db533f851c334
| 83
|
py
|
Python
|
gawain/tests/test_numerics.py
|
henrywatkins/gawain
|
c556be20242249504fc0e04a5d3b7168a8369043
|
[
"MIT"
] | 1
|
2021-11-20T06:16:13.000Z
|
2021-11-20T06:16:13.000Z
|
gawain/tests/test_numerics.py
|
henrywatkins/gawain
|
c556be20242249504fc0e04a5d3b7168a8369043
|
[
"MIT"
] | null | null | null |
gawain/tests/test_numerics.py
|
henrywatkins/gawain
|
c556be20242249504fc0e04a5d3b7168a8369043
|
[
"MIT"
] | null | null | null |
import pytest
from gawain.numerics import Clock, SolutionVector, MHDSolutionVector
| 27.666667
| 68
| 0.86747
| 9
| 83
| 8
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096386
| 83
| 2
| 69
| 41.5
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8324f3cc8eee905419a3c23f1df365cd7b4e9b24
| 30
|
py
|
Python
|
symbench_athens_client/tests/models/test_pipelines.py
|
valtron/symbench-athens-client
|
11482f5d385217898cfc5cb6ff9d76b19a3f7356
|
[
"Apache-2.0"
] | null | null | null |
symbench_athens_client/tests/models/test_pipelines.py
|
valtron/symbench-athens-client
|
11482f5d385217898cfc5cb6ff9d76b19a3f7356
|
[
"Apache-2.0"
] | 43
|
2021-08-19T20:16:43.000Z
|
2022-03-30T18:54:42.000Z
|
symbench_athens_client/tests/models/test_pipelines.py
|
valtron/symbench-athens-client
|
11482f5d385217898cfc5cb6ff9d76b19a3f7356
|
[
"Apache-2.0"
] | 2
|
2021-11-09T06:07:06.000Z
|
2022-01-13T17:04:29.000Z
|
class TestPipelines:
pass
| 10
| 20
| 0.733333
| 3
| 30
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 30
| 2
| 21
| 15
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
834ad9cbfb170166d5394332db47b29bcb81eb73
| 163
|
py
|
Python
|
examples/plot_kde_2d.py
|
awesome-archive/arviz
|
e11432bc065d0b2280f27c901beb4ac9fc5c5dba
|
[
"Apache-2.0"
] | 2
|
2018-12-01T03:41:54.000Z
|
2018-12-01T22:04:59.000Z
|
examples/plot_kde_2d.py
|
awesome-archive/arviz
|
e11432bc065d0b2280f27c901beb4ac9fc5c5dba
|
[
"Apache-2.0"
] | null | null | null |
examples/plot_kde_2d.py
|
awesome-archive/arviz
|
e11432bc065d0b2280f27c901beb4ac9fc5c5dba
|
[
"Apache-2.0"
] | 1
|
2020-10-16T12:57:48.000Z
|
2020-10-16T12:57:48.000Z
|
"""
2d KDE
======
_thumb: .1, .8
"""
import arviz as az
import numpy as np
az.style.use('arviz-darkgrid')
az.plot_kde(np.random.rand(100), np.random.rand(100))
| 12.538462
| 53
| 0.650307
| 29
| 163
| 3.586207
| 0.62069
| 0.153846
| 0.230769
| 0.288462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 0.134969
| 163
| 12
| 54
| 13.583333
| 0.673759
| 0.177914
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
83609972eefc4a7ddcf363f8e89f7408af9885f3
| 115
|
py
|
Python
|
backend/backend/urls.py
|
lucasrafaldini/SpaceXLaunches
|
abcd3686677bc3e25903bc2ed1e084e00090ba33
|
[
"MIT"
] | 1
|
2021-09-21T17:51:11.000Z
|
2021-09-21T17:51:11.000Z
|
backend/backend/urls.py
|
lucasrafaldini/SpaceXLaunches
|
abcd3686677bc3e25903bc2ed1e084e00090ba33
|
[
"MIT"
] | 9
|
2020-06-06T00:42:57.000Z
|
2022-02-27T17:29:18.000Z
|
backend/backend/urls.py
|
lucasrafaldini/SpaceXLaunches
|
abcd3686677bc3e25903bc2ed1e084e00090ba33
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from django.urls import include
urlpatterns = [url("api/", include("api.urls"))]
| 23
| 48
| 0.73913
| 17
| 115
| 5
| 0.529412
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113043
| 115
| 4
| 49
| 28.75
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.104348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
55e92561b0ff7599f7ae6a6d6d8a27dbdab535a8
| 63
|
py
|
Python
|
reqinstall/commands/freeze/__init__.py
|
QualiSystems/reqinstall
|
57268b185428b31368cb7246a20a6c7548fb44dc
|
[
"MIT"
] | null | null | null |
reqinstall/commands/freeze/__init__.py
|
QualiSystems/reqinstall
|
57268b185428b31368cb7246a20a6c7548fb44dc
|
[
"MIT"
] | null | null | null |
reqinstall/commands/freeze/__init__.py
|
QualiSystems/reqinstall
|
57268b185428b31368cb7246a20a6c7548fb44dc
|
[
"MIT"
] | null | null | null |
from reqinstall.commands.freeze.freeze import PipFreezeCommand
| 31.5
| 62
| 0.888889
| 7
| 63
| 8
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063492
| 63
| 1
| 63
| 63
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
55f88475538cbd35f162e1da477042bc863348a2
| 67
|
py
|
Python
|
python/testData/inspections/PyMethodMayBeStaticInspection/documentedEmpty.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/inspections/PyMethodMayBeStaticInspection/documentedEmpty.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyMethodMayBeStaticInspection/documentedEmpty.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class A:
def foo(self):
"""Do something"""
pass
| 16.75
| 26
| 0.462687
| 8
| 67
| 3.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.38806
| 67
| 4
| 27
| 16.75
| 0.756098
| 0.179104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
364307863e32ccdc999357c039cf0832ac94b380
| 103
|
py
|
Python
|
rboard/board/__init__.py
|
joalon/rboard
|
cc743d8c08837c20bcc9382655e36bb79aecd524
|
[
"MIT"
] | null | null | null |
rboard/board/__init__.py
|
joalon/rboard
|
cc743d8c08837c20bcc9382655e36bb79aecd524
|
[
"MIT"
] | null | null | null |
rboard/board/__init__.py
|
joalon/rboard
|
cc743d8c08837c20bcc9382655e36bb79aecd524
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
blueprint = Blueprint('board', __name__)
from rboard.board import routes
| 17.166667
| 40
| 0.796117
| 13
| 103
| 6
| 0.615385
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135922
| 103
| 5
| 41
| 20.6
| 0.876404
| 0
| 0
| 0
| 0
| 0
| 0.048544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
364dc99efa920ea79a2d2856b41d0a11a59412b1
| 68
|
py
|
Python
|
social/actions.py
|
raccoongang/python-social-auth
|
81c0a542d158772bd3486d31834c10af5d5f08b0
|
[
"BSD-3-Clause"
] | 1,987
|
2015-01-01T16:12:45.000Z
|
2022-03-29T14:24:25.000Z
|
social/actions.py
|
raccoongang/python-social-auth
|
81c0a542d158772bd3486d31834c10af5d5f08b0
|
[
"BSD-3-Clause"
] | 731
|
2015-01-01T22:55:25.000Z
|
2022-03-10T15:07:51.000Z
|
virtual/lib/python3.6/site-packages/social/actions.py
|
dennismwaniki67/awards
|
80ed10541f5f751aee5f8285ab1ad54cfecba95f
|
[
"MIT"
] | 1,082
|
2015-01-01T16:27:26.000Z
|
2022-03-22T21:18:33.000Z
|
from social_core.actions import do_auth, do_complete, do_disconnect
| 34
| 67
| 0.867647
| 11
| 68
| 5
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 68
| 1
| 68
| 68
| 0.887097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
365f44e59be4486a64ab3380f2d229d1dcacfbe6
| 34
|
py
|
Python
|
SmartAPI/__init__.py
|
Kreastr/SmartAPI-HEILA
|
97dbe9e6e27267c60a4f94f60692d5f391e2ef7f
|
[
"BSD-2-Clause"
] | null | null | null |
SmartAPI/__init__.py
|
Kreastr/SmartAPI-HEILA
|
97dbe9e6e27267c60a4f94f60692d5f391e2ef7f
|
[
"BSD-2-Clause"
] | null | null | null |
SmartAPI/__init__.py
|
Kreastr/SmartAPI-HEILA
|
97dbe9e6e27267c60a4f94f60692d5f391e2ef7f
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
import site
import os
| 6.8
| 11
| 0.794118
| 6
| 34
| 4.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 34
| 4
| 12
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
36bbde81383cafa0b00f9d5defddc4acebc151af
| 4,478
|
py
|
Python
|
tests/enviroments_test/test_environments.py
|
DKE-Data/agrirouter-sdk-python
|
6d6b26606f7d424c62289af56da55acf412772fc
|
[
"Apache-2.0"
] | null | null | null |
tests/enviroments_test/test_environments.py
|
DKE-Data/agrirouter-sdk-python
|
6d6b26606f7d424c62289af56da55acf412772fc
|
[
"Apache-2.0"
] | null | null | null |
tests/enviroments_test/test_environments.py
|
DKE-Data/agrirouter-sdk-python
|
6d6b26606f7d424c62289af56da55acf412772fc
|
[
"Apache-2.0"
] | null | null | null |
"""Test agrirouter/environments/environments.py"""
from agrirouter.environments.environments import ProductionEnvironment as PE
from agrirouter.environments.environments import QAEnvironment as QAE
from tests.constants import application_id
class TestPE:
def test_get_base_url(self):
assert PE().get_base_url() == PE._ENV_BASE_URL
def test_get_api_prefix(self):
assert PE().get_api_prefix() == PE._API_PREFIX
def test_get_registration_service_url(self):
assert PE().get_registration_service_url() == PE._REGISTRATION_SERVICE_URL
def test_get_onboard_url(self):
onb_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard"
assert PE().get_onboard_url() == onb_url
def test_get_secured_onboard_url(self):
onb_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard/request"
assert PE().get_secured_onboard_url() == onb_url
def test_get_verify_onboard_request_url(self):
req_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard/verify"
assert PE().get_verify_onboard_request_url() == req_url
def test_get_revoke_url(self):
rev_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard/revoke"
assert PE().get_revoke_url() == rev_url
def test_get_agrirouter_login_url(self):
login_url = PE._ENV_BASE_URL + PE._AGRIROUTER_LOGIN_URL
assert PE().get_agrirouter_login_url() == login_url
def test_get_secured_onboarding_authorization_url(self):
redirect_uri = "www.my_redirect.com"
response_type = "response_type"
assert PE().get_secured_onboarding_authorization_url(
application_id, response_type, "state", redirect_uri
) == "https://goto.my-agrirouter.com/application/{application_id}/authorize?response_type={response_type}&state={state}".format( # noqa
application_id=application_id,
response_type=response_type,
state="state") + f"&redirect_uri={redirect_uri}"
def test_get_mqtt_server_url(self):
assert PE().get_mqtt_server_url(
"localhost", "5000"
) == PE._MQTT_URL_TEMPLATE.format(
host="localhost", port="5000"
)
def test_get_env_public_key(self):
assert PE().get_env_public_key() == PE.AR_PUBLIC_KEY
class TestQAE:
def test_get_base_url(self):
assert QAE().get_base_url() == QAE._ENV_BASE_URL
def test_get_api_prefix(self):
assert QAE().get_api_prefix() == QAE._API_PREFIX
def test_get_registration_service_url(self):
assert QAE().get_registration_service_url() == QAE._REGISTRATION_SERVICE_URL
def test_get_onboard_url(self):
onb_url = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard"
assert QAE().get_onboard_url() == onb_url
def test_get_secured_onboard_url(self):
onb_url = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard/request"
assert QAE().get_secured_onboard_url() == onb_url
def test_get_verify_onboard_request_url(self):
req_url = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard/verify"
assert QAE().get_verify_onboard_request_url() == req_url
def test_get_revoke_url(self):
rev_url = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard/revoke"
assert QAE().get_revoke_url() == rev_url
def test_get_agrirouter_login_url(self):
login_url = QAE._ENV_BASE_URL + QAE._AGRIROUTER_LOGIN_URL
assert QAE().get_agrirouter_login_url() == login_url
def test_get_secured_onboarding_authorization_url(self):
redirect_uri = "www.my_redirect.com"
response_type = "response_type"
assert QAE().get_secured_onboarding_authorization_url(
application_id, response_type, "state", redirect_uri
) == QAE._ENV_BASE_URL + QAE._SECURED_ONBOARDING_AUTHORIZATION_LINK_TEMPLATE.format(
application_id=application_id,
response_type=response_type,
state="state") + f"&redirect_uri={redirect_uri}"
def test_get_mqtt_server_url(self):
assert QAE().get_mqtt_server_url(
"localhost", "5000"
) == QAE._MQTT_URL_TEMPLATE.format(host="localhost", port="5000")
def test_get_env_public_key(self):
assert QAE().get_env_public_key() == QAE.AR_PUBLIC_KEY
| 42.245283
| 143
| 0.712818
| 587
| 4,478
| 4.957411
| 0.110733
| 0.052921
| 0.075601
| 0.062543
| 0.833333
| 0.76701
| 0.709966
| 0.691409
| 0.691409
| 0.691409
| 0
| 0.004397
| 0.18736
| 4,478
| 105
| 144
| 42.647619
| 0.795273
| 0.011166
| 0
| 0.455696
| 0
| 0.012658
| 0.116915
| 0.060606
| 0
| 0
| 0
| 0
| 0.278481
| 1
| 0.278481
| false
| 0
| 0.037975
| 0
| 0.341772
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
36edb3403cd5d8abc890118c85bd880dd47b74ce
| 198
|
py
|
Python
|
Python/03 - Strings/String Formatting.py
|
sohammanjrekar/HackerRank
|
1f5010133a1ac1e765e855a086053c97d9e958be
|
[
"MIT"
] | null | null | null |
Python/03 - Strings/String Formatting.py
|
sohammanjrekar/HackerRank
|
1f5010133a1ac1e765e855a086053c97d9e958be
|
[
"MIT"
] | null | null | null |
Python/03 - Strings/String Formatting.py
|
sohammanjrekar/HackerRank
|
1f5010133a1ac1e765e855a086053c97d9e958be
|
[
"MIT"
] | null | null | null |
def print_formatted(number):
# your code goes here
for i in range(1, number +1):
width = len(f"{number:b}")
print(f"{i:{width}} {i:{width}o} {i:{width}X} {i:{width}b}")
| 33
| 69
| 0.545455
| 33
| 198
| 3.242424
| 0.575758
| 0.224299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013423
| 0.247475
| 198
| 5
| 70
| 39.6
| 0.704698
| 0.09596
| 0
| 0
| 0
| 0.25
| 0.348837
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0.5
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
7fce7a6d8d2ce871e7042ada46c6923907411052
| 257
|
py
|
Python
|
api_python/app/models/classes_basicas/Empregado.py
|
uninassau-2020-2/proj-grupo5
|
ea7ca233004860a432f7301c72bde03fccce5f92
|
[
"CC0-1.0"
] | null | null | null |
api_python/app/models/classes_basicas/Empregado.py
|
uninassau-2020-2/proj-grupo5
|
ea7ca233004860a432f7301c72bde03fccce5f92
|
[
"CC0-1.0"
] | null | null | null |
api_python/app/models/classes_basicas/Empregado.py
|
uninassau-2020-2/proj-grupo5
|
ea7ca233004860a432f7301c72bde03fccce5f92
|
[
"CC0-1.0"
] | null | null | null |
from app.models.classes_basicas.Pessoa import Pessoa
class Empregado(Pessoa):
id_empregado = None
def getIdEmpregado(self):
return self.id_empregado
def setIdEmpregado(self, id_empregado):
self.id_empregado = id_empregado
| 23.363636
| 52
| 0.723735
| 31
| 257
| 5.806452
| 0.516129
| 0.305556
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210117
| 257
| 11
| 53
| 23.363636
| 0.8867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.142857
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
7fcf8c04bfee9a81a78aefffecb7fb16cd7ee1e5
| 19,028
|
py
|
Python
|
suiko/createDiff.py
|
nakamura196/tei
|
7aa62bc0603bbff03f96a3dbaad82d8feb6126ba
|
[
"Apache-2.0"
] | null | null | null |
suiko/createDiff.py
|
nakamura196/tei
|
7aa62bc0603bbff03f96a3dbaad82d8feb6126ba
|
[
"Apache-2.0"
] | null | null | null |
suiko/createDiff.py
|
nakamura196/tei
|
7aa62bc0603bbff03f96a3dbaad82d8feb6126ba
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Collate two witness texts into a TEI parallel-segmentation apparatus."""
import difflib
import xml.etree.ElementTree as ET
tmp_path = "data/template.xml"
# Clark-notation search prefix for elements in the TEI namespace.
prefix = ".//{http://www.tei-c.org/ns/1.0}"
tree = ET.parse(tmp_path)
# Register TEI as the default namespace so the output carries no ns0: prefixes.
ET.register_namespace('', "http://www.tei-c.org/ns/1.0")
root = tree.getroot()
body = root.find(prefix + "body")
# Paragraph that will receive the collated text and its <app> entries.
# (Removed the unused `xml` namespace-prefix variable; nothing referenced it.)
p = ET.SubElement(body, "{http://www.tei-c.org/ns/1.0}p")
a = "鍾伯敬先生批評水滸傳卷之二第二囘王教頭私走延安府九紋龍大閙史家村詩曰千古幽扄一旦開天罡地煞出泉臺自來無事多生事本爲禳災却惹灾社稷從今雲擾擾后戈到處閙垓垓高俅奸侫𨿽堪恨洪信從今釀禍胎話說當時住持眞人對洪太尉說道太尉不知此殿中當𥘉是祖老天師洞玄眞人傳下法符囑付道此殿内鎭鎻着三十六貟天罡星七十二座地煞星共是一百單八箇魔君在𥚃面上立石碑鑿着龍章鳳篆天符鎭住在此若還放他出世必惱下方生靈如今太尉走了怎生是好他日必爲後患洪太尉聽罷渾身冷汗捉顫不住急急收拾行李引了從人下山囘京眞人并道衆送官巳罷自囘宫内修整殿宇竪立石碑不在話下再說洪太尉在路上分付從人教把走妖魔一節休說與外人知道恐天子知而見責於路無話星夜囘至京師進得汴梁城聞人所說天師在東京禁院做了七晝夜好事普施符籙禳救災病瘟疫盡消軍民安泰天師辤朝乘鶴駕雲自囘龍虎山去了洪太尉次日早朝見了天子奏說天師乘鶴駕雲先到京師臣等驛站而來𦂯得到此仁宗准奏賞賜洪信復還舊職亦不在話下後來仁宗天子在位共四十二年晏駕無有太子傳位濮安懿王𠃔讓之子太祖皇帝的孫立帝號曰英宗在位四年傳位與太子神宗天子在位一十八年傳位與太子哲宗皇帝登基那時天下𥁞皆太平四方無事且說東京開封府汴梁宣武軍一箇浮浪破落戸子弟姓高排行第二自小不成家業只好刺鎗使棒最是踢得好脚氣毬京師人口順不呌高二却都呌他做高毬後來𤼵跡便將氣毬那字去了毛傍添作立人便攺作姓高名俅這人吹彈歌舞刺鎗使棒相撲頑耍頗能詩書詞賦若論仁義禮智信行忠良即是不㑹只在東京城𥚃城外㨍閑因㨍了一箇生鉄王員外兒子使錢每日三瓦兩舍風花雪月被他父親開封府裡告了一𥿄文狀府尹把高俅斷了四十春杖迭配出界𤼵放東京城裡人民不許容他在家宿食高俅無計柰何只得來淮西臨淮州投逩一箇開賭坊的閒漢柳大郞名喚柳世權他平生專好惜客養閒人招納四方干隔澇漢子高俅投托得柳大郞家一住三年後來哲宗天子因拜南郊感得風調雨順放𡩖恩大赦天下那高俅在臨淮州因得了赦宥罪犯思鄉要囘東京這柳世權却和東京城裏金梁橋下開生薬鋪的董將士是親叔寫了一封書札收拾些人事盤纏賫𤼵高俅囘東京投逩董將士家過活當時高俅辭了柳大郞背上包褁離了臨淮州迤𨓦囘到東京竟來金梁橋下董生薬家下了這封書董將士一見高俅看了柳世權來書自肚裡𪨆思道這高俅我家如何安着得他若是箇志誠老實的人可以容他在家出入也教孩兒們學些好他却是箇㨍閑的破落戸没信行的人亦且當𥘉有過犯來被開封府斷配出境的人倘或留住在家中倒惹得孩兒們不學好了待他不收留又撇不過柳大郞面皮當時只得權且歡天喜地相留在家宿歇每日酒食管待住了十數日董將士思量出一個緣由將出一套衣服寫了一封書簡對高俅說道小人家下螢火之光照人不亮恐後悞了足下我轉薦足下與小蘇學士處乆後也得箇出身足下意内如何高俅大喜謝了董將士董將士使箇人將着書簡引領高俅竟到學士府内門吏轉報小蘇學士出來見了高俅看罷來書知道高俅原是㨍閑浮浪的人心下想道我這里如何安着得他不如做箇人情薦他去駙馬王晉卿府𥚃做箇親隨人都喚他做小王都太尉便喜歡這樣的人當時囘了董將士書札留高俅在府裏住了一夜次日寫了一封書呈使箇幹人送高俅去那小王都太尉處這太尉廼是哲宗皇帝妹夫神宗皇帝的駙馬他喜愛風流人物正用這樣的人一見小蘇學士差人馳書送這高俅來拜見了便喜隨卽冩囘書收留高俅在府内做箇親隨自此高俅遭際在王都尉府中出入如同家人一般自古道口逺日疏日近日親忽一日小王都太尉慶誕生辰分付府中安排筵宴專靖小舅端王這端王乃是神宗天子第十一子哲宗皇帝御弟見掌東駕排號九大王是箇聰明俊俏人物這浮浪子弟門風早閒之事無一般不曉無一般不㑹更無盤不愛更兼琴棋書𦘕儒釋道教無所不通踢毬打彈品竹調𢇁吹彈歌舞自不必說當日王都尉府中准備筵宴水陸俱偹但見香焚寳鼎花挿金瓶仙音院競奏新聲教坊司頻逞妙藝水晶壼内盡都是紫府瓊浆琥珀盃中滿泛着瑶池玉液玳瑁盤堆仙桃異果玻瓈碗供熊掌駝蹄鱗鱗膾切銀絲細細茶烹玉蕊紅裙舞女盡隨着象板鸞簘翠袖歌姬簇捧定龍笙鳳管兩行珠翠立堦前一𣲖笙歌臨座上且說這端王來王都尉府中赴宴都尉設席請端王居中坐定太尉對席相陪酒進數盃食供兩套那端王起身浄手偶來書院𥚃少歇猛見書案上一對兒羊脂玉碾成的鎭紙獅子極是做得好細巧玲瓏端王拿起獅子不落手看了一囘道好王都尉見端王心愛便說道再有一個玉龍筆架也是這個匠人一手做的𨚫不在手頭明日取來一併相送端王大喜道深謝厚意想那筆架必是更妙王都尉道明日取出來送至宫中便見端王又謝了兩個依舊入席飲宴至暮盡醉方散端王相别囘宫去了次日小王都太尉取出玉龍筆架和兩個鎭𥿄玉獅子着一個小金盒子盛了用黄羅包袱包了冩了一封書呈却使高俅送去高俅領了王都尉鈞㫖將着兩般玉玩器懐中揣了書呈逕投端王宫中來把門官吏轉報與院公沒多時院公出來問你是那個府𥚃來的人高俅施禮罷荅道小人是王駙馬府中特送玉玩器來進大王院公道殿下在庭心衷和小黄門踢氣毬你自過去高俅道相煩引進院公引到庭前高俅看時見端王頭戴軟紗唐巾身穿紫綉龍袍腰繋文武雙穗絛把綉龍袍前襟拽札起揣在縧兒邊足穿一雙
嵌金線飛鳳靴三五個小黄門相伴着蹴氣毬高俅不敢過去衝撞立在從人背後伺候也是高俅合當𤼵跡時運到來那箇氣毬騰地起來端王接個不着向人叢𥚃直滚到高俅身邊那髙俅見氣毬來也是一時的胆量使个鸳鸯拐踢还端王端王見了大喜便問道你是甚人高俅向前跪下道小的是王都尉親隨受東人使令賫送兩般玉玩器來進獻大王有書呈在此拜上端王聽罷笑道姐夫直如此掛心高俅取出書呈進上端王開盒子看了玩器都遍與堂候官收了去那端王且不理玉玩器下落𨚫先問高俅道你原來㑹踢氣毬你喚做甚麽高俅叉手跪覆道小的呌做高俅胡踢得幾脚端王道好你便下塲來踢一囘要高俅拜道小的是何等樣人敢與恩王下脚端王道這是齊雲社名爲天下圓但踢何傷高俅再拜道怎敢三囘五次告辭端王定要他踢高俅只得叩頭謝罪觧膝下場𦂯踢幾脚端王喝采高俅只得把平生本事都使出來奉呈端王那身分模樣這氣毬一似鰾膠粘在身上的端王大喜那里肯放高俅囘府去就留在宫中過了一夜次日排個筵㑹專請王都尉宫中赴宴却說王都尉當日晚不見高俅囘來正疑思間只見次日門子報道九大王差人來傳令旨請太尉到宫中赴宴王都尉出來見了幹人看了令㫖隨卽上馬來到九大王府前下馬入宫來見了端王端王大喜稱謝兩盤玉玩器入席飮宴間端王說道這高毬踢得兩脚好氣毬孤欲索此人做親隨如何王都尉荅道殿下既用此人就留在宫中伏侍殿下端王歡喜執盃相謝二人又閒話一囘至晚席散王都尉自囘駙馬府去不在話下且說端王自從索得高俅做伴之後就留在宫中宿食高俅自此遭除端王每日跟着寸步不離却在宫中未及兩箇月哲宗皇帝宴駕無有太子文武百官商議冊立端王爲天子立帝號曰徽宗便是玉清教主微妙道君皇帝登基之後一向無事忽一日與高俅道朕欲要擡舉你但有邊功方可陞遷先教樞密院與你入名只是做隨駕遷轉的人後來没半年之間直擡舉高俅做到殿帥府太尉□事且說高俅得□□□師府太尉選揀吉日良辰去殿師府裏到任所有一應合属公吏衙將都軍禁軍馬步人等盡來𠫵拜各呈手本開報花名高殿帥一一㸃過於内只欠一名八十萬禁軍教頭王進半月之前巳有病狀在官患病未痊不曽入衙門管事高殿帥大怒喝道胡說既有手本呈來𨚫不是那厮抗拒官府搪塞下官此人卽係推病在家快與我拿來隨卽差人到王進家來捉拿王進且說這王進却無妻子止有一箇老母年巳六旬之上牌頭與教頭王進說道如今高殿帥新來上任㸃你不着軍正司禀說染患在家見有病患狀在官高殿帥焦燥那里肻信定要拿你只道是教頭詐病在家教頭只得去走一遭若還不去定連累衆人小人也有罪犯王進聽罷只得捱着病來進得殿帥府前叅見太尉拜了四拜躬身唱箇喏起來立在一邊高俅道你那厮便是都軍教頭王昇的兒子王進禀道小人便是高俅喝道這厮你爺是街市上使花棒賣藥的你省的甚麽武藝前官没眼叅你做個教頭如何敢小覷我不伏俺㸃視你托誰的勢要推病在家安閒快樂王進告道小人怎敢其實患病未痊高太尉罵道賊配軍你既害病如何來得王進又告道太尉呼喚安敢不來高殿帥大怒喝令左右教拿下王進加力與我打這厮衆多牙將都是和王進好的只得與軍正司同告道今日是太尉上任好日頭權免此人這一次高太尉喝道你這賊配軍且看衆將之面饒恕你今日之犯明日却和你理㑹王進謝罪罷起來擡頭看了認得是高俅出得衙門歎口氣道俺的性命今畨難保了俺道是甚麽高殿帥却原來正是東京㨍閑的圓社高二比先時曾學使棒被我父親一棒打翻三四箇月將息不起有此之仇他今日𤼵跡得做殿帥府太尉正待要報仇我不想正属他管自古道不怕官只怕管俺如何與他爭得怎生奈何是好囘到家中悶悶不巳對娘說知此事母子二人抱頭而哭娘道我兒三十六着走爲上着只恐没處走王進道母親說得是兒子𪨆思也是這般計較只有延安府老种經畧相公鎭守邊庭他手下軍官多有曾到京師愛兒子使鎗棒的極多何不逃去投奔他們那里是用人去處足可安身立命兒娘兩個商議定了其母又道我兒和你要私走只恐門前兩個牌軍是殿帥府撥來伏侍你的他若得知須走不脫王進道不妨母親放心兒子自有道理措置他當下日晚未昏王進先呌張牌入來分付道你先吃了些晚飯我使你一處去幹事張牌道教頭使小人那里去王進道我因前日病患許下酸棗門外岳廟𥚃香願明日早要去燒炷頭香你可今晚先去分付廟祝教他來日早開些廟門等我來燒炷頭香就要三牲獻劉李王你就廟𥚃歇了等我張牌應先吃了晚飯呌了安置望廟中去了當夜子母二人收拾了行李衣服細軟銀兩做一担兒打挾了又装兩個料袋袱駝拴在馬上等到五更天色未明王進呌起李牌分付道你與我將這些銀兩去岳廟𥚃和張牌買個三牲煮熟在那里等候我買些紙燭隨後便來李牌將銀子望廟中去了王進自去備了馬牽出後槽將料袋袱駝搭上把索子拴縛牢了牽在後門外扶娘上了馬家中粗重都弃了鎻上前後門挑了担兒跟在馬後趂五更天色未明乘勢出了西華門取路望延安府來且說兩個牌軍買了福物煮熟在廟等到巳牌也不見來李牌心焦走囘到家中𪨆時見鎻了門兩頭無路𪨆了半日並無有人曾見看看待晚岳廟𥚃張牌疑忌一直逩囘家來又和李牌𪨆了一黄昏看看黑了兩個見他當夜不歸又不見了他老娘次日兩個牌軍又去他親戚之家訪問亦無𪨆𠙚兩個恐怕連累只得去殿帥府首告王敎頭棄家在逃子母不知去向高太尉見告了大怒道賊配軍在逃看那厮待走那里去隨即押下文書行開
諸州各府捉拿逃軍王進二人首告免其罪責不在話下且說王教頭母子二人自離了東京在路免不得飢飡渴飮夜住曉行在路上一月有餘忽一日天色將晚王進挑着擔兒跟在娘的馬後口裡與母親說道天可憐見慚愧了我子母兩個脫了這天羅地網之厄此去延安府不遠了高太尉便要差人拿我也拿不着了子母兩箇歡喜在路上不覺錯過了宿頭走了這一晚不遇着一處村坊那里去投宿是好正没理會處只見遠遠地林子𥚃閃出一道燈光來王進看了道好了遮莫去那里陪個小心借宿一宵明日早行當時轉入林子裡來看時却是一所大莊院一週遭都是土墙墙外却有二三百株大柳𣗳看那莊院但見前通官道後靠溪岡一週遭楊柳綠陰濃四下里喬松青似染草堂高起盡按五運山莊亭舘低軒直造𠋣山臨水轉屋𧢲牛羊滿地打麥場鵝鴨成羣出園廣野負傭莊客有千人家眷軒昂女使兄童難計數正是家有餘粮鷄犬飽戸多書籍子孫賢當時王教頭來到莊前敲門多時只見一個莊客出來王進放下担兒與他施禮莊客道來俺莊上有甚事王進荅道實不相瞞小人子母二人貪行了些路程錯過了宿店來到這里前不巴村後不巴店欲投貴莊借宿一宵明日早行依例拜納房金萬望周全方便莊客道既是如此且等一等待我去問莊主太公肻時但歇不妨王進又道大哥方便莊客入去多時出來說道莊主太公教你兩個入來王進請娘下了馬王進挑着担兒就牽了馬隨莊客到𥚃面打麥場上歇下担兒把馬拴在柳𣗳上子母兩個直到草堂上來見太公那太公年近六旬之上鬚髮皆白頭戴遮塵暖帽身穿直縫𡩖𥘎腰繋皂𢇁縧足穿熟皮靴王進見了便拜太公連忙道客人休拜且請起來你們是行路的人辛苦風霜且坐一坐王進母子兩個敘禮罷都坐定太公問道你們是那里來如何昏晚到此王進荅道小人姓張原是京師人今來消折了本錢無可營用要去延安府投逩親眷不想今日路上貪行了些程途錯過了宿店欲投貴莊假宿一宵來日早行房金依例拜納太公道不妨如今世上人那箇頂着房屋走俚你母子二位敢未打火呌莊客安排飯來没多時就𠫇上放開條卓子莊客托出一桶盤四樣菜蔬一盤牛肉鋪放卓子上先盪酒來下太公道村落中無甚相待休得見怪王進起身謝道小人子母無做相擾得𫏂厚意此恩難報太公道休這般說且請吃酒一靣勸了五七盃酒搬出飯來二人吃了收拾碗碟太公起身引王進子母到客房中安歇王進告道小人母親騎的頭口相煩寄養草料望乞應付一𤼵拜還太公道這個亦不妨我家也有頭口騾馬教莊客牽去後槽一𤼵喂養草料亦不用憂心王進謝了挑那担兒到客房𥚃來莊客㸃上燈火一靣提湯來洗了脚太公自囘𥚃面去了王進子母二人謝了莊客掩上房門收拾歇息次日睡到天曉不見起來莊主太公來到客房前過聼得王進子母在房中聲喚太公問道客官天曉好起了王進聼得慌忙出房來見太公施禮說道小人起多時了夜來多多攪擾甚是不當太公問道誰人如此聲喚王進道實不敢瞞太公說老母鞍馬勞倦昨夜心疼病發太公道既然如此客人休要煩惱教你老母且在老夫莊上住幾日我有個醫心疼的方呌莊客去縣𥚃撮藥來與你老母親吃教他放心慢慢地將息王進謝了話休絮繁自此王進子母兩個在太公莊上服藥住了五七日覺道母親病患痊了王進收拾要行當日因來後槽看馬只見空地上一箇後生脫膊着剌着一身靑龍銀盤也似一個面皮約有十八九歲拿條棒在那里使王進看了半晌不覺笑口道這棒也使得好了只是有破綻嬴不得眞好漢那後生聽得大怒喝道你是甚麽人敢來笑話我的本事俺經了七八個有名的師父我不信倒不如你你敢和我扠一扠麽說猶未了太公到來喝那後生不得無禮那後生道𡬡耐這厮笑話我的棒法太公道客人莫不會使鎗棒王進道頗曉得些敢問長上這後生是宅上的誰太公道是老漢的兒子王進道既然是宅内小官人若愛學時小人㸃撥他端正如何太公道恁地時十分好便教那後生來拜師父那後生那里肻拜心中越怒道阿爹休聽這厮胡說若吃他贏得我這條棒時我便拜他爲師王進道小官人若是不當村時較量一棒耍子那後生就空地當中把一條棒使得風車兒似轉向王進道你來你來怕的不筭好漢王進只是笑不肻動手太公道客官既是肻教小頑時使一棒何妨王進笑道恐衝撞了令郞時須不好看太公道這個不妨若是打折了手脚也是他自作自受王進道恕無禮去鎗架上拿了一條棒在手𥚃來到空地上使箇旗鼓那後生看了一看拿條棒滚將入來逕逩王進王進托地拖了棒便走那後生掄着棒又赶入來王進囘身把棒望空地里劈將下來那後生見棒劈來用棒來隔王進却不打下來將棒一掣却望後生懐𥚃直搠將來只一繳那後生的棒丟在一邊撲地望後倒了王進連𢗅撇下棒向前扶住道休恠休恠那後生爬將起來便去傍邉掇條凳子納王進坐便拜道我枉自經了許多師家原來不值半分師父没奈何只得請教王進道我子母二人連日在此攪擾宅上無恩可報當以効力太公大喜呌那後生穿了衣裳一同來後堂坐下呌莊客殺一個羊安排了酒食果品之類就請王進的母親一同赴席四個人坐定一面把盞太公起身勸了一盃酒說道師父如此高強必是個教頭小兒有眼不識㤗山王進笑道奸不厮欺俏不厮瞞小人不姓張俺是東京八十萬禁軍教頭王進的便是這鎗棒終日搏弄爲因新任一個高太尉原被先父打翻今做殿帥府太尉懐挾舊仇要奈何王進小人不合属他所管和他爭不得只得子母二人逃上延安府去投托老种經畧相公處勾當不想來到這里得遇長上父子二位如此看待又蒙救了老母病患連
日管顧甚是不當既然令郞肯學時小人一力奉教只是令郞學的都是花棒只好看上陣無用小人從新㸃撥他太公見說了便道我𧠇可知輸了快來再拜師父那後生又拜了王進太公道教頭在上老漢祖居在這華陰縣界前面便是少華山這村便喚做史家村村中緫有三四百家都姓史老漢的兒子從小不務農業只愛刺鎗使棒母親說他不得嘔氣死了老漢只得隨他性子不知使了多少錢財投師父教他又請高手匠人與他刺了這身花綉肩臂胸膛總有九條龍滿縣人口順都呌他做九紋龍史進教頭今日既到這里一𤼵成全了他亦好老漢自當重重酧謝王進大喜道太公放心既然如此說時小人一發教了令郞方去自當日爲始吃了酒食留住王教頭子母二人在莊上史進每日求王教頭㸃撥十八般武藝一一從頭指教那十八般武藝矛鎚弓弩銃鞭簡劒鏈撾斧鉞并戈㦸牌棒與鎗杈話說這史進每日在莊上管待王教頭母子二人指教武藝史太公自去華陰縣中承當里正不在話下不覺荏苒光陰早過半年之上正是窓外日光彈指過席間花影坐前移一盃未進笙歌送堦下辰牌又報時前後得半年之上史進把這十八般武藝從新學得十分精熟多得王進盡心指教㸃撥得件件都有奥妙王進見他學得精熟了自思在此𨿽好只是不了一日想起來相辭要上延安府去史進那里肻放說道師父只在此間過了小弟奉養你母子二人以終天年多少是好王進道賢弟多蒙你好心在此十分之好只恐高太尉追捕到來負累了你恐教賢弟亦遭縲絏之厄不當穩便以此兩難我一心要去延安府投着在老种經畧處勾當那里是鎭守邉庭用人之際足可安身立命史進并太公苦留不住只得安排一個筵席送行托出一盤兩個叚子一百兩花銀謝師次日王進收拾了擔兒備了馬子母二人相辭史太公史進請娘乘了馬望延安府路途進發史進呌莊客挑了担兒親送十里之程中心難捨史進當時拜别了師父洒淚分手和莊客自囘王教頭依舊自挑了担兒跟着馬和娘兩個自取関西路里去了話中不說王進去投軍役只說史進囘到莊上每日只是打熬氣力亦且壯年又没老小半夜三更起來演習武藝白日𥚃只在莊後射弓走馬不到半載之間史進父親太公𣑱患病症數日不起史進使人逺近請醫士看治不能痊可嗚呼哀哉太公殁了史進一面備棺椁衣殮請僧修設好事追齋理七薦史太公又請道士建立齋醮超度生天整做了十數壇好事功果道場選了吉日良時出䘮安塟滿村中三四百史家莊戸都來送䘮掛孝理𣩵在村西山上祖墳内了史進家自此無人管業史進又不肯務農只要𪨆人使家生較量鎗棒自史太公死後又早過了三四個月日時當六月中旬炎天正𤍠那一日史進無可消遣捉箇交床坐在打麥場邊柳陰樹下乘凉對面松林透過風來史進喝采道好凉風正乘凉俚只見一個人採頭採腦在那里張望史進喝道作怪誰在那里張俺莊上史進跳起身來轉過樹背後打一看時認得是獵戸摽兎李吉史進喝道李吉張我莊内做甚麽莫不來相脚頭李吉向前聲喏道大郞小人要𪨆莊上矮丘乙郞吃碗酒因見大郞在此乘凉不敢過來衝撞史進道我且問你往常時你只是擔些野味來我莊上賣我又不㑹虧了你如何一向不將來賣與我敢是欺負我没錢李吉荅道小人怎敢一向没有野味以此不敢來史進道胡說偌大一箇少華山恁地廣濶不信没有箇獐兒兎兒李吉道大郎原來不知如今近日上面添了一夥強人札下箇山寨在上面聚集着五七百箇小嘍囉有百十疋好馬爲頭那個大王喚做神機軍師朱武第二箇喚做跳澗虎陳逹第三箇喚做白花蛇楊春這三箇爲頭打家刼舍華陰縣𥚃不敢捉他出三千貫賞錢召人拿他誰敢上去惹他因此上小人們不敢上山打捕野味那討來賣史進道我也聽得說有強人不想那厮們如此大弄必然要惱人李吉你今後有野味時𪨆些來李吉唱箇喏自去了史進歸到𠫇前𪨆思這厮們大弄必要來𧂭惱村坊既然如此便呌莊客揀兩頭肥水牛來殺了莊内自有造下的好酒先燒了一陌順溜𥿄便呌莊客去請這當村𥚃三四百史家莊戸都到家中草堂上序齒坐下教莊客一面把盞勸酒史進對衆人說道我聽得少華山上有三箇強人聚集着五七百小嘍囉打家劫舍這厮們既然大弄必然早晚要來俺村中囉唕我今特請你衆人來啇議倘若那厮們來時各家准備我莊上打起梆子你衆人可各執鎗棒前來救應你各家有事亦是如此遍相救護共保村坊如若強人自來都是我來理㑹衆人道我等村農只靠大郞做主梆子嚮時誰敢不來當晚衆人謝酒各自分付囘家准備器械自此史進修整門戸墻垣安排莊院拴束衣甲整頓刀馬隄防賊㓂不在話下且說少華山寨中三個頭領坐定商議爲頭的神機軍師朱武𨿽無本事廣有謀畧朱武當與陳逹楊春說道如今我聽知華隂縣𥚃出三千貫賞錢召人捉我們誠恐來時要與他厮殺只是山寨錢粮欠少如何不去刼擄些來以供山寨之用聚積些粮食在寨𥚃防備官軍來時好和他打熬跳澗虎陳逹道說得是如今便去華陰縣𥚃先問他借粮看他如何白花蛇楊春道不要華隂縣去只去蒲城縣萬無一失陳逹道蒲城縣人户稀少錢粮不多不如只打華陰縣那里人民豊富錢粮廣有楊春道哥哥不知若去打華陰縣時須從史家村過那箇九紋龍史進是箇大虫不可去撩撥他他如何肯放我們過去陳逹道兄弟好懦弱一箇村坊過去不得怎地敢抵敵官軍楊春道哥哥不可小覷了他那人端的了得朱武道我也曾聞他十分英雄說這人真有本事兄弟休去罷陳逹呌將起來說道你兩個閉了烏嘴長别人志氣滅自巳威風也只是一箇人須不三頭六臂我不信喝呌小嘍囉快脩我的馬來如今便
去先打史家莊後取華陰縣朱武楊春再三諌勸陳逹那里肻聽隨卽披掛上馬㸃了一百四五十小嘍囉嗚鑼擂鼓下山望史家村去了且說史進正在莊内整製刀馬只見莊客報知此事史進聼得就莊上敲起梆子來那莊前莊後莊東莊西三四百史家莊戸聽得梆子嚮都拖鎗拽棒聚取三四百人一齊都到史家莊上看了史進頭戴一字巾身披朱紅甲上穿靑錦襖下着抹綠靴腰繋皮𦞂膊前後鉄𢲅心一張弓一壼箭手𥚃拿一把三尖兩刃四竅八環刀莊客牽過那疋火炭赤馬史進上了馬綽了刀前面擺着三四十壯健的莊客後面列着八九十村蠢的鄉夫各史家莊戸都跟在後頭一齊納喊直到村北路口擺開却早望見來軍但見紅旗閃閃赤幟翩翩小嘍囉亂搠叉鎗莾撞漢齊擔刀斧頭巾歪整渾如三月桃花衲襖𦂳拴却似九秋落葉箇箇圎睜横死眼人人輙起夜叉心那少華山陳逹引了人馬飛逩到山坡下便將小嘍囉擺開史進看時見陳逹頭戴乾紅凹面巾身披裹金生鉄甲上穿一領紅衲襖脚穿一對吊墩靴腰繋七尺攅線搭膊坐騎一疋高頭白馬手中横着丈八㸃鋼矛小嘍囉兩勢下納喊二貟將就馬上相見陳逹在馬上看着史進欠身施禮史進喝道汝等殺人放火打家刼舍犯着迷天大罪都是該死的人你也須有耳朶好大胆直來太歲頭上動土陳逹在馬上荅道俺山寨𥚃欠少些粮食欲徃華阴縣借粮經由貴莊假一條路並不敢動一根草可放我們過去囘來自當拜謝史進道胡說俺家見當里正正要來拿你這夥賊今日到來經由我村中過却不拿你到放你過去本縣知道須連累于我陳逹道四海之内皆兄弟也相煩借一條路史進道甚麽閑話我便肻時有一個不肻你問得他肻便去陳逹道好漢教我問誰史進道你問得我手𥚃這口刀肯便放你去陳逹大怒道赶人不要赶上休得要逞精神史進也怒掄手中刀驟坐下馬來战陳逹陳逹也拍馬挺鎗來迎史進兩個交馬但見一來一往一上一下一來一往有如深水戲珠龍一上一下却似半岩爭食虎左盤右旋好似張飛敵吕布前廻後轉渾如敬德戰秦瓊九紋龍忿怒三尖刀只望頂門飛跳澗虎生嗔丈八矛不離心坎刺好手中間逞好手紅心𥚃面奪紅心史進陳逹兩個𩰖了多時只見戰馬咆哮踢起手中軍器鎗刀來往各防架隔遮攔兩個𩰖到間深𥚃史進賣個破綻讓陳逹把鎗望心窩𥚃搠來史進却把腰一閃陳逹和鎗攧入懐𥚃來史進輕舒猿臂疑紅狼腰只一挾把陳逹輕輕摘離了嵌花鞍疑疑揪住了線𦞂膊丟在馬前受降那疋𢧐馬撥風也似去了史進呌莊客將陳逹綁縛了衆人把小嘍囉一赶都走了史進囘到莊上將陳逹綁在庭心内柱上等待一𤼵拿了那兩個賊首一併解官請賞且把酒來賞了衆人教權且散衆人喝采不枉了史大郞如此豪傑休說衆人歡喜飲酒却說朱武楊春兩個正在寨𥚃猜疑捉模不定且教小嘍囉再去探聽消息只見囘去的人牽着空馬逩到山前只呌道苦也陳家哥哥不聽二位哥哥所說送了性命朱武問其緣故小嘍囉備說交𨦟一節怎當史進英勇朱武道我的言語不聼果有此禍楊春道我們盡數都去和他死併如何朱武道亦是不可他尚自輸了你如何併得他過我有一條苦計若救他不得我和你都休楊春問道如何苦計朱武付耳低言說道只除恁地楊春道好計我和你便去事不冝遲再說史進正在莊上忿怒未消只見莊客飛報道山寨𥚃朱武楊春自來了史進道這厮合休我教他兩箇一𤼵解官快牽過馬來一面打起梆子衆人早都到來史進上了馬正待出莊們只見朱武楊春步行巳到莊前兩個雙雙跪下擎着兩眼淚史進下馬來喝道你兩個跪下如何說朱武哭道小人等三箇累被官司逼迫不得巳上山落草當𥘉𤼵願道不求同日生只願同日死𨿽不及関張劉備的義氣其心則同今日小弟陳逹不聽好言誤犯虎威巳被英雄擒捉在貴莊無計懇求今來一逕就死望英雄將我三人一𤼵解官請賞誓不皺眉我等就英雄手内請死並無怨心史進聽了𪨆思道他們直恁義氣我若拿他去解官請賞時反敎天下好漢們耻笑我不英雄自古道大虫不吃伏肉史進便道你兩個且跟我進來朱武楊春並無惧怯隨了史進直到後𠫇前跪下又教史進綁縛史進三囘五次呌起來那兩個那里肻起來惺惺惜惺惺好漢識好漢史進道你們既然如此義氣深重我若送了你們不是好漢我放陳逹還你如何朱武道休得連累了英雄不當𥡷便𡩬可把我們去解官請賞史進道如何使得你肻吃我酒食麽朱武道一死尚然不惧何况酒肉乎當時史進大喜解放陳逹就後𠫊上座置酒設席管待三人朱武楊春陳逹拜謝大恩酒至數盃少添春色酒罷三人謝了史進囘山去了史進送出莊門自囘莊上却說朱武等三人歸到寨中坐下朱武道我們不是這條苦計怎得性命在此𨿽然救了一人𨚫也難得史進爲義氣上放了我們過幾日俻些禮物送去謝他救命之恩話休絮繁過了十數日朱武等三人收拾得三十兩蒜條金使兩個小嘍羅趂月黑夜送去史家莊上當夜𥘉更時分小嘍囉敲門莊客報知史進史進火急披衣來到門前問小嘍囉有甚話說小嘍囉道三個頭領再三拜覆特地使小校送些薄禮酧謝大郎不殺之恩不要推欲望乞笑留取出金子逓與史進𥘉時推却次後𪨆思道既然送來囘禮可酧受了金子呌莊客置酒管待小校吃了半夜酒把些零碎銀兩賞了小校囘山去了又過半月有餘朱武等三人在寨中商議擄掠得一串好大珠子又使小嘍囉連夜送來史家莊上史進受了不在話下又過了半月史進𪨆思道也難得這三個敬重我我也俻些禮物囘奉他次日呌莊客𪨆個裁縫自去縣𥚃買了三匹紅錦裁成三領錦襖子又㨂肥羊煮了三箇將大盒子盛了委兩個莊客去送史進莊上有個爲頭的
莊客王四此人頗能荅應官府口舌利便滿莊人都呌他做王伯當史進教他同一個得力莊客挑了盒担直送到山下小嘍囉問了備細引到山寨𥚃見了朱武等三個頭領大喜受了錦襖子并肥羊酒禮把十兩銀子賞了莊客每人吃了十數碗酒下山囘歸莊内見了史進說道山上頭領多多上覆史進自此常常與朱武等三人往來不時間只是王四去山寨𥚃送物事不則一日寨𥚃頭領也頻頻地使人送金銀來與史進荏苒光陰時遇八月中秋到來史進要和三人說話約至十五夜來莊上賞月飮酒先使莊客王四賫一封請書直去少華山上請朱武陳逹楊春來莊上赴席王四馳書逕到山寨𥚃見了三位頭領下了來書朱武看了大喜三個應𠃔隨即寫封囘書賞了王四五兩銀子吃了十來碗酒王四下得山來正撞着如常送物事來的小嘍囉一把抱住那里肯放又拖去山路邉村酒店裡吃了十數碗酒王四相别了囘莊一面走着被山風一吹酒却湧上來浪浪蹌蹌一步一攧走不得十里之路見座林子逩到𥚃面望着那綠茸茸莎草地上撲地倒了原來摽兎李吉正在那山坡下張兔兒認得是史家莊上王四赶入林子𥚃來扶他那里扶得動只見王四𦞂膊裏突出銀子來李吉𪨆思道這厮醉了那里討得許多何不拿他些也是天罡星合當聚㑹自然生出機㑹來李吉解那𦞂膊望地下只一抖那封囘書和銀子都抖出來李吉拿起頗識幾字將書拆開看時見上面寫着少華山朱武陳逹楊春中間多有兼文带武的言語𨚫不識得只認得三箇名字李吉道我做獵戸幾時能勾𤼵跡筭命道我今年有大財却在這里華陰縣𥚃見出三千貫賞錢捕捉他三箇賊人尀耐史進那厮前日我去他莊上𪨆矮丘乙郞他道我來相脚頭躧盤你原來倒和賊人來往銀子并書都拿去了望華隂縣𥚃來出首却說莊客王四一覺直睡到二更方醒覺來看見月光微微照在身上王四吃了一驚跳將起來却見四邊都是松樹便去腰裡摸時𦞂膊和書都不見了四下里𪨆時只見空𦞂膊在莎草地上王四只管呌苦𪨆思道銀子不打𦂳這封囘書却怎生好正不知被甚人拿去了眉頭一縱計上心來自道若囘去莊上說脱了囘書大郞必然焦燥定是赶我出去不知只說不曾有囘書那里查照計較定了飛也似取路歸來莊上却好五更天氣史進見王四囘來問道你如何方𦂯歸來王四道托主人福廕寨中三箇頭領都不肻放留住王四吃了半夜酒因此囘來遲了史進又問曾有囘書麽王四道三箇頭領要寫囘書却是小人道三位頭領既然凖來赴席何必囘書小人又有盃酒路上恐有些失支脫節不是耍處史進聽了大喜說道不枉了諸人呌做賽伯當眞箇了得王四應道小人怎敢差遲路上不曾住脚一直逩囘莊上史進道既然如此敎人去縣𥚃買些果品案酒伺候不覺中秋節至是日晴明得好史進當日分付家中莊客宰了一腔大羊殺了百十箇鷄鵝准俻下酒食筵宴看看天色晚來怎見得好個中秋但見午夜𥘉長黄昏巳半一輪月掛如銀水盤如晝賞翫正冝人清影十分圓滿桂花玉兎交馨簾櫳高捲金盃頻勸酒歡笑賀昇平年年當此節酩酊醉醺醺莫辭終夕飮銀漢露華新且說少華山上朱武陳逹楊春三箇頭領分付小嘍囉看守寨栅只帶三五箇做伴將了朴刀各跨口腰刀不騎鞍馬步行下山逕來到史家莊上史進接着各叙禮罷請入後園莊内巳安排下筵宴史進請三位頭領上坐史進對席相陪便呌莊客把前後莊門拴了一面飮酒莊内莊客輪流把盏一邊割羊勸酒酒至數盃却早東邉推起那輪明月但見桂花離海嶠雲葉散天衢彩霞照萬里如銀素魄映千山似水一輪𠁊塏能分宇宙澄清四海團圝射映乾坤皎潔影橫曠野驚獨宿之烏鴉光射平湖照雙栖之鴻鴈氷輪展出三千里玉兎平吞四百州史進正和三箇頭領在後園飲酒賞翫中秋叙說舊話新言只聽得墻外一聲喊起火把亂明史進大驚跳起身來分付三位賢友且坐待我去看喝呌莊客不要開門掇條梯子上墻打一看時只見是華陰縣縣尉在馬上引着兩箇都頭带着三四百土兵圍住莊院史進和三箇頭領没做理會外面火把光中照見鋼叉朴刀五股叉留客住擺得似麻林一般兩箇都頭口𥚃呌道不要走了強賊不是這夥人來捉史進并三個頭領有分教史進先殺了一兩個人結識了十數個好漢大閙動河北直使天罡地煞一齊相㑹直教蘆花深處屯兵士荷葉陰中治戰舡畢竟史進與三個頭領怎地脫身且聼下囘分解總評神機軍師朱武妙計只是投降若非史進英雄拿去解官請賞何如或曰此正見神機處何𤼵一笑鍾伯敬先生批評水滸傳卷之二(終)"
b = "第二囘王教頭私走延安府九紋龍大閙史家村千古幽扃一旦開天罡地殺出星臺自來無事多生事本爲禳災𨚫惹災社覆從今雲擾擾兵戈到处閙垓垓高俅奸侫眞堪恨洪信從今釀禍胎當時道官對洪太尉說是老祖天師洞玄眞人鎭鎖着三十六員天罡星七十二座地殺星共一百單八個魔君上立石碑刻著龍章鳳篆天符鎭住若放他出世必害下方生靈如今放脫怎生是好太尉听了大驚收拾同衆人囘京在路分付衆人把走妖魔事情休説恐天子知而見責囘至京師听知天師在東京做了七晝夜好事普施符籙瘟疫寧息軍民安痊天師囘龍虎山去了太尉次日朝見天子奏說天師駕雲已先到京師臣從馹傳而來靣君復命仁宗准奏賞賜洪信仁宗在位四十二年傳位英宗英宗在位四年傳位神宗神宗在位一十八年傳位哲宗天下太平四方無事且說東京開封府汴梁宣武軍一個浮浪子弟姓高名俅自㓜好使鎗棒相拍頑耍踢得好氣毬京師人都呌他做高毬後來發跡將氣毬毬字去了毛傍添作立人名爲高俅在東京城𥚃因幫生鉄王員外兒子賭錢被他父親告了府尹把高俅㫁了四十杖迭配出界不許東京城𥚃人民容藏高俅無計只得投奔淮州柳世雄家三年後宋哲宗因拜南郊大赦天下高俅要囘東京這柳世雄𨚫和東京城𥚃金梁橋下開生藥鋪董將士是親寫了封書與高俅去投董將士家過活高俅辞了柳大郎囘東京至董家呈上世雄的書董將士看畢尋思□高俅是個浪子着留了他必定教壞吾兒待不收留又負了柳大郎靣情只得权且留下一日将士思計對高俅曰弟欲留公在家恐悞了足下我轉薦足下與蘇學士處乆後必得出身足下如何高俅大喜董將士卽使人持書引高俅到學士府學士出堂見了高俅看了來書心下想道不如薦他去鬪馬王晉卿府𥚃做個親隨人次日脩書一封使人送高俅去王都太尉處這太尉乃是哲宗的妹夫神宗皇帝的駙馬他喜愛風流人物當時駙馬見蘇學士這人送高俅來拜見了卽隨寫囘書收留高俅做個親隨人忽一日王太尉慶生辰安排酒宴專請小𠢎端王這端王乃神宗第十一子哲宗御弟掌東駕排號九大王是聰明俊俏人物更兼琴棋書畫踢球打彈品竹調絲无有不能當日王都尉府中在偹筵宴但見香焚寶鼎花揷金瓶仙音院競奏新聲教坊司頻逞妙藝水晶壺內尽都是紫府瓊漿琥珀盃中滿泛着瑤池玉液玳瑁盤堆着仙桃異果玻璃碗俱是熊掌駝蹄鱗鱗膾切銀絲細細茶烹玉蕋紅裙舞女尽随着象板鸞簫翠袖歌姬簇捧定龍笙鳳管兩行珠翠立階前一派笙歌臨座上端王來都尉府中赴宴酒進數盃端王起身淨手來書院𥚃見案上一對羊脂玉碾成的鎭紙獅子極做得細巧端王拿起看了一會曰好王都尉見端王心愛便說曰再有一個玉龍筆架也是那匠人做的明日一併相送端王大喜称謝依舊入席至暮方散次日王都尉取出玉龍筆架鎭紙玉獅子使高俅送投端王府中來院公出見引到庭前高俅看見端王頭戴軟紗唐巾身穿紫綉袍腰繋王帶足穿嵌金線靴與三五個小黃門相伴踢氣球高俅立在從人背後伺侯也是高俅合當發跡那個氣球直滚到高俅身邉那高俅見氣球來到身邉便使個鴛鴦拐踢还端王端王大喜問曰你是甚麽人高俅跪下曰小人是王都尉親随使令賫送兩般玉玩噐献上大王有書在上端王看了玩噐卽令收了便問高俅你原來會踢氣球喚作甚名高俅跪荅小人名喚高俅這氣球胡乱踢得幾腳端王曰你便踢一囘高俅拜曰小的是何䓁樣人敢與大王下腳端王曰這是齊雲社名為天下圓但踢何妨高俅叩頭觧膝上塲𦂯踢幾腳端王喝采高俅把平生本事都使出來那氣毬一似鰾膠粘在身上端王大喜留住高俅次日設宴請王都尉赴宴王都尉見了令旨隨卽來到宮中端王先謝玉玩噐後入席飮宴間端王曰這高俅踢得兩腳好氣毬孤欲用此人就當伏侍端王執盃相謝至脫筵罷王都尉自囘端王自得高俅未及兩月哲宗来有太子文武商議共立端王嗣哲宗之位爲天子號曰徽宗皇帝登基之後擡舉高俅做到殿帥府太尉之聀高俅卽選吉日到任所有一應牙將都軍禁軍馬步兵䓁都來𠫭拜只欠一名乃八十萬禁軍教頭王進軍政司稟曰半月之前已有病狀不曾入衙高俅怒曰此人推病在家隨卽差人拿王進且說這王進止有老母无妻子牌軍來拿王進只得捱病入府𠫭見拜了高俅曰你是都軍教頭王昇的兒子王進稟曰小人便是高俅喝曰你是街市上使花棒賣营藥的你如何敢不伏我點視詐病在家王進告曰小人怎敢是寔患病高俅罵曰你既患病如何來得唱令左右拿下王進與我重打衆牙將皆稟曰今日是老爺上任好日权免這次高太尉喝曰且看衆將之靣饒你明日理會王進起來認得是高俅出衙門咲曰只道是甚麽高殿帥原來是東京幫閑的圓社的高二先時曾學使棒被我父親一棒打番他今日要報前仇囘到家中對娘說知此事母子抱頭而哭王進曰兒子尋思不如逃去延安府老种經畧相公名下投他方可安身母曰門前兩個牌軍是殿帥撥來的他若得知便走不脫王進曰不妨兒子自有道理當晚對兩個牌軍說我因前日患病在酸棗門外岳庙𥚃許下香愿明日要去燒香你今晚去買三牲先去对他說知二人先領命去了當夜子母收拾行李出了西華門望延安而去且說兩個牌軍買了福物在庙中䓁到次日巳牌不見來二人心焦走囘見鎖了門直尋到晚不見踪跡兩人恐怕連累及巳卽投殿師府中首先說王進棄家逃走不知去向高太尉大怒卽押文書行關各州府捉拿不題且說王進子母自離東京在路月餘一日天晚不斍路過宿店捱到一處是一所大庄王進到庄前敲門有一庄客出來王進施礼曰小人母子貪行些
路錯過客店來投貴庄借宿明早便行庄客入報出來言曰太公教你兩人進去王進同母入到草堂見太公各叙礼畢太公問曰客官貴處因甚昏晚到此王進曰小人姓張原是京師人要去延安府投奔親眷太公曰既如此但村中无甚相待休得見怪王進謝曰多蒙厚意无恩可報晚飯畢太公引王進子母到客房安歇王進曰小人的馬相煩寄养一発还錢太公曰我家也有頭口呌庄客牽去後槽喂养王進謝了各自安歇次日大明王進收拾要行來後槽看馬只見空地上有一個後生脫膊刺着一身青龍拿一條棍在那里使王進咲曰只有些破綻那後生听得喝道你是甚人敢咲我的本事俺曾經七八个明師倒不如你麽說犹未了太公來到喝那後生不得无礼那後生曰闘耐這廝咲我的棍法太公曰客官莫會使棒王進曰畧曉得些敢問這後生是誰太公曰是老漢的兒子進曰既然是小官人小人點撥他端正如何太公曰恁的極好便喚那後生來呌師父後生曰爹爹休聽這厮胡說他若贏得我一棍我便拜他爲師王進曰小官人若不相信請較量一棒耍那後生拿一條棒使得似風車兒樣轉呌王進曰你來你來王進只是咲不肯動手太公曰客官既肯見教小頑使一棒何妨王進咲曰只恐冲撞了令郎太公道這個不妨客官只管上塲王進曰恕罪了拿一條棒在手使個旗皷势那後生輪棒滾將過來王進托地拖了棒便走那後生又趕入來王進囘身舉棒望空劈將下來那後生用棒來隔王進𨚫不打下來提棒望後生懷𥚃只一鈢那後生的棒丟在一邉撲地倒了王進連忙進前扶住曰休怪休怪那後生扒將起來便拜曰俺自經了許多教師不如客官願請賜教王進曰俺母子在此多擾當效力報恩太公大喜教庄客安排酒食就請王進的母親一同赴席太公曰師父如此高强必然是個教頭小兒有眼不識泰山王進曰寔不相瞞小人不姓張乃是東京八十萬禁軍教頭王進便是爲因新任高太尉原被先父打畨今做殿帥府太尉怀挾旧仇因此母子二人迯上延安府老种經畧相公処勾當不想得遇太公如此看待若令郎肯學小人顧奉教太公□老漢祖居華隂縣界內前靣便是少華山這村喚做史家莊老漢這個兒子自㓜不務農業只愛刺鎗使棒母親說他不得嘔氣死了老漢只得隨他性子不知去了多少錢財投師這身花綉刺有九條龍人都呌他做九紋龍史進教頭既到這里望乞賜教自當重謝王進曰既然如此必當奉命自此留住王進母子在庄上每日教史進點撥他十八般武藝矛錘弓弩銃鞭簡劍鏈撾斧鉞并戈戟牌棒與鎗爬𨚫說史進留王進指教武藝不覺半年王進把十八般兵噐教得史進精熟王進相辞要行史進曰師父只在我家我奉養師父母子以終天年王進曰雖蒙好意只恐高太尉知道連累不便史進太公苦留不住設宴送行托出一盤緞子百兩花銀謝師次日王進收拾望延安府去了史進送了一程囘庄每日演習武藝當時六月炎天史進坐在柳隂樹下乘涼見一獵戶呌做摽兎李吉行過史進問曰你往常挑野味在我庄上來賣這一向爲何不來李吉曰小人不說大郎不知近日少華山上添了一夥强人聚有七百餘人爲頭的大王喚做神机軍師朱武第二個喚做跳澗虎陳達第三個喚作白花蛇楊春官兵不敢促他小人因此不敢上山打獵那討野味史進听了尋思這賊終乆來我庄上便教庄客殺牛聚集四百餘庄人飮酒對衆人曰我今听得少華山上有一夥強人恐早晚間要來我村中打我特請你衆人商議他倘若來我村中時你們各執鎗棒前來救應一家有事各家救護衆人曰我們村農只靠大郎作主梆子响時誰敢不來當日衆人囘家准偹噐械不題𨚫說少華山神机軍師朱武廣有智畧一日與陳達楊春計議曰我听知華隂縣𥚃出三千貫賞錢招人來捉我們軍兵來時要與他們厮殺目今山寨缺少錢粮如之奈何陳達曰便去革隂縣𥚃借粮看他如何楊春曰不要去華隂縣只去浦城縣万无一失陳達曰浦城縣錢粮稀少只去打華隂縣錢粮更多楊春曰若去打華隂縣時須從史家村過聞知九紋龍史進有万人之敵他如何肯放我們過去陳達曰量一村坊過去不得尚敢抵敵官軍長他人之志氣滅自巳的威風遂點嘍啰披掛下山去了史進正在庄上整頓弓馬只見庄客報說賊到史進呌敲起梆子那四百庄人都到史進頭戴一字巾身穿硃紅甲前後鉄掩心一張弓一壺箭手提一把三尖刃騎一⽦火炭赤馬庄人随後吶喊直到庄前排開陣势見陳達頭頂乾紅盔身披鏤金甲坐下一⽦高鞍馬手牛點鋼鎗二將相見陳達馬上欠身施礼史進喝曰汝等強盜敢來太𡻕頭上動土陳達曰因我山寨欠缺錢粮欲往華隂縣借粮經由貴村借路過去不敢動你一根草囘日重謝史進曰我家正當甲長放你過去本縣知道必連累我陳達曰四海之內皆兄弟也借路一過不妨史進不允陳達大怒挺鎗刺來史進拍馬來迎二人閗了五十合史進使個破綻讓陳達一鎗望心窩𥚃搠來史進𨚫把腰一閃陳達和鎗撲入怀𥚃史進輕舒猿臂只一挾把陳達捉過馬來衆嘍囉都走了史進囘到庄上將陳達綁在柱上偹酒來賞了衆人俱各准偹𨚫說朱武楊春正在寨中嘍啰報說二頭領被捉去了朱武嘆曰不听吾言果有此禍楊春曰奈何朱武曰我有一條計可以救他楊春曰有何計朱武附耳低言春曰好計和你便去史進正在庄上庄客來報曰少華山朱武楊春都來了史進便提刀上馬正出庄門只見朱武楊春都到䨇䨇跪下史進喝曰你二人跪下如何朱武哭曰小人三個因被官司累次逼迫不得巳上山落草三人當初發願不願同生只求同死雖不及關張劉偹其心則同今陳達誤犯被促我三人義不貪生特來請死大郎將我三人觧官請賞誓不皱眉史進听了他們如此義氣我若拿他
觧官反被天下好漢耻咲便曰你二人跟我進來朱武楊春随了史進直到厛前跪下又請綁縛史進曰惺惺惜惺惺好漢惜好漢你們既如此義氣我若送了你們不是好漢放陳達还你如何朱武曰休得連累了將軍寧可將我們觧官史進曰不可卽令入了陳達就置酒疑待三人飮罷拜辞史進三人囘到寨中朱武曰雖然是計亦難得史進好意我們須要報謝随卽收拾得三十條金使両個嘍囉趂月送與史進嘍羅到史進庄內將金献上告達三人酧謝不殺之恩史進受了金子教庄客將酒相待囘山半月朱武䓁擄得一出大珠子又使嘍囉送來史進又受了尋思難得這三個敬重我也討些礼囘奉他次日教三個裁縫做了三件錦襖殺了一腔肥羊令庄客送至山寨見了一個頭領朱武䓁大喜收了礼物欵待來人白金五両庄客拜別囘來史進自此與朱武往來荏苒光陰將近八月中秋要請三人至十五日夜來庄上賞月先令庄客王四送書去請三位頭領看書大喜卽冩下囘書賞銀下山遇着嘍囉又拖去酒店中吃了數碗相別囘程走不到十里酒𨚫湧上來便醉倒了那摽兎李吉正在山坡下來認得是史家庄的王四逕來扶他見王四𦞂膊𥚃突出銀子來李吉尋思這厮醉了這銀子何不拿他的去李吉觧下𦞂膊一抖那封囘書和銀子都抖出來李吉將書拆開見書上冩着少華山朱武三人名字李吉曰闘耐史進原來與強盜來往把書望華隂縣出首去了王四睡到三更方醒看見月光跳將起來四邉都是松樹忙去腰間摸時𦞂膊并書都不見了哭曰銀子不打𦂳失了這封書如何是好心生一計只說不曽有回書來到庄上史進問曰你往何方𦂯囘來王四曰托主人福蔭寨中頭領留我吃了半夜酒因此回遲史進又問曰曽有回書否王四曰他要脩囘書是小人說若拿囘書恐路上不便史進大喜排起筵宴伺侯朱武三人分付嘍囉看守寨門只帶三五個作伴各藏短刀下山來到庄上史進接着各敘礼畢請入後園分賓主坐定令庄客把前後庄門拴了一靣飮酒酒至數杯只見東邉推起那輪明月但見秋夜初長黃錯巳半一輪月掛如銀氷盤如昼翫正空人淸影十分圓滿桂花玉兎交馨簾籠高捲金盃頻觀酒觀咲賀昇平當此節酩酊醉燻燻莫辭終夕醉銀漢露華新且說史進正和三人飮酒只聽得牆外𠴁起火把亂明三人大驚史進曰三位休慌待我去看掇條梯子傍墻一看只見縣尉在馬上引兩個都頭領四百士兵圍住庄院都頭大呌不要走了強盜這夥人來捉進進直使大罡地殺一齊相會正是芦花深處藏將士荷葉隂中聚𢧐舡畢竟史進與三個頭領怎的脫身且聽下囘分觧"
# Witness identifiers and bibliographic titles for the two editions compared.
id_a = "A006267-002"
id_b = "A006371-002"
title_a = "鍾伯敬先生批評水滸伝一百巻一百回 第二回"
title_b = "新刻全像忠義水滸誌伝二十五巻一百十五回 第二回"
# Declare both witnesses in the header's <sourceDesc> as a <listWit>.
# (The original built each <witness> with duplicated code; a loop removes it.)
sourceDesc = root.find(prefix + "sourceDesc")
listWit = ET.SubElement(sourceDesc, "{http://www.tei-c.org/ns/1.0}listWit")
for wit_id, wit_title in ((id_a, title_a), (id_b, title_b)):
    witness = ET.SubElement(listWit, "{http://www.tei-c.org/ns/1.0}witness")
    witness.set("xml:id", wit_id)
    witness.text = wit_title
# Record in <encodingDesc> that variants are encoded inline, in
# parallel-segmentation form, per the TEI critical-apparatus module.
teiHeader = root.find(prefix + "teiHeader")
encodingDesc = ET.SubElement(teiHeader, "{http://www.tei-c.org/ns/1.0}encodingDesc")
variantEncoding = ET.SubElement(encodingDesc, "{http://www.tei-c.org/ns/1.0}variantEncoding")
variantEncoding.set("method", "parallel-segmentation")
variantEncoding.set("location", "internal")
def _append_app(parent, readings):
    """Append an <app> holding one <rdg wit="#id"> per (witness_id, text) pair.

    Returns the new <app> element so the caller can attach trailing common
    text to its .tail.
    """
    app = ET.SubElement(parent, "{http://www.tei-c.org/ns/1.0}app")
    for wit_id, text in readings:
        rdg = ET.SubElement(app, "{http://www.tei-c.org/ns/1.0}rdg")
        rdg.set("wit", "#" + wit_id)
        rdg.text = text
    return app

# Walk the opcode stream: divergent spans become <app> entries, shared spans
# become inter-element text inside <p>.
s = difflib.SequenceMatcher(None, a, b)
old_ele = p
for tag, i1, i2, j1, j2 in s.get_opcodes():
    if tag == "delete":
        # Present only in witness A.
        old_ele = _append_app(p, [(id_a, a[i1:i2])])
    elif tag == "insert":
        # Present only in witness B.
        old_ele = _append_app(p, [(id_b, b[j1:j2])])
    elif tag == "replace":
        # Both witnesses diverge: one reading per witness.
        old_ele = _append_app(p, [(id_a, a[i1:i2]), (id_b, b[j1:j2])])
    elif tag == "equal":
        # Text shared by both witnesses. Before the first <app> child it must
        # become the paragraph's own .text; afterwards it hangs off the tail
        # of the preceding <app>. BUG FIX: the original always assigned
        # old_ele.tail, so an opening run of common text was stored as p.tail
        # and serialized *outside* the <p> element.
        if old_ele is p:
            p.text = a[i1:i2]
        else:
            old_ele.tail = a[i1:i2]
    else:
        # get_opcodes() only yields the four tags above; defensive trace.
        print(tag)
tree.write("data/diff.xml", encoding="utf-8")
| 184.737864
| 11,603
| 0.947393
| 417
| 19,028
| 43.199041
| 0.232614
| 0.006217
| 0.008327
| 0.00916
| 0.041801
| 0.040913
| 0.040913
| 0.040913
| 0.038914
| 0.038914
| 0
| 0.003717
| 0.024333
| 19,028
| 103
| 11,604
| 184.737864
| 0.966173
| 0.001104
| 0
| 0.408451
| 0
| 0
| 0.908503
| 0.869357
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028169
| 0
| 0.028169
| 0.014085
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3d372653470996da64ee3dcad6250a21dbd2e6ea
| 52
|
py
|
Python
|
zucchini/graders/exceptions.py
|
dbecker1/zucchini
|
47eb9a40b47bb1b131dcfd0073596ccf8816562c
|
[
"Apache-2.0"
] | 3
|
2018-03-27T18:09:54.000Z
|
2021-04-08T03:03:55.000Z
|
zucchini/graders/exceptions.py
|
dbecker1/zucchini
|
47eb9a40b47bb1b131dcfd0073596ccf8816562c
|
[
"Apache-2.0"
] | 337
|
2017-12-17T13:22:26.000Z
|
2022-03-28T02:05:09.000Z
|
zucchini/graders/exceptions.py
|
dbecker1/zucchini
|
47eb9a40b47bb1b131dcfd0073596ccf8816562c
|
[
"Apache-2.0"
] | 7
|
2018-01-10T18:46:26.000Z
|
2020-10-17T17:47:07.000Z
|
class InvalidGraderConfigError(Exception):
    """Signals that a grader was given an invalid configuration."""
| 17.333333
| 42
| 0.807692
| 4
| 52
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134615
| 52
| 2
| 43
| 26
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
3d3c48e30dea59b0f2566984a39668435562eafb
| 10,007
|
py
|
Python
|
tests/texts/declerations.py
|
Intsights/flake8-intsights
|
b3785a3be855e05090641696e0648486107dba72
|
[
"MIT"
] | 12
|
2020-02-18T17:47:57.000Z
|
2021-07-13T10:23:40.000Z
|
tests/texts/declerations.py
|
Intsights/flake8-intsights
|
b3785a3be855e05090641696e0648486107dba72
|
[
"MIT"
] | 7
|
2020-02-25T12:14:11.000Z
|
2020-12-01T08:14:58.000Z
|
tests/texts/declerations.py
|
Intsights/flake8-intsights
|
b3785a3be855e05090641696e0648486107dba72
|
[
"MIT"
] | 1
|
2020-07-01T15:49:28.000Z
|
2020-07-01T15:49:28.000Z
|
declerations_test_text_001 = '''
list1 = [
1,
]
'''
declerations_test_text_002 = '''
list1 = [
1,
2,
]
'''
declerations_test_text_003 = '''
tuple1 = (
1,
)
'''
declerations_test_text_004 = '''
tuple1 = (
1,
2,
)
'''
declerations_test_text_005 = '''
set1 = {
1,
}
'''
declerations_test_text_006 = '''
set1 = {
1,
2,
}
'''
declerations_test_text_007 = '''
dict1 = {
'key': 1,
}
'''
declerations_test_text_008 = '''
dict1 = {
'key1': 1,
'key2': 2,
}
'''
declerations_test_text_009 = '''
return [
1,
]
'''
declerations_test_text_010 = '''
return [
1,
2,
]
'''
declerations_test_text_011 = '''
return (
1,
)
'''
declerations_test_text_012 = '''
return (
1,
2,
)
'''
declerations_test_text_013 = '''
return {
1,
}
'''
declerations_test_text_014 = '''
return {
1,
2,
}
'''
declerations_test_text_015 = '''
return {
'key': 1,
}
'''
declerations_test_text_016 = '''
return {
'key1': 1,
'key2': 2,
}
'''
declerations_test_text_017 = '''
yield [
1,
]
'''
declerations_test_text_018 = '''
yield [
1,
2,
]
'''
declerations_test_text_019 = '''
yield (
1,
)
'''
declerations_test_text_020 = '''
yield (
1,
2,
)
'''
declerations_test_text_021 = '''
yield {
1,
}
'''
declerations_test_text_022 = '''
yield {
1,
2,
}
'''
declerations_test_text_023 = '''
yield {
'key': 1,
}
'''
declerations_test_text_024 = '''
yield {
'key1': 1,
'key2': 2,
}
'''
declerations_test_text_025 = '''
list1 = [
[
1,
],
]
'''
declerations_test_text_026 = '''
list1 = [
[
1,
2,
],
]
'''
declerations_test_text_027 = '''
tuple1 = (
(
1,
),
)
'''
declerations_test_text_028 = '''
tuple1 = (
(
1,
2,
),
)
'''
declerations_test_text_029 = '''
set1 = {
{
1,
},
}
'''
declerations_test_text_030 = '''
set1 = {
{
1,
2,
},
}
'''
declerations_test_text_031 = '''
dict1 = {
'key': {
'key': 1,
},
}
'''
declerations_test_text_032 = '''
dict1 = {
'key1': {
'key1': 1,
'key2': 2,
},
'key2': {
'key1': 1,
'key2': 2,
},
}
'''
declerations_test_text_033 = '''
return [
[
1,
],
]
'''
declerations_test_text_034 = '''
return [
[
1,
2,
],
]
'''
declerations_test_text_035 = '''
return (
(
1,
),
)
'''
declerations_test_text_036 = '''
return (
(
1,
2,
),
)
'''
declerations_test_text_037 = '''
return {
{
1,
},
}
'''
declerations_test_text_038 = '''
return {
{
1,
2,
},
}
'''
declerations_test_text_039 = '''
return {
'key': {
'key': 1,
},
}
'''
declerations_test_text_040 = '''
return {
'key1': {
'key1': 1,
'key2': 2,
},
'key2': {
'key1': 1,
'key2': 2,
},
}
'''
declerations_test_text_041 = '''
yield [
[
1,
],
]
'''
declerations_test_text_042 = '''
yield [
[
1,
2,
],
]
'''
declerations_test_text_043 = '''
yield (
(
1,
),
)
'''
declerations_test_text_044 = '''
yield (
(
1,
2,
),
)
'''
declerations_test_text_045 = '''
yield {
{
1,
},
}
'''
declerations_test_text_046 = '''
yield {
{
1,
2,
},
}
'''
declerations_test_text_047 = '''
yield {
'key': {
'key': 1,
},
}
'''
declerations_test_text_048 = '''
yield {
'key1': {
'key1': 1,
'key2': 2,
},
'key2': {
'key1': 1,
'key2': 2,
},
}
'''
declerations_test_text_049 = '''
list1 = [
[
2,
],
]
'''
declerations_test_text_050 = '''
list_1 = [
[
[
2,
],
],
]
'''
declerations_test_text_051 = '''
list_1 = [
(
2,
),
]
'''
declerations_test_text_052 = '''
list_1 = [
{
'key1': 'value1',
},
]
'''
declerations_test_text_053 = '''
list_1 = [
call(
param1,
),
]
'''
declerations_test_text_054 = '''
entry_1, entry_2 = call()
'''
declerations_test_text_055 = '''
(
entry_1,
entry_2,
) = call()
'''
declerations_test_text_056 = '''
[
1
for a, b in call()
]
'''
declerations_test_text_057 = '''
{
'key': [
'entry_1',
'entry_2',
]
}
'''
declerations_test_text_058 = '''
list_1 = [instance.attribute]
'''
declerations_test_text_059 = '''
list_1 = [1]
'''
declerations_test_text_060 = '''
list_1 = [test]
'''
declerations_test_text_061 = '''
dict_1 = {}
'''
declerations_test_text_062 = '''
list_1 = [term[1]]
'''
declerations_test_text_063 = '''
test = {
'list_of_lists': [
[],
],
}
'''
declerations_test_text_064 = '''
class ClassName:
pass
'''
declerations_test_text_065 = '''
class ClassName(
Class1,
Class2,
):
pass
'''
declerations_test_text_066 = '''
class ClassName():
pass
'''
declerations_test_text_067 = '''
class ClassName(Class1, Class2):
pass
'''
declerations_test_text_068 = '''
class ClassName(
Class1,
Class2
):
pass
'''
declerations_test_text_069 = '''
def function_name():
pass
'''
declerations_test_text_070 = '''
def function_name( ):
pass
'''
declerations_test_text_071 = '''
def function_name(
):
pass
'''
declerations_test_text_072 = '''
def function_name(
):
pass
'''
declerations_test_text_073 = '''
def function_name(
arg1,
arg2,
):
pass
'''
declerations_test_text_074 = '''
def function_name(
arg1,
arg2
):
pass
'''
declerations_test_text_075 = '''
def function_name(arg1):
pass
'''
declerations_test_text_076 = '''
def function_name(
arg1, arg2,
):
pass
'''
declerations_test_text_077 = '''
def function_name(
arg1,
arg2,
):
pass
'''
declerations_test_text_078 = '''
def function_name(
arg1,
**kwargs
):
pass
'''
declerations_test_text_079 = '''
class Class:
def function_name_two(
self,
arg1,
arg2,
):
pass
'''
declerations_test_text_080 = '''
class Class:
@property
def function_name_one(
self,
):
pass
'''
declerations_test_text_081 = '''
def function_name(
*args,
**kwargs
):
pass
'''
declerations_test_text_082 = '''
class A:
def b():
class B:
pass
'''
declerations_test_text_083 = '''
@decorator(
param=1,
)
def function_name(
param_one,
param_two,
):
pass
'''
declerations_test_text_084 = '''
class ClassA:
def function_a():
pass
class TestServerHandler(
http.server.BaseHTTPRequestHandler,
):
pass
'''
declerations_test_text_085 = '''
def function(
param_a,
param_b=[
'test',
],
):
pass
'''
declerations_test_text_086 = '''
@decorator
class DecoratedClass(
ClassBase,
):
pass
'''
declerations_test_text_087 = '''
class ClassName(
object,
):
pass
'''
declerations_test_text_088 = '''
pixel[x,y] = 10
'''
declerations_test_text_089 = '''
@decorator.one
@decorator.two()
class DecoratedClass:
pass
'''
declerations_test_text_090 = '''
@staticmethod
def static_method():
pass
'''
declerations_test_text_091 = '''
@decorator1
@decorator2
def static_method(
param1,
param2,
):
pass
'''
declerations_test_text_092 = '''
@decorator1(
param=1,
)
def method():
pass
'''
declerations_test_text_093 = '''
try:
pass
except Exception:
pass
'''
declerations_test_text_094 = '''
try:
pass
except (
Exception1,
Exception2,
):
pass
'''
declerations_test_text_095 = '''
try:
pass
except Exception as exception:
pass
'''
declerations_test_text_096 = '''
try:
pass
except (
Exception1,
Exception2,
) as exception:
pass
'''
declerations_test_text_097 = '''
try:
pass
except Exception as e:
pass
'''
declerations_test_text_098 = '''
try:
pass
except (
Exception1,
Exception2,
) as e:
pass
'''
declerations_test_text_099 = '''
dict1 = {
'key_one': 1, 'key_two': 2,
}
'''
declerations_test_text_100 = '''
dict1 = {
'key_one': 1,
'key_two': 2,
}
'''
declerations_test_text_101 = '''
dict1 = {
'key_one': 1,
'key_two': 2,
}
'''
declerations_test_text_102 = '''
dict1 = {
'key_one':
1,
}
'''
declerations_test_text_103 = '''
dict_one = {
'list_comp': [
{
'key_one': 'value',
}
for i in range(5)
],
'dict_comp': {
'key_one': i
for i in range(5)
},
'set_comp': {
i
for i in range(5)
},
'generator_comp': (
i
for i in range(5)
),
}
'''
declerations_test_text_104 = '''
dict_one = {
'text_key': 'value',
f'formatted_text_key': 'value',
name_key: 'value',
1: 'value',
dictionary['name']: 'value',
object.attribute: 'value',
}
dict_two = {
'key_text_multiline': \'\'\'
text
\'\'\',
1: 'text',
function(
param=1,
): 'text',
'text'.format(
param=1,
): 'text',
'long_text': (
'first line'
'second line'
),
**other_dict,
}
'''
declerations_test_text_105 = '''
async def function(
param1,
):
pass
'''
declerations_test_text_106 = '''
def no_args_function():
pass
def no_args_function() :
pass
def no_args_function ():
pass
def no_args_function( ):
pass
def no_args_function():
pass
def no_args_function() -> None:
pass
def no_args_function() -> None :
pass
def no_args_function () -> None:
pass
def no_args_function( ) -> None:
pass
def no_args_function() -> None:
pass
'''
declerations_test_text_107 = '''
class Class:
@decorator(
param=1,
)
async def function():
pass
'''
declerations_test_text_108 = '''
list_a = [
\'\'\'
multiline
string
\'\'\',
\'\'\'
multiline
string
\'\'\',
]
'''
declerations_test_text_109 = '''
list_with_empty_tuple = [
(),
]
'''
| 13.098168
| 47
| 0.540122
| 1,055
| 10,007
| 4.729858
| 0.194313
| 0.349499
| 0.436874
| 0.177956
| 0.582164
| 0.424249
| 0.236673
| 0.180561
| 0.136072
| 0.098397
| 0
| 0.074001
| 0.292395
| 10,007
| 763
| 48
| 13.115334
| 0.630702
| 0
| 0
| 0.578393
| 0
| 0
| 0.607874
| 0.009893
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.069829
| 0
| 0
| 0.02108
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
e9ead4efec2b488b003bd50670c0f814058b8f19
| 29
|
py
|
Python
|
router/tasks/__init__.py
|
smallwat3r/shopify-webhook-processor
|
4f16017cb9695ca00eb6d95e4381a8442b3dc0e3
|
[
"MIT"
] | 1
|
2021-08-30T14:01:03.000Z
|
2021-08-30T14:01:03.000Z
|
router/tasks/__init__.py
|
smallwat3r/shopify-webhook-processor
|
4f16017cb9695ca00eb6d95e4381a8442b3dc0e3
|
[
"MIT"
] | null | null | null |
router/tasks/__init__.py
|
smallwat3r/shopify-webhook-processor
|
4f16017cb9695ca00eb6d95e4381a8442b3dc0e3
|
[
"MIT"
] | 2
|
2021-08-30T14:01:04.000Z
|
2021-09-07T01:07:41.000Z
|
from .tasks import Processor
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e9ee58711825a498c9db3c3f37e476c5e56bb0a6
| 282
|
py
|
Python
|
auction/models/bidbasket.py
|
littlepea/django-auction
|
fe0219faabe17efbeca1be51869d750e82299941
|
[
"MIT"
] | 10
|
2015-01-13T02:51:35.000Z
|
2021-01-25T21:02:29.000Z
|
auction/models/bidbasket.py
|
JohnRomanski/django-auction
|
bc6982c8f34a9a6914badb203424eca7f3219685
|
[
"MIT"
] | 2
|
2016-08-05T09:24:30.000Z
|
2020-06-28T06:00:11.000Z
|
auction/models/bidbasket.py
|
JohnRomanski/django-auction
|
bc6982c8f34a9a6914badb203424eca7f3219685
|
[
"MIT"
] | 22
|
2015-03-12T10:41:52.000Z
|
2021-11-23T14:33:09.000Z
|
import importlib
from django.conf import settings
from auction.utils.loader import load_class
# Dotted path of the bid-basket model class. A project may override the
# bundled default by defining AUCTION_BIDBASKET_MODEL in its settings.
AUCTION_BIDBASKET_MODEL = getattr(
    settings,
    'AUCTION_BIDBASKET_MODEL',
    'auction.models.defaults.BidBasket',
)
# Resolve the dotted path to the actual class object once at import time;
# the second argument names the setting for load_class's error reporting.
BidBasket = load_class(AUCTION_BIDBASKET_MODEL, 'AUCTION_BIDBASKET_MODEL')
| 35.25
| 74
| 0.840426
| 35
| 282
| 6.485714
| 0.457143
| 0.281938
| 0.370044
| 0.220264
| 0.264317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 282
| 8
| 74
| 35.25
| 0.879845
| 0
| 0
| 0
| 0
| 0
| 0.279152
| 0.279152
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a1119377e73c71b58b46883ef014d640d56156e5
| 117
|
py
|
Python
|
garageofcode/semantic/main.py
|
tpi12jwe/garageofcode
|
3cfaf01f6d77130bb354887e6ed9921c791db849
|
[
"MIT"
] | 2
|
2020-02-11T10:32:06.000Z
|
2020-02-11T17:00:47.000Z
|
garageofcode/semantic/main.py
|
tpi12jwe/garageofcode
|
3cfaf01f6d77130bb354887e6ed9921c791db849
|
[
"MIT"
] | null | null | null |
garageofcode/semantic/main.py
|
tpi12jwe/garageofcode
|
3cfaf01f6d77130bb354887e6ed9921c791db849
|
[
"MIT"
] | null | null | null |
def have(subj, obj):
    """Record that *subj* has *obj* by adding it to subj's collection.

    ``subj`` must support ``.add()`` (e.g. a set).
    """
    subj.add(obj)


def change(subj, obj, state):
    """Change *obj*'s state with respect to *subj*.

    Placeholder — not implemented yet.
    """
    pass


def main():
    """Script entry point.

    Previously this function was called below but never defined, so running
    the module as a script raised NameError.  Defined as a no-op placeholder
    to make the script runnable; flesh out as the semantics module grows.
    """
    pass


if __name__ == '__main__':
    main()
| 14.625
| 29
| 0.606838
| 17
| 117
| 3.705882
| 0.647059
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 117
| 8
| 30
| 14.625
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
a11589146f3d49dce0f6bfd0ac0a0e58ecd53f6f
| 3,659
|
py
|
Python
|
shopify_listener/dispatcher.py
|
smallwat3r/shopify-webhook-manager
|
1161f070470bc2d2f81c98222b67300bc616121f
|
[
"MIT"
] | 6
|
2019-08-13T18:12:37.000Z
|
2021-05-26T17:55:58.000Z
|
shopify_listener/dispatcher.py
|
smallwat3r/shopify-webhook-manager
|
1161f070470bc2d2f81c98222b67300bc616121f
|
[
"MIT"
] | null | null | null |
shopify_listener/dispatcher.py
|
smallwat3r/shopify-webhook-manager
|
1161f070470bc2d2f81c98222b67300bc616121f
|
[
"MIT"
] | 4
|
2019-10-16T06:14:35.000Z
|
2021-06-03T06:25:26.000Z
|
# -*- coding: utf-8 -*-
# @Author: Matthieu Petiteau
# @Date: 2019-04-26 21:01:07
# @Last Modified by: Matthieu Petiteau
# @Last Modified time: 2019-04-26 21:52:46
"""Dispatch webhook event to specific actions."""
import json
class Dispatcher:
    """Route incoming Shopify webhook events to their handler methods.

    Each webhook topic (e.g. ``orders/paid``) maps to a private method named
    ``_<topic with '/' replaced by '_'>``.  The full topic list lives at:
    https://help.shopify.com/en/api/reference/events/webhook
    """

    def __init__(self, data):
        """Parse the raw webhook payload (a JSON string) into ``self.data``."""
        self.data = json.loads(data)

    @staticmethod
    def name_topic(topic):
        """Convert a topic string into the corresponding handler method name."""
        return "_" + topic.replace('/', '_')

    def dispatch_event(self, topic):
        """Look up and invoke the handler method for *topic*."""
        handler = getattr(self, self.name_topic(topic))
        return handler()

    # --- Cart events -------------------------------------------------------
    def _carts_create(self):
        pass

    def _carts_update(self):
        pass

    # --- Checkout events ---------------------------------------------------
    def _checkout_create(self):
        pass

    def _checkout_update(self):
        pass

    def _checkout_delete(self):
        pass

    # --- Collection events -------------------------------------------------
    def _collections_create(self):
        pass

    def _collections_update(self):
        pass

    def _collections_delete(self):
        pass

    def _collection_listings_add(self):
        pass

    def _collection_listings_remove(self):
        pass

    def _collection_listings_update(self):
        pass

    # --- Customer events ---------------------------------------------------
    def _customers_create(self):
        pass

    def _customers_disable(self):
        pass

    def _customers_enable(self):
        pass

    def _customers_update(self):
        pass

    def _customers_delete(self):
        pass

    def _customer_groups_create(self):
        pass

    def _customer_groups_update(self):
        pass

    def _customer_groups_delete(self):
        pass

    # --- Draft order events ------------------------------------------------
    def _draft_orders_create(self):
        pass

    def _draft_orders_update(self):
        pass

    def _draft_orders_delete(self):
        pass

    # --- Fulfillment events ------------------------------------------------
    def _fulfillments_create(self):
        pass

    def _fulfillments_update(self):
        pass

    def _fulfillment_events_create(self):
        pass

    def _fulfillment_events_delete(self):
        pass

    # --- Inventory events --------------------------------------------------
    def _inventory_items_create(self):
        pass

    def _inventory_items_update(self):
        pass

    def _inventory_items_delete(self):
        pass

    def _inventory_levels_connect(self):
        pass

    def _inventory_levels_update(self):
        pass

    def _inventory_levels_disconnect(self):
        pass

    # --- Location events ---------------------------------------------------
    def _locations_create(self):
        pass

    def _locations_update(self):
        pass

    def _locations_delete(self):
        pass

    # --- Order events ------------------------------------------------------
    def _orders_cancelled(self):
        pass

    def _orders_create(self):
        pass

    def _orders_fulfilled(self):
        pass

    def _orders_paid(self):
        pass

    def _orders_partially_fulfilled(self):
        pass

    def _orders_updated(self):
        pass

    def _orders_delete(self):
        pass

    def _orders_transactions_create(self):
        pass

    # --- Product events ----------------------------------------------------
    def _products_create(self):
        pass

    def _products_update(self):
        pass

    def _products_delete(self):
        pass

    def _product_listings_add(self):
        pass

    def _product_listings_remove(self):
        pass

    def _product_listings_update(self):
        pass

    # --- Shop / misc events ------------------------------------------------
    def _refund_create(self):
        pass

    def _app_uninstalled(self):
        pass

    def _shop_update(self):
        pass

    def _tender_transactions_create(self):
        pass

    def _themes_create(self):
        pass

    def _theme_publish(self):
        pass

    def _theme_update(self):
        pass

    def _theme_delete(self):
        pass
| 18.20398
| 70
| 0.622301
| 421
| 3,659
| 5.061758
| 0.251781
| 0.213984
| 0.289066
| 0.12764
| 0.430314
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011324
| 0.300082
| 3,659
| 200
| 71
| 18.295
| 0.820773
| 0.133096
| 0
| 0.463415
| 0
| 0
| 0.000959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.487805
| false
| 0.463415
| 0.00813
| 0
| 0.520325
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
a174909b1f9a6d386413fccc83ffd4e52629d864
| 75,049
|
py
|
Python
|
tests/unit/utils/test_docker.py
|
springborland/salt
|
bee85e477d57e9a171884e54fefb9a59d0835ed0
|
[
"Apache-2.0"
] | 1
|
2020-04-09T03:25:10.000Z
|
2020-04-09T03:25:10.000Z
|
tests/unit/utils/test_docker.py
|
springborland/salt
|
bee85e477d57e9a171884e54fefb9a59d0835ed0
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/utils/test_docker.py
|
springborland/salt
|
bee85e477d57e9a171884e54fefb9a59d0835ed0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
tests.unit.utils.test_docker
============================
Test the funcs in salt.utils.docker and salt.utils.docker.translate
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import functools
import logging
import os
# Import salt libs
import salt.config
import salt.loader
import salt.utils.docker.translate.container
import salt.utils.docker.translate.network
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.utils.docker.translate import helpers as translate_helpers
# Import Salt Testing Libs
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class Assert(object):
    """
    Base class for decorators that wrap the test_* methods with common
    translation assertions for a given input type.  Subclasses implement
    ``wrap()``; ``__call__`` returns a lambda that routes through it.
    (Indentation restored — the source dump had flattened it, which is not
    valid Python.)
    """

    def __init__(self, translator):
        # The translator module whose ALIASES_REVMAP/DEFAULTS drive the tests
        self.translator = translator

    def __call__(self, func):
        self.func = func
        return functools.wraps(func)(
            # pylint: disable=unnecessary-lambda
            lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
            # pylint: enable=unnecessary-lambda
        )

    def wrap(self, *args, **kwargs):
        # Subclasses must supply the actual assertion logic.
        raise NotImplementedError

    def test_stringlist(self, testcase, name):
        """Assert comma-separated / list-of-strings handling for *name*."""
        alias = self.translator.ALIASES_REVMAP.get(name)
        # Using file paths here because "volumes" must be passed through this
        # set of assertions and it requires absolute paths.
        if salt.utils.platform.is_windows():
            data = [r"c:\foo", r"c:\bar", r"c:\baz"]
        else:
            data = ["/foo", "/bar", "/baz"]
        for item in (name, alias):
            if item is None:
                continue
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, **{item: ",".join(data)}
                ),
                testcase.apply_defaults({name: data}),
            )
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: data}),
                testcase.apply_defaults({name: data}),
            )
            if name != "volumes":
                # Test coercing to string
                testcase.assertEqual(
                    salt.utils.docker.translate_input(
                        self.translator, **{item: ["one", 2]}
                    ),
                    testcase.apply_defaults({name: ["one", "2"]}),
                )
        if alias is not None:
            # Test collision
            # sorted() used here because we want to confirm that we discard the
            # alias' value and go with the unsorted version.
            test_kwargs = {name: data, alias: sorted(data)}
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=True, **test_kwargs
                ),
                testcase.apply_defaults({name: test_kwargs[name]}),
            )
            with testcase.assertRaisesRegex(
                CommandExecutionError, "is an alias for.+cannot both be used"
            ):
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=False, **test_kwargs
                )

    def test_key_value(self, testcase, name, delimiter):
        """
        Common logic for key/value pair testing. IP address validation is
        turned off here, and must be done separately in the wrapped function.
        """
        alias = self.translator.ALIASES_REVMAP.get(name)
        expected = {"foo": "bar", "baz": "qux"}
        vals = "foo{0}bar,baz{0}qux".format(delimiter)
        for item in (name, alias):
            if item is None:
                continue
            for val in (vals, vals.split(",")):
                testcase.assertEqual(
                    salt.utils.docker.translate_input(
                        self.translator, validate_ip_addrs=False, **{item: val}
                    ),
                    testcase.apply_defaults({name: expected}),
                )
            # Dictionary input
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, validate_ip_addrs=False, **{item: expected}
                ),
                testcase.apply_defaults({name: expected}),
            )
            # "Dictlist" input from states
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator,
                    validate_ip_addrs=False,
                    **{item: [{"foo": "bar"}, {"baz": "qux"}]}
                ),
                testcase.apply_defaults({name: expected}),
            )
        if alias is not None:
            # Test collision
            test_kwargs = {name: vals, alias: "hello{0}world".format(delimiter)}
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator,
                    validate_ip_addrs=False,
                    ignore_collisions=True,
                    **test_kwargs
                ),
                testcase.apply_defaults({name: expected}),
            )
            with testcase.assertRaisesRegex(
                CommandExecutionError, "is an alias for.+cannot both be used"
            ):
                salt.utils.docker.translate_input(
                    self.translator,
                    validate_ip_addrs=False,
                    ignore_collisions=False,
                    **test_kwargs
                )
class assert_bool(Assert):
    """
    Test a boolean value
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        for item in (name, alias):
            if item is None:
                continue
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: True}),
                testcase.apply_defaults({name: True}),
            )
            # These two are contrived examples, but they will test bool-ifying
            # a non-bool value to ensure proper input format.
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: "foo"}),
                testcase.apply_defaults({name: True}),
            )
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: 0}),
                testcase.apply_defaults({name: False}),
            )
        if alias is not None:
            # Test collision
            test_kwargs = {name: True, alias: False}
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=True, **test_kwargs
                ),
                testcase.apply_defaults({name: test_kwargs[name]}),
            )
            with testcase.assertRaisesRegex(
                CommandExecutionError, "is an alias for.+cannot both be used"
            ):
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=False, **test_kwargs
                )
        return self.func(testcase, *args, **kwargs)
class assert_int(Assert):
    """
    Test an integer value
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        for item in (name, alias):
            if item is None:
                continue
            for val in (100, "100"):
                testcase.assertEqual(
                    salt.utils.docker.translate_input(self.translator, **{item: val}),
                    testcase.apply_defaults({name: 100}),
                )
            # Error case: non-numeric value passed
            with testcase.assertRaisesRegex(
                CommandExecutionError, "'foo' is not an integer"
            ):
                salt.utils.docker.translate_input(self.translator, **{item: "foo"})
        if alias is not None:
            # Test collision
            test_kwargs = {name: 100, alias: 200}
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=True, **test_kwargs
                ),
                testcase.apply_defaults({name: test_kwargs[name]}),
            )
            with testcase.assertRaisesRegex(
                CommandExecutionError, "is an alias for.+cannot both be used"
            ):
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=False, **test_kwargs
                )
        return self.func(testcase, *args, **kwargs)
class assert_string(Assert):
    """
    Test that item is a string or is converted to one
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        # Using file paths here because "working_dir" must be passed through
        # this set of assertions and it requires absolute paths.
        if salt.utils.platform.is_windows():
            data = r"c:\foo"
        else:
            data = "/foo"
        for item in (name, alias):
            if item is None:
                continue
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: data}),
                testcase.apply_defaults({name: data}),
            )
            if name != "working_dir":
                # Test coercing to string
                testcase.assertEqual(
                    salt.utils.docker.translate_input(self.translator, **{item: 123}),
                    testcase.apply_defaults({name: "123"}),
                )
        if alias is not None:
            # Test collision
            test_kwargs = {name: data, alias: data}
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=True, **test_kwargs
                ),
                testcase.apply_defaults({name: test_kwargs[name]}),
            )
            with testcase.assertRaisesRegex(
                CommandExecutionError, "is an alias for.+cannot both be used"
            ):
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=False, **test_kwargs
                )
        return self.func(testcase, *args, **kwargs)
class assert_int_or_string(Assert):
    """
    Test an integer or string value
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        for item in (name, alias):
            if item is None:
                continue
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: 100}),
                testcase.apply_defaults({name: 100}),
            )
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: "100M"}),
                testcase.apply_defaults({name: "100M"}),
            )
        if alias is not None:
            # Test collision
            test_kwargs = {name: 100, alias: "100M"}
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=True, **test_kwargs
                ),
                testcase.apply_defaults({name: test_kwargs[name]}),
            )
            with testcase.assertRaisesRegex(
                CommandExecutionError, "is an alias for.+cannot both be used"
            ):
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=False, **test_kwargs
                )
        return self.func(testcase, *args, **kwargs)
class assert_stringlist(Assert):
    """
    Test a comma-separated or Python list of strings
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        # Shared stringlist assertions live on the base class
        self.test_stringlist(testcase, name)
        return self.func(testcase, *args, **kwargs)
class assert_dict(Assert):
    """
    Dictionaries should be untouched, dictlists should be repacked and end up
    as a single dictionary.
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        expected = {"foo": "bar", "baz": "qux"}
        for item in (name, alias):
            if item is None:
                continue
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: expected}),
                testcase.apply_defaults({name: expected}),
            )
            # "Dictlist" input from states
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator,
                    **{item: [{x: y} for x, y in six.iteritems(expected)]}
                ),
                testcase.apply_defaults({name: expected}),
            )
            # Error case: non-dictionary input
            with testcase.assertRaisesRegex(
                CommandExecutionError, "'foo' is not a dictionary"
            ):
                salt.utils.docker.translate_input(self.translator, **{item: "foo"})
        if alias is not None:
            # Test collision
            test_kwargs = {name: "foo", alias: "bar"}
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=True, **test_kwargs
                ),
                testcase.apply_defaults({name: test_kwargs[name]}),
            )
            with testcase.assertRaisesRegex(
                CommandExecutionError, "is an alias for.+cannot both be used"
            ):
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=False, **test_kwargs
                )
        return self.func(testcase, *args, **kwargs)
class assert_cmd(Assert):
    """
    Test for a string, or a comma-separated or Python list of strings. This is
    different from a stringlist in that we do not do any splitting. This
    decorator is used both by the "command" and "entrypoint" arguments.
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        for item in (name, alias):
            if item is None:
                continue
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: "foo bar"}),
                testcase.apply_defaults({name: "foo bar"}),
            )
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, **{item: ["foo", "bar"]}
                ),
                testcase.apply_defaults({name: ["foo", "bar"]}),
            )
            # Test coercing to string
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: 123}),
                testcase.apply_defaults({name: "123"}),
            )
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, **{item: ["one", 2]}
                ),
                testcase.apply_defaults({name: ["one", "2"]}),
            )
        if alias is not None:
            # Test collision
            test_kwargs = {name: "foo", alias: "bar"}
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=True, **test_kwargs
                ),
                testcase.apply_defaults({name: test_kwargs[name]}),
            )
            with testcase.assertRaisesRegex(
                CommandExecutionError, "is an alias for.+cannot both be used"
            ):
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=False, **test_kwargs
                )
        return self.func(testcase, *args, **kwargs)
class assert_key_colon_value(Assert):
    """
    Test a key/value pair with parameters passed as key:value pairs
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        # Delegate to the shared key/value logic with ":" as the delimiter
        self.test_key_value(testcase, name, ":")
        return self.func(testcase, *args, **kwargs)
class assert_key_equals_value(Assert):
    """
    Test a key/value pair with parameters passed as key=value pairs
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        # Delegate to the shared key/value logic with "=" as the delimiter
        self.test_key_value(testcase, name, "=")
        if name == "labels":
            # "labels" also accepts plain stringlist input
            self.test_stringlist(testcase, name)
        return self.func(testcase, *args, **kwargs)
class assert_labels(Assert):
    """
    Test the mixed-format label input (bare keys, key=value strings, and
    single-entry dicts), which must repack into a single dictionary.
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        labels = ["foo", "bar=baz", {"hello": "world"}]
        expected = {"foo": "", "bar": "baz", "hello": "world"}
        for item in (name, alias):
            if item is None:
                continue
            testcase.assertEqual(
                salt.utils.docker.translate_input(self.translator, **{item: labels}),
                testcase.apply_defaults({name: expected}),
            )
            # Error case: Passed a multi-element dict in dictlist
            bad_labels = copy.deepcopy(labels)
            bad_labels[-1]["bad"] = "input"
            with testcase.assertRaisesRegex(
                CommandExecutionError, r"Invalid label\(s\)"
            ):
                salt.utils.docker.translate_input(self.translator, **{item: bad_labels})
        return self.func(testcase, *args, **kwargs)
class assert_device_rates(Assert):
    """
    Tests for device_{read,write}_{bps,iops}. The bps values have a "Rate"
    value expressed in bytes/kb/mb/gb, while the iops values have a "Rate"
    expressed as a simple integer.
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        for item in (name, alias):
            if item is None:
                continue
            # Error case: Not an absolute path
            path = os.path.join("foo", "bar", "baz")
            with testcase.assertRaisesRegex(
                CommandExecutionError,
                "Path '{0}' is not absolute".format(path.replace("\\", "\\\\")),
            ):
                salt.utils.docker.translate_input(
                    self.translator, **{item: "{0}:1048576".format(path)}
                )
            if name.endswith("_bps"):
                # Both integer bytes and a string providing a shorthand for kb,
                # mb, or gb can be used, so we need to test for both.
                expected = ({}, [])
                vals = "/dev/sda:1048576,/dev/sdb:1048576"
                for val in (vals, vals.split(",")):
                    testcase.assertEqual(
                        salt.utils.docker.translate_input(
                            self.translator, **{item: val}
                        ),
                        testcase.apply_defaults(
                            {
                                name: [
                                    {"Path": "/dev/sda", "Rate": 1048576},
                                    {"Path": "/dev/sdb", "Rate": 1048576},
                                ]
                            }
                        ),
                    )
                vals = "/dev/sda:1mb,/dev/sdb:5mb"
                for val in (vals, vals.split(",")):
                    testcase.assertEqual(
                        salt.utils.docker.translate_input(
                            self.translator, **{item: val}
                        ),
                        testcase.apply_defaults(
                            {
                                name: [
                                    {"Path": "/dev/sda", "Rate": "1mb"},
                                    {"Path": "/dev/sdb", "Rate": "5mb"},
                                ]
                            }
                        ),
                    )
                if alias is not None:
                    # Test collision
                    test_kwargs = {
                        name: "/dev/sda:1048576,/dev/sdb:1048576",
                        alias: "/dev/sda:1mb,/dev/sdb:5mb",
                    }
                    testcase.assertEqual(
                        salt.utils.docker.translate_input(
                            self.translator, ignore_collisions=True, **test_kwargs
                        ),
                        testcase.apply_defaults(
                            {
                                name: [
                                    {"Path": "/dev/sda", "Rate": 1048576},
                                    {"Path": "/dev/sdb", "Rate": 1048576},
                                ]
                            }
                        ),
                    )
                    with testcase.assertRaisesRegex(
                        CommandExecutionError, "is an alias for.+cannot both be used"
                    ):
                        salt.utils.docker.translate_input(
                            self.translator, ignore_collisions=False, **test_kwargs
                        )
            else:
                # The "Rate" value must be an integer
                vals = "/dev/sda:1000,/dev/sdb:500"
                for val in (vals, vals.split(",")):
                    testcase.assertEqual(
                        salt.utils.docker.translate_input(
                            self.translator, **{item: val}
                        ),
                        testcase.apply_defaults(
                            {
                                name: [
                                    {"Path": "/dev/sda", "Rate": 1000},
                                    {"Path": "/dev/sdb", "Rate": 500},
                                ]
                            }
                        ),
                    )
                # Test non-integer input
                expected = (
                    {},
                    {item: "Rate '5mb' for path '/dev/sdb' is non-numeric"},
                    [],
                )
                vals = "/dev/sda:1000,/dev/sdb:5mb"
                for val in (vals, vals.split(",")):
                    with testcase.assertRaisesRegex(
                        CommandExecutionError,
                        "Rate '5mb' for path '/dev/sdb' is non-numeric",
                    ):
                        salt.utils.docker.translate_input(
                            self.translator, **{item: val}
                        )
                if alias is not None:
                    # Test collision
                    test_kwargs = {
                        name: "/dev/sda:1000,/dev/sdb:500",
                        alias: "/dev/sda:888,/dev/sdb:999",
                    }
                    testcase.assertEqual(
                        salt.utils.docker.translate_input(
                            self.translator, ignore_collisions=True, **test_kwargs
                        ),
                        testcase.apply_defaults(
                            {
                                name: [
                                    {"Path": "/dev/sda", "Rate": 1000},
                                    {"Path": "/dev/sdb", "Rate": 500},
                                ]
                            }
                        ),
                    )
                    with testcase.assertRaisesRegex(
                        CommandExecutionError, "is an alias for.+cannot both be used"
                    ):
                        salt.utils.docker.translate_input(
                            self.translator, ignore_collisions=False, **test_kwargs
                        )
        return self.func(testcase, *args, **kwargs)
class assert_subnet(Assert):
    """
    Test an IPv4 or IPv6 subnet
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        for item in (name, alias):
            if item is None:
                continue
            for val in ("127.0.0.1/32", "::1/128"):
                log.debug("Verifying '%s' is a valid subnet", val)
                testcase.assertEqual(
                    salt.utils.docker.translate_input(
                        self.translator, validate_ip_addrs=True, **{item: val}
                    ),
                    testcase.apply_defaults({name: val}),
                )
            # Error case: invalid subnet caught by validation
            for val in (
                "127.0.0.1",
                "999.999.999.999/24",
                "10.0.0.0/33",
                "::1",
                "feaz::1/128",
                "::1/129",
            ):
                log.debug("Verifying '%s' is not a valid subnet", val)
                with testcase.assertRaisesRegex(
                    CommandExecutionError, "'{0}' is not a valid subnet".format(val)
                ):
                    salt.utils.docker.translate_input(
                        self.translator, validate_ip_addrs=True, **{item: val}
                    )
            # This is not valid input but it will test whether or not subnet
            # validation happened
            val = "foo"
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, validate_ip_addrs=False, **{item: val}
                ),
                testcase.apply_defaults({name: val}),
            )
        if alias is not None:
            # Test collision
            test_kwargs = {name: "10.0.0.0/24", alias: "192.168.50.128/25"}
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=True, **test_kwargs
                ),
                testcase.apply_defaults({name: test_kwargs[name]}),
            )
            with testcase.assertRaisesRegex(
                CommandExecutionError, "is an alias for.+cannot both be used"
            ):
                salt.utils.docker.translate_input(
                    self.translator, ignore_collisions=False, **test_kwargs
                )
        return self.func(testcase, *args, **kwargs)
class TranslateBase(TestCase):
    """
    Shared fixture for the translate_input test cases.
    """

    maxDiff = None
    translator = None  # Must be overridden in the subclass

    def apply_defaults(self, ret, skip_translate=None):
        # Merge the translator module's DEFAULTS into *ret* for any keys not
        # already present (skipped entirely when skip_translate is True).
        if skip_translate is not True:
            defaults = getattr(self.translator, "DEFAULTS", {})
            for key, val in six.iteritems(defaults):
                if key not in ret:
                    ret[key] = val
        return ret

    @staticmethod
    def normalize_ports(ret):
        """
        When we translate exposed ports, we can end up with a mixture of ints
        (representing TCP ports) and tuples (representing UDP ports). Python 2
        will sort an iterable containing these mixed types, but Python 3 will
        not. This helper is used to munge the ports in the return data so that
        the resulting list is sorted in a way that can reliably be compared to
        the expected results in the test.
        This helper should only be needed for port_bindings and ports.
        """
        if "ports" in ret[0]:
            tcp_ports = []
            udp_ports = []
            for item in ret[0]["ports"]:
                if isinstance(item, six.integer_types):
                    tcp_ports.append(item)
                else:
                    udp_ports.append(item)
            ret[0]["ports"] = sorted(tcp_ports) + sorted(udp_ports)
        return ret

    def tearDown(self):
        """
        Test skip_translate kwarg
        """
        # Derive the argument name from the test method name (strip "test_")
        name = self.id().split(".")[-1][5:]
        # The below is not valid input for the Docker API, but these
        # assertions confirm that we successfully skipped translation.
        for val in (True, name, [name]):
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, skip_translate=val, **{name: "foo"}
                ),
                self.apply_defaults({name: "foo"}, skip_translate=val),
            )
class TranslateContainerInputTestCase(TranslateBase):
"""
Tests for salt.utils.docker.translate_input(), invoked using
salt.utils.docker.translate.container as the translator module.
"""
translator = salt.utils.docker.translate.container
@staticmethod
def normalize_ports(ret):
"""
When we translate exposed ports, we can end up with a mixture of ints
(representing TCP ports) and tuples (representing UDP ports). Python 2
will sort an iterable containing these mixed types, but Python 3 will
not. This helper is used to munge the ports in the return data so that
the resulting list is sorted in a way that can reliably be compared to
the expected results in the test.
This helper should only be needed for port_bindings and ports.
"""
if "ports" in ret:
tcp_ports = []
udp_ports = []
for item in ret["ports"]:
if isinstance(item, six.integer_types):
tcp_ports.append(item)
else:
udp_ports.append(item)
ret["ports"] = sorted(tcp_ports) + sorted(udp_ports)
return ret
@assert_bool(salt.utils.docker.translate.container)
def test_auto_remove(self):
"""
Should be a bool or converted to one
"""
def test_binds(self):
"""
Test the "binds" kwarg. Any volumes not defined in the "volumes" kwarg
should be added to the results.
"""
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, binds="/srv/www:/var/www:ro", volumes="/testing"
),
{"binds": ["/srv/www:/var/www:ro"], "volumes": ["/testing", "/var/www"]},
)
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, binds=["/srv/www:/var/www:ro"], volumes="/testing"
),
{"binds": ["/srv/www:/var/www:ro"], "volumes": ["/testing", "/var/www"]},
)
self.assertEqual(
salt.utils.docker.translate_input(
self.translator,
binds={"/srv/www": {"bind": "/var/www", "mode": "ro"}},
volumes="/testing",
),
{
"binds": {"/srv/www": {"bind": "/var/www", "mode": "ro"}},
"volumes": ["/testing", "/var/www"],
},
)
@assert_int(salt.utils.docker.translate.container)
def test_blkio_weight(self):
"""
Should be an int or converted to one
"""
def test_blkio_weight_device(self):
"""
Should translate a list of PATH:WEIGHT pairs to a list of dictionaries
with the following format: {'Path': PATH, 'Weight': WEIGHT}
"""
for val in ("/dev/sda:100,/dev/sdb:200", ["/dev/sda:100", "/dev/sdb:200"]):
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, blkio_weight_device="/dev/sda:100,/dev/sdb:200"
),
{
"blkio_weight_device": [
{"Path": "/dev/sda", "Weight": 100},
{"Path": "/dev/sdb", "Weight": 200},
]
},
)
# Error cases
with self.assertRaisesRegex(
CommandExecutionError, r"'foo' contains 1 value\(s\) \(expected 2\)"
):
salt.utils.docker.translate_input(
self.translator, blkio_weight_device="foo"
)
with self.assertRaisesRegex(
CommandExecutionError, r"'foo:bar:baz' contains 3 value\(s\) \(expected 2\)"
):
salt.utils.docker.translate_input(
self.translator, blkio_weight_device="foo:bar:baz"
)
with self.assertRaisesRegex(
CommandExecutionError, r"Weight 'foo' for path '/dev/sdb' is not an integer"
):
salt.utils.docker.translate_input(
self.translator, blkio_weight_device=["/dev/sda:100", "/dev/sdb:foo"]
)
@assert_stringlist(salt.utils.docker.translate.container)
def test_cap_add(self):
"""
Should be a list of strings or converted to one
"""
@assert_stringlist(salt.utils.docker.translate.container)
def test_cap_drop(self):
"""
Should be a list of strings or converted to one
"""
@assert_cmd(salt.utils.docker.translate.container)
def test_command(self):
"""
Can either be a string or a comma-separated or Python list of strings.
"""
@assert_string(salt.utils.docker.translate.container)
def test_cpuset_cpus(self):
"""
Should be a string or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_cpuset_mems(self):
"""
Should be a string or converted to one
"""
@assert_int(salt.utils.docker.translate.container)
def test_cpu_group(self):
"""
Should be an int or converted to one
"""
@assert_int(salt.utils.docker.translate.container)
def test_cpu_period(self):
"""
Should be an int or converted to one
"""
@assert_int(salt.utils.docker.translate.container)
def test_cpu_shares(self):
"""
Should be an int or converted to one
"""
@assert_bool(salt.utils.docker.translate.container)
def test_detach(self):
"""
Should be a bool or converted to one
"""
@assert_device_rates(salt.utils.docker.translate.container)
def test_device_read_bps(self):
"""
CLI input is a list of PATH:RATE pairs, but the API expects a list of
dictionaries in the format [{'Path': path, 'Rate': rate}]
"""
@assert_device_rates(salt.utils.docker.translate.container)
def test_device_read_iops(self):
"""
CLI input is a list of PATH:RATE pairs, but the API expects a list of
dictionaries in the format [{'Path': path, 'Rate': rate}]
"""
@assert_device_rates(salt.utils.docker.translate.container)
def test_device_write_bps(self):
"""
CLI input is a list of PATH:RATE pairs, but the API expects a list of
dictionaries in the format [{'Path': path, 'Rate': rate}]
"""
@assert_device_rates(salt.utils.docker.translate.container)
def test_device_write_iops(self):
"""
CLI input is a list of PATH:RATE pairs, but the API expects a list of
dictionaries in the format [{'Path': path, 'Rate': rate}]
"""
@assert_stringlist(salt.utils.docker.translate.container)
def test_devices(self):
"""
Should be a list of strings or converted to one
"""
@assert_stringlist(salt.utils.docker.translate.container)
def test_dns_opt(self):
"""
Should be a list of strings or converted to one
"""
@assert_stringlist(salt.utils.docker.translate.container)
def test_dns_search(self):
"""
Should be a list of strings or converted to one
"""
def test_dns(self):
"""
While this is a stringlist, it also supports IP address validation, so
it can't use the test_stringlist decorator because we need to test both
with and without validation, and it isn't necessary to make all other
stringlist tests also do that same kind of testing.
"""
for val in ("8.8.8.8,8.8.4.4", ["8.8.8.8", "8.8.4.4"]):
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, dns=val, validate_ip_addrs=True,
),
{"dns": ["8.8.8.8", "8.8.4.4"]},
)
# Error case: invalid IP address caught by validation
for val in ("8.8.8.888,8.8.4.4", ["8.8.8.888", "8.8.4.4"]):
with self.assertRaisesRegex(
CommandExecutionError, r"'8.8.8.888' is not a valid IP address"
):
salt.utils.docker.translate_input(
self.translator, dns=val, validate_ip_addrs=True,
)
# This is not valid input but it will test whether or not IP address
# validation happened.
for val in ("foo,bar", ["foo", "bar"]):
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, dns=val, validate_ip_addrs=False,
),
{"dns": ["foo", "bar"]},
)
@assert_string(salt.utils.docker.translate.container)
def test_domainname(self):
    """
    Should be a string or converted to one
    """
# Decorator-driven test: assert_cmd supplies the assertions.
@assert_cmd(salt.utils.docker.translate.container)
def test_entrypoint(self):
    """
    Can either be a string or a comma-separated or Python list of strings.
    """

# Decorator-driven test: assert_key_equals_value supplies the assertions.
@assert_key_equals_value(salt.utils.docker.translate.container)
def test_environment(self):
    """
    Can be passed in several formats but must end up as a dictionary
    mapping keys to values
    """
def test_extra_hosts(self):
    """
    Can be passed as a list of key:value pairs but can't be simply tested
    using @assert_key_colon_value since we need to test both with and
    without IP address validation.
    """
    # Valid IPs translate to a hostname -> address mapping when validating.
    for hosts_input in (
        "web1:10.9.8.7,web2:10.9.8.8",
        ["web1:10.9.8.7", "web2:10.9.8.8"],
    ):
        result = salt.utils.docker.translate_input(
            self.translator, extra_hosts=hosts_input, validate_ip_addrs=True,
        )
        self.assertEqual(
            result, {"extra_hosts": {"web1": "10.9.8.7", "web2": "10.9.8.8"}}
        )
    # Error case: invalid IP address caught by validation
    for hosts_input in (
        "web1:10.9.8.299,web2:10.9.8.8",
        ["web1:10.9.8.299", "web2:10.9.8.8"],
    ):
        with self.assertRaisesRegex(
            CommandExecutionError, r"'10.9.8.299' is not a valid IP address"
        ):
            salt.utils.docker.translate_input(
                self.translator, extra_hosts=hosts_input, validate_ip_addrs=True,
            )
    # Not valid host input, but it proves that IP validation was skipped.
    for hosts_input in ("foo:bar,baz:qux", ["foo:bar", "baz:qux"]):
        result = salt.utils.docker.translate_input(
            self.translator, extra_hosts=hosts_input, validate_ip_addrs=False,
        )
        self.assertEqual(result, {"extra_hosts": {"foo": "bar", "baz": "qux"}})
# Decorator-driven tests: each assert_* decorator supplies the assertions,
# so the bodies below are docstring-only.
@assert_stringlist(salt.utils.docker.translate.container)
def test_group_add(self):
    """
    Should be a list of strings or converted to one
    """

@assert_string(salt.utils.docker.translate.container)
def test_hostname(self):
    """
    Should be a string or converted to one
    """

@assert_string(salt.utils.docker.translate.container)
def test_ipc_mode(self):
    """
    Should be a string or converted to one
    """

@assert_string(salt.utils.docker.translate.container)
def test_isolation(self):
    """
    Should be a string or converted to one
    """

@assert_labels(salt.utils.docker.translate.container)
def test_labels(self):
    """
    Can be passed as a list of key=value pairs or a dictionary, and must
    ultimately end up as a dictionary.
    """

@assert_key_colon_value(salt.utils.docker.translate.container)
def test_links(self):
    """
    Can be passed as a list of key:value pairs or a dictionary, and must
    ultimately end up as a dictionary.
    """
def test_log_config(self):
    """
    This is a mixture of log_driver and log_opt, which get combined into a
    dictionary.

    log_driver is a simple string, but log_opt can be passed in several
    formats (comma-separated string, list of strings, list of single-key
    dicts, or a dict), so we need to test them all.
    """
    for val in (
        "foo=bar,baz=qux",
        ["foo=bar", "baz=qux"],
        [{"foo": "bar"}, {"baz": "qux"}],
        {"foo": "bar", "baz": "qux"},
    ):
        self.assertEqual(
            salt.utils.docker.translate_input(
                # Bug fix: pass the loop's format variant. The original
                # hard-coded the string form here (and built an unused
                # `expected` tuple), so the list/dict variants were never
                # actually tested.
                self.translator, log_driver="foo", log_opt=val
            ),
            {"log_config": {"Type": "foo", "Config": {"foo": "bar", "baz": "qux"}}},
        )
    # Ensure passing either `log_driver` or `log_opt` alone works
    self.assertEqual(
        salt.utils.docker.translate_input(self.translator, log_driver="foo"),
        {"log_config": {"Type": "foo", "Config": {}}},
    )
    self.assertEqual(
        salt.utils.docker.translate_input(
            self.translator, log_opt={"foo": "bar", "baz": "qux"}
        ),
        {"log_config": {"Type": "none", "Config": {"foo": "bar", "baz": "qux"}}},
    )
# Decorator-driven tests: each assert_* decorator supplies the assertions,
# so the bodies below are docstring-only.
@assert_key_equals_value(salt.utils.docker.translate.container)
def test_lxc_conf(self):
    """
    Can be passed as a list of key=value pairs or a dictionary, and must
    ultimately end up as a dictionary.
    """

@assert_string(salt.utils.docker.translate.container)
def test_mac_address(self):
    """
    Should be a string or converted to one
    """

@assert_int_or_string(salt.utils.docker.translate.container)
def test_mem_limit(self):
    """
    Should be an int or string, or converted to one
    """

@assert_int(salt.utils.docker.translate.container)
def test_mem_swappiness(self):
    """
    Should be an int or converted to one
    """

@assert_int_or_string(salt.utils.docker.translate.container)
def test_memswap_limit(self):
    """
    Should be an int or string, or converted to one
    """

@assert_string(salt.utils.docker.translate.container)
def test_name(self):
    """
    Should be a string or converted to one
    """

@assert_bool(salt.utils.docker.translate.container)
def test_network_disabled(self):
    """
    Should be a bool or converted to one
    """

@assert_string(salt.utils.docker.translate.container)
def test_network_mode(self):
    """
    Should be a string or converted to one
    """

@assert_bool(salt.utils.docker.translate.container)
def test_oom_kill_disable(self):
    """
    Should be a bool or converted to one
    """

@assert_int(salt.utils.docker.translate.container)
def test_oom_score_adj(self):
    """
    Should be an int or converted to one
    """

@assert_string(salt.utils.docker.translate.container)
def test_pid_mode(self):
    """
    Should be a string or converted to one
    """

@assert_int(salt.utils.docker.translate.container)
def test_pids_limit(self):
    """
    Should be an int or converted to one
    """
def test_port_bindings(self):
    """
    This has several potential formats and can include port ranges. It
    needs its own test.
    """
    # ip:hostPort:containerPort - Bind a specific IP and port on the host
    # to a specific port within the container.
    bindings = (
        "10.1.2.3:8080:80,10.1.2.3:8888:80,10.4.5.6:3333:3333,"
        "10.7.8.9:14505-14506:4505-4506,10.1.2.3:8080:81/udp,"
        "10.1.2.3:8888:81/udp,10.4.5.6:3334:3334/udp,"
        "10.7.8.9:15505-15506:5505-5506/udp"
    )
    # Every bindings blob is exercised both as a comma-separated string and
    # as a Python list of the same items.
    for val in (bindings, bindings.split(",")):
        self.assertEqual(
            self.normalize_ports(
                salt.utils.docker.translate_input(
                    self.translator, port_bindings=val,
                )
            ),
            {
                "port_bindings": {
                    80: [("10.1.2.3", 8080), ("10.1.2.3", 8888)],
                    3333: ("10.4.5.6", 3333),
                    4505: ("10.7.8.9", 14505),
                    4506: ("10.7.8.9", 14506),
                    "81/udp": [("10.1.2.3", 8080), ("10.1.2.3", 8888)],
                    "3334/udp": ("10.4.5.6", 3334),
                    "5505/udp": ("10.7.8.9", 15505),
                    "5506/udp": ("10.7.8.9", 15506),
                },
                "ports": [
                    80,
                    3333,
                    4505,
                    4506,
                    (81, "udp"),
                    (3334, "udp"),
                    (5505, "udp"),
                    (5506, "udp"),
                ],
            },
        )
    # ip::containerPort - Bind a specific IP and an ephemeral port to a
    # specific port within the container.
    bindings = (
        "10.1.2.3::80,10.1.2.3::80,10.4.5.6::3333,10.7.8.9::4505-4506,"
        "10.1.2.3::81/udp,10.1.2.3::81/udp,10.4.5.6::3334/udp,"
        "10.7.8.9::5505-5506/udp"
    )
    for val in (bindings, bindings.split(",")):
        self.assertEqual(
            self.normalize_ports(
                salt.utils.docker.translate_input(
                    self.translator, port_bindings=val,
                )
            ),
            {
                # Ephemeral host ports are represented as 1-tuples of the IP.
                "port_bindings": {
                    80: [("10.1.2.3",), ("10.1.2.3",)],
                    3333: ("10.4.5.6",),
                    4505: ("10.7.8.9",),
                    4506: ("10.7.8.9",),
                    "81/udp": [("10.1.2.3",), ("10.1.2.3",)],
                    "3334/udp": ("10.4.5.6",),
                    "5505/udp": ("10.7.8.9",),
                    "5506/udp": ("10.7.8.9",),
                },
                "ports": [
                    80,
                    3333,
                    4505,
                    4506,
                    (81, "udp"),
                    (3334, "udp"),
                    (5505, "udp"),
                    (5506, "udp"),
                ],
            },
        )
    # hostPort:containerPort - Bind a specific port on all of the host's
    # interfaces to a specific port within the container.
    bindings = (
        "8080:80,8888:80,3333:3333,14505-14506:4505-4506,8080:81/udp,"
        "8888:81/udp,3334:3334/udp,15505-15506:5505-5506/udp"
    )
    for val in (bindings, bindings.split(",")):
        self.assertEqual(
            self.normalize_ports(
                salt.utils.docker.translate_input(
                    self.translator, port_bindings=val,
                )
            ),
            {
                # With no IP, bindings are just the host port number(s).
                "port_bindings": {
                    80: [8080, 8888],
                    3333: 3333,
                    4505: 14505,
                    4506: 14506,
                    "81/udp": [8080, 8888],
                    "3334/udp": 3334,
                    "5505/udp": 15505,
                    "5506/udp": 15506,
                },
                "ports": [
                    80,
                    3333,
                    4505,
                    4506,
                    (81, "udp"),
                    (3334, "udp"),
                    (5505, "udp"),
                    (5506, "udp"),
                ],
            },
        )
    # containerPort - Bind an ephemeral port on all of the host's
    # interfaces to a specific port within the container.
    bindings = "80,3333,4505-4506,81/udp,3334/udp,5505-5506/udp"
    for val in (bindings, bindings.split(",")):
        self.assertEqual(
            self.normalize_ports(
                salt.utils.docker.translate_input(
                    self.translator, port_bindings=val,
                )
            ),
            {
                # Fully-ephemeral bindings translate to None.
                "port_bindings": {
                    80: None,
                    3333: None,
                    4505: None,
                    4506: None,
                    "81/udp": None,
                    "3334/udp": None,
                    "5505/udp": None,
                    "5506/udp": None,
                },
                "ports": [
                    80,
                    3333,
                    4505,
                    4506,
                    (81, "udp"),
                    (3334, "udp"),
                    (5505, "udp"),
                    (5506, "udp"),
                ],
            },
        )
    # Test a mixture of different types of input
    bindings = (
        "10.1.2.3:8080:80,10.4.5.6::3333,14505-14506:4505-4506,"
        "9999-10001,10.1.2.3:8080:81/udp,10.4.5.6::3334/udp,"
        "15505-15506:5505-5506/udp,19999-20001/udp"
    )
    for val in (bindings, bindings.split(",")):
        self.assertEqual(
            self.normalize_ports(
                salt.utils.docker.translate_input(
                    self.translator, port_bindings=val,
                )
            ),
            {
                "port_bindings": {
                    80: ("10.1.2.3", 8080),
                    3333: ("10.4.5.6",),
                    4505: 14505,
                    4506: 14506,
                    9999: None,
                    10000: None,
                    10001: None,
                    "81/udp": ("10.1.2.3", 8080),
                    "3334/udp": ("10.4.5.6",),
                    "5505/udp": 15505,
                    "5506/udp": 15506,
                    "19999/udp": None,
                    "20000/udp": None,
                    "20001/udp": None,
                },
                "ports": [
                    80,
                    3333,
                    4505,
                    4506,
                    9999,
                    10000,
                    10001,
                    (81, "udp"),
                    (3334, "udp"),
                    (5505, "udp"),
                    (5506, "udp"),
                    (19999, "udp"),
                    (20000, "udp"),
                    (20001, "udp"),
                ],
            },
        )
    # Error case: too many items (max 3)
    with self.assertRaisesRegex(
        CommandExecutionError,
        r"'10.1.2.3:8080:80:123' is an invalid port binding "
        r"definition \(at most 3 components are allowed, found 4\)",
    ):
        salt.utils.docker.translate_input(
            self.translator, port_bindings="10.1.2.3:8080:80:123"
        )
    # Error case: port range start is greater than end
    for val in (
        "10.1.2.3:5555-5554:1111-1112",
        "10.1.2.3:1111-1112:5555-5554",
        "10.1.2.3::5555-5554",
        "5555-5554:1111-1112",
        "1111-1112:5555-5554",
        "5555-5554",
    ):
        with self.assertRaisesRegex(
            CommandExecutionError,
            r"Start of port range \(5555\) cannot be greater than end "
            r"of port range \(5554\)",
        ):
            salt.utils.docker.translate_input(
                self.translator, port_bindings=val,
            )
    # Error case: non-numeric port range
    for val in (
        "10.1.2.3:foo:1111-1112",
        "10.1.2.3:1111-1112:foo",
        "10.1.2.3::foo",
        "foo:1111-1112",
        "1111-1112:foo",
        "foo",
    ):
        with self.assertRaisesRegex(
            CommandExecutionError, "'foo' is non-numeric or an invalid port range"
        ):
            salt.utils.docker.translate_input(
                self.translator, port_bindings=val,
            )
    # Error case: mismatched port range
    for val in ("10.1.2.3:1111-1113:1111-1112", "1111-1113:1111-1112"):
        with self.assertRaisesRegex(
            CommandExecutionError,
            r"Host port range \(1111-1113\) does not have the same "
            r"number of ports as the container port range \(1111-1112\)",
        ):
            salt.utils.docker.translate_input(self.translator, port_bindings=val)
    for val in ("10.1.2.3:1111-1112:1111-1113", "1111-1112:1111-1113"):
        with self.assertRaisesRegex(
            CommandExecutionError,
            r"Host port range \(1111-1112\) does not have the same "
            r"number of ports as the container port range \(1111-1113\)",
        ):
            salt.utils.docker.translate_input(
                self.translator, port_bindings=val,
            )
    # Error case: empty host port or container port
    with self.assertRaisesRegex(
        CommandExecutionError, "Empty host port in port binding definition ':1111'"
    ):
        salt.utils.docker.translate_input(self.translator, port_bindings=":1111")
    with self.assertRaisesRegex(
        CommandExecutionError,
        "Empty container port in port binding definition '1111:'",
    ):
        salt.utils.docker.translate_input(self.translator, port_bindings="1111:")
    with self.assertRaisesRegex(
        CommandExecutionError, "Empty port binding definition found"
    ):
        salt.utils.docker.translate_input(self.translator, port_bindings="")
def test_ports(self):
    """
    Ports can be passed as a comma-separated or Python list of port
    numbers, with '/tcp' being optional for TCP ports. They must
    ultimately be a list of port definitions, in which an integer denotes
    a TCP port, and a tuple in the format (port_num, 'udp') denotes a UDP
    port. Also, the port numbers must end up as integers. None of the
    decorators will suffice so this one must be tested specially.
    """
    expected = {"ports": [1111, 2222, 4505, 4506, (3333, "udp")]}
    for ports_input in (
        "1111,2222/tcp,3333/udp,4505-4506",
        [1111, "2222/tcp", "3333/udp", "4505-4506"],
        ["1111", "2222/tcp", "3333/udp", "4505-4506"],
    ):
        translated = salt.utils.docker.translate_input(
            self.translator, ports=ports_input,
        )
        self.assertEqual(self.normalize_ports(translated), expected)
    # Error case: non-integer and non/string value
    for ports_input in (1.0, [1.0]):
        with self.assertRaisesRegex(
            CommandExecutionError, "'1.0' is not a valid port definition"
        ):
            salt.utils.docker.translate_input(
                self.translator, ports=ports_input,
            )
    # Error case: port range start is greater than end
    with self.assertRaisesRegex(
        CommandExecutionError,
        r"Start of port range \(5555\) cannot be greater than end of "
        r"port range \(5554\)",
    ):
        salt.utils.docker.translate_input(
            self.translator, ports="5555-5554",
        )
# Decorator-driven tests: assert_bool supplies the assertions, so the
# bodies below are docstring-only.
@assert_bool(salt.utils.docker.translate.container)
def test_privileged(self):
    """
    Should be a bool or converted to one
    """

@assert_bool(salt.utils.docker.translate.container)
def test_publish_all_ports(self):
    """
    Should be a bool or converted to one
    """

@assert_bool(salt.utils.docker.translate.container)
def test_read_only(self):
    """
    Should be a bool or converted to one
    """
def test_restart_policy(self):
    """
    Input is in the format "name[:retry_count]", but the API wants it
    in the format {'Name': name, 'MaximumRetryCount': retry_count}
    """
    name = "restart_policy"
    alias = "restart"
    expected = {name: {"Name": "on-failure", "MaximumRetryCount": 5}}
    # Both the canonical kwarg name and its alias must translate the same.
    for argname in (name, alias):
        # With an explicit retry count
        self.assertEqual(
            salt.utils.docker.translate_input(
                self.translator, **{argname: "on-failure:5"}
            ),
            expected,
        )
        # Without a retry count, the count defaults to zero
        self.assertEqual(
            salt.utils.docker.translate_input(
                self.translator, **{argname: "on-failure"}
            ),
            {name: {"Name": "on-failure", "MaximumRetryCount": 0}},
        )
        # Error case: more than one policy passed
        with self.assertRaisesRegex(
            CommandExecutionError, "Only one policy is permitted"
        ):
            salt.utils.docker.translate_input(
                self.translator, **{argname: "on-failure,always"}
            )
    # Collision between canonical name and alias: the canonical name wins
    # when collisions are ignored, and raises otherwise.
    colliding = {name: "on-failure:5", alias: "always"}
    self.assertEqual(
        salt.utils.docker.translate_input(
            self.translator, ignore_collisions=True, **colliding
        ),
        expected,
    )
    with self.assertRaisesRegex(
        CommandExecutionError, "'restart' is an alias for 'restart_policy'"
    ):
        salt.utils.docker.translate_input(
            self.translator, ignore_collisions=False, **colliding
        )
# Decorator-driven tests: each assert_* decorator supplies the assertions,
# so the bodies below are docstring-only.
@assert_stringlist(salt.utils.docker.translate.container)
def test_security_opt(self):
    """
    Should be a list of strings or converted to one
    """

@assert_int_or_string(salt.utils.docker.translate.container)
def test_shm_size(self):
    """
    Should be an int or string, or converted to one
    """

@assert_bool(salt.utils.docker.translate.container)
def test_stdin_open(self):
    """
    Should be a bool or converted to one
    """

@assert_string(salt.utils.docker.translate.container)
def test_stop_signal(self):
    """
    Should be a string or converted to one
    """

@assert_int(salt.utils.docker.translate.container)
def test_stop_timeout(self):
    """
    Should be an int or converted to one
    """

@assert_key_equals_value(salt.utils.docker.translate.container)
def test_storage_opt(self):
    """
    Can be passed in several formats but must end up as a dictionary
    mapping keys to values
    """

@assert_key_equals_value(salt.utils.docker.translate.container)
def test_sysctls(self):
    """
    Can be passed in several formats but must end up as a dictionary
    mapping keys to values
    """

@assert_dict(salt.utils.docker.translate.container)
def test_tmpfs(self):
    """
    Can be passed in several formats but must end up as a dictionary
    mapping keys to values
    """

@assert_bool(salt.utils.docker.translate.container)
def test_tty(self):
    """
    Should be a bool or converted to one
    """
def test_ulimits(self):
    """
    Input is in the format "name=soft_limit[:hard_limit]", but the API
    wants it in the format
    {'Name': name, 'Soft': soft_limit, 'Hard': hard_limit}
    """
    # One entry with an explicit hard limit, one without (the hard limit
    # then mirrors the soft limit).
    expected = {
        "ulimits": [
            {"Name": "nofile", "Soft": 1024, "Hard": 2048},
            {"Name": "nproc", "Soft": 50, "Hard": 50},
        ]
    }
    ulimits = "nofile=1024:2048,nproc=50"
    for limits_input in (ulimits, ulimits.split(",")):
        self.assertEqual(
            salt.utils.docker.translate_input(self.translator, ulimits=limits_input,),
            expected,
        )
    # Error case: wrong separator between name and limits
    with self.assertRaisesRegex(
        CommandExecutionError,
        r"Ulimit definition 'nofile:1024:2048' is not in the format "
        r"type=soft_limit\[:hard_limit\]",
    ):
        salt.utils.docker.translate_input(
            self.translator, ulimits="nofile:1024:2048"
        )
    # Error case: non-numeric limit value
    with self.assertRaisesRegex(
        CommandExecutionError,
        r"Limit 'nofile=foo:2048' contains non-numeric value\(s\)",
    ):
        salt.utils.docker.translate_input(
            self.translator, ulimits="nofile=foo:2048"
        )
def test_user(self):
    """
    Must be either username (string) or uid (int). An int passed as a
    string (e.g. '0') should be converted to an int.
    """
    # Username passed as string is kept verbatim
    self.assertEqual(
        salt.utils.docker.translate_input(self.translator, user="foo"),
        {"user": "foo"},
    )
    # uid passed as an int and as a numeric string both become int 0
    for uid in (0, "0"):
        self.assertEqual(
            salt.utils.docker.translate_input(self.translator, user=uid),
            {"user": 0},
        )
    # Error case: non string/int passed
    with self.assertRaisesRegex(
        CommandExecutionError, "Value must be a username or uid"
    ):
        salt.utils.docker.translate_input(self.translator, user=["foo"])
    # Error case: negative int passed
    with self.assertRaisesRegex(CommandExecutionError, "'-1' is an invalid uid"):
        salt.utils.docker.translate_input(self.translator, user=-1)
@assert_string(salt.utils.docker.translate.container)
def test_userns_mode(self):
    """
    Should be a string or converted to one
    """

@assert_string(salt.utils.docker.translate.container)
def test_volume_driver(self):
    """
    Should be a string or converted to one
    """
@assert_stringlist(salt.utils.docker.translate.container)
def test_volumes(self):
    """
    Should be a list of absolute paths
    """
    # Error case: Not an absolute path
    # os.path.join is used so the relative path is platform-appropriate;
    # the replace() escapes Windows backslashes for the regex match.
    path = os.path.join("foo", "bar", "baz")
    with self.assertRaisesRegex(
        CommandExecutionError,
        "'{0}' is not an absolute path".format(path.replace("\\", "\\\\")),
    ):
        salt.utils.docker.translate_input(self.translator, volumes=path)
@assert_stringlist(salt.utils.docker.translate.container)
def test_volumes_from(self):
    """
    Should be a list of strings or converted to one
    """

@assert_string(salt.utils.docker.translate.container)
def test_working_dir(self):
    """
    Should be a single absolute path
    """
    # Error case: Not an absolute path
    # os.path.join is used so the relative path is platform-appropriate;
    # the replace() escapes Windows backslashes for the regex match.
    path = os.path.join("foo", "bar", "baz")
    with self.assertRaisesRegex(
        CommandExecutionError,
        "'{0}' is not an absolute path".format(path.replace("\\", "\\\\")),
    ):
        salt.utils.docker.translate_input(self.translator, working_dir=path)
class TranslateNetworkInputTestCase(TranslateBase):
    """
    Tests for salt.utils.docker.translate_input(), invoked using
    salt.utils.docker.translate.network as the translator module.
    """

    translator = salt.utils.docker.translate.network
    # Sample addresses keyed by whether or not they should pass validation
    ip_addrs = {
        True: ("10.1.2.3", "::1"),
        False: ("FOO", "0.9.800.1000", "feaz::1", "aj01::feac"),
    }

    @assert_string(salt.utils.docker.translate.network)
    def test_driver(self):
        """
        Should be a string or converted to one
        """

    @assert_key_equals_value(salt.utils.docker.translate.network)
    def test_options(self):
        """
        Can be passed in several formats but must end up as a dictionary
        mapping keys to values
        """

    @assert_dict(salt.utils.docker.translate.network)
    def test_ipam(self):
        """
        Must be a dict
        """

    @assert_bool(salt.utils.docker.translate.network)
    def test_check_duplicate(self):
        """
        Should be a bool or converted to one
        """

    @assert_bool(salt.utils.docker.translate.network)
    def test_internal(self):
        """
        Should be a bool or converted to one
        """

    @assert_labels(salt.utils.docker.translate.network)
    def test_labels(self):
        """
        Can be passed as a list of key=value pairs or a dictionary, and must
        ultimately end up as a dictionary.
        """

    @assert_bool(salt.utils.docker.translate.network)
    def test_enable_ipv6(self):
        """
        Should be a bool or converted to one
        """

    @assert_bool(salt.utils.docker.translate.network)
    def test_attachable(self):
        """
        Should be a bool or converted to one
        """

    @assert_bool(salt.utils.docker.translate.network)
    def test_ingress(self):
        """
        Should be a bool or converted to one
        """

    @assert_string(salt.utils.docker.translate.network)
    def test_ipam_driver(self):
        """
        Should be a string or converted to one
        """

    @assert_key_equals_value(salt.utils.docker.translate.network)
    def test_ipam_opts(self):
        """
        Can be passed in several formats but must end up as a dictionary
        mapping keys to values
        """

    def test_ipam_pools(self):
        """
        Must be a list of dictionaries (not a dictlist)

        Bug fix: renamed from ``ipam_pools`` to ``test_ipam_pools`` — the
        unittest runner only discovers methods whose names start with
        ``test``, so this test was silently never executed.
        """
        # A fully-valid pool definition
        good_pool = {
            "subnet": "10.0.0.0/24",
            "iprange": "10.0.0.128/25",
            "gateway": "10.0.0.254",
            "aux_addresses": {
                "foo.bar.tld": "10.0.0.20",
                "hello.world.tld": "10.0.0.21",
            },
        }
        # Each bad pool copies good_pool with exactly one field invalidated:
        # bad subnet mask, non-numeric iprange, out-of-range gateway octet,
        # and an invalid aux address, respectively.
        bad_pools = [
            {
                "subnet": "10.0.0.0/33",
                "iprange": "10.0.0.128/25",
                "gateway": "10.0.0.254",
                "aux_addresses": {
                    "foo.bar.tld": "10.0.0.20",
                    "hello.world.tld": "10.0.0.21",
                },
            },
            {
                "subnet": "10.0.0.0/24",
                "iprange": "foo/25",
                "gateway": "10.0.0.254",
                "aux_addresses": {
                    "foo.bar.tld": "10.0.0.20",
                    "hello.world.tld": "10.0.0.21",
                },
            },
            {
                "subnet": "10.0.0.0/24",
                "iprange": "10.0.0.128/25",
                "gateway": "10.0.0.256",
                "aux_addresses": {
                    "foo.bar.tld": "10.0.0.20",
                    "hello.world.tld": "10.0.0.21",
                },
            },
            {
                "subnet": "10.0.0.0/24",
                "iprange": "10.0.0.128/25",
                "gateway": "10.0.0.254",
                "aux_addresses": {
                    "foo.bar.tld": "10.0.0.20",
                    "hello.world.tld": "999.0.0.21",
                },
            },
        ]
        self.assertEqual(
            salt.utils.docker.translate_input(self.translator, ipam_pools=[good_pool],),
            {"ipam_pools": [good_pool]},
        )
        for bad_pool in bad_pools:
            with self.assertRaisesRegex(CommandExecutionError, "not a valid"):
                salt.utils.docker.translate_input(
                    self.translator, ipam_pools=[good_pool, bad_pool]
                )

    @assert_subnet(salt.utils.docker.translate.network)
    def test_subnet(self):
        """
        Must be an IPv4 or IPv6 subnet
        """

    @assert_subnet(salt.utils.docker.translate.network)
    def test_iprange(self):
        """
        Must be an IPv4 or IPv6 subnet
        """

    def test_gateway(self):
        """
        Must be an IPv4 or IPv6 address
        """
        for val in self.ip_addrs[True]:
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, validate_ip_addrs=True, gateway=val,
                ),
                self.apply_defaults({"gateway": val}),
            )
        for val in self.ip_addrs[False]:
            # Invalid addresses raise when validating ...
            with self.assertRaisesRegex(
                CommandExecutionError, "'{0}' is not a valid IP address".format(val)
            ):
                salt.utils.docker.translate_input(
                    self.translator, validate_ip_addrs=True, gateway=val,
                )
            # ... and pass through (coerced to text) when not validating.
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, validate_ip_addrs=False, gateway=val,
                ),
                self.apply_defaults(
                    {
                        "gateway": val
                        if isinstance(val, six.string_types)
                        else six.text_type(val)
                    }
                ),
            )

    @assert_key_equals_value(salt.utils.docker.translate.network)
    def test_aux_addresses(self):
        """
        Must be a mapping of hostnames to IP addresses
        """
        name = "aux_addresses"
        alias = "aux_address"
        for item in (name, alias):
            for val in self.ip_addrs[True]:
                addresses = {"foo.bar.tld": val}
                self.assertEqual(
                    salt.utils.docker.translate_input(
                        self.translator, validate_ip_addrs=True, **{item: addresses}
                    ),
                    self.apply_defaults({name: addresses}),
                )
            for val in self.ip_addrs[False]:
                addresses = {"foo.bar.tld": val}
                with self.assertRaisesRegex(
                    CommandExecutionError, "'{0}' is not a valid IP address".format(val)
                ):
                    salt.utils.docker.translate_input(
                        self.translator, validate_ip_addrs=True, **{item: addresses}
                    )
                self.assertEqual(
                    salt.utils.docker.translate_input(
                        self.translator,
                        validate_ip_addrs=False,
                        aux_addresses=addresses,
                    ),
                    self.apply_defaults({name: addresses}),
                )
class DockerTranslateHelperTestCase(TestCase):
    """
    Tests for a couple helper functions in salt.utils.docker.translate
    """

    def test_get_port_def(self):
        """
        Test translation of port definition (1234, '1234/tcp', '1234/udp',
        etc.) into the format which docker-py uses (integer for TCP ports,
        'port_num/udp' for UDP ports).
        """
        # (args, expected) pairs covering TCP/UDP, int/str input, and the
        # contrived case where a protocol embedded in port_num overrides
        # the separately-passed protocol argument.
        cases = (
            ((2222,), 2222),                   # TCP port as int, no protocol
            (("2222",), 2222),                 # TCP port as str, no protocol
            (("2222", "tcp"), 2222),           # TCP port as str with protocol
            (("2222/tcp", "udp"), 2222),       # proto in port_num wins
            ((2222, "udp"), (2222, "udp")),    # UDP port as int
            (("2222", "udp"), (2222, "udp")),  # UDP port as string
            (("2222/udp",), (2222, "udp")),    # UDP proto in port_num
        )
        for args, expected in cases:
            self.assertEqual(translate_helpers.get_port_def(*args), expected)

    def test_get_port_range(self):
        """
        Test extracting the start and end of a port range from a port range
        expression (e.g. 4505-4506)
        """
        # A single port (int or str) is a degenerate range: start == end
        self.assertEqual(translate_helpers.get_port_range(2222), (2222, 2222))
        self.assertEqual(translate_helpers.get_port_range("2222"), (2222, 2222))
        # A proper range yields its two endpoints
        self.assertEqual(translate_helpers.get_port_range("2222-2223"), (2222, 2223))
        # Error case: port range start is greater than end
        with self.assertRaisesRegex(
            ValueError,
            r"Start of port range \(2222\) cannot be greater than end of "
            r"port range \(2221\)",
        ):
            translate_helpers.get_port_range("2222-2221")
        # Error case: non-numeric input
        with self.assertRaisesRegex(
            ValueError, "'2222-bar' is non-numeric or an invalid port range"
        ):
            translate_helpers.get_port_range("2222-bar")
| 37.134587
| 88
| 0.514371
| 7,991
| 75,049
| 4.731698
| 0.073833
| 0.046177
| 0.075772
| 0.1206
| 0.801936
| 0.766735
| 0.736029
| 0.714977
| 0.69329
| 0.633387
| 0
| 0.046337
| 0.375128
| 75,049
| 2,020
| 89
| 37.15297
| 0.759937
| 0.170422
| 0
| 0.561352
| 0
| 0.007348
| 0.109755
| 0.020414
| 0
| 0
| 0
| 0
| 0.14989
| 1
| 0.077149
| false
| 0
| 0.010287
| 0
| 0.115356
| 0.000735
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a189a8ce0239f76496cb3c604a52bf52c941ff4e
| 515
|
py
|
Python
|
playing1.py
|
bert386/rpi-monitor-cam-led
|
d333a8313500be8150e59462df5482b307eb368d
|
[
"Apache-2.0"
] | null | null | null |
playing1.py
|
bert386/rpi-monitor-cam-led
|
d333a8313500be8150e59462df5482b307eb368d
|
[
"Apache-2.0"
] | null | null | null |
playing1.py
|
bert386/rpi-monitor-cam-led
|
d333a8313500be8150e59462df5482b307eb368d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Description:
Todo:
"""
import os
import sys
import logging
from collections import deque
from base_state import BaseState
class Playing1stState(BaseState):
    """First-track playing state; lifecycle plumbing lives in BaseState."""

    def __init__(self, state_controller):
        # Register in_state as this state's in-state callback with the base
        # class (exact callback semantics defined by BaseState -- not visible
        # here).
        super().__init__(state_controller, self.in_state)

    @BaseState.decorator_enter
    def on_entered(self):
        # Entry hook. Logged at WARNING level -- presumably so it shows at the
        # default log level; confirm intended severity.
        logging.warning("Track01 started ...")

    def in_state(self):
        # No per-cycle work while in this state.
        pass

    @BaseState.decorator_exit
    def on_exited(self):
        # No cleanup required on leaving this state.
        pass
| 16.612903
| 57
| 0.664078
| 59
| 515
| 5.508475
| 0.559322
| 0.092308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0.223301
| 515
| 31
| 58
| 16.612903
| 0.8025
| 0.081553
| 0
| 0.125
| 0
| 0
| 0.041667
| 0
| 0
| 0
| 0
| 0.032258
| 0
| 1
| 0.25
| false
| 0.125
| 0.3125
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
a194ce5184afbac2e200ce258188a996d6313650
| 113
|
py
|
Python
|
api/weibo/api/api.py
|
Eurkon/api
|
a51eae901e003ac6b94c04d12f1afeec00108256
|
[
"MIT"
] | 5
|
2021-06-15T05:33:01.000Z
|
2022-03-14T01:17:38.000Z
|
api/weibo/api/api.py
|
Eurkon/api
|
a51eae901e003ac6b94c04d12f1afeec00108256
|
[
"MIT"
] | 1
|
2021-06-03T09:22:50.000Z
|
2021-06-03T09:22:50.000Z
|
api/weibo/api/api.py
|
Eurkon/api
|
a51eae901e003ac6b94c04d12f1afeec00108256
|
[
"MIT"
] | 1
|
2021-07-25T15:58:40.000Z
|
2021-07-25T15:58:40.000Z
|
# -*- coding: utf-8 -*-
# @Author : Eurkon
# @Date : 2021/6/9 17:13
from api.weibo.api.top import weibo_top
| 22.6
| 39
| 0.610619
| 19
| 113
| 3.578947
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122222
| 0.20354
| 113
| 5
| 39
| 22.6
| 0.633333
| 0.548673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a1acd3aad52a9f207d22596dfa16d615ad5b5b36
| 6,253
|
py
|
Python
|
agents/hub_policy.py
|
floriandonhauser/TeBaG-RL
|
0110087c97e4d67f739961e7320945da4b3d9592
|
[
"MIT"
] | null | null | null |
agents/hub_policy.py
|
floriandonhauser/TeBaG-RL
|
0110087c97e4d67f739961e7320945da4b3d9592
|
[
"MIT"
] | null | null | null |
agents/hub_policy.py
|
floriandonhauser/TeBaG-RL
|
0110087c97e4d67f739961e7320945da4b3d9592
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow_hub as hub
from tf_agents.networks import network
# Bert needs this (I think) TODO: Check?
import tensorflow_text as text
embedding = "https://tfhub.dev/google/nnlm-en-dim128-with-normalization/2"
tfhub_handle_encoder = (
"https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1"
)
tfhub_handle_preprocess = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
class HubPolicyFC(network.Network):
    """Policy for DQN agent utilizing pre-trained NNLM embedding into FC layers."""

    def __init__(
        self, input_tensor_spec, action_spec, num_verb, num_obj, name="ActorNetwork"
    ):
        super().__init__()
        # The flat action space must factor exactly into (verb, object) pairs.
        num_actions = action_spec.maximum - action_spec.minimum + 1
        assert num_actions == num_verb * num_obj
        self.num_verb = num_verb
        self.num_obj = num_obj
        # Pre-trained NNLM sentence embedding, fine-tuned during training.
        self.hub_layer = hub.KerasLayer(
            embedding,
            input_shape=[],
            dtype=tf.string,
            trainable=True
        )
        self.fc1 = tf.keras.layers.Dense(128, activation="relu")
        self.fc2 = tf.keras.layers.Dense(64, activation="relu")
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.do1 = tf.keras.layers.Dropout(0.1)
        self.do2 = tf.keras.layers.Dropout(0.1)
        # Separate Q-value heads for the verb and the object of the action.
        self.verb_layer = tf.keras.layers.Dense(num_verb, activation=None)
        self.obj_layer = tf.keras.layers.Dense(num_obj, activation=None)
        self.number_of_strings = input_tensor_spec.shape[0]

    def call(self, observation, network_state=(), training=False):
        """A wrapper around `Network.call`.

        Args:
            observation: The input to `self.call`, matching `self.input_tensor_spec`
            network_state: A state to pass to the network used by the RNN layer
            training: Optional argument to set to training mode

        Returns:
            A tuple `(outputs, new_network_state)`.
        """
        if network_state is not None and len(network_state) == 0:
            network_state = None
        # Flatten the batch of strings, embed them, then restore the batch
        # structure with the 128-dim embedding as the trailing axis.
        flattened_observation = tf.reshape(observation, (-1))
        embedded_observations = self.hub_layer(flattened_observation, training=training)
        embedded_observations = tf.reshape(
            embedded_observations, (observation.shape[0], observation.shape[1], 128)
        )
        out = self.bn1(embedded_observations, training=training)
        out = self.fc1(out, training=training)
        # Bug fix: the Dropout outputs were previously discarded
        # (`self.do1(out, ...)` with no assignment), so both dropout layers
        # were no-ops. Assigning the results applies dropout in training
        # mode; inference-mode output is unchanged (Dropout is identity).
        out = self.do1(out, training=training)
        out = self.bn2(out, training=training)
        out = self.fc2(out, training=training)
        out = self.do2(out, training=training)
        verb_q_value = self.verb_layer(out, training=training)
        obj_q_value = self.obj_layer(out, training=training)
        # Combine the two heads additively: broadcast (verb, 1) + (1, obj)
        # into a (verb, obj) grid, sum over the string axis, then flatten
        # into the agent's combined action space.
        verb_q_value = tf.reshape(verb_q_value, (observation.shape[0], observation.shape[1], verb_q_value.shape[2], 1))
        obj_q_value = tf.reshape(obj_q_value, (observation.shape[0], observation.shape[1], 1, obj_q_value.shape[2]))
        q_values_added = tf.add(verb_q_value, obj_q_value)
        q_values_added = tf.math.reduce_sum(q_values_added, axis=1)
        q_values = tf.reshape(q_values_added, (observation.shape[0], -1))
        return q_values, ()
class HubPolicyBert(network.Network):
    """Policy network for a DQN agent: pre-trained small BERT into FC heads.

    Q-values for the composite (verb, object) action space are produced by
    broadcasting verb logits against object logits and summing over the
    per-string dimension.
    """

    def __init__(
        self, input_tensor_spec, action_spec, num_verb, num_obj, name="ActorNetwork"
    ):
        super().__init__()
        num_actions = action_spec.maximum - action_spec.minimum + 1
        # The flat action space must factor exactly into (verb, object) pairs.
        assert num_actions == num_verb * num_obj
        self.num_verb = num_verb
        self.num_obj = num_obj
        self.bert_preprocess_model = hub.KerasLayer(
            tfhub_handle_preprocess,
            input_shape=[],
            dtype=tf.string,
        )
        self.bert_model = hub.KerasLayer(tfhub_handle_encoder, trainable=True)
        # NOTE(review): fc1, do1 and verbobj_layer are built but never used in
        # call(); kept so any existing checkpoints continue to load.
        self.fc1 = tf.keras.layers.Dense(128, activation="relu")
        self.do1 = tf.keras.layers.Dropout(0.1)
        self.verb_layer = tf.keras.layers.Dense(num_verb, activation=None)
        self.obj_layer = tf.keras.layers.Dense(num_obj, activation=None)
        self.verbobj_layer = tf.keras.layers.Dense(num_actions, activation=None)
        self.number_of_strings = input_tensor_spec.shape[0]

    def call(self, observation, network_state=(), training=False):
        """A wrapper around `Network.call`.

        Args:
            observation: The input to `self.call`, matching `self.input_tensor_spec`.
            network_state: A state to pass to the network used by the RNN layer.
            training: Optional argument to set to training mode.

        Returns:
            A tuple `(outputs, new_network_state)`.
        """
        # tf-agents passes an empty tuple for stateless networks; normalize to None.
        if network_state is not None and len(network_state) == 0:
            network_state = None
        # Flatten (batch, num_strings) -> (batch * num_strings,) of strings.
        # FIX: the shape must be rank-1; `(-1)` is the scalar -1, not a tuple.
        flattened_observation = tf.reshape(observation, (-1,))
        encoder_inputs = self.bert_preprocess_model(flattened_observation)
        outputs = self.bert_model(encoder_inputs, training=training)
        out = outputs["pooled_output"]
        # assumes the encoder's pooled output width is 128 — TODO confirm
        out = tf.reshape(out, (observation.shape[0], observation.shape[1], 128))
        verb_q_value = self.verb_layer(out, training=training)
        obj_q_value = self.obj_layer(out, training=training)
        # Broadcast-add verb and object logits into a joint (verb, obj) grid,
        # then sum over the string dimension.
        verb_q_value = tf.reshape(
            verb_q_value,
            (observation.shape[0], observation.shape[1], verb_q_value.shape[2], 1),
        )
        obj_q_value = tf.reshape(
            obj_q_value,
            (observation.shape[0], observation.shape[1], 1, obj_q_value.shape[2]),
        )
        q_values_added = tf.add(verb_q_value, obj_q_value)
        q_values_added = tf.math.reduce_sum(q_values_added, axis=1)
        q_values = tf.reshape(q_values_added, (observation.shape[0], -1))
        return q_values, ()
| 40.869281
| 120
| 0.669119
| 845
| 6,253
| 4.704142
| 0.172781
| 0.042264
| 0.042516
| 0.036226
| 0.786415
| 0.737107
| 0.730566
| 0.720755
| 0.702893
| 0.646541
| 0
| 0.017457
| 0.221334
| 6,253
| 152
| 121
| 41.138158
| 0.798932
| 0.181673
| 0
| 0.586957
| 0
| 0
| 0.047629
| 0
| 0
| 0
| 0
| 0.006579
| 0.021739
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a1be9584512b198578c74cac68370142c4a6feeb
| 121
|
py
|
Python
|
tuinwolk/server/daemons/tuinwolk_daemon.py
|
TuinfeesT/TuinWolk
|
0af0321948f4f573d8eb5ad1b87ea42bfa6644e1
|
[
"MIT"
] | 1
|
2017-09-08T02:34:22.000Z
|
2017-09-08T02:34:22.000Z
|
tuinwolk/server/daemons/tuinwolk_daemon.py
|
TuinfeesT/TuinWolk
|
0af0321948f4f573d8eb5ad1b87ea42bfa6644e1
|
[
"MIT"
] | null | null | null |
tuinwolk/server/daemons/tuinwolk_daemon.py
|
TuinfeesT/TuinWolk
|
0af0321948f4f573d8eb5ad1b87ea42bfa6644e1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import daemon
class TuinWolkDaemon(daemon.Daemon):
    """Daemonized TuinWolk server process."""

    def run(self):
        """Main daemon loop; not yet implemented."""
        # TODO: implement me!
        pass
| 13.444444
| 36
| 0.719008
| 17
| 121
| 5.117647
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157025
| 121
| 8
| 37
| 15.125
| 0.852941
| 0.322314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
a1e6051e4e110799735dcb4615879dd95634d238
| 107
|
py
|
Python
|
swagger_client/apis/__init__.py
|
sendx/sendx-api-python
|
edce9755d3718efb12cb5493da7cbac961cb1d9b
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/apis/__init__.py
|
sendx/sendx-api-python
|
edce9755d3718efb12cb5493da7cbac961cb1d9b
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/apis/__init__.py
|
sendx/sendx-api-python
|
edce9755d3718efb12cb5493da7cbac961cb1d9b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
# import apis into api package
from .contact_api import ContactApi
| 21.4
| 38
| 0.841121
| 15
| 107
| 5.6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140187
| 107
| 4
| 39
| 26.75
| 0.913043
| 0.261682
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b811d6fa0121474e3b20b511fc6bfce131c9ffa7
| 440
|
py
|
Python
|
calc-app/input_console.py
|
t4d-classes/python_10042021
|
e2c28448ad66784c429655ab766f902b76d6ac79
|
[
"MIT"
] | null | null | null |
calc-app/input_console.py
|
t4d-classes/python_10042021
|
e2c28448ad66784c429655ab766f902b76d6ac79
|
[
"MIT"
] | null | null | null |
calc-app/input_console.py
|
t4d-classes/python_10042021
|
e2c28448ad66784c429655ab766f902b76d6ac79
|
[
"MIT"
] | null | null | null |
from common.input import input_int, input_float
def get_operand():
    """Prompt for and return a single numeric operand."""
    prompt = "Please enter an operand: "
    return input_float(prompt)
def get_command():
    """Prompt for and return the user's command as a raw string."""
    prompt = "Enter a command: "
    return input(prompt)
def get_history_entry_id():
    """Prompt for and return a history entry id as an integer."""
    prompt = "Please enter a history entry id: "
    return input_int(prompt)
def get_history_file_name():
    """Prompt for and return the history file name."""
    prompt = "Enter a history file name: "
    return input(prompt)
def get_history_report_file_name():
    """Prompt for and return the history report file name."""
    prompt = "Enter a history report file name: "
    return input(prompt)
| 20
| 57
| 0.725
| 66
| 440
| 4.590909
| 0.318182
| 0.09901
| 0.158416
| 0.168317
| 0.211221
| 0.211221
| 0.211221
| 0
| 0
| 0
| 0
| 0
| 0.179545
| 440
| 21
| 58
| 20.952381
| 0.839335
| 0
| 0
| 0
| 0
| 0
| 0.309091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.454545
| true
| 0
| 0.090909
| 0.454545
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b812c3ba84401bea4ea454da2ee284c224fe8b47
| 44
|
py
|
Python
|
lib/connection/RequestException.py
|
Karaya-12/Website-Dir-Scanner
|
04b150524631e2fff00a319b7daab7f4d32ddb18
|
[
"MIT"
] | 3
|
2019-09-30T07:12:58.000Z
|
2020-08-15T10:50:51.000Z
|
emailrep/exceptions.py
|
pry0cc/emailrep.io-python
|
c33839d327d438e75b4e6eea462ab15677462d54
|
[
"MIT"
] | null | null | null |
emailrep/exceptions.py
|
pry0cc/emailrep.io-python
|
c33839d327d438e75b4e6eea462ab15677462d54
|
[
"MIT"
] | 1
|
2019-08-22T15:35:11.000Z
|
2019-08-22T15:35:11.000Z
|
class RequestException(Exception):
    """Raised when an HTTP request cannot be completed."""
| 14.666667
| 34
| 0.772727
| 4
| 44
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 2
| 35
| 22
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
6299c0fed43754304eadd3c72255fa97d06e27b5
| 119
|
py
|
Python
|
pyimagesearch/utils/__init__.py
|
agoila/lisa-faster-R-CNN
|
3b88c9b7da2106a805089f9619ea62cdc1f21d99
|
[
"MIT"
] | 17
|
2018-09-09T10:56:58.000Z
|
2022-02-22T07:18:50.000Z
|
pyimagesearch/utils/__init__.py
|
agoila/lisa-faster-R-CNN
|
3b88c9b7da2106a805089f9619ea62cdc1f21d99
|
[
"MIT"
] | null | null | null |
pyimagesearch/utils/__init__.py
|
agoila/lisa-faster-R-CNN
|
3b88c9b7da2106a805089f9619ea62cdc1f21d99
|
[
"MIT"
] | 21
|
2018-09-19T11:07:10.000Z
|
2022-02-22T07:18:45.000Z
|
# import the necessary packages
from .agegenderhelper import AgeGenderHelper
from .imagenethelper import ImageNetHelper
| 39.666667
| 44
| 0.87395
| 12
| 119
| 8.666667
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10084
| 119
| 3
| 45
| 39.666667
| 0.971963
| 0.243697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
62d30bbb6f283ca534cedc754312f5c27d2a329b
| 141
|
py
|
Python
|
Tuples.py
|
PiggyAwesome/Learn-Python-Full-Course-for-Beginners-Tutorial-code
|
c164492a757cb825b73af1014f95aef884ac49af
|
[
"Unlicense"
] | 2
|
2021-08-11T15:53:16.000Z
|
2021-09-13T13:43:59.000Z
|
Tuples.py
|
PiggyAwesome/Learn-Python-Full-Course-for-Beginners-Tutorial-code
|
c164492a757cb825b73af1014f95aef884ac49af
|
[
"Unlicense"
] | null | null | null |
Tuples.py
|
PiggyAwesome/Learn-Python-Full-Course-for-Beginners-Tutorial-code
|
c164492a757cb825b73af1014f95aef884ac49af
|
[
"Unlicense"
] | null | null | null |
# Tuples
# A tuple is immutable: once created, its elements cannot be reassigned
# (e.g. `coordinates[1] = 10` would raise a TypeError).
coordinates = (4, 5)
print(coordinates[1])
| 14.1
| 51
| 0.638298
| 18
| 141
| 5
| 0.666667
| 0.4
| 0.377778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06422
| 0.22695
| 141
| 9
| 52
| 15.666667
| 0.761468
| 0.539007
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
62fd39c0aafef0a38c14c50d32b531ce3872cae4
| 17,658
|
py
|
Python
|
tests/unit/utils/test_win_system.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
tests/unit/utils/test_win_system.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
tests/unit/utils/test_win_system.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
import os
import salt.utils.platform
from tests.support.mock import patch
from tests.support.unit import TestCase, skipIf
try:
    import salt.utils.win_system as win_system
except Exception as exc:  # pylint: disable=broad-except
    # Preserve the import failure so WinSystemImportTestCase can report it
    # instead of the whole test module crashing at import time (e.g. on Linux).
    win_system = exc
class WinSystemImportTestCase(TestCase):
    """
    Simply importing should not raise an error, especially on Linux
    """

    def test_import(self):
        # The module-level import guard stashes any import failure in
        # `win_system`; surface it here as a test failure.
        if isinstance(win_system, Exception):
            raise Exception(
                "Importing win_system caused traceback: {}".format(win_system)
            )
@skipIf(not salt.utils.platform.is_windows(), "Only test on Windows systems")
class WinSystemTestCase(TestCase):
    """
    Test cases for salt.utils.win_system
    """

    def test_get_computer_name(self):
        """
        Should return the computer name
        """
        with patch("win32api.GetComputerNameEx", return_value="FAKENAME"):
            self.assertEqual(win_system.get_computer_name(), "FAKENAME")

    def test_get_computer_name_fail(self):
        """
        If it fails, it returns False
        """
        with patch("win32api.GetComputerNameEx", return_value=None):
            self.assertFalse(win_system.get_computer_name())

    def test_get_pending_computer_name(self):
        """
        Will return the pending computer name if one is pending
        """
        expected = "PendingName"
        patch_value = {"vdata": expected}
        with patch("salt.utils.win_reg.read_value", return_value=patch_value):
            result = win_system.get_pending_computer_name()
            self.assertEqual(expected, result)

    def test_get_pending_computer_name_none(self):
        """
        Will return the None if the pending computer is the current name
        """
        patch_value = {"vdata": os.environ.get("COMPUTERNAME")}
        with patch("salt.utils.win_reg.read_value", return_value=patch_value):
            self.assertIsNone(win_system.get_pending_computer_name())

    def test_get_pending_computer_name_false(self):
        """
        Will return False if there is no pending computer name
        """
        with patch("salt.utils.win_reg.read_value", return_value=False):
            self.assertIsNone(win_system.get_pending_computer_name())

    def test_get_pending_component_servicing(self):
        """
        If none of the keys exist, should return False
        """
        with patch("salt.utils.win_reg.key_exists", return_value=False):
            self.assertFalse(win_system.get_pending_component_servicing())

    def test_get_pending_component_servicing_true_1(self):
        """
        If the RebootPending key exists, should return True
        """
        with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
            self.assertTrue(win_system.get_pending_component_servicing())

    def test_get_pending_component_servicing_true_2(self):
        """
        If the RebootInProgress key exists, should return True
        """
        with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
            self.assertTrue(win_system.get_pending_component_servicing())

    def test_get_pending_component_servicing_true_3(self):
        """
        If the PackagesPending key exists, should return True
        """
        with patch("salt.utils.win_reg.key_exists", side_effect=[False, False, True]):
            self.assertTrue(win_system.get_pending_component_servicing())

    def test_get_pending_domain_join(self):
        """
        If none of the keys exist, should return False
        """
        with patch("salt.utils.win_reg.key_exists", return_value=False):
            self.assertFalse(win_system.get_pending_domain_join())

    def test_get_pending_domain_join_true_1(self):
        """
        If the AvoidSpnSet key exists, should return True
        """
        with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
            self.assertTrue(win_system.get_pending_domain_join())

    def test_get_pending_domain_join_true_2(self):
        """
        If the JoinDomain key exists, should return True
        """
        with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
            self.assertTrue(win_system.get_pending_domain_join())

    def test_get_pending_file_rename_false_1(self):
        """
        If none of the value names exist, should return False
        """
        patched_return = {"success": False}
        with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            self.assertFalse(win_system.get_pending_file_rename())

    def test_get_pending_file_rename_false_2(self):
        """
        If one of the value names exists but is not set, should return False
        """
        patched_return = {"success": True, "vdata": "(value not set)"}
        with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            self.assertFalse(win_system.get_pending_file_rename())

    def test_get_pending_file_rename_true_1(self):
        """
        If one of the value names exists and is set, should return True
        """
        patched_return = {"success": True, "vdata": "some value"}
        with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            self.assertTrue(win_system.get_pending_file_rename())

    def test_get_pending_servermanager_false_1(self):
        """
        If the CurrentRebootAttempts value name does not exist, should return
        False
        """
        patched_return = {"success": False}
        with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            self.assertFalse(win_system.get_pending_servermanager())

    def test_get_pending_servermanager_false_2(self):
        """
        If the CurrentRebootAttempts value name exists but is not an integer,
        should return False
        """
        patched_return = {"success": True, "vdata": "(value not set)"}
        with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            # FIX: was asserting against get_pending_file_rename() (copy/paste
            # error), so get_pending_servermanager() was never exercised here.
            self.assertFalse(win_system.get_pending_servermanager())

    def test_get_pending_servermanager_true(self):
        """
        If the CurrentRebootAttempts value name exists and is an integer,
        should return True
        """
        patched_return = {"success": True, "vdata": 1}
        with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            # FIX: was asserting against get_pending_file_rename() (copy/paste
            # error), so get_pending_servermanager() was never exercised here.
            self.assertTrue(win_system.get_pending_servermanager())

    def test_get_pending_dvd_reboot(self):
        """
        If the DVDRebootSignal value name does not exist, should return False
        """
        with patch("salt.utils.win_reg.value_exists", return_value=False):
            self.assertFalse(win_system.get_pending_dvd_reboot())

    def test_get_pending_dvd_reboot_true(self):
        """
        If the DVDRebootSignal value name exists, should return True
        """
        with patch("salt.utils.win_reg.value_exists", return_value=True):
            self.assertTrue(win_system.get_pending_dvd_reboot())

    def test_get_pending_update(self):
        """
        If none of the keys exist and there are not subkeys, should return False
        """
        with patch("salt.utils.win_reg.key_exists", return_value=False), patch(
            "salt.utils.win_reg.list_keys", return_value=[]
        ):
            self.assertFalse(win_system.get_pending_update())

    def test_get_pending_update_true_1(self):
        """
        If the RebootRequired key exists, should return True
        """
        with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
            self.assertTrue(win_system.get_pending_update())

    def test_get_pending_update_true_2(self):
        """
        If the PostRebootReporting key exists, should return True
        """
        with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
            self.assertTrue(win_system.get_pending_update())

    def test_get_reboot_required_witnessed_false_1(self):
        """
        The ``Reboot Required`` value name does not exist, should return False
        """
        patched_data = {"vdata": None}
        with patch("salt.utils.win_reg.read_value", return_value=patched_data):
            self.assertFalse(win_system.get_reboot_required_witnessed())

    def test_get_reboot_required_witnessed_false_2(self):
        """
        The ``Reboot required`` value name is set to 0, should return False
        """
        patched_data = {"vdata": 0}
        with patch("salt.utils.win_reg.read_value", return_value=patched_data):
            self.assertFalse(win_system.get_reboot_required_witnessed())

    def test_get_reboot_required_witnessed_true(self):
        """
        The ``Reboot required`` value name is set to 1, should return True
        """
        patched_data = {"vdata": 1}
        with patch("salt.utils.win_reg.read_value", return_value=patched_data):
            self.assertTrue(win_system.get_reboot_required_witnessed())

    def test_set_reboot_required_witnessed(self):
        """
        The call to ``set_value`` should return True and should be called with
        the specified parameters
        """
        with patch("salt.utils.win_reg.set_value", return_value=True) as sv:
            self.assertTrue(win_system.set_reboot_required_witnessed())
            sv.assert_called_once_with(
                hive="HKLM",
                key=win_system.MINION_VOLATILE_KEY,
                volatile=True,
                vname=win_system.REBOOT_REQUIRED_NAME,
                vdata=1,
                vtype="REG_DWORD",
            )

    def test_get_pending_update_exe_volatile_false_1(self):
        """
        If UpdateExeVolatile value name is 0, should return False
        """
        patched_data = {"success": True, "vdata": 0}
        with patch("salt.utils.win_reg.read_value", return_value=patched_data):
            self.assertFalse(win_system.get_pending_update_exe_volatile())

    def test_get_pending_update_exe_volatile_false_2(self):
        """
        If UpdateExeVolatile value name is not present, should return False
        """
        patched_data = {"success": False}
        with patch("salt.utils.win_reg.read_value", return_value=patched_data):
            self.assertFalse(win_system.get_pending_update_exe_volatile())

    def test_get_pending_update_exe_volatile_true_1(self):
        """
        If UpdateExeVolatile value name is not 0, should return True
        """
        patched_data = {"success": True, "vdata": 1}
        with patch("salt.utils.win_reg.read_value", return_value=patched_data):
            self.assertTrue(win_system.get_pending_update_exe_volatile())

    def test_get_pending_reboot(self):
        """
        If all functions return Falsy data, should return False
        """
        with patch(
            "salt.utils.win_system.get_pending_update", return_value=False
        ), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
            "salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_file_rename", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_servermanager", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_component_servicing", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_dvd_reboot", return_value=False
        ), patch(
            "salt.utils.win_system.get_reboot_required_witnessed", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_computer_name", return_value=None
        ), patch(
            "salt.utils.win_system.get_pending_domain_join", return_value=False
        ):
            self.assertFalse(win_system.get_pending_reboot())

    def test_get_pending_reboot_true_1(self):
        """
        If any boolean returning functions return True, should return True
        """
        with patch(
            "salt.utils.win_system.get_pending_update", return_value=False
        ), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
            "salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_file_rename", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_servermanager", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_component_servicing", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_dvd_reboot", return_value=False
        ), patch(
            "salt.utils.win_system.get_reboot_required_witnessed", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_computer_name", return_value=None
        ), patch(
            "salt.utils.win_system.get_pending_domain_join", return_value=True
        ):
            self.assertTrue(win_system.get_pending_reboot())

    def test_get_pending_reboot_true_2(self):
        """
        If a computer name is returned, should return True
        """
        with patch(
            "salt.utils.win_system.get_pending_update", return_value=False
        ), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
            "salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_file_rename", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_servermanager", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_component_servicing", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_dvd_reboot", return_value=False
        ), patch(
            "salt.utils.win_system.get_reboot_required_witnessed", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_computer_name",
            return_value="pending name",
        ):
            self.assertTrue(win_system.get_pending_reboot())

    def test_get_pending_reboot_details(self):
        """
        All items False should return a dictionary with all items False
        """
        with patch(
            "salt.utils.win_system.get_pending_update", return_value=False
        ), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
            "salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_file_rename", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_servermanager", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_component_servicing", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_dvd_reboot", return_value=False
        ), patch(
            "salt.utils.win_system.get_reboot_required_witnessed", return_value=False
        ), patch(
            "salt.utils.win_system.get_pending_computer_name", return_value=None
        ), patch(
            "salt.utils.win_system.get_pending_domain_join", return_value=False
        ):
            expected = {
                "Pending Component Servicing": False,
                "Pending Computer Rename": False,
                "Pending DVD Reboot": False,
                "Pending File Rename": False,
                "Pending Join Domain": False,
                "Pending ServerManager": False,
                "Pending Update": False,
                "Pending Windows Update": False,
                "Reboot Required Witnessed": False,
                "Volatile Update Exe": False,
            }
            result = win_system.get_pending_reboot_details()
            self.assertDictEqual(expected, result)

    def test_get_pending_reboot_details_true(self):
        """
        All items True should return a dictionary with all items True
        """
        with patch(
            "salt.utils.win_system.get_pending_update", return_value=True
        ), patch("salt.utils.win_update.needs_reboot", return_value=True), patch(
            "salt.utils.win_system.get_pending_update_exe_volatile", return_value=True
        ), patch(
            "salt.utils.win_system.get_pending_file_rename", return_value=True
        ), patch(
            "salt.utils.win_system.get_pending_servermanager", return_value=True
        ), patch(
            "salt.utils.win_system.get_pending_component_servicing", return_value=True
        ), patch(
            "salt.utils.win_system.get_pending_dvd_reboot", return_value=True
        ), patch(
            "salt.utils.win_system.get_reboot_required_witnessed", return_value=True
        ), patch(
            "salt.utils.win_system.get_pending_computer_name",
            return_value="pending name",
        ), patch(
            "salt.utils.win_system.get_pending_domain_join", return_value=True
        ):
            expected = {
                "Pending Component Servicing": True,
                "Pending Computer Rename": True,
                "Pending DVD Reboot": True,
                "Pending File Rename": True,
                "Pending Join Domain": True,
                "Pending ServerManager": True,
                "Pending Update": True,
                "Pending Windows Update": True,
                "Reboot Required Witnessed": True,
                "Volatile Update Exe": True,
            }
            result = win_system.get_pending_reboot_details()
            self.assertDictEqual(expected, result)
| 41.942993
| 87
| 0.64979
| 2,129
| 17,658
| 5.070925
| 0.076092
| 0.089848
| 0.088922
| 0.122823
| 0.809652
| 0.784272
| 0.736013
| 0.700537
| 0.673768
| 0.655613
| 0
| 0.002505
| 0.253823
| 17,658
| 420
| 88
| 42.042857
| 0.816864
| 0.128214
| 0
| 0.532319
| 0
| 0
| 0.265868
| 0.216508
| 0
| 0
| 0
| 0
| 0.136882
| 1
| 0.136882
| false
| 0
| 0.030418
| 0
| 0.174905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1a0cfe1974d3fead0e36d406bfbe33d55d632379
| 6,981
|
py
|
Python
|
marc_5gempower/run_5gempower.py
|
arled-papa/marc
|
cb94636d786e215195e914b37131277f835bcf52
|
[
"Apache-2.0"
] | 1
|
2021-11-30T00:07:28.000Z
|
2021-11-30T00:07:28.000Z
|
marc_5gempower/run_5gempower.py
|
arled-papa/marc
|
cb94636d786e215195e914b37131277f835bcf52
|
[
"Apache-2.0"
] | null | null | null |
marc_5gempower/run_5gempower.py
|
arled-papa/marc
|
cb94636d786e215195e914b37131277f835bcf52
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2021 Arled Papa
# Author: Arled Papa <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marc_5gempower.agent_func import empower_agent
import marc_5gempower.measurement_helper_func.measurement_func as util_stats_report
import time
import asyncio
import aiomultiprocess as mp
# Connection settings for the 5G-EmPOWER controller; edit before running.
controller_ip = "Your Controller's PC IP"  # Place your own controller ip
controller_port = 2210  # Typical 5G-EmPOWER controller port
measurement_time = 600  # Place your own measurement time in seconds (currently 10 min)
"""
This dictionary stores all the agent ids that are recognized by the 5G-EmPOWER controller.
Since the controller only accepts agents that registered at the database this has to be done beforehand
Currently 100 agent ids are registered as follows
"""
# Agent MAC ids registered with the 5G-EmPOWER controller database.
# The 100 hand-written byte literals followed an exact base-9 pattern in the
# last three bytes (each nibble cycles 0-8); generate them instead so the
# table cannot drift out of sync with the controller's registrations.
agntMAC = {
    i: bytes([0, 0, 0, i // 81, (i // 9) % 9, i % 9]) for i in range(100)
}
def terminate_all_processes(processes):
    """Wait out the measurement window, then terminate every agent process.

    Args:
        processes: iterable of process handles exposing ``terminate()``.
    """
    # FIX: the original `while time.time() < target: pass` busy-wait pinned a
    # CPU core for the entire measurement window; sleep is equivalent and free.
    time.sleep(measurement_time)
    for proc in processes:
        proc.terminate()
"""
Function that initiates the run of 5G-EmPOWER.
Args:
agents: The number of 5G-EmPOWER agents to initiate
users: The number of users per each initiated FlexRAN agent
delay: Binary that indicated if agent related delay measurement are taking place
"""
async def run_5gempower(args=None):
print(args)
processes = []
# Generate the user activations and de-activations from the data plane according to the configuration setup
util_stats_report.update_configuration_file(args["users"])
measure_agent_delay = False
if args["agents"]:
for agnt in range(int(args["agents"])):
if agnt == int(args["agents"]) - 1:
# In case that delay is True measure the initialization delay of the FlexRAN agent
if args["delay"] == "True":
measure_agent_delay = True
print("Start measuring agent's delay")
# Each 5G-EmPOWER agent is initiated as a process targeting the 5G-EmPOWER agent function
task = mp.Process(target=empower_agent.enode_agent, args=(controller_ip, controller_port, args["users"],
args["agents"], agntMAC[agnt],
measure_agent_delay))
# Append the process to the list of processes
processes.append(task)
# Task initiation
task.start()
time.sleep(0.5)
# Once the measurement has finalized terminate all tasks
terminate_all_processes(processes)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(run_5gempower())
loop.close()
| 56.756098
| 116
| 0.627847
| 1,242
| 6,981
| 3.491143
| 0.238325
| 0.404059
| 0.396448
| 0.230627
| 0.377306
| 0.365314
| 0
| 0
| 0
| 0
| 0
| 0.253787
| 0.196247
| 6,981
| 122
| 117
| 57.221311
| 0.518981
| 0.171895
| 0
| 0
| 0
| 0
| 0.477035
| 0.457404
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0.014286
| 0.071429
| 0
| 0.085714
| 0.028571
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1a286a917af5eacc1b12d3158f1106f90974b451
| 252
|
py
|
Python
|
lightnn/base/__init__.py
|
tongluocq/lightnn
|
602b0742d1141efc73a7146c930c5ea9eb994d37
|
[
"Apache-2.0"
] | 131
|
2017-04-05T06:03:25.000Z
|
2021-05-20T03:05:36.000Z
|
ch4/lightnn/lightnn/base/__init__.py
|
helloqorld/book-of-qna-code
|
54950478fb28d15cd73dae4dc39f3cd783721e08
|
[
"Apache-2.0"
] | 27
|
2018-11-26T07:39:25.000Z
|
2022-02-09T23:44:53.000Z
|
ch4/lightnn/lightnn/base/__init__.py
|
helloqorld/book-of-qna-code
|
54950478fb28d15cd73dae4dc39f3cd783721e08
|
[
"Apache-2.0"
] | 62
|
2018-11-26T07:44:02.000Z
|
2022-01-13T08:31:00.000Z
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .activations import *
from .losses import *
from .initializers import *
from .optimizers import *
| 19.384615
| 38
| 0.797619
| 33
| 252
| 5.666667
| 0.545455
| 0.213904
| 0.256684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004608
| 0.138889
| 252
| 12
| 39
| 21
| 0.857143
| 0.130952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.142857
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c556a7e2e7f0a44508e2fef82666c7378cbf88cf
| 226
|
py
|
Python
|
ninja/security/__init__.py
|
lsaavedr/django-ninja
|
caa182007368bb0fed85b184fb0583370e9589b4
|
[
"MIT"
] | null | null | null |
ninja/security/__init__.py
|
lsaavedr/django-ninja
|
caa182007368bb0fed85b184fb0583370e9589b4
|
[
"MIT"
] | null | null | null |
ninja/security/__init__.py
|
lsaavedr/django-ninja
|
caa182007368bb0fed85b184fb0583370e9589b4
|
[
"MIT"
] | null | null | null |
from ninja.security.apikey import APIKeyQuery, APIKeyCookie, APIKeyHeader
from ninja.security.http import HttpBearer, HttpBasicAuth
def django_auth(request):
    """Authenticate via Django's session machinery.

    Returns the request's user when it is authenticated; otherwise
    returns None (rejecting the request).
    """
    current_user = request.user
    if not current_user.is_authenticated:
        return None
    return current_user
| 28.25
| 73
| 0.800885
| 27
| 226
| 6.62963
| 0.740741
| 0.100559
| 0.189944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137168
| 226
| 7
| 74
| 32.285714
| 0.917949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c567629ea21a15f16d30ea7895f7a40e8e344679
| 80,085
|
py
|
Python
|
pyeccodes/defs/grib2/localConcepts/cnmc/name_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 7
|
2020-04-14T09:41:17.000Z
|
2021-08-06T09:38:19.000Z
|
pyeccodes/defs/grib2/localConcepts/cnmc/name_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | null | null | null |
pyeccodes/defs/grib2/localConcepts/cnmc/name_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 3
|
2020-04-30T12:44:48.000Z
|
2020-12-15T08:40:26.000Z
|
import pyeccodes.accessors as _
def load(h):
def wrapped(h):
discipline = h.get_l('discipline')
parameterCategory = h.get_l('parameterCategory')
parameterNumber = h.get_l('parameterNumber')
instrumentType = h.get_l('instrumentType')
satelliteSeries = h.get_l('satelliteSeries')
scaledValueOfCentralWaveNumber = h.get_l('scaledValueOfCentralWaveNumber')
satelliteNumber = h.get_l('satelliteNumber')
typeOfGeneratingProcess = h.get_l('typeOfGeneratingProcess')
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 136986 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 83333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 1250000 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 1666666 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 625000:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and scaledValueOfCentralWaveNumber == 2000000 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
scaledValueOfFirstFixedSurface = h.get_l('scaledValueOfFirstFixedSurface')
typeOfFirstFixedSurface = h.get_l('typeOfFirstFixedSurface')
scaleFactorOfFirstFixedSurface = h.get_l('scaleFactorOfFirstFixedSurface')
typeOfStatisticalProcessing = h.get_l('typeOfStatisticalProcessing')
if discipline == 0 and parameterCategory == 2 and parameterNumber == 22 and scaledValueOfFirstFixedSurface == 10 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 198 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2:
return 'calibrated forecast, wind speed (gust)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 56 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 198 and typeOfStatisticalProcessing == 1:
return 'calibrated forecast, large-scale snowfall rate w.e.'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 52 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 198 and typeOfStatisticalProcessing == 1:
return 'calibrated forecast, total precipitation rate'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 22 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2 and scaledValueOfFirstFixedSurface == 10:
return 'smoothed forecast, wind speed (gust)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 18 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and typeOfFirstFixedSurface == 106:
return 'smoothed forecast, soil temperature'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 56 and typeOfGeneratingProcess == 197 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'smoothed forecast, large-scale snowfall rate w.e.'
typeOfSecondFixedSurface = h.get_l('typeOfSecondFixedSurface')
scaledValueOfSecondFixedSurface = h.get_l('scaledValueOfSecondFixedSurface')
scaleFactorOfSecondFixedSurface = h.get_l('scaleFactorOfSecondFixedSurface')
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 100 and scaledValueOfFirstFixedSurface == 0 and scaledValueOfSecondFixedSurface == 400 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and scaleFactorOfSecondFixedSurface == -2:
return 'smoothed forecast, cloud cover high'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 100 and scaledValueOfSecondFixedSurface == 800 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and scaleFactorOfSecondFixedSurface == -2 and scaledValueOfFirstFixedSurface == 400:
return 'smoothed forecast, cloud cover medium'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 1 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and scaledValueOfFirstFixedSurface == 800:
return 'smoothed forecast, cloud cover low'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 1 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 197:
return 'smoothed forecast, total cloud cover'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 52 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 197 and typeOfStatisticalProcessing == 1:
return 'smoothed forecast, total precipitation rate'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and scaledValueOfFirstFixedSurface == 10 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0:
return 'smoothed forecast, v comp. of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and scaledValueOfFirstFixedSurface == 10:
return 'smoothed forecast, u comp. of wind'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 6 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103:
return 'smoothed forecast, dew point temp.'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 3 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103:
return 'smoothed forecast, minimum temp.'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103:
return 'smoothed forecast, maximum temp.'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0:
return 'smoothed forecast, temperature'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 136986 and satelliteNumber == 72:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 82644 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 136986 and satelliteNumber == 72:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72 and instrumentType == 207:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and scaledValueOfCentralWaveNumber == 82644 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 136986:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and scaledValueOfCentralWaveNumber == 82644 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and scaledValueOfCentralWaveNumber == 136986 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 82644 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and satelliteNumber == 54 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 54:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and satelliteNumber == 54 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 54:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 54:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and satelliteSeries == 331 and satelliteNumber == 54 and instrumentType == 205:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and satelliteNumber == 54 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and satelliteNumber == 54 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and satelliteSeries == 331 and satelliteNumber == 53 and instrumentType == 205:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 53:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and satelliteNumber == 53 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and satelliteNumber == 53 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 52:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and satelliteNumber == 52 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 52:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and satelliteNumber == 52 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 196 and typeOfGeneratingProcess == 200 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference IA-AN of kinetic energy'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 196 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 199:
return 'Monthly Mean of RMS of difference FG-AN of kinetic energy'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 8 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 200:
return 'Monthly Mean of RMS of difference IA-AN of vert.velocity (pressure)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 8 and typeOfGeneratingProcess == 199 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference FG-AN of vert.velocity (pressure)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 200:
return 'Monthly Mean of RMS of difference IA-AN of temperature'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 199:
return 'Monthly Mean of RMS of difference FG-AN of temperature'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 1 and typeOfGeneratingProcess == 200 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference IA-AN of relative humidity'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 1 and typeOfGeneratingProcess == 199 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference FG-AN of relative humidity'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 4 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 200:
return 'Monthly Mean of RMS of difference IA-AN of geopotential'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 4 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 199:
return 'Monthly Mean of RMS of difference FG-AN of geopotential'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and typeOfGeneratingProcess == 200 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference IA-AN of v-component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 199:
return 'Monthly Mean of RMS of difference FG-AN of v-component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 200:
return 'Monthly Mean of RMS of difference IA-AN of u-component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and typeOfGeneratingProcess == 199 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference FG-AN of u-component of wind'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 1 and typeOfGeneratingProcess == 200 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference IA-AN of pressure reduced to MSL'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 1 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 199:
return 'Monthly Mean of RMS of difference FG-AN of pressure reduced to MSL'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 199 and typeOfFirstFixedSurface == 1:
return 'modified cloud cover for media'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 198 and typeOfFirstFixedSurface == 1:
return 'modified cloud depth for media'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 7:
return 'Icing Grade (1=LGT,2=MOD,3=SEV)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 13 and typeOfFirstFixedSurface == 1:
return 'Ceiling'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 3:
return 'Aequivalentpotentielle Temperatur'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 3 and typeOfFirstFixedSurface == 1:
return 'KO index'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 0 and typeOfFirstFixedSurface == 107 and scaleFactorOfFirstFixedSurface == -2:
return 'Druck einer isentropen Flaeche'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 14 and typeOfFirstFixedSurface == 107:
return 'Isentrope potentielle Vorticity'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 25 and typeOfFirstFixedSurface == 1:
return 'weather interpretation (WMO)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 26 and typeOfFirstFixedSurface == 1:
return 'Konv.-U-Grenze-nn Hoehe der Konvektionsuntergrenze ueber nn'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 10:
return 'absolute vorticity advection'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 8 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'storm relative helicity'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 1 and typeOfFirstFixedSurface == 105:
return 'wind shear'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 51:
return 'UV_Index_Maximum_W UV_Index clouded (W), daily maximum'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 23 and typeOfFirstFixedSurface == 1:
return 'Gravity wave dissipation (vertical integral)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 23 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Gravity wave dissipation (vertical integral)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 194 and typeOfFirstFixedSurface == 1:
return 'v-momentum flux due to SSO-effects'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 194 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'v-momentum flux due to SSO-effects'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 193 and typeOfFirstFixedSurface == 1:
return 'u-momentum flux due to SSO-effects'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 193 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'u-momentum flux due to SSO-effects'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 227:
return 'Ba140 - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 226:
return 'Ba140 - dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 225:
return 'Air concentration of Barium 40'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 224:
return 'I131o - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 223:
return 'I131o - dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 222:
return 'I131o - concentration'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 221:
return 'I131g - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 220:
return 'Xe133 - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 219:
return 'I131g - concentration'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 218:
return 'Xe133 - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 217:
return 'Xe133 - dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 216:
return 'Air concentration of Xenon 133 (Xe133 - concentration)'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 215:
return 'TRACER - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 214:
return 'TRACER - dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 213:
return 'TRACER - concentration'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 212:
return 'Kr85-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 211:
return 'Kr85-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 210:
return 'Air concentration of Krypton 85 (Kr85-concentration)'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 209:
return 'Zr95-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 208:
return 'Zr95-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 207:
return 'Air concentration of Zirconium 95 (Zr95-concentration)'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 206:
return 'Te132-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 205:
return 'Te132-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 204:
return 'Air concentration of Tellurium 132 (Te132-concentration)'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 203:
return 'Cs137-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 202:
return 'Cs137-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 201:
return 'Cs137-concentration'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 200:
return 'I131-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 199:
return 'I131-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 198:
return 'I131-concentration'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 197:
return 'Sr90-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 196:
return 'Sr90-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 195:
return 'Air concentration of Strontium 90'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 194:
return 'Ru103-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 193:
return 'Ru103-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 192:
return 'Air concentration of Ruthenium 103 (Ru103- concentration)'
if discipline == 0 and parameterCategory == 14 and parameterNumber == 1 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Ozone Mixing Ratio'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 194 and typeOfFirstFixedSurface == 1:
return 'Delay of the GPS signal trough dry atmos.'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 193 and typeOfFirstFixedSurface == 1:
return 'Delay of the GPS signal trough wet atmos.'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 192 and typeOfFirstFixedSurface == 1:
return 'Delay of the GPS signal trough the (total) atm.'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 200 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Friction velocity'
if discipline == 0 and parameterCategory == 191 and parameterNumber == 2 and typeOfFirstFixedSurface == 1:
return 'geographical longitude'
if discipline == 0 and parameterCategory == 191 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'geographical latitude'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 193 and typeOfFirstFixedSurface == 1:
return 'Coriolis parameter'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 208:
return 'water vapor flux'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 207 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'tendency of specific humidity'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 196 and typeOfStatisticalProcessing == 0:
return 'Sea salt aerosol (12M)'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 196:
return 'Sea salt aerosol'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 195 and typeOfStatisticalProcessing == 0:
return 'Black carbon aerosol (12M)'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 195:
return 'Black carbon aerosol'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 194 and typeOfStatisticalProcessing == 0:
return 'Organic aerosol (12M)'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 194:
return 'Organic aerosol'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 193 and typeOfStatisticalProcessing == 0:
return 'Total soil dust aerosol (12M)'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 193:
return 'Total soil dust aerosol'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 192 and typeOfStatisticalProcessing == 0:
return 'Total sulfate aerosol (12M)'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 192:
return 'Total sulfate aerosol'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 192 and typeOfFirstFixedSurface == 1:
return 'ratio of monthly mean NDVI (normalized differential vegetation index) to annual maximum'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 192 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'ratio of monthly mean NDVI (normalized differential vegetation index) to annual maximum'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 31 and typeOfStatisticalProcessing == 2:
return 'normalized differential vegetation index (NDVI)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 31:
return 'normalized differential vegetation index'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 30 and typeOfFirstFixedSurface == 1:
return 'deciduous forest'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 29 and typeOfFirstFixedSurface == 1:
return 'evergreen forest'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaledValueOfSecondFixedSurface == 100 and scaleFactorOfFirstFixedSurface == -2 and typeOfStatisticalProcessing == 7 and scaleFactorOfSecondFixedSurface == -2 and scaledValueOfFirstFixedSurface == 10:
return 'variance of soil moisture content (10-100)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaledValueOfSecondFixedSurface == 10 and scaleFactorOfFirstFixedSurface == -2 and typeOfStatisticalProcessing == 7 and scaleFactorOfSecondFixedSurface == -2 and scaledValueOfFirstFixedSurface == 0:
return 'variance of soil moisture content (0-10)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 7:
return 'Orographie + Land-Meer-Verteilung'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 28 and typeOfStatisticalProcessing == 3 and typeOfFirstFixedSurface == 1:
return 'Min Leaf area index'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 28 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 2:
return 'Max Leaf area index'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 4 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 3:
return 'Plant covering degree in the quiescent phas'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 4 and typeOfStatisticalProcessing == 2 and typeOfFirstFixedSurface == 1:
return 'Plant covering degree in the vegetation phase'
if discipline == 0 and parameterCategory == 14 and parameterNumber == 193 and typeOfFirstFixedSurface == 1:
return 'vertically integrated ozone content (climatological)'
if discipline == 0 and parameterCategory == 14 and parameterNumber == 192 and typeOfFirstFixedSurface == 1:
return 'height of ozone maximum (climatological)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 32 and typeOfFirstFixedSurface == 1:
return 'root depth of vegetation'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 28 and typeOfFirstFixedSurface == 1:
return 'Leaf area index'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Soil Type'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 196 and typeOfFirstFixedSurface == 1:
return 'surface emissivity'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 22 and typeOfFirstFixedSurface == 1:
return 'Slope of sub-gridscale orography'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 21 and typeOfFirstFixedSurface == 1:
return 'Angle of sub-gridscale orography'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 24 and typeOfFirstFixedSurface == 1:
return 'Anisotropy of sub-gridscale orography'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 1:
return 'Standard deviation of sub-grid scale orography'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 195 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'meridional wind tendency due to subgrid scale oro.'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 194 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'zonal wind tendency due to subgrid scale oro.'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and typeOfStatisticalProcessing == 6 and typeOfGeneratingProcess == 7:
return 'analysis error(standard deviation), v-comp. of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and typeOfStatisticalProcessing == 6 and typeOfGeneratingProcess == 7:
return 'analysis error(standard deviation), u-comp. of wind'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 5 and typeOfGeneratingProcess == 7 and typeOfStatisticalProcessing == 6:
return 'analysis error(standard deviation), geopotential(gpm)'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 199:
return 'total directional spread'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 198:
return 'total Tm2 period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 197:
return 'total Tm1 period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 196:
return 'total wave mean period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 195:
return 'total wave peak period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 194:
return 'swell peak period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 194 and typeOfFirstFixedSurface == 101:
return 'swell mean period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 193:
return 'wind sea peak period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 193 and typeOfFirstFixedSurface == 101:
return 'wind sea mean period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 192:
return 'total wave direction'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 206 and typeOfFirstFixedSurface == 1:
return 'moisture convergence for Kuo-type closure'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 6 and typeOfFirstFixedSurface == 1:
return 'Convective Available Potential Energy'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 205 and typeOfFirstFixedSurface == 1:
return 'Massflux at convective cloud base'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 195:
return 'residuum of soil moisture'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 194:
return 'total forcing at soil surface'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 193:
return 'total transpiration from all soil layers'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 192:
return 'sum of contributions to evaporation'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 193 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Effective transmissivity of solar radiation'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 192 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'unknown'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 1 and typeOfFirstFixedSurface == 10:
return 'Base reflectivity (cmax)'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 1 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Base reflectivity'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'Base reflectivity'
if discipline == 10 and parameterCategory == 2 and parameterNumber == 8 and typeOfFirstFixedSurface == 1:
return 'sea Ice Temperature'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 16 and typeOfFirstFixedSurface == 1:
return 'Minimal Stomatal Resistance'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 18 and typeOfFirstFixedSurface == 1:
return 'Snow temperature (top of snow)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 13 and typeOfFirstFixedSurface == 1:
return 'Plant Canopy Surface Water'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 22 and typeOfFirstFixedSurface == 106 and scaleFactorOfFirstFixedSurface == -2:
return 'soil ice content (multilayers)'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 20 and scaleFactorOfFirstFixedSurface == -2 and typeOfFirstFixedSurface == 106:
return 'Column-integrated Soil Moisture (multilayers)'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 18 and scaleFactorOfFirstFixedSurface == -2 and typeOfFirstFixedSurface == 106:
return 'Soil Temperature (multilayers)'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 192:
return 'Air concentration of Ruthenium 103'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 22 and scaledValueOfFirstFixedSurface == 10 and typeOfFirstFixedSurface == 103 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2:
return 'maximum Wind 10m'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 3 and typeOfFirstFixedSurface == 1:
return 'mixed layer depth'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 19 and typeOfFirstFixedSurface == 1:
return 'Turbulent transfer coefficient for heat (and Moisture)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 29 and typeOfFirstFixedSurface == 1:
return 'Turbulent transfer coefficient for impulse'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 20 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Turbulent diffusion coefficient for heat (and moisture)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 31 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Turbulent diffusioncoefficient for momentum'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 11 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Turbulent Kinetic Energy'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 196 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Kinetic Energy'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 192 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Tendency of turbulent kinetic energy'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 24:
return 'Convective turbulent kinetic enery'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 7 and typeOfFirstFixedSurface == 192:
return 'Convective Inhibition, mean layer'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 6 and typeOfFirstFixedSurface == 192:
return 'Convective Available Potential Energy, mean layer'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 7 and typeOfFirstFixedSurface == 193:
return 'Convective Inhibition, most unstable'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 6 and typeOfFirstFixedSurface == 193:
return 'Convective Available Potential Energy, most unstable'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 193 and typeOfFirstFixedSurface == 1:
return 'supercell detection index 2 (only rot. up drafts)'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 192 and typeOfFirstFixedSurface == 1:
return 'supercell detection index 1 (rot. up+down drafts)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 192 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Pressure perturbation'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 61 and typeOfFirstFixedSurface == 1:
return 'Snow density'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 75 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 1:
return 'Graupel (snow pellets) precipitation rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 75 and typeOfFirstFixedSurface == 1:
return 'Graupel (snow pellets) precipitation rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 202 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'tendency of specific cloud ice content due to grid scale precipitation'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 203:
return 'Fresh snow factor (weighting function for albedo indicating freshness of snow)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 201 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'tendency of specific cloud liquid water content due to grid scale precipitation'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 200 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Specific humitiy tendency due to grid scale precipitation'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 193 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Temperature tendency due to grid scale precipation'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 66 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'snow amount, grid-scale plus convective'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 65 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'rain amount, grid-scale plus convective'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 76 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 1:
return 'Convective rain rate (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 55 and typeOfFirstFixedSurface == 1:
return 'Convective snowfall rate water equivalent'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 76 and typeOfFirstFixedSurface == 1:
return 'Convective rain rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 77 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 1:
return 'Large scale rain rate (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 56 and typeOfFirstFixedSurface == 1:
return 'Large scale snowfall rate water equivalent'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 77 and typeOfFirstFixedSurface == 1:
return 'Large scale rain rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 196 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Specific content of precipitation particles (needed for water loadin)g'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 199 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'tendency of specific cloud ice content due to convection'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 198 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Tendency of specific cloud liquid water content due to conversion'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 204:
return 'Height of snow fall limit'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 200 and typeOfFirstFixedSurface == 4:
return 'height of 0 degree celsius level code 0,3,6 ?'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 196 and typeOfFirstFixedSurface == 1:
return 'height of top of dry convection'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 193 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'meridional wind tendency due to convection'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 192 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'zonal wind tendency due to convection'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Specific humitiy tendency due to convection'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 192 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Temperature tendency due to convection'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 195 and typeOfFirstFixedSurface == 1:
return 'top index (vertical level) of main convective cloud (i)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 194 and typeOfFirstFixedSurface == 1:
return 'base index (vertical level) of main convective cloud (i)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 27 and typeOfFirstFixedSurface == 3:
return 'Height of Convective Cloud Top (i)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 26 and typeOfFirstFixedSurface == 2:
return 'Height of Convective Cloud Base (i)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 195 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'specific cloud water content, convective cloud'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 193 and typeOfFirstFixedSurface == 3:
return 'cloud top above msl, shallow convection'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 192 and typeOfFirstFixedSurface == 2:
return 'cloud base above msl, shallow convection'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 194 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'subgridscale cloud ice'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 193 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'subgrid scale cloud water'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 192 and typeOfFirstFixedSurface == 1:
return 'vertical integral of divergence of total water content (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 78 and typeOfFirstFixedSurface == 1:
return 'Total Column integrated water (all components incl. precipitation)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 74:
return 'Total column integrated grauple'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 32 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Grauple'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 46:
return 'Total column integrated snow'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 45:
return 'Total column integrated rain'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 25 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Snow mixing ratio'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 24 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Rain mixing ratio'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 82 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Cloud Ice Mixing Ratio'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 22 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Cloud Mixing Ratio'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 14 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Non-Convective Cloud Cover, grid scale'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Cloud cover'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 195 and typeOfFirstFixedSurface == 1:
return 'Stomatal Resistance'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 24 and typeOfStatisticalProcessing == 1:
return 'Sunshine'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 194 and typeOfStatisticalProcessing == 0 and scaleFactorOfFirstFixedSurface == -2 and typeOfFirstFixedSurface == 106:
return 'Latent heat flux from plants'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 193 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'Latent heat flux from bare soil'
if discipline == 0 and parameterCategory == 5 and parameterNumber == 192 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Thermal radiation heating rate'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 192 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Solar radiation heating rate'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 10 and typeOfFirstFixedSurface == 1:
return 'Photosynthetically active radiation'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 10 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Photosynthetically active radiation (m) (at the surface)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 18 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Momentum Flux, V-Component (m)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 17 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'Momentum Flux, U-Component (m)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 11 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'Sensible Heat Net Flux (m)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 10 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Latent Heat Net Flux (m)'
if discipline == 0 and parameterCategory == 5 and parameterNumber == 5 and typeOfFirstFixedSurface == 0:
return 'Net long wave radiation flux'
if discipline == 0 and parameterCategory == 5 and parameterNumber == 5 and typeOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 0:
return 'Net long wave radiation flux (m) (on the model top)'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 9 and typeOfFirstFixedSurface == 0:
return 'Net short wave radiation flux'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 9 and typeOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 0:
return 'Net short wave radiation flux (m) (on the model top)'
if discipline == 0 and parameterCategory == 5 and parameterNumber == 5 and typeOfFirstFixedSurface == 1:
return 'Net long wave radiation flux'
if discipline == 0 and parameterCategory == 5 and parameterNumber == 5 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Net long wave radiation flux (m) (at the surface)'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 9 and typeOfFirstFixedSurface == 1:
return 'Net short wave radiation flux'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 9 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Net short wave radiation flux (m) (at the surface)'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 9:
return 'Mean period of swell waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 8:
return 'Significant height of swell waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 7:
return 'Direction of swell waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 6:
return 'Mean period of wind waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 5:
return 'Significant height of wind waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 4:
return 'Direction of wind waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 3:
return 'Significant height of combined wind waves and swell'
if discipline == 10 and parameterCategory == 2 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'sea Ice Thickness'
if discipline == 10 and parameterCategory == 2 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Sea Ice Cover ( 0= free, 1=cover)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 5 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaledValueOfFirstFixedSurface == 0 and scaleFactorOfSecondFixedSurface == -2 and typeOfStatisticalProcessing == 1 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 10:
return 'Water Runoff (s)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 5 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and typeOfStatisticalProcessing == 1 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 190 and scaledValueOfFirstFixedSurface == 10 and scaleFactorOfSecondFixedSurface == -2:
return 'Water Runoff (10-190)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 5 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and typeOfStatisticalProcessing == 1 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 100 and scaledValueOfFirstFixedSurface == 10 and scaleFactorOfSecondFixedSurface == -2:
return 'Water Runoff (10-100)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 4 and typeOfFirstFixedSurface == 1:
return 'Plant cover'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaledValueOfSecondFixedSurface == 100 and scaledValueOfFirstFixedSurface == 10 and scaleFactorOfSecondFixedSurface == -2 and scaleFactorOfFirstFixedSurface == -2:
return 'Column-integrated Soil Moisture (2) 10-100cm'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaledValueOfFirstFixedSurface == 0 and scaleFactorOfSecondFixedSurface == -2 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 10:
return 'Column-integrated Soil Moisture (1) 0 -10 cm'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 190 and scaledValueOfFirstFixedSurface == 100 and scaleFactorOfSecondFixedSurface == -2:
return 'Column-integrated Soil Moisture'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 18 and typeOfFirstFixedSurface == 106 and scaledValueOfFirstFixedSurface == 0 and scaleFactorOfFirstFixedSurface == -2:
return 'Soil Temperature'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 18 and typeOfFirstFixedSurface == 106 and scaledValueOfFirstFixedSurface == 9 and scaleFactorOfFirstFixedSurface == -2:
return 'Soil Temperature'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 18 and scaleFactorOfFirstFixedSurface == -2 and typeOfFirstFixedSurface == 106 and scaledValueOfFirstFixedSurface == 41:
return 'Soil Temperature (41 cm depth)'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 18 and scaleFactorOfFirstFixedSurface == -2 and typeOfFirstFixedSurface == 106 and scaledValueOfFirstFixedSurface == 36:
return 'Soil Temperature ( 36 cm depth, vv=0h)'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 1 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'Albedo (in short-wave)'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'Albedo (in short-wave)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'Surface Roughness length Surface Roughness'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Land Cover (1=land, 0=sea)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 56 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'Large-Scale snowfall rate water equivalent (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 55 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'Convective Snowfall rate water equivalent (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 69 and typeOfFirstFixedSurface == 1:
return 'Total Column-Integrated Cloud Water'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 100 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 400 and scaledValueOfFirstFixedSurface == 0 and scaleFactorOfSecondFixedSurface == -2:
return 'Cloud Cover (0 - 400 hPa)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 100 and scaledValueOfFirstFixedSurface == 400 and scaleFactorOfSecondFixedSurface == -2 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 800:
return 'Cloud Cover (400 - 800 hPa)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and scaledValueOfFirstFixedSurface == 800 and scaleFactorOfFirstFixedSurface == -2 and typeOfSecondFixedSurface == 1 and typeOfFirstFixedSurface == 100:
return 'Cloud Cover (800 hPa - Soil)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 2 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Convective Cloud Cover'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'Total Cloud Cover'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 11 and typeOfFirstFixedSurface == 1:
return 'Snow Depth'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 60 and typeOfFirstFixedSurface == 1:
return 'Snow depth water equivalent'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 37 and typeOfStatisticalProcessing == 1:
return 'Convective Precipitation rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 54 and typeOfStatisticalProcessing == 1:
return 'Large-Scale Precipitation rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 52 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'Total Precipitation rate (S)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 70 and typeOfFirstFixedSurface == 1:
return 'Total Column-Integrated Cloud Ice'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 79 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 1:
return 'Evaporation (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 64 and typeOfFirstFixedSurface == 1:
return 'Total column integrated water vapour'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 1:
return 'Relative Humidity'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 1 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2 and scaleFactorOfFirstFixedSurface == 0:
return '2m Relative Humidity'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 0 and scaleFactorOfFirstFixedSurface == 0:
return 'Specific Humidity'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 0 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2:
return 'Specific Humidity (2m)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Specific Humidity (S)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 9:
return 'Vertical Velocity (Geometric) (w)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 8:
return 'Vertical Velocity (Pressure) ( omega=dp/dt )'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3:
return 'V component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 10:
return 'V component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2:
return 'U component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 10:
return 'U component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 1 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Wind speed (SP)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 1 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 10 and scaleFactorOfFirstFixedSurface == 0:
return 'Wind speed (SP_10M)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 0 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Wind Direction (DD)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 0 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 10:
return 'Wind Direction (DD_10M)'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 2:
return 'Wave spectra (3)'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 1:
return 'Wave spectra (2)'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 0:
return 'Wave spectra (1)'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 6 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 2:
return 'Radar spectra (1)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 6 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2 and typeOfStatisticalProcessing == 0 and scaleFactorOfFirstFixedSurface == 0:
return '2m Dew Point Temperature (AV)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2 and typeOfStatisticalProcessing == 3 and scaleFactorOfFirstFixedSurface == 0:
return 'Min 2m Temperature (i)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2 and typeOfStatisticalProcessing == 2 and scaleFactorOfFirstFixedSurface == 0:
return 'Max 2m Temperature (i)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0:
return 'Temperature'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfGeneratingProcess == 9 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2 and typeOfStatisticalProcessing == 0 and scaleFactorOfFirstFixedSurface == 0:
return 'Climat. temperature, 2m Temperature'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Temperature (G)'
if discipline == 0 and parameterCategory == 14 and parameterNumber == 2 and typeOfFirstFixedSurface == 1:
return 'Total Column Integrated Ozone'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 6 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Geometric Height of the layer limits above sea level(NN)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 6 and typeOfFirstFixedSurface == 1:
return 'Geometric Height of the earths surface above sea level'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 4:
return 'Geopotential'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 4 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Geopotential (full lev)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 4 and typeOfFirstFixedSurface == 1:
return 'Geopotential (S)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 2 and typeOfFirstFixedSurface == 1:
return 'Pressure Tendency (S)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 1 and typeOfFirstFixedSurface == 101:
return 'Pressure Reduced to MSL'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 0:
return 'Pressure'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Pressure (S) (not reduced)'
is_s2s = h.get_l('is_s2s')
subCentre = h.get_l('subCentre')
if discipline == 0 and parameterCategory == 0 and parameterNumber == 6 and scaleFactorOfFirstFixedSurface == 0 and scaledValueOfFirstFixedSurface == 2 and is_s2s == 1 and typeOfFirstFixedSurface == 103 and subCentre == 102:
return '2 metre dewpoint temperature'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 6 and typeOfFirstFixedSurface == 103 and scaleFactorOfFirstFixedSurface == 0 and scaledValueOfFirstFixedSurface == 2:
return '2 metre dewpoint temperature'
if discipline == 10 and parameterCategory == 2 and parameterNumber == 0 and subCentre == 102 and is_s2s == 1:
return 'Sea ice area fraction'
if discipline == 10 and parameterCategory == 2 and parameterNumber == 0:
return 'Sea ice area fraction'
return wrapped
| 66.7375
| 355
| 0.700893
| 8,164
| 80,085
| 6.871999
| 0.063817
| 0.029802
| 0.061173
| 0.07529
| 0.901627
| 0.870399
| 0.849527
| 0.782668
| 0.685418
| 0.621642
| 0
| 0.055447
| 0.228457
| 80,085
| 1,199
| 356
| 66.793161
| 0.85253
| 0
| 0
| 0.114856
| 0
| 0
| 0.167072
| 0.003921
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002497
| false
| 0
| 0.001248
| 0
| 0.491885
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3d9a1b0edafd4fb0b37e8206295d03027352213c
| 18
|
py
|
Python
|
mltk/marl/algorithms/__init__.py
|
lqf96/mltk
|
7187be5d616781695ee68674cd335fbb5a237ccc
|
[
"MIT"
] | null | null | null |
mltk/marl/algorithms/__init__.py
|
lqf96/mltk
|
7187be5d616781695ee68674cd335fbb5a237ccc
|
[
"MIT"
] | 2
|
2019-12-24T01:54:21.000Z
|
2019-12-24T02:23:54.000Z
|
mltk/marl/algorithms/__init__.py
|
lqf96/mltk
|
7187be5d616781695ee68674cd335fbb5a237ccc
|
[
"MIT"
] | null | null | null |
from .phc import *
| 18
| 18
| 0.722222
| 3
| 18
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 18
| 1
| 18
| 18
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
3da40761377898e0edc360572dbd5d864963e85c
| 4,232
|
py
|
Python
|
crime_data/resources/incidents.py
|
18F/crime-data-api
|
3e8cab0fad4caac1d7d8ef1b62ae7a1441752c6c
|
[
"CC0-1.0"
] | 51
|
2016-09-16T00:37:56.000Z
|
2022-01-22T03:48:24.000Z
|
crime_data/resources/incidents.py
|
harrisj/crime-data-api
|
9b49b5cc3cd8309dda888f49356ee5168c43851a
|
[
"CC0-1.0"
] | 605
|
2016-09-15T19:16:49.000Z
|
2018-01-18T20:46:39.000Z
|
crime_data/resources/incidents.py
|
harrisj/crime-data-api
|
9b49b5cc3cd8309dda888f49356ee5168c43851a
|
[
"CC0-1.0"
] | 12
|
2018-01-18T21:15:34.000Z
|
2022-02-17T10:09:40.000Z
|
from webargs.flaskparser import use_args
from itertools import filterfalse
from crime_data.common import cdemodels, marshmallow_schemas, models, newmodels
from crime_data.common.base import CdeResource, tuning_page, ExplorerOffenseMapping
from crime_data.extensions import DEFAULT_MAX_AGE
from flask.ext.cachecontrol import cache
from flask import jsonify
def _is_string(col):
col0 = list(col.base_columns)[0]
return issubclass(col0.type.python_type, str)
class AgenciesSumsState(CdeResource):
    """Agency suboffense sums keyed by (year, agency).

    Only agencies that reported all 12 months of the year are included.
    """
    schema = marshmallow_schemas.AgencySumsSchema(many=True)
    fast_count = True

    @use_args(marshmallow_schemas.OffenseCountViewArgs)
    @cache(max_age=DEFAULT_MAX_AGE, public=True)
    @tuning_page
    def get(self, args, state_abbr=None, agency_ori=None):
        """Serve agency sums, optionally filtered by state/agency/year/offense."""
        self.verify_api_key(args)
        sums = newmodels.AgencySums().get(
            state=state_abbr,
            agency=agency_ori,
            year=args.get('year', None),
            explorer_offense=args.get('explorer_offense', None),
        )
        return self.render_response(sums, args, csv_filename='agency_sums_state')
class AgenciesSumsCounty(CdeResource):
    """Agency suboffense sums keyed by (year, agency), scoped to a county.

    Only agencies that reported all 12 months of the year are included.
    """
    schema = marshmallow_schemas.AgencySumsSchema(many=True)
    fast_count = True

    @use_args(marshmallow_schemas.OffenseCountViewArgsYear)
    @cache(max_age=DEFAULT_MAX_AGE, public=True)
    @tuning_page
    def get(self, args, state_abbr=None, county_fips_code=None, agency_ori=None):
        """Serve county-scoped agency sums. Year is a required field atm."""
        self.verify_api_key(args)
        sums = newmodels.AgencySums().get(
            agency=agency_ori,
            year=args.get('year', None),
            county=county_fips_code,
            state=state_abbr,
            explorer_offense=args.get('explorer_offense', None),
        )
        return self.render_response(sums, args, csv_filename='agency_sums_county')
class AgenciesOffensesCount(CdeResource):
    """Agency offense counts by year."""

    schema = marshmallow_schemas.AgencyOffensesSchema(many=True)
    fast_count = True

    @use_args(marshmallow_schemas.OffenseCountViewArgs)
    @cache(max_age=DEFAULT_MAX_AGE, public=True)
    @tuning_page
    def get(self, args, state_abbr=None, agency_ori=None):
        """Serve agency offense counts, dispatching on the offense kind.

        ``'violent'`` and ``'property'`` are classification rollups rather
        than single offenses, so they are answered by the classification
        model; every other value goes to the per-offense model.
        """
        self.verify_api_key(args)
        year = args.get('year', None)
        explorer_offense = args.get('explorer_offense', None)
        if explorer_offense in ('violent', 'property'):
            agency_sums = newmodels.AgencyClassificationCounts().get(
                state=state_abbr, agency=agency_ori, year=year,
                classification=explorer_offense)
        else:
            agency_sums = newmodels.AgencyOffenseCounts().get(
                state=state_abbr, agency=agency_ori, year=year,
                explorer_offense=explorer_offense)
        filename = 'agency_offenses_state'
        return self.render_response(agency_sums, args, csv_filename=filename)
class AgenciesOffensesCountyCount(CdeResource):
    """Agency offense counts by year, scoped to a county."""

    schema = marshmallow_schemas.AgencyOffensesSchema(many=True)
    fast_count = True

    @use_args(marshmallow_schemas.OffenseCountViewArgsYear)
    @cache(max_age=DEFAULT_MAX_AGE, public=True)
    @tuning_page
    def get(self, args, state_abbr=None, county_fips_code=None, agency_ori=None):
        """Serve county-scoped offense counts. Year is a required field atm."""
        self.verify_api_key(args)
        counts = newmodels.AgencyOffenseCounts().get(
            agency=agency_ori,
            year=args.get('year', None),
            county=county_fips_code,
            state=state_abbr,
            explorer_offense=args.get('explorer_offense', None),
        )
        # NOTE(review): filename says 'sums' although this resource serves
        # offense counts -- looks like a copy/paste leftover; confirm before
        # renaming, since it changes the downloaded CSV filename.
        return self.render_response(counts, args, csv_filename='agency_sums_county')
| 41.087379
| 157
| 0.708176
| 502
| 4,232
| 5.7251
| 0.207171
| 0.099165
| 0.022617
| 0.033055
| 0.768963
| 0.768963
| 0.768963
| 0.767571
| 0.767571
| 0.753653
| 0
| 0.002055
| 0.19518
| 4,232
| 102
| 158
| 41.490196
| 0.84175
| 0.072779
| 0
| 0.693333
| 0
| 0
| 0.044206
| 0.005493
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.093333
| 0
| 0.386667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3dbf87737162b90ca8a50c6b75c42c1a4829f712
| 6,159
|
py
|
Python
|
test/test_auth.py
|
tjones-commits/server-client-python
|
b9309fb79564de9f28196b929ee77b0e77a8f504
|
[
"CC0-1.0",
"MIT"
] | 470
|
2016-09-14T23:38:48.000Z
|
2022-03-31T07:59:53.000Z
|
test/test_auth.py
|
jorwoods/server-client-python
|
fefd6f18d8a6617829c6323879d2c3ed77a4cda6
|
[
"CC0-1.0",
"MIT"
] | 772
|
2016-09-09T18:15:44.000Z
|
2022-03-31T22:01:08.000Z
|
test/test_auth.py
|
jorwoods/server-client-python
|
fefd6f18d8a6617829c6323879d2c3ed77a4cda6
|
[
"CC0-1.0",
"MIT"
] | 346
|
2016-09-10T00:05:00.000Z
|
2022-03-30T18:55:47.000Z
|
import unittest
import os.path
import requests_mock
import tableauserverclient as TSC
# Canned XML response fixtures used by the auth tests below.
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
SIGN_IN_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in.xml')
SIGN_IN_IMPERSONATE_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_impersonate.xml')
SIGN_IN_ERROR_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_error.xml')
class AuthTests(unittest.TestCase):
    """Tests for Server.auth sign-in/sign-out/switch-site flows.

    HTTP traffic is intercepted with requests_mock; responses are canned
    XML fixtures, so the expected token/site/user ids below are whatever
    those fixtures contain.
    """

    def setUp(self):
        # Fresh server per test; baseurl is the auth endpoint root.
        self.server = TSC.Server('http://test')
        self.baseurl = self.server.auth.baseurl

    def test_sign_in(self):
        """Username/password sign-in populates auth token, site id and user id."""
        with open(SIGN_IN_XML, 'rb') as f:
            response_xml = f.read().decode('utf-8')
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml)
            tableau_auth = TSC.TableauAuth('testuser', 'password', site_id='Samples')
            self.server.auth.sign_in(tableau_auth)
        # Values come from the auth_sign_in.xml fixture.
        self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
        self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
        self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)

    def test_sign_in_with_personal_access_tokens(self):
        """PAT sign-in goes through the same /signin endpoint and yields the same state."""
        with open(SIGN_IN_XML, 'rb') as f:
            response_xml = f.read().decode('utf-8')
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml)
            tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken',
                                                       personal_access_token='Random123Generated', site_id='Samples')
            self.server.auth.sign_in(tableau_auth)
        self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
        self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
        self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)

    def test_sign_in_impersonate(self):
        """Impersonated sign-in reports the impersonated user's id."""
        with open(SIGN_IN_IMPERSONATE_XML, 'rb') as f:
            response_xml = f.read().decode('utf-8')
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml)
            tableau_auth = TSC.TableauAuth('testuser', 'password',
                                           user_id_to_impersonate='dd2239f6-ddf1-4107-981a-4cf94e415794')
            self.server.auth.sign_in(tableau_auth)
        self.assertEqual('MJonFA6HDyy2C3oqR13fRGqE6cmgzwq3', self.server.auth_token)
        self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67', self.server.site_id)
        # user_id matches the id passed as user_id_to_impersonate.
        self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', self.server.user_id)

    def test_sign_in_error(self):
        """A 401 on /signin surfaces as ServerResponseError."""
        with open(SIGN_IN_ERROR_XML, 'rb') as f:
            response_xml = f.read().decode('utf-8')
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
            tableau_auth = TSC.TableauAuth('testuser', 'wrongpassword')
            self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)

    def test_sign_in_invalid_token(self):
        """An invalid personal access token also raises ServerResponseError."""
        with open(SIGN_IN_ERROR_XML, 'rb') as f:
            response_xml = f.read().decode('utf-8')
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
            tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken', personal_access_token='invalid')
            self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)

    def test_sign_in_without_auth(self):
        """Empty credentials raise ServerResponseError rather than silently passing."""
        with open(SIGN_IN_ERROR_XML, 'rb') as f:
            response_xml = f.read().decode('utf-8')
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
            tableau_auth = TSC.TableauAuth('', '')
            self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)

    def test_sign_out(self):
        """Sign-out clears the cached auth token, site id and user id."""
        with open(SIGN_IN_XML, 'rb') as f:
            response_xml = f.read().decode('utf-8')
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml)
            m.post(self.baseurl + '/signout', text='')
            tableau_auth = TSC.TableauAuth('testuser', 'password')
            self.server.auth.sign_in(tableau_auth)
            self.server.auth.sign_out()
        # Private fields are inspected directly: the public properties may
        # raise when unauthenticated.
        self.assertIsNone(self.server._auth_token)
        self.assertIsNone(self.server._site_id)
        self.assertIsNone(self.server._user_id)

    def test_switch_site(self):
        """switchSite (API 2.6+) replaces the session's site/user/token."""
        self.server.version = '2.6'
        # Re-read baseurl: it depends on the server version set above.
        baseurl = self.server.auth.baseurl
        site_id, user_id, auth_token = list('123')
        self.server._set_auth(site_id, user_id, auth_token)
        with open(SIGN_IN_XML, 'rb') as f:
            response_xml = f.read().decode('utf-8')
        with requests_mock.mock() as m:
            m.post(baseurl + '/switchSite', text=response_xml)
            site = TSC.SiteItem('Samples', 'Samples')
            self.server.auth.switch_site(site)
        self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
        self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
        self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)

    def test_revoke_all_server_admin_tokens(self):
        """revokeAllServerAdminTokens (API 3.10+) leaves the current session intact."""
        self.server.version = "3.10"
        # Re-read baseurl: it depends on the server version set above.
        baseurl = self.server.auth.baseurl
        with open(SIGN_IN_XML, 'rb') as f:
            response_xml = f.read().decode('utf-8')
        with requests_mock.mock() as m:
            m.post(baseurl + '/signin', text=response_xml)
            m.post(baseurl + '/revokeAllServerAdminTokens', text='')
            tableau_auth = TSC.TableauAuth('testuser', 'password')
            self.server.auth.sign_in(tableau_auth)
            self.server.auth.revoke_all_server_admin_tokens()
        self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
        self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
        self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
| 49.272
| 117
| 0.664069
| 790
| 6,159
| 4.946835
| 0.139241
| 0.092119
| 0.071648
| 0.032242
| 0.816018
| 0.776868
| 0.729273
| 0.704964
| 0.697544
| 0.685773
| 0
| 0.058326
| 0.21221
| 6,159
| 124
| 118
| 49.669355
| 0.747115
| 0
| 0
| 0.542857
| 0
| 0
| 0.156844
| 0.102776
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.095238
| false
| 0.047619
| 0.038095
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3dd524d8e59e2c8188892e7a7fe2e15518d2a46b
| 5,294
|
py
|
Python
|
depthaware/data/sunrgbd_dataset.py
|
crmauceri/DepthAwareCNN-pytorch1.5
|
6d9b0cf001d7482df7d4cd7240fc36cbfc8356f9
|
[
"MIT"
] | 3
|
2021-03-11T01:24:37.000Z
|
2021-06-29T03:46:40.000Z
|
depthaware/data/sunrgbd_dataset.py
|
crmauceri/DepthAwareCNN-pytorch1.5
|
6d9b0cf001d7482df7d4cd7240fc36cbfc8356f9
|
[
"MIT"
] | null | null | null |
depthaware/data/sunrgbd_dataset.py
|
crmauceri/DepthAwareCNN-pytorch1.5
|
6d9b0cf001d7482df7d4cd7240fc36cbfc8356f9
|
[
"MIT"
] | null | null | null |
import os.path
from depthaware.data.base_dataset import *
from PIL import Image
import time
def make_dataset_fromlst(dataroot, listfilename):
    """Parse a dataset list file into a dict of absolute path lists.

    Each non-empty line of *listfilename* holds four space-separated paths
    relative to *dataroot*:

        imagepath seglabelpath depthpath HHApath

    Blank lines (e.g. a trailing newline at end of file) are skipped; the
    original implementation crashed on them with an unpack error.

    Returns:
        dict with keys 'images', 'segs', 'HHAs', 'depths', each a list of
        paths joined onto *dataroot*, in file order.
    """
    images, segs, depths, HHAs = [], [], [], []
    with open(listfilename) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank / trailing lines
            imgname, segname, depthname, HHAname = line.split(' ')
            images.append(os.path.join(dataroot, imgname))
            segs.append(os.path.join(dataroot, segname))
            depths.append(os.path.join(dataroot, depthname))
            HHAs.append(os.path.join(dataroot, HHAname))
    return {'images': images, 'segs': segs, 'HHAs': HHAs, 'depths': depths}
class SUNRGBDDataset(BaseDataset):
    """Training split of SUN RGB-D: yields image/depth/seg/HHA tensors per sample."""

    def __init__(self, opt):
        self.opt = opt
        # Time-based seed: augmentation randomness differs between runs
        # (the validation dataset below uses a fixed seed instead).
        np.random.seed(int(time.time()))
        self.paths_dict = make_dataset_fromlst(opt.dataroot, opt.list)
        self.len = len(self.paths_dict['images'])
        # self.label_weight = torch.Tensor(label_weight)
        self.datafile = 'sunrgbd_dataset.py'

    def __getitem__(self, index):
        """Load, decode and transform one (image, depth, seg, HHA) sample."""
        #self.paths['images'][index]
        # print self.opt.scale,self.opt.flip,self.opt.crop,self.opt.colorjitter
        img = np.asarray(Image.open(self.paths_dict['images'][index]))#.astype(np.uint8)
        # Channel axis reversed -- presumably HHA is stored in BGR order on
        # disk; TODO confirm against the HHA generation pipeline.
        HHA = np.asarray(Image.open(self.paths_dict['HHAs'][index]))[:,:,::-1]
        # Shift labels down by one; label 0 wraps to 255 under uint8
        # (presumably the ignore/void class -- TODO confirm).
        seg = np.asarray(Image.open(self.paths_dict['segs'][index])).astype(np.uint8)-1
        depth = np.asarray(Image.open(self.paths_dict['depths'][index])).astype(np.uint16)
        # All four modalities must share the same spatial size.
        assert (img.shape[0]==HHA.shape[0]==seg.shape[0]==depth.shape[0])
        assert (img.shape[1]==HHA.shape[1]==seg.shape[1]==depth.shape[1])
        # 3-bit circular right-rotation on uint16 (x>>3 | x<<13); NumPy's
        # dtype-preserving shifts make the left shift wrap, so this undoes
        # the bit rotation used when the depth PNGs were written.
        depth = np.bitwise_or(np.right_shift(depth,3),np.left_shift(depth,16-3))
        depth = depth.astype(np.float32)/120. # 1/5 * depth
        params = get_params_sunrgbd(self.opt, seg.shape, maxcrop=.7)
        # Depth/seg are never mean-normalized; seg uses nearest-neighbour
        # resampling so label ids are not interpolated.
        depth_tensor_tranformed = transform(depth, params, normalize=False,istrain=self.opt.isTrain)
        seg_tensor_tranformed = transform(seg, params, normalize=False,method='nearest',istrain=self.opt.isTrain)
        if self.opt.inputmode == 'bgr-mean':
            img_tensor_tranformed = transform(img, params, normalize=False, istrain=self.opt.isTrain, option=1)
            HHA_tensor_tranformed = transform(HHA, params, normalize=False, istrain=self.opt.isTrain, option=2)
        else:
            img_tensor_tranformed = transform(img, params, istrain=self.opt.isTrain, option=1)
            HHA_tensor_tranformed = transform(HHA, params, istrain=self.opt.isTrain, option=2)
        # print img_tensor_tranformed
        # print(np.unique(depth_tensor_tranformed.numpy()).shape)
        # print img_tensor_tranformed.size()
        # 'imgpath' reports the seg label path, not the RGB path (kept as-is;
        # downstream code appears to key on it).
        return {'image':img_tensor_tranformed,
                'depth':depth_tensor_tranformed,
                'seg': seg_tensor_tranformed,
                'HHA': HHA_tensor_tranformed,
                'imgpath': self.paths_dict['segs'][index]}

    def __len__(self):
        return self.len

    def name(self):
        return 'sunrgbd_dataset'
class SUNRGBDDataset_val(BaseDataset):
    """Validation split of SUN RGB-D: deterministic variant of SUNRGBDDataset."""

    def __init__(self, opt):
        self.opt = opt
        # Fixed seed for reproducible validation (training split seeds from
        # the clock instead).
        np.random.seed(8964)
        self.paths_dict = make_dataset_fromlst(opt.dataroot, opt.vallist)
        self.len = len(self.paths_dict['images'])

    def __getitem__(self, index):
        """Load, decode and transform one validation sample (test-mode params)."""
        #self.paths['images'][index]
        img = np.asarray(Image.open(self.paths_dict['images'][index]))#.astype(np.uint8)
        # Channel axis reversed -- presumably HHA is stored BGR on disk.
        HHA = np.asarray(Image.open(self.paths_dict['HHAs'][index]))[:,:,::-1]
        # Labels shifted by one; label 0 wraps to 255 under uint8.
        seg = np.asarray(Image.open(self.paths_dict['segs'][index])).astype(np.uint8)-1
        depth = np.asarray(Image.open(self.paths_dict['depths'][index])).astype(np.uint16)
        # 3-bit circular right-rotation on uint16 (x>>3 | x<<13), undoing the
        # bit rotation in the stored depth PNGs.
        depth = np.bitwise_or(np.right_shift(depth,3),np.left_shift(depth,16-3))
        depth = depth.astype(np.float32)/120. # 1/5 * depth
        # All four modalities must share the same spatial size.
        assert (img.shape[0]==HHA.shape[0]==seg.shape[0]==depth.shape[0])
        assert (img.shape[1]==HHA.shape[1]==seg.shape[1]==depth.shape[1])
        # test=True: deterministic (no random crop/flip) parameters.
        params = get_params_sunrgbd(self.opt, seg.shape, test=True)
        depth_tensor_tranformed = transform(depth, params, normalize=False,istrain=self.opt.isTrain)
        seg_tensor_tranformed = transform(seg, params, normalize=False,method='nearest',istrain=self.opt.isTrain)
        # HHA_tensor_tranformed = transform(HHA, params,istrain=self.opt.isTrain)
        if self.opt.inputmode == 'bgr-mean':
            img_tensor_tranformed = transform(img, params, normalize=False, istrain=self.opt.isTrain, option=1)
            HHA_tensor_tranformed = transform(HHA, params, normalize=False, istrain=self.opt.isTrain, option=2)
        else:
            img_tensor_tranformed = transform(img, params, istrain=self.opt.isTrain, option=1)
            HHA_tensor_tranformed = transform(HHA, params, istrain=self.opt.isTrain, option=2)
        # 'imgpath' reports the seg label path, mirroring the training split.
        return {'image':img_tensor_tranformed,
                'depth':depth_tensor_tranformed,
                'seg': seg_tensor_tranformed,
                'HHA': HHA_tensor_tranformed,
                'imgpath': self.paths_dict['segs'][index]}

    def __len__(self):
        return self.len

    def name(self):
        return 'sunrgbd_dataset_Val'
| 44.116667
| 113
| 0.647526
| 679
| 5,294
| 4.886598
| 0.178203
| 0.052743
| 0.054852
| 0.082278
| 0.773357
| 0.773357
| 0.773357
| 0.755877
| 0.710066
| 0.682942
| 0
| 0.015025
| 0.207971
| 5,294
| 119
| 114
| 44.487395
| 0.776294
| 0.090102
| 0
| 0.658824
| 0
| 0
| 0.043687
| 0
| 0
| 0
| 0
| 0
| 0.047059
| 1
| 0.105882
| false
| 0
| 0.047059
| 0.047059
| 0.258824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9a735bf957ffc30fea6d0bb1fe8f079ce7582eb6
| 23,569
|
py
|
Python
|
extern/face_expression/face_expression/dataset.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | 1
|
2022-02-07T06:12:26.000Z
|
2022-02-07T06:12:26.000Z
|
extern/face_expression/face_expression/dataset.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | null | null | null |
extern/face_expression/face_expression/dataset.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
import pickle
import h5py
from tqdm import tqdm
import numpy as np
import torch
import cv2
import scipy.spatial
import hydra
from face_expression import utils
from face_expression.third_party.face_mesh_mediapipe import FaceMeshMediaPipe
# class VoxCeleb2FaceDataset(torch.utils.data.Dataset):
# def __init__(
# self,
# h5_path,
# scheme_path,
# image_root,
# return_images=True,
# bbox_scale=2.0,
# image_shape=(256, 256),
# sample_range=None
# ):
# self.h5_path = h5_path
# self.scheme_path = scheme_path
# self.image_root = image_root
# self.return_images = return_images
# self.bbox_scale = bbox_scale
# self.image_shape = image_shape
# self.sample_range = sample_range
# # load scheme
# with open(scheme_path, 'rb') as f:
# self.scheme = pickle.load(f)
# if sample_range is not None:
# self.scheme = [self.scheme[i] for i in range(sample_range[0], sample_range[1], sample_range[2])]
# def open_h5_file(self):
# self.h5f = h5py.File(self.h5_path, mode='r')
# def load_image(self, identity_id, video_id, utterance_id, seq_index):
# image_dir = os.path.join(self.image_root, identity_id, video_id, utterance_id)
# names = sorted(os.listdir(image_dir))
# if seq_index < len(names):
# name = names[seq_index]
# path = os.path.join(image_dir, name)
# image = cv2.imread(path)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# else:
# # black image mock
# name = names[0]
# path = os.path.join(image_dir, name)
# image = cv2.imread(path)
# image = np.zeros(image.shape, dtype=np.uint8)
# return image
# def get_camera_matrix(self, h, w):
# fx, fy = 3000.0, 3000.0
# cx, cy = w/2, h/2
# camera_martix = np.array([
# [fx, 0.0, cx],
# [0.0, fy, cy],
# [0.0, 0.0, 1.0]
# ])
# return camera_martix
# def get_transformation_matrix(self):
# transformation_matrix = np.eye(3, 4)
# return transformation_matrix
# def get_bbox(self, keypoints_2d):
# left, top, right, down = (
# keypoints_2d[:, 0].min(),
# keypoints_2d[:, 1].min(),
# keypoints_2d[:, 0].max(),
# keypoints_2d[:, 1].max()
# )
# # convex_hull = scipy.spatial.ConvexHull(points)
# # center_x, center_y = (np.mean(convex_hull.points[convex_hull.vertices, axis]) for axis in (0, 1))
# center_x, center_y = (left + right) / 2, (top + down) / 2
# w, h = right - left, down - top
# bbox = (
# center_x - w/2,
# center_y - h/2,
# center_x + w/2,
# center_y + h/2
# )
# bbox = utils.common.utils.common.get_square_bbox(bbox)
# bbox = utils.common.utils.common.scale_bbox(bbox, self.bbox_scale)
# return bbox
# def normalize_keypoints_2d(self, keypoints_2d):
# convex_hull = scipy.spatial.ConvexHull(keypoints_2d)
# center = np.mean(convex_hull.points[convex_hull.vertices], axis=0)
# keypoints_2d = (keypoints_2d - center) / np.sqrt(convex_hull.area)
# return keypoints_2d
# def load_sample(self, identity_id, video_id, utterance_id, seq_index):
# sample = dict()
# # load h5_data
# try:
# h5_data = self.h5f[identity_id][video_id][utterance_id]
# except Exception as e:
# print(identity_id, video_id, utterance_id, seq_index)
# print(e)
# sample['expression'] = h5_data['expressions'][seq_index]
# sample['pose'] = h5_data['poses'][seq_index]
# sample['beta'] = h5_data['betas'][:]
# sample['keypoints_2d'] = h5_data['face_keypoints_2d'][seq_index]
# # load image
# if self.return_images:
# image = self.load_image(identity_id, video_id, utterance_id, seq_index)
# orig_h, orig_w = image.shape[:2]
# # crop
# bbox = self.get_bbox(sample['keypoints_2d'])
# image = utils.common.utils.common.crop_image(image, bbox)
# # resize
# image = utils.common.utils.common.resize_image(image, self.image_shape)
# image = image / 255.0
# image = image.transpose(2, 0, 1)
# sample['image'] = image
# # load projection matrix
# h, w = image.shape[1:3]
# bbox_h, bbox_w = bbox[3] - bbox[1], bbox[2] - bbox[0]
# if 'camera_matrix' in h5_data:
# print('hey')
# camera_matrix = h5_data['camera_matrix'][:]
# else:
# camera_matrix = self.get_camera_matrix(orig_h, orig_w)
# camera_matrix = utils.common.utils.common.update_after_crop_and_resize(
# camera_matrix, bbox, (w/bbox_w, h/bbox_h)
# )
# # update keypoints 2d ufter crop and resize
# sample['keypoints_2d'][:, 0] -= bbox[0]
# sample['keypoints_2d'][:, 1] -= bbox[1]
# sample['keypoints_2d'][:, 0] *= w/bbox_w
# sample['keypoints_2d'][:, 1] *= h/bbox_h
# else:
# image = np.zeros((*self.image_shape, 3), dtype=np.uint8)
# image = image / 255.0
# image = image.transpose(2, 0, 1)
# h, w = image.shape[1:3]
# sample['image'] = image
# if 'camera_matrix' in h5_data:
# camera_matrix = h5_data['camera_matrix'][:]
# else:
# camera_matrix = self.get_camera_matrix(*self.image_shape)
# transformation_matrix = self.get_transformation_matrix()
# projection_matrix = camera_matrix @ transformation_matrix
# sample['camera_matrix'] = camera_matrix
# sample['projection_matrix'] = projection_matrix
# sample['h'] = h
# sample['w'] = w
# # normalize keypoints 2d
# sample['keypoints_2d'] = self.normalize_keypoints_2d(sample['keypoints_2d'])
# return sample
# def __len__(self):
# return len(self.scheme)
# def __getitem__(self, index):
# # this should be normally done in __init__, but due to DataLoader behaviour
# # when num_workers > 1, the h5 file is opened during first data access:
# # https://github.com/pytorch/pytorch/issues/11929#issuecomment-649760983
# if not hasattr(self, 'h5f'):
# self.open_h5_file()
# sample_key = self.scheme[index]
# sample = self.load_sample(*sample_key)
# return sample
# @staticmethod
# def build_scheme(h5f):
# scheme = []
# for identity_id in tqdm(h5f):
# for video_id in h5f[identity_id]:
# for utterance_id in h5f[identity_id][video_id]:
# seq_length = h5f[identity_id][video_id][utterance_id]['expressions'].shape[0]
# for seq_index in range(seq_length):
# scheme.append((identity_id, video_id, utterance_id, seq_index))
# scheme = sorted(scheme)
# return scheme
# @staticmethod
# def preprocess_dataset(face_root, image_root, openpose_root, h5_path):
# # load scheme
# scheme = []
# identity_id_list = sorted(os.listdir(face_root))
# for identity_id in tqdm(identity_id_list):
# identity_dir = os.path.join(face_root, identity_id)
# video_id_list = sorted(os.listdir(identity_dir))
# for video_id in video_id_list:
# video_dir = os.path.join(identity_dir, video_id)
# utterance_id_list = sorted(os.listdir(video_dir))
# for utterance_id in utterance_id_list:
# utterance_dir = os.path.join(video_dir, utterance_id)
# scheme.append((identity_id, video_id, utterance_id))
# scheme = sorted(scheme)
# # build h5 file
# with h5py.File(h5_path, 'w') as hf:
# for (identity_id, video_id, utterance_id) in tqdm(scheme):
# # load face
# face_dir = os.path.join(face_root, identity_id, video_id, utterance_id, 'joints_op_face')
# expressions = np.load(os.path.join(face_dir, 'expressions.npy')) * 100
# poses = np.load(os.path.join(face_dir, 'poses.npy'))
# betas = np.load(os.path.join(face_dir, 'betas.npy'))
# # load openpose keypoints 2d
# openpose_dir = os.path.join(openpose_root, identity_id, video_id, utterance_id)
# face_keypoints_2d_list = []
# names = sorted(os.listdir(openpose_dir))
# for name in names:
# path = os.path.join(openpose_dir, name)
# with open(path) as f:
# openpose_data = json.load(f)
# face_keypoints_2d = openpose_data['people'][0]['face_keypoints_2d']
# face_keypoints_2d = np.array(face_keypoints_2d).reshape(70, 3)
# face_keypoints_2d = face_keypoints_2d[:, :2] # remove confidences
# face_keypoints_2d_list.append(face_keypoints_2d)
# face_keypoints_2d_arr = np.array(face_keypoints_2d_list)
# # save to h5
# group = hf.create_group(f"{identity_id}/{video_id}/{utterance_id}")
# group['expressions'] = expressions
# group['poses'] = poses
# group['betas'] = betas
# group['face_keypoints_2d'] = face_keypoints_2d_arr
class VoxCeleb2MediapipeDataset(torch.utils.data.Dataset):
    """VoxCeleb2 dataset yielding face crops plus MediaPipe face-mesh keypoints.

    Per-frame face parameters (expressions, poses, betas) and OpenPose 2D
    face keypoints are read from a preprocessed HDF5 file (built by
    ``preprocess_dataset``); RGB frames are read from ``image_root``.  Each
    sample is cropped around the face (coarsely located via the OpenPose
    keypoints, then refined with a MediaPipe face-mesh detection), resized,
    and returned together with camera/projection matrices updated for the crop.
    """
    def __init__(
        self, *,
        h5_path='', scheme_path='',
        image_root='',
        return_keypoints_3d=False,
        return_images=True, bbox_scale=2.0, image_shape=(256, 256),
        sample_range=None
    ):
        """Configure the dataset.

        Args:
            h5_path: preprocessed HDF5 file with expressions/poses/betas and
                OpenPose face keypoints (see ``preprocess_dataset``).
            scheme_path: pickled list of (identity_id, video_id,
                utterance_id, seq_index) sample keys (see ``build_scheme``).
            image_root: root directory of the extracted video frames.
            return_keypoints_3d: if True, keypoints keep their z coordinate;
                otherwise only x/y are returned.
            return_images: must be True -- an image-free mode is not
                implemented by this dataset variant.
            bbox_scale: scale factor applied to the square face bounding box.
            image_shape: (height, width) of the returned crops.
            sample_range: optional (start, end, step) subsampling of the scheme.
        """
        # image-free mode is not supported here
        assert return_images
        self.h5_path = h5_path
        self.scheme_path = scheme_path
        self.return_keypoints_3d = return_keypoints_3d
        self.image_root = image_root
        self.return_images = return_images
        self.bbox_scale = bbox_scale
        self.image_shape = image_shape
        self.sample_range = sample_range
        # load facemesh model (bundled MediaPipe detector + landmark nets)
        models_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "third_party", "face_mesh_mediapipe", "models")
        anchors_path = os.path.join(models_dir, "face_anchors.csv")
        detection_model_path = os.path.join(models_dir, "face_detection_front.tflite")
        landmark_model_path = os.path.join(models_dir, "face_landmark.tflite")
        self.face_mesh_model = FaceMeshMediaPipe(anchors_path, detection_model_path, landmark_model_path, bbox_scale=1.5)
        # load scheme (pickled list of sample keys); optionally subsample it
        with open(scheme_path, 'rb') as f:
            self.scheme = pickle.load(f)
        if sample_range is not None:
            start = max(0, sample_range[0])
            end = min(len(self.scheme), sample_range[1])
            step = sample_range[2]
            self.scheme = [self.scheme[i] for i in range(start, end, step)]
    def open_h5_file(self):
        """Open the HDF5 file read-only; deferred until first data access
        (see the note in ``__getitem__``)."""
        self.h5f = h5py.File(self.h5_path, mode='r')
    def load_image(self, identity_id, video_id, utterance_id, seq_index):
        """Load frame ``seq_index`` of an utterance as an RGB uint8 array.

        Falls back to the video-level 'color_undistorted' directory when the
        utterance directory is missing; when ``seq_index`` is past the last
        available frame, returns an all-black image of the frame size.
        """
        image_dir = os.path.join(self.image_root, identity_id, video_id, utterance_id)
        if not os.path.exists(image_dir):
            image_dir = os.path.join(self.image_root, identity_id, video_id, 'color_undistorted')
        names = sorted(os.listdir(image_dir))
        if seq_index < len(names):
            name = names[seq_index]
            path = os.path.join(image_dir, name)
            image = cv2.imread(path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        else:
            # black image mock: read the first frame only to learn the shape
            name = names[0]
            path = os.path.join(image_dir, name)
            image = cv2.imread(path)
            image = np.zeros(image.shape, dtype=np.uint8)
        return image
    def get_camera_matrix(self, h, w):
        """Return fallback pinhole intrinsics: fixed focal length, principal
        point at the image center (used when the HDF5 group has no
        calibrated 'camera_matrix')."""
        fx, fy = 3000.0, 3000.0
        cx, cy = w/2, h/2
        # NOTE(review): 'camera_martix' is a typo for 'camera_matrix'
        # (local name only, callers unaffected)
        camera_martix = np.array([
            [fx, 0.0, cx],
            [0.0, fy, cy],
            [0.0, 0.0, 1.0]
        ])
        return camera_martix
    def get_transformation_matrix(self):
        """Return identity [R|t] extrinsics (3x4): camera frame == world frame."""
        transformation_matrix = np.eye(3, 4)
        return transformation_matrix
    def get_bbox(self, keypoints_2d):
        """Compute a square, scaled bounding box around 2D keypoints.

        Returns a fallback box (0, 0, 100, 100) when the keypoints are all
        zeros or contain NaNs (i.e. detection failed).
        """
        left, top, right, down = (
            keypoints_2d[:, 0].min(),
            keypoints_2d[:, 1].min(),
            keypoints_2d[:, 0].max(),
            keypoints_2d[:, 1].max()
        )
        center_x, center_y = (left + right) / 2, (top + down) / 2
        w, h = right - left, down - top
        bbox = (
            center_x - w/2,
            center_y - h/2,
            center_x + w/2,
            center_y + h/2
        )
        if np.sum(bbox) == 0.0 or np.sum(np.isnan(bbox)) > 0:
            return np.array([0.0, 0.0, 100.0, 100.0])
        bbox = utils.common.get_square_bbox(bbox)
        bbox = utils.common.scale_bbox(bbox, self.bbox_scale)
        return bbox
    # def normalize_keypoints_2d(self, keypoints_2d, image_shape):
    #     convex_hull = scipy.spatial.ConvexHull(keypoints_2d[:, :2])
    #     center = np.mean(convex_hull.points[convex_hull.vertices], axis=0)
    #     keypoints_2d[:, :2] = keypoints_2d[:, :2] - center
    #     if self.keypoints_2d_normalization == 'area':
    #         keypoints_2d[:, :2] = keypoints_2d[:, :2] / np.sqrt(convex_hull.area)
    #     elif self.keypoints_2d_normalization == 'image_shape':
    #         keypoints_2d[:, :2] = keypoints_2d[:, :2] / np.array([image_shape[1], image_shape[0]])
    #     elif self.keypoints_2d_normalization == 'no':
    #         pass
    #     else:
    #         raise NotImplementedError("Unknown keypoints_2d_normalization mode: {self.keypoints_2d_normalization}")
    #     # norm depth
    #     if keypoints_2d.shape[1] == 3: # 3d keypoints
    #         keypoints_2d[:, 2] /= 100.0
    #     return keypoints_2d
    def load_sample(self, identity_id, video_id, utterance_id, seq_index):
        """Assemble one sample for frame ``seq_index`` of an utterance.

        Returns a dict with face parameters, (normalized) MediaPipe
        keypoints, the cropped/resized CHW image in [0, 1], and
        camera/projection matrices updated for the crop.
        """
        sample = dict()
        sample['key'] = (identity_id, video_id, utterance_id, seq_index)
        # load h5_data
        try:
            h5_data = self.h5f[identity_id][video_id][utterance_id]
        except Exception as e:
            # NOTE(review): on failure h5_data stays unbound and the next
            # statement raises NameError -- consider re-raising here instead.
            print(identity_id, video_id, utterance_id, seq_index)
            print(e)
        sample['expression'] = h5_data['expressions'][seq_index]
        sample['pose'] = h5_data['poses'][seq_index] # 90 = [63 pose + 3 jaw + 6 eye + 12 hand + 3 trans + 3 root_orient]
        sample['beta'] = h5_data['betas'][:]
        sample['keypoints_2d_op'] = h5_data['face_keypoints_2d'][seq_index].astype(np.float32)
        # load image
        if self.return_images:
            image = self.load_image(identity_id, video_id, utterance_id, seq_index)
            orig_h, orig_w = image.shape[:2]
            # get keypoints_2d: crop around the OpenPose box, then run the
            # MediaPipe face mesh on the crop for 468 refined keypoints
            op_bbox = self.get_bbox(sample['keypoints_2d_op'])
            image_op_cropped = utils.common.crop_image(image, op_bbox)
            keypoints_3d, keypoints_3d_normed = self.face_mesh_model(image_op_cropped)
            if keypoints_3d_normed is None:
                # face-mesh detection failed: zero keypoints, keep OpenPose box
                keypoints_3d_normed = np.zeros((468, 3))
                keypoints_3d = np.zeros((468, 3))
                bbox = op_bbox
            else:
                # map keypoints from crop coordinates back to the full image
                keypoints_3d[:, :2] += np.array(op_bbox[:2])
                bbox = self.get_bbox(keypoints_3d[:, :2])
            if self.return_keypoints_3d:
                sample['keypoints'] = keypoints_3d_normed.astype(np.float32)
                sample['keypoints_orig'] = keypoints_3d.astype(np.float32)
            else:
                sample['keypoints'] = keypoints_3d_normed[:, :2].astype(np.float32)
                sample['keypoints_orig'] = keypoints_3d[:, :2].astype(np.float32)
            # crop
            image = utils.common.crop_image(image, bbox)
            # resize
            image = utils.common.resize_image(image, self.image_shape)
            image = image / 255.0
            image = image.transpose(2, 0, 1)
            sample['image'] = image
            # load projection matrix (image is CHW now, so shape[1:3] is H, W)
            h, w = image.shape[1:3]
            bbox_h, bbox_w = bbox[3] - bbox[1], bbox[2] - bbox[0]
            if 'camera_matrix' in h5_data:
                camera_matrix = h5_data['camera_matrix'][:]
            else:
                camera_matrix = self.get_camera_matrix(orig_h, orig_w)
            camera_matrix = utils.common.update_after_crop_and_resize(
                camera_matrix, bbox, (w/bbox_w, h/bbox_h)
            )
            transformation_matrix = self.get_transformation_matrix()
            projection_matrix = camera_matrix @ transformation_matrix
            sample['camera_matrix'] = camera_matrix
            sample['projection_matrix'] = projection_matrix
            sample['h'] = h
            sample['w'] = w
            # update keypoints 2d after crop and resize
            sample['keypoints_orig'][:, 0] -= bbox[0]
            sample['keypoints_orig'][:, 1] -= bbox[1]
            sample['keypoints_orig'][:, 0] *= w/bbox_w
            sample['keypoints_orig'][:, 1] *= h/bbox_h
        # # normalize keypoints 2d
        # sample['keypoints_2d_orig'] = sample['keypoints_2d'].copy()
        # if not np.all(sample['keypoints_2d'] == 0.0):
        #     try:
        #         sample['keypoints_2d'] = self.normalize_keypoints_2d(sample['keypoints_2d'], (h, w)).astype(np.float32)
        #     except Exception as e:
        #         sample['keypoints_2d'] = np.zeros_like(sample['keypoints_2d']).astype(np.float32)
        return sample
    def __len__(self):
        """Number of samples (after the optional sample_range subsampling)."""
        return len(self.scheme)
    def __getitem__(self, index):
        # this should be normally done in __init__, but due to DataLoader behaviour
        # when num_workers > 1, the h5 file is opened during first data access:
        # https://github.com/pytorch/pytorch/issues/11929#issuecomment-649760983
        if not hasattr(self, 'h5f'):
            self.open_h5_file()
        sample_key = self.scheme[index]
        sample = self.load_sample(*sample_key)
        return sample
    @staticmethod
    def build_scheme(h5f):
        """Enumerate every frame in the HDF5 file as a sorted list of
        (identity_id, video_id, utterance_id, seq_index) sample keys."""
        scheme = []
        for identity_id in tqdm(h5f):
            for video_id in h5f[identity_id]:
                for utterance_id in h5f[identity_id][video_id]:
                    seq_length = h5f[identity_id][video_id][utterance_id]['expressions'].shape[0]
                    for seq_index in range(seq_length):
                        scheme.append((identity_id, video_id, utterance_id, seq_index))
        scheme = sorted(scheme)
        return scheme
    @staticmethod
    def preprocess_dataset(face_root, image_root, openpose_root, h5_path):
        """Pack per-utterance face parameters and OpenPose face keypoints
        into one HDF5 file at ``h5_path``.

        Expects ``face_root/<identity>/<video>/<utterance>/joints_op_face``
        to contain expressions/poses/betas .npy files and ``openpose_root``
        to mirror the same layout with one OpenPose JSON per frame.
        """
        # load scheme: every (identity, video, utterance) triple under face_root
        scheme = []
        identity_id_list = sorted(os.listdir(face_root))
        for identity_id in tqdm(identity_id_list):
            identity_dir = os.path.join(face_root, identity_id)
            video_id_list = sorted(os.listdir(identity_dir))
            for video_id in video_id_list:
                video_dir = os.path.join(identity_dir, video_id)
                utterance_id_list = sorted(os.listdir(video_dir))
                for utterance_id in utterance_id_list:
                    utterance_dir = os.path.join(video_dir, utterance_id)
                    scheme.append((identity_id, video_id, utterance_id))
        scheme = sorted(scheme)
        # build h5 file
        with h5py.File(h5_path, 'w') as hf:
            for (identity_id, video_id, utterance_id) in tqdm(scheme):
                # load face parameters; expressions are scaled by 100
                # (presumably to a friendlier numeric range -- TODO confirm)
                face_dir = os.path.join(face_root, identity_id, video_id, utterance_id, 'joints_op_face')
                expressions = np.load(os.path.join(face_dir, 'expressions.npy')) * 100
                poses = np.load(os.path.join(face_dir, 'poses.npy'))
                betas = np.load(os.path.join(face_dir, 'betas.npy'))
                # load openpose keypoints 2d (one JSON file per frame)
                openpose_dir = os.path.join(openpose_root, identity_id, video_id, utterance_id)
                face_keypoints_2d_list = []
                names = sorted(os.listdir(openpose_dir))
                for name in names:
                    path = os.path.join(openpose_dir, name)
                    with open(path) as f:
                        openpose_data = json.load(f)
                    # NOTE(review): assumes the first detected person is the speaker
                    face_keypoints_2d = openpose_data['people'][0]['face_keypoints_2d']
                    face_keypoints_2d = np.array(face_keypoints_2d).reshape(70, 3)
                    face_keypoints_2d = face_keypoints_2d[:, :2] # remove confidences
                    face_keypoints_2d_list.append(face_keypoints_2d)
                face_keypoints_2d_arr = np.array(face_keypoints_2d_list)
                # save to h5
                group = hf.create_group(f"{identity_id}/{video_id}/{utterance_id}")
                group['expressions'] = expressions
                group['poses'] = poses
                group['betas'] = betas
                group['face_keypoints_2d'] = face_keypoints_2d_arr
@hydra.main(config_path='config/default.yaml')
def main(config):
    """Build the preprocessed HDF5 dataset, enumerate its sample scheme,
    and filter out samples that fail to load.

    All paths and dataset options come from the hydra config; the HDF5 file
    and the (filtered) pickled scheme are written to ``config.data.h5_path``
    and ``config.data.scheme_path``.
    """
    print(config.pretty())
    # preprocess raw face/openpose data into a single h5 file
    print("Preprocess dataset")  # fixed: previously printed undefined name `split` (NameError)
    VoxCeleb2FaceDataset.preprocess_dataset(
        config.data.face_root, config.data.image_root, config.data.openpose_root, config.data.h5_path
    )
    # save scheme
    print("Build scheme")
    h5f = h5py.File(config.data.h5_path, mode='r', libver='latest')
    try:
        scheme = VoxCeleb2FaceDataset.build_scheme(h5f)
    finally:
        # fixed: the read handle was previously left open for the rest of the run
        h5f.close()
    with open(config.data.scheme_path, 'wb') as f:
        pickle.dump(scheme, f)
    # filter scheme: drop every sample that raises during loading
    print("Filter scheme")
    dataset = VoxCeleb2FaceDataset(
        config.data.h5_path, config.data.scheme_path,
        config.data.image_root,
        return_images=config.data.return_images, bbox_scale=config.data.bbox_scale, image_shape=config.data.image_shape
    )
    invalid_indices = []
    for i in tqdm(range(len(dataset))):
        try:
            dataset[i]
        except Exception as e:
            invalid_indices.append(i)
            print(f"Index {i} is invalid. Reason: {e}")
    invalid_indices = set(invalid_indices)
    print(f"Found {len(invalid_indices)} invalid samples")
    scheme_filtered = [sample_key for i, sample_key in enumerate(dataset.scheme) if i not in invalid_indices]
    with open(config.data.scheme_path, 'wb') as f:
        pickle.dump(scheme_filtered, f)
    print("Success!")


if __name__ == '__main__':
    main()
| 37.058176
| 125
| 0.566252
| 2,845
| 23,569
| 4.429877
| 0.093849
| 0.075934
| 0.038086
| 0.043164
| 0.794493
| 0.758946
| 0.74879
| 0.726018
| 0.704912
| 0.690391
| 0
| 0.028516
| 0.321524
| 23,569
| 635
| 126
| 37.116535
| 0.759615
| 0.465909
| 0
| 0.092437
| 0
| 0
| 0.057405
| 0.007145
| 0
| 0
| 0
| 0
| 0.004202
| 1
| 0.05042
| false
| 0
| 0.054622
| 0.004202
| 0.147059
| 0.037815
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9a760367155f89800e9ffffd081d1132a56544e5
| 194
|
py
|
Python
|
scripts/item/consume_2432803.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | null | null | null |
scripts/item/consume_2432803.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | null | null | null |
scripts/item/consume_2432803.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | null | null | null |
# Princess No Damage Skin (30-Days)
success = sm.addDamageSkin(2432803)
if success:
sm.chat("The Princess No Damage Skin (30-Days) has been added to your account's damage skin collection.")
| 38.8
| 109
| 0.747423
| 31
| 194
| 4.677419
| 0.677419
| 0.206897
| 0.22069
| 0.275862
| 0.358621
| 0.358621
| 0
| 0
| 0
| 0
| 0
| 0.067073
| 0.154639
| 194
| 4
| 110
| 48.5
| 0.817073
| 0.170103
| 0
| 0
| 0
| 0.333333
| 0.591195
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9a801f3178565c7f1b1008bb487a050d3079d8d5
| 448
|
py
|
Python
|
rush_hour/test_solution.py
|
ssebastianj/taip-2014
|
2a0e62c4bf755ff752136350c246456d65a8c3eb
|
[
"MIT"
] | null | null | null |
rush_hour/test_solution.py
|
ssebastianj/taip-2014
|
2a0e62c4bf755ff752136350c246456d65a8c3eb
|
[
"MIT"
] | null | null | null |
rush_hour/test_solution.py
|
ssebastianj/taip-2014
|
2a0e62c4bf755ff752136350c246456d65a8c3eb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
import pytest
from .solution import calc_minimum_travels
class CalcMininumTravelsTestCase(unittest.TestCase):
    """Unit tests for :func:`calc_minimum_travels`."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_calc_minimum_travels(self):
        cases = [
            ([4, 5, 2, 3, 1], 3),
            ([1, 2, 3], 1),
            ([9, 4, 2, 7, 8, 3, 5, 6, 1], 4),
        ]
        for values, expected in cases:
            self.assertEqual(calc_minimum_travels(values), expected)
| 23.578947
| 69
| 0.638393
| 63
| 448
| 4.365079
| 0.460317
| 0.2
| 0.327273
| 0.261818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061584
| 0.238839
| 448
| 18
| 70
| 24.888889
| 0.744868
| 0.046875
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0.166667
| 0.25
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
9a90d892378e62b46598d590087d4afcc5ce7a6c
| 269
|
py
|
Python
|
NeoAnalysis_Py2.7/NeoAnalysis/__init__.py
|
Research-lab-KUMS/NeoAnalysis
|
32b508dfade3069b1ec5cc7664574b8d3f2d5f57
|
[
"MIT"
] | 23
|
2017-09-04T13:20:38.000Z
|
2022-03-08T08:15:17.000Z
|
NeoAnalysis_Py2.7/NeoAnalysis/__init__.py
|
Research-lab-KUMS/NeoAnalysis
|
32b508dfade3069b1ec5cc7664574b8d3f2d5f57
|
[
"MIT"
] | 4
|
2018-01-05T13:44:29.000Z
|
2021-09-30T17:08:15.000Z
|
NeoAnalysis_Py2.7/NeoAnalysis/__init__.py
|
neoanalysis/NeoAnalysis
|
c5f25b71e16997f3a05f70b1eead11f99a3b7e2b
|
[
"MIT"
] | 5
|
2017-11-26T19:40:46.000Z
|
2021-03-11T17:25:23.000Z
|
__version__ = '0.10.0'
from NeoAnalysis.spikedetection import SpikeDetection
from NeoAnalysis.spikesorting import SpikeSorting
from NeoAnalysis.analogfilter import AnalogFilter
from NeoAnalysis.graphics import Graphics
from NeoAnalysis.popuanalysis import PopuAnalysis
| 38.428571
| 53
| 0.877323
| 29
| 269
| 8
| 0.37931
| 0.323276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01626
| 0.085502
| 269
| 6
| 54
| 44.833333
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0.022305
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9ac5612f4d7fef57c2d92d9c354db5aaef44d59e
| 1,020
|
py
|
Python
|
Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py
|
heimlich1024/OD_CopyPasteExternal
|
943b993198e16d19f1fb4ba44049e498abf1e993
|
[
"Apache-2.0"
] | 278
|
2017-04-27T18:44:06.000Z
|
2022-03-31T02:49:42.000Z
|
Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py
|
heimlich1024/OD_CopyPasteExternal
|
943b993198e16d19f1fb4ba44049e498abf1e993
|
[
"Apache-2.0"
] | 57
|
2017-05-01T11:58:41.000Z
|
2022-02-06T18:43:13.000Z
|
Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py
|
heimlich1024/OD_CopyPasteExternal
|
943b993198e16d19f1fb4ba44049e498abf1e993
|
[
"Apache-2.0"
] | 49
|
2017-04-28T19:24:14.000Z
|
2022-03-12T15:17:13.000Z
|
################################################################################
#
# cmd_copyToExternal.py
#
# Author: Oliver Hotz | Chris Sprance
#
# Description: Copies Geo/Weights/Morphs/UV's to External File
#
# Last Update:
#
################################################################################
import lx
import lxifc
import lxu.command
from od_copy_paste_external import copy_to_external
class ODCopyToExternal(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
def cmd_Flags(self):
return lx.symbol.fCMD_MODEL | lx.symbol.fCMD_UNDO
def basic_Enable(self, msg):
return True
def cmd_Interact(self):
pass
def basic_Execute(self, msg, flags):
# TODO: Disable reload for release
reload(copy_to_external)
copy_to_external.execute()
def cmd_Query(self, index, vaQuery):
lx.notimpl()
lx.bless(ODCopyToExternal, "OD_CopyToExternal")
| 23.72093
| 81
| 0.560784
| 106
| 1,020
| 5.150943
| 0.54717
| 0.07326
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210784
| 1,020
| 42
| 82
| 24.285714
| 0.678261
| 0.160784
| 0
| 0
| 0
| 0
| 0.026439
| 0
| 0
| 0
| 0
| 0.02381
| 0
| 1
| 0.315789
| false
| 0.052632
| 0.210526
| 0.105263
| 0.684211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
9ac8a3896499bd8c6da3c5ab7c320fbd74dda4ff
| 111
|
py
|
Python
|
aiophotoprism/__init__.py
|
zhulik/aiophotoprism
|
91cc263ffbd85c7dc7ccef6d4cdafdfdaf2a4c85
|
[
"MIT"
] | 4
|
2021-08-09T05:02:23.000Z
|
2022-01-30T03:04:29.000Z
|
aiophotoprism/__init__.py
|
zhulik/aiophotoprism
|
91cc263ffbd85c7dc7ccef6d4cdafdfdaf2a4c85
|
[
"MIT"
] | null | null | null |
aiophotoprism/__init__.py
|
zhulik/aiophotoprism
|
91cc263ffbd85c7dc7ccef6d4cdafdfdaf2a4c85
|
[
"MIT"
] | null | null | null |
"""Asynchronous Python client for the Photoprism REST API."""
from .photoprism import API, Photoprism # noqa
| 27.75
| 61
| 0.756757
| 14
| 111
| 6
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153153
| 111
| 3
| 62
| 37
| 0.893617
| 0.54955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9ae9dc9146555c9b41506690dc497c2bf3438943
| 170
|
py
|
Python
|
commands/cmd_invite.py
|
cygnus-dev/python01
|
e0111ef7031f2c931d433d3dc6449c6740a7880e
|
[
"MIT"
] | null | null | null |
commands/cmd_invite.py
|
cygnus-dev/python01
|
e0111ef7031f2c931d433d3dc6449c6740a7880e
|
[
"MIT"
] | 4
|
2021-06-08T22:27:42.000Z
|
2022-03-12T00:51:07.000Z
|
commands/cmd_invite.py
|
cygnus-dev/python01
|
e0111ef7031f2c931d433d3dc6449c6740a7880e
|
[
"MIT"
] | null | null | null |
async def run(ctx):
await ctx.send(''' `bot invite link:`
<https://discord.com/api/oauth2/authorize?client_id=732933945057869867&permissions=538569921&scope=bot>''')
| 42.5
| 107
| 0.747059
| 23
| 170
| 5.478261
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178344
| 0.076471
| 170
| 4
| 107
| 42.5
| 0.624204
| 0
| 0
| 0
| 0
| 0.333333
| 0.723529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b1367245e5290f368fa75d027c1ba49c8fa30f4e
| 5,061
|
py
|
Python
|
src/compare_eval.py
|
gccrpm/cdmf
|
5fca1393acbedbbd6ebc65bf2c9336645cc3e0fc
|
[
"BSD-2-Clause"
] | 1
|
2020-04-16T05:06:39.000Z
|
2020-04-16T05:06:39.000Z
|
src/compare_eval.py
|
gccrpm/cdmf
|
5fca1393acbedbbd6ebc65bf2c9336645cc3e0fc
|
[
"BSD-2-Clause"
] | null | null | null |
src/compare_eval.py
|
gccrpm/cdmf
|
5fca1393acbedbbd6ebc65bf2c9336645cc3e0fc
|
[
"BSD-2-Clause"
] | 1
|
2020-04-16T05:06:52.000Z
|
2020-04-16T05:06:52.000Z
|
import os
import re
import hyperparams as hp
from data_load import DataLoad
from tqdm import tqdm
import numpy as np
import pandas as pd
import tensorflow as tf
def load_ckpt_paths(model_name='cdmf'):
    """Return the paths of all checkpoints listed in the model's TensorFlow
    ``checkpoint`` index file.

    The first line of the index (the "latest checkpoint" entry) is skipped;
    each remaining line contributes one path relative to the model directory.
    """
    ckpt_dir = '../model_ckpt/compare/{}/'.format(model_name)
    with open(ckpt_dir + 'checkpoint', 'r', encoding='utf-8') as index_file:
        entries = index_file.readlines()[1:]
    paths = []
    for entry in entries:
        # strip the key, surrounding quotes and whitespace from each line
        ckpt_name = re.sub(r'\"', '', entry.split(':')[-1]).strip()
        paths.append(os.path.join(ckpt_dir, ckpt_name))
    return paths
if __name__ == '__main__':
data = DataLoad(data_path=hp.DATA_PATH,
fnames=hp.FNAMES,
forced_seq_len=hp.FORCED_SEQ_LEN,
vocab_size=hp.VOCAB_SIZE,
paly_times=hp.PLAY_TIMES,
num_main_actors=hp.NUM_MAIN_ACTORS,
batch_size=hp.BATCH_SIZE,
num_epochs=hp.NUM_EPOCHS,
noise_rate=hp.NOISE_RATE)
# CDMF
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
sess = tf.Session(config=session_conf)
with sess.as_default():
for fpath in load_ckpt_paths('cdmf'):
saver = tf.train.import_meta_graph(fpath+'.meta')
saver.restore(sess, fpath)
# Get the placeholders from the graph by name
m_oids = graph.get_tensor_by_name('movie_order_ids:0')
info = graph.get_tensor_by_name('info:0')
actors = graph.get_tensor_by_name('actors:0')
descriptions = graph.get_tensor_by_name('descriptions:0')
u_oids = graph.get_tensor_by_name('user_order_ids:0')
r_seq = graph.get_tensor_by_name('rating_sequence:0')
dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
# Tensors we want to evaluate
mse_op = graph.get_tensor_by_name('mse/mse_op:0')
# load evalset
eval_iter = data.load_data('eval')
mse, count = 0.0, 0
for (sub_X_user, sub_X_movie), sub_Y in tqdm(eval_iter):
# unpack
sub_u_oids, sub_bu_seq = sub_X_user
sub_m_oids, sub_info, sub_actors, sub_des, sub_bm_seq = sub_X_movie
sub_r_seq = sub_Y
dev_feed_dict = {
m_oids: sub_m_oids,
info: sub_info,
actors: sub_actors,
descriptions: sub_des,
u_oids: sub_u_oids,
r_seq: sub_r_seq,
dropout_keep_prob: hp.DROPOUT_KEEP_PROB}
sub_mse = sess.run(mse_op, feed_dict=dev_feed_dict)
mse += sub_mse
count += 1
rmse = np.sqrt(mse / count)
print('cdmf | rmse:{}'.format(rmse))
# ConvMF
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
sess = tf.Session(config=session_conf)
with sess.as_default():
for fpath in load_ckpt_paths('convmf'):
saver = tf.train.import_meta_graph(fpath+'.meta')
saver.restore(sess, fpath)
# Get the placeholders from the graph by name
m_oids = graph.get_tensor_by_name('movie_order_ids:0')
descriptions = graph.get_tensor_by_name('descriptions:0')
u_oids = graph.get_tensor_by_name('user_order_ids:0')
r_seq = graph.get_tensor_by_name('rating_sequence:0')
dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
# Tensors we want to evaluate
mse_op = graph.get_tensor_by_name('mse/mse_op:0')
# load evalset
eval_iter = data.load_data('eval')
mse, count = 0.0, 0
for (sub_X_user, sub_X_movie), sub_Y in tqdm(eval_iter):
# unpack
sub_u_oids, sub_bu_seq = sub_X_user
sub_m_oids, sub_info, sub_actors, sub_des, sub_bm_seq = sub_X_movie
sub_r_seq = sub_Y
dev_feed_dict = {
m_oids: sub_m_oids,
descriptions: sub_des,
u_oids: sub_u_oids,
r_seq: sub_r_seq,
dropout_keep_prob: hp.DROPOUT_KEEP_PROB}
sub_mse = sess.run(mse_op, feed_dict=dev_feed_dict)
mse += sub_mse
count += 1
rmse = np.sqrt(mse / count)
print('convmf | rmse:{}'.format(rmse))
| 40.814516
| 87
| 0.538234
| 634
| 5,061
| 3.929022
| 0.20347
| 0.038539
| 0.078683
| 0.089924
| 0.719791
| 0.703733
| 0.703733
| 0.703733
| 0.703733
| 0.703733
| 0
| 0.007899
| 0.37463
| 5,061
| 124
| 88
| 40.814516
| 0.779147
| 0.040308
| 0
| 0.632653
| 0
| 0
| 0.065621
| 0.005159
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010204
| false
| 0
| 0.102041
| 0
| 0.122449
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b17694133578e1b1a9c1c195cbd91ca5e72b6295
| 181
|
py
|
Python
|
test/conftest.py
|
PlaidCloud/sqlalchemy-greenplum
|
b40beeee8b775290b262d3b9989e8faeba8b2d20
|
[
"BSD-3-Clause"
] | 6
|
2019-05-10T18:31:05.000Z
|
2021-09-08T16:59:46.000Z
|
test/conftest.py
|
PlaidCloud/sqlalchemy-greenplum
|
b40beeee8b775290b262d3b9989e8faeba8b2d20
|
[
"BSD-3-Clause"
] | 2
|
2018-06-04T23:28:16.000Z
|
2022-03-08T14:20:14.000Z
|
test/conftest.py
|
PlaidCloud/sqlalchemy-greenplum
|
b40beeee8b775290b262d3b9989e8faeba8b2d20
|
[
"BSD-3-Clause"
] | 1
|
2019-06-13T10:12:44.000Z
|
2019-06-13T10:12:44.000Z
|
from sqlalchemy.dialects import registry
registry.register("greenplum", "sqlalchemy_greenplum.dialect", "GreenplumDialect")
from sqlalchemy.testing.plugin.pytestplugin import *
| 22.625
| 82
| 0.823204
| 18
| 181
| 8.222222
| 0.666667
| 0.189189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082873
| 181
| 7
| 83
| 25.857143
| 0.891566
| 0
| 0
| 0
| 0
| 0
| 0.296089
| 0.156425
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b19b3f7c4a68fc939bc0e963cc37d4583121c7aa
| 111
|
py
|
Python
|
Game22/modules/online/__init__.py
|
ttkaixin1998/pikachupythongames
|
609a3a5a2be3f5a187c332c7980bb5bb14548f02
|
[
"MIT"
] | 4,013
|
2018-06-16T08:00:02.000Z
|
2022-03-30T11:48:14.000Z
|
Game22/modules/online/__init__.py
|
pigbearcat/Games
|
b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2
|
[
"MIT"
] | 22
|
2018-10-18T00:15:50.000Z
|
2022-01-13T08:16:15.000Z
|
Game22/modules/online/__init__.py
|
pigbearcat/Games
|
b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2
|
[
"MIT"
] | 2,172
|
2018-07-20T04:03:14.000Z
|
2022-03-31T14:18:29.000Z
|
'''初始化'''
from .server import gobangSever
from .client import gobangClient
from .playOnline import playOnlineUI
| 27.75
| 36
| 0.810811
| 13
| 111
| 6.923077
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 111
| 4
| 36
| 27.75
| 0.909091
| 0.027027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b19eba8650f17954158c7ab292c05abfa2a4065c
| 44
|
py
|
Python
|
src/basics/files/delete_fichero.py
|
FoxNeo/MyPythonProjects
|
3499ef0853f0087f6f143e1633b0a88a3d7b9818
|
[
"MIT"
] | null | null | null |
src/basics/files/delete_fichero.py
|
FoxNeo/MyPythonProjects
|
3499ef0853f0087f6f143e1633b0a88a3d7b9818
|
[
"MIT"
] | null | null | null |
src/basics/files/delete_fichero.py
|
FoxNeo/MyPythonProjects
|
3499ef0853f0087f6f143e1633b0a88a3d7b9818
|
[
"MIT"
] | null | null | null |
import os
os.remove("fichero_generado.txt")
| 14.666667
| 33
| 0.795455
| 7
| 44
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 44
| 3
| 33
| 14.666667
| 0.829268
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
494e7be275c169f4f4b49f4a379016a1594a2a8b
| 135
|
py
|
Python
|
quicksilver.py
|
binaryflesh/quicksilver
|
0d65259f305beb05efe00f096e48c41b62bfdf57
|
[
"MIT"
] | 1
|
2018-12-01T07:52:13.000Z
|
2018-12-01T07:52:13.000Z
|
quicksilver.py
|
binaryflesh/quicksilver
|
0d65259f305beb05efe00f096e48c41b62bfdf57
|
[
"MIT"
] | 7
|
2018-12-02T23:31:38.000Z
|
2018-12-03T07:44:41.000Z
|
quicksilver.py
|
binaryflesh/quicksilver
|
0d65259f305beb05efe00f096e48c41b62bfdf57
|
[
"MIT"
] | null | null | null |
# Quicksilver.py - Agnostic project analyzer that generates resourceful diagrams. WIP
# Copyright (C) 2018 Logan Campos - @binaryflesh
| 45
| 85
| 0.792593
| 16
| 135
| 6.6875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 0.140741
| 135
| 2
| 86
| 67.5
| 0.887931
| 0.962963
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4953e3a0846206727edbbb495ede380b618ab266
| 1,781
|
py
|
Python
|
PluginSDK/PythonRecon/Python/excel_helper.py
|
PengJinFa/YAPNew
|
fafee8031669b24d0cc74876a477c97d0d7ebadc
|
[
"MIT"
] | 20
|
2016-07-05T05:23:04.000Z
|
2021-11-07T14:25:59.000Z
|
PluginSDK/PythonRecon/Python/excel_helper.py
|
PengJinFa/YAPNew
|
fafee8031669b24d0cc74876a477c97d0d7ebadc
|
[
"MIT"
] | 20
|
2016-06-08T06:36:55.000Z
|
2018-04-25T09:52:18.000Z
|
PluginSDK/PythonRecon/Python/excel_helper.py
|
PengJinFa/YAPNew
|
fafee8031669b24d0cc74876a477c97d0d7ebadc
|
[
"MIT"
] | 21
|
2016-05-31T15:34:09.000Z
|
2021-11-07T14:26:03.000Z
|
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
import numbers
wb = Workbook()
def XL_Location(row, column):
    """Return an Excel cell reference (e.g. ``"B3"``) for 1-based row/column."""
    return "{}{}".format(get_column_letter(column), row)
def Save_Column_Title(file_dir, features, row_index, column_start):
    """Write the keys of *features* down one column of the active worksheet.

    Keys are written starting at (row_index, column_start); a cell that is
    already filled must hold the same key, otherwise nothing is saved.

    Returns:
        True when the titles were written and the workbook saved,
        False on a title mismatch or any error while writing/saving.
    """
    ws = wb.active
    keys = list(features.keys())
    try:
        for index, key in enumerate(keys):
            loc = XL_Location(index + row_index, column_start)
            if ws[loc].value is None:
                ws[loc] = key
            elif ws[loc].value != key:
                # existing title differs -- refuse to overwrite.
                # (was an `assert`, which disappears under `python -O`)
                return False
        wb.save(file_dir)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate
        return False
    return True
# save to excel data
def Save_Column_Exel(file_dir, features, row_index, column_start):
    """Write the values of *features* down one column of the active worksheet.

    Values are written starting at (row_index, column_start); non-numeric
    values are stringified so openpyxl can store them.

    Returns:
        True when the values were written and the workbook saved,
        False on any error while writing/saving.
    """
    ws = wb.active
    vals = list(features.values())
    try:
        for index, val in enumerate(vals):
            # fixed: `loc` was computed and then ignored, with XL_Location
            # redundantly recomputed on every assignment
            loc = XL_Location(index + row_index, column_start)
            ws[loc] = val if isinstance(val, numbers.Number) else str(val)
        wb.save(file_dir)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate
        return False
    return True
def Save_Row_Exel(file_dir, features, row_index, column_start):
    """Write the values of *features* along one row of the active worksheet.

    Values are written starting at (row_index, column_start); non-numeric
    values are stringified so openpyxl can store them.

    Returns:
        True when the values were written and the workbook saved,
        False on any error while writing/saving.
    """
    ws = wb.active
    vals = list(features.values())
    try:
        for index, val in enumerate(vals):
            # fixed: the original computed an unused `loc` with the
            # column-wise formula, which contradicted the actual row-wise writes
            loc = XL_Location(row_index, index + column_start)
            ws[loc] = val if isinstance(val, numbers.Number) else str(val)
        wb.save(file_dir)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate
        return False
    return True
| 30.706897
| 83
| 0.60977
| 242
| 1,781
| 4.305785
| 0.214876
| 0.076775
| 0.153551
| 0.145873
| 0.74856
| 0.74856
| 0.74856
| 0.74856
| 0.71785
| 0.601727
| 0
| 0.002366
| 0.28804
| 1,781
| 58
| 84
| 30.706897
| 0.819401
| 0.010107
| 0
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 1
| 0.083333
| false
| 0
| 0.0625
| 0.020833
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
496b0cdd9c9c0a2581d8be6db775211985c0614c
| 278
|
py
|
Python
|
hubspot/discovery/crm/extensions/videoconferencing/discovery.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 117
|
2020-04-06T08:22:53.000Z
|
2022-03-18T03:41:29.000Z
|
hubspot/discovery/crm/extensions/videoconferencing/discovery.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 62
|
2020-04-06T16:21:06.000Z
|
2022-03-17T16:50:44.000Z
|
hubspot/discovery/crm/extensions/videoconferencing/discovery.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 45
|
2020-04-06T16:13:52.000Z
|
2022-03-30T21:33:17.000Z
|
import hubspot.crm.extensions.videoconferencing as api_client
from ....discovery_base import DiscoveryBase
class Discovery(DiscoveryBase):
@property
def settings_api(self) -> api_client.SettingsApi:
return self._configure_api_client(api_client, "SettingsApi")
| 30.888889
| 68
| 0.78777
| 32
| 278
| 6.59375
| 0.625
| 0.170616
| 0.189573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129496
| 278
| 8
| 69
| 34.75
| 0.871901
| 0
| 0
| 0
| 0
| 0
| 0.039568
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
497a5f4c2e39ef62c200675216c42fbc21c52436
| 34
|
py
|
Python
|
tests/snmp/test_base.py
|
zohassadar/netdisc
|
9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8
|
[
"MIT"
] | null | null | null |
tests/snmp/test_base.py
|
zohassadar/netdisc
|
9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8
|
[
"MIT"
] | null | null | null |
tests/snmp/test_base.py
|
zohassadar/netdisc
|
9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8
|
[
"MIT"
] | null | null | null |
from netdisc.snmp import snmpbase
| 17
| 33
| 0.852941
| 5
| 34
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b8ce37a154e212778f695fcf9135c3e96507ff09
| 88
|
py
|
Python
|
app/admin/controllers/__init__.py
|
aries-zhang/flask-template
|
369d77f2910f653f46668dd9bda735954b6c145e
|
[
"MIT"
] | null | null | null |
app/admin/controllers/__init__.py
|
aries-zhang/flask-template
|
369d77f2910f653f46668dd9bda735954b6c145e
|
[
"MIT"
] | null | null | null |
app/admin/controllers/__init__.py
|
aries-zhang/flask-template
|
369d77f2910f653f46668dd9bda735954b6c145e
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
# Blueprint collecting the admin controllers; all of its routes are mounted under /manage.
admin = Blueprint('admin', __name__, url_prefix='/manage')
| 22
| 58
| 0.761364
| 11
| 88
| 5.636364
| 0.818182
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 88
| 3
| 59
| 29.333333
| 0.794872
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
b8de8fb9e2f63a96dbca5bb30f4841f157b6ed7b
| 160
|
py
|
Python
|
items.py
|
yarnoiser/PyDungeon
|
c37ad314605065194732202539db50eef94ea3da
|
[
"BSD-3-Clause"
] | 1
|
2018-05-15T01:26:04.000Z
|
2018-05-15T01:26:04.000Z
|
items.py
|
yarnoiser/PyDungeon
|
c37ad314605065194732202539db50eef94ea3da
|
[
"BSD-3-Clause"
] | null | null | null |
items.py
|
yarnoiser/PyDungeon
|
c37ad314605065194732202539db50eef94ea3da
|
[
"BSD-3-Clause"
] | null | null | null |
from dice import *
class Item():
    """Base class for all game items."""

    def __init__(self, weight):
        # How heavy the item is; units are whatever the game uses.
        self.weight = weight
class Weapon(Item):  # was `Weapon(item)`: lowercase `item` is undefined (NameError)
    """An Item that can be used to attack.

    The original snippet was truncated: the ``__init__`` line was missing its
    colon and body. Reconstructed minimally from the parameter list.
    """

    def __init__(self, weight, damage_die, reach):
        super().__init__(weight)
        # die rolled for damage (from the dice module) -- TODO confirm intended use
        self.damage_die = damage_die
        # attack range of the weapon
        self.reach = reach
| 14.545455
| 47
| 0.69375
| 22
| 160
| 4.636364
| 0.590909
| 0.294118
| 0.215686
| 0.294118
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19375
| 160
| 10
| 48
| 16
| 0.790698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.166667
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b8f4752d0093b3381dd899cada064a8f50a481ea
| 16
|
py
|
Python
|
cdn/__init__.py
|
Kingjmk/mlfaati
|
12c0dcbe0389c2c1da0bde80509fb3374955e293
|
[
"MIT"
] | 1
|
2021-01-04T07:34:34.000Z
|
2021-01-04T07:34:34.000Z
|
cdn/__init__.py
|
Kingjmk/mlfaati
|
12c0dcbe0389c2c1da0bde80509fb3374955e293
|
[
"MIT"
] | null | null | null |
cdn/__init__.py
|
Kingjmk/mlfaati
|
12c0dcbe0389c2c1da0bde80509fb3374955e293
|
[
"MIT"
] | null | null | null |
"""
CDN App
"""
| 4
| 7
| 0.375
| 2
| 16
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 16
| 4
| 8
| 4
| 0.5
| 0.4375
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
774a3cbe3570598a07718acd612708e7b85dbeed
| 34,273
|
py
|
Python
|
src/cd.py
|
laura-rieger/deep-explanation-penalization
|
ac82aa4717b24e0ccf48ecbbf4c05d7e77a6d88f
|
[
"MIT"
] | 105
|
2019-10-01T19:00:35.000Z
|
2022-03-25T14:03:32.000Z
|
src/cd.py
|
laura-rieger/deep-explanation-penalization
|
ac82aa4717b24e0ccf48ecbbf4c05d7e77a6d88f
|
[
"MIT"
] | 11
|
2020-01-13T15:49:13.000Z
|
2021-12-28T11:36:21.000Z
|
src/cd.py
|
laura-rieger/deep-explanation-penalization
|
ac82aa4717b24e0ccf48ecbbf4c05d7e77a6d88f
|
[
"MIT"
] | 16
|
2019-12-22T20:53:33.000Z
|
2022-03-15T14:17:50.000Z
|
#original from https://github.com/csinva/hierarchical-dnn-interpretations/blob/master/acd/scores/cd.py
import torch
import torch.nn.functional as F
from copy import deepcopy
from torch import sigmoid
from torch import tanh
import numpy as np
stabilizing_constant = 10e-20
def propagate_three(a, b, c, activation):
    """Split activation(a + b + c) into contributions for a, b, and the bias c."""
    act = activation
    rel_part = 0.5 * (act(a + c) - act(c) + act(a + b + c) - act(b + c))
    irrel_part = 0.5 * (act(b + c) - act(c) + act(a + b + c) - act(a + c))
    return rel_part, irrel_part, act(c)
# propagate tanh nonlinearity
def propagate_tanh_two(a, b):
    """Split tanh(a + b) symmetrically into an a-share and a b-share."""
    joint = tanh(a + b)
    a_share = 0.5 * (tanh(a) + (joint - tanh(b)))
    b_share = 0.5 * (tanh(b) + (joint - tanh(a)))
    return a_share, b_share
# propagate convolutional or linear layer
def propagate_conv_linear(relevant, irrelevant, module, device='cuda', stabilizer=10e-20):
    """CD propagation through a convolutional or linear layer.

    The layer's bias is shared between the two streams in proportion to the
    magnitude of their bias-free responses.

    Args:
        relevant: relevant-stream activations.
        irrelevant: irrelevant-stream activations.
        module: the conv/linear layer to propagate through.
        device: device on which the zero probe input is allocated.
        stabilizer: small constant guarding the division against zero
            (defaults to the same value as the module-level
            ``stabilizing_constant``; made a parameter so the split is tunable
            and the function is self-contained).

    Returns:
        (relevant_out, irrelevant_out) after the layer.
    """
    # module(0) is exactly the bias; subtracting it leaves the linear response
    bias = module(torch.zeros(irrelevant.size()).to(device))
    rel = module(relevant) - bias
    irrel = module(irrelevant) - bias
    # elementwise proportional bias split
    prop_rel = torch.abs(rel)
    prop_irrel = torch.abs(irrel)
    prop_sum = prop_rel + prop_irrel + stabilizer
    prop_rel = torch.div(prop_rel, prop_sum)
    prop_irrel = torch.div(prop_irrel, prop_sum)
    return rel + torch.mul(prop_rel, bias), irrel + torch.mul(prop_irrel, bias)
def propagate_AdaptiveAvgPool2d(relevant, irrelevant, module, device='cuda'):
    """Average pooling is linear, so each stream simply passes through the module."""
    return module(relevant), module(irrelevant)
# propagate ReLu nonlinearity
def propagate_relu(relevant, irrelevant, activation, device='cuda'):
    """CD propagation through a ReLU-style nonlinearity.

    The relevant stream keeps activation(relevant); the irrelevant stream gets
    the remainder activation(relevant + irrelevant) - activation(relevant).

    Args:
        relevant: relevant-stream activations.
        irrelevant: irrelevant-stream activations.
        activation: ``F.relu`` or an ``nn.ReLU`` module.
        device: unused; kept for backward compatibility with existing callers.

    Returns:
        (rel_score, irrel_score).
    """
    # nn.ReLU may be inplace, which would corrupt the two separate passes
    # below; temporarily disable it. F.relu has no `.inplace` attribute, so
    # getattr replaces the original bare `except: pass` (which could hide
    # unrelated errors).
    swap_inplace = False
    if getattr(activation, 'inplace', False):
        swap_inplace = True
        activation.inplace = False
    rel_score = activation(relevant)
    irrel_score = activation(relevant + irrelevant) - activation(relevant)
    if swap_inplace:
        activation.inplace = True
    return rel_score, irrel_score
# propagate maxpooling operation
def propagate_pooling(relevant, irrelevant, pooler, model_type='mnist'):
    """CD propagation through max pooling.

    Max-pools the summed streams to find the winning positions, then
    average-pools each masked stream over the same windows (rescaled by the
    window size) so the two streams still sum to the pooled total.

    NOTE(review): only 'mnist' and 'vgg' define ``unpool``/``avg_pooler``;
    any other model_type would raise NameError below -- confirm callers.
    NOTE(review): ``window_size`` is hard-coded to 4, i.e. assumes 2x2
    pooling windows even in the vgg branch -- TODO confirm for other strides.
    """
    if model_type == 'mnist':
        unpool = torch.nn.MaxUnpool2d(kernel_size=2, stride=2)
        avg_pooler = torch.nn.AvgPool2d(kernel_size=2, stride=2)
        window_size = 4
    elif model_type == 'vgg':
        unpool = torch.nn.MaxUnpool2d(kernel_size=pooler.kernel_size, stride=pooler.stride)
        avg_pooler = torch.nn.AvgPool2d(kernel_size=(pooler.kernel_size, pooler.kernel_size),
                                        stride=(pooler.stride, pooler.stride), count_include_pad=False)
        window_size = 4
    # get both indices
    p = deepcopy(pooler)
    p.return_indices = True
    both, both_ind = p(relevant + irrelevant)
    ones_out = torch.ones_like(both)
    size1 = relevant.size()
    # mask marking the positions that won the max over rel + irrel
    mask_both = unpool(ones_out, both_ind, output_size=size1)
    # relevant
    rel = mask_both * relevant
    rel = avg_pooler(rel) * window_size
    # irrelevant
    irrel = mask_both * irrelevant
    irrel = avg_pooler(irrel) * window_size
    return rel, irrel
# propagate dropout operation
def propagate_dropout(relevant, irrelevant, dropout):
    """Apply the same dropout module to both CD streams."""
    rel_out = dropout(relevant)
    irrel_out = dropout(irrelevant)
    return rel_out, irrel_out
# get contextual decomposition scores for blob
def cd(blob, im_torch, model, model_type='mnist', device='cuda'):
    """Contextual decomposition of ``model``'s output with respect to a blob.

    The image is split into a relevant part (pixels selected by ``blob``) and
    an irrelevant part (everything else); both are propagated jointly through
    the network layer by layer.

    Args:
        blob: binary mask (array-like) selecting the relevant pixels.
        im_torch: input image tensor.
        model: network to decompose.
        model_type: 'mnist' uses the fixed hand-written layer sequence below;
            any other value walks ``model.modules()`` generically by type name.
        device: device for the mask and image tensors.

    Returns:
        (relevant, irrelevant) output scores.
    """
    # set up model
    model.eval()
    im_torch = im_torch.to(device)
    # set up blobs
    blob = torch.FloatTensor(blob).to(device)
    relevant = blob * im_torch
    irrelevant = (1 - blob) * im_torch
    if model_type == 'mnist':
        # (fix: removed an unused `scores = []` local that was never appended to)
        mods = list(model.modules())[1:]
        relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
        relevant, irrelevant = propagate_pooling(relevant, irrelevant,
                                                 lambda x: F.max_pool2d(x, 2, return_indices=True), model_type='mnist')
        relevant, irrelevant = propagate_relu(relevant, irrelevant, F.relu)
        relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[1])
        relevant, irrelevant = propagate_pooling(relevant, irrelevant,
                                                 lambda x: F.max_pool2d(x, 2, return_indices=True), model_type='mnist')
        relevant, irrelevant = propagate_relu(relevant, irrelevant, F.relu)
        # flatten for the fully connected head (assumes 800 conv features -- TODO confirm)
        relevant = relevant.view(-1, 800)
        irrelevant = irrelevant.view(-1, 800)
        relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[2])
        relevant, irrelevant = propagate_relu(relevant, irrelevant, F.relu)
        relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[3])
    else:
        mods = list(model.modules())
        for i, mod in enumerate(mods):
            # dispatch on the module's type name
            t = str(type(mod))
            if 'Conv2d' in t or 'Linear' in t:
                if 'Linear' in t:
                    relevant = relevant.view(relevant.size(0), -1)
                    irrelevant = irrelevant.view(irrelevant.size(0), -1)
                relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mod)
            elif 'ReLU' in t:
                relevant, irrelevant = propagate_relu(relevant, irrelevant, mod)
            elif 'MaxPool2d' in t:
                relevant, irrelevant = propagate_pooling(relevant, irrelevant, mod, model_type=model_type)
            elif 'Dropout' in t:
                relevant, irrelevant = propagate_dropout(relevant, irrelevant, mod)
    return relevant, irrelevant
# batch of [start, stop) with unigrams working
def cd_batch_text(batch, model, start, stop, my_device = 0):
    """Contextual decomposition through ``model``'s LSTM for a whole batch.

    Splits each hidden state into a contribution from the token span
    [start, stop] and a contribution from everything else, then projects both
    through the output layer.

    Args:
        batch: torchtext-style batch exposing ``.text`` (token ids;
            assumed shape (seq_len, batch) -- TODO confirm).
        model: LSTM classifier exposing ``lstm``, ``embed``, ``hidden_dim``
            and ``hidden_to_label``.
        start, stop: bounds of the "relevant" token span.
        my_device: torch device for the decomposition buffers.

    Returns:
        (scores, irrel_scores): relevant / irrelevant class scores.
    """
    # rework for
    weights = model.lstm
    # Index one = word vector (i) or hidden state (h), index two = gate
    W_ii, W_if, W_ig, W_io = torch.chunk(weights.weight_ih_l0, 4, 0)
    W_hi, W_hf, W_hg, W_ho = torch.chunk(weights.weight_hh_l0, 4, 0)
    b_i, b_f, b_g, b_o = torch.chunk(weights.bias_ih_l0 + weights.bias_hh_l0, 4)
    word_vecs = torch.transpose(model.embed(batch.text).data, 1,2) #change: we take all
    T = word_vecs.shape[0]
    batch_size = word_vecs.shape[2]
    # rel/irrel decompositions of the hidden state (…_h) and cell state (prev_…)
    relevant_h = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
    irrelevant_h = torch.zeros((model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
    prev_rel = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
    prev_irrel = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
    for i in range(T):
        prev_rel_h = relevant_h
        prev_irrel_h = irrelevant_h
        # hidden-to-hidden gate pre-activations for both streams
        rel_i = torch.matmul(W_hi, prev_rel_h)
        rel_g = torch.matmul(W_hg, prev_rel_h)
        rel_f = torch.matmul(W_hf, prev_rel_h)
        rel_o = torch.matmul(W_ho, prev_rel_h)
        irrel_i = torch.matmul(W_hi, prev_irrel_h)
        irrel_g = torch.matmul(W_hg, prev_irrel_h)
        irrel_f = torch.matmul(W_hf, prev_irrel_h)
        irrel_o = torch.matmul(W_ho, prev_irrel_h)
        # the current word feeds the relevant stream only inside [start, stop]
        if i >= start and i <= stop:
            rel_i = rel_i +torch.matmul(W_ii, word_vecs[i])
            rel_g = rel_g +torch.matmul(W_ig, word_vecs[i])
            rel_f = rel_f +torch.matmul(W_if, word_vecs[i])
            rel_o = rel_o +torch.matmul(W_io, word_vecs[i])
        else:
            irrel_i = irrel_i +torch.matmul(W_ii, word_vecs[i])
            irrel_g = irrel_g +torch.matmul(W_ig, word_vecs[i])
            irrel_f = irrel_f +torch.matmul(W_if, word_vecs[i])
            irrel_o = irrel_o +torch.matmul(W_io, word_vecs[i])
        # split the input and candidate gates into rel/irrel/bias parts
        rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i[:,None], sigmoid)
        rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g[:,None], tanh)
        relevant = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
        irrelevant = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (rel_contrib_i + bias_contrib_i) * irrel_contrib_g
        # NOTE(review): this branch uses i < stop while the gating above uses
        # i <= stop -- looks intentional but worth confirming
        if i >= start and i < stop:
            relevant =relevant + bias_contrib_i * bias_contrib_g
        else:
            irrelevant =irrelevant + bias_contrib_i * bias_contrib_g
        if i > 0:
            # forget gate carries the previous cell-state decomposition forward
            rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f[:,None], sigmoid)
            relevant = relevant +(rel_contrib_f + bias_contrib_f) * prev_rel
            irrelevant = irrelevant+(rel_contrib_f + irrel_contrib_f + bias_contrib_f) * prev_irrel + irrel_contrib_f * prev_rel
        # output gate is applied undecomposed to both streams
        o = sigmoid(torch.matmul(W_io, word_vecs[i]) + torch.matmul(W_ho, prev_rel_h + prev_irrel_h) + b_o[:,None])
        new_rel_h, new_irrel_h = propagate_tanh_two(relevant, irrelevant)
        relevant_h = o * new_rel_h
        irrelevant_h = o * new_irrel_h
        prev_rel = relevant
        prev_irrel = irrelevant
    W_out = model.hidden_to_label.weight
    # Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
    scores = torch.matmul(W_out, relevant_h)
    irrel_scores = torch.matmul(W_out, irrelevant_h)
    #tolerance = 0.001
    #assert torch.sum(torch.abs((model.forward(batch) -model.hidden_to_label.bias.data) - (scores+irrel_scores))).cpu().detach().numpy() < tolerance
    return scores, irrel_scores
def cd_text_irreg_scores(batch_text, model, start, stop, my_device = 0):
    """Batched CD through the LSTM with a per-example (irregular) span.

    Like ``cd_batch_text`` but ``start``/``stop`` may be tensors giving a
    different [start, stop] window per batch element, so span membership is
    applied with 0/1 masks instead of a Python branch.

    Args:
        batch_text: token-id tensor fed to ``model.embed``
            (assumed shape (seq_len, batch) -- TODO confirm against callers).
        model: LSTM classifier exposing ``lstm``, ``embed``, ``hidden_dim``
            and ``hidden_to_label``.
        start, stop: per-example span bounds (broadcastable against i).
        my_device: torch device for the decomposition buffers.

    Returns:
        (scores, irrel_scores): relevant / irrelevant class scores.
    """
    weights = model.lstm
    # Index one = word vector (i) or hidden state (h), index two = gate
    W_ii, W_if, W_ig, W_io = torch.chunk(weights.weight_ih_l0, 4, 0)
    W_hi, W_hf, W_hg, W_ho = torch.chunk(weights.weight_hh_l0, 4, 0)
    b_i, b_f, b_g, b_o = torch.chunk(weights.bias_ih_l0 + weights.bias_hh_l0, 4)
    word_vecs = torch.transpose(model.embed(batch_text).data, 1,2) #change: we take all
    T = word_vecs.shape[0]
    batch_size = word_vecs.shape[2]
    relevant_h = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
    irrelevant_h = torch.zeros((model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
    prev_rel = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
    prev_irrel = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
    for i in range(T):
        prev_rel_h = relevant_h
        prev_irrel_h = irrelevant_h
        # hidden-to-hidden gate pre-activations for both streams
        rel_i = torch.matmul(W_hi, prev_rel_h)
        rel_g = torch.matmul(W_hg, prev_rel_h)
        rel_f = torch.matmul(W_hf, prev_rel_h)
        rel_o = torch.matmul(W_ho, prev_rel_h)
        irrel_i = torch.matmul(W_hi, prev_irrel_h)
        irrel_g = torch.matmul(W_hg, prev_irrel_h)
        irrel_f = torch.matmul(W_hf, prev_irrel_h)
        irrel_o = torch.matmul(W_ho, prev_irrel_h)
        w_ii_contrib = torch.matmul(W_ii, word_vecs[i])
        w_ig_contrib = torch.matmul(W_ig, word_vecs[i])
        w_if_contrib = torch.matmul(W_if, word_vecs[i])
        w_io_contrib = torch.matmul(W_io, word_vecs[i])
        # 0/1 mask per example: is timestep i inside [start, stop]?
        is_in_relevant = ((start <= i) * (i <= stop)).cuda().float()
        is_not_in_relevant = 1 - is_in_relevant
        rel_i = rel_i + is_in_relevant * w_ii_contrib
        rel_g = rel_g + is_in_relevant * w_ig_contrib
        rel_f = rel_f + is_in_relevant * w_if_contrib
        rel_o = rel_o + is_in_relevant * w_io_contrib
        irrel_i = irrel_i + is_not_in_relevant * w_ii_contrib
        irrel_g = irrel_g + is_not_in_relevant * w_ig_contrib
        irrel_f = irrel_f + is_not_in_relevant * w_if_contrib
        irrel_o = irrel_o + is_not_in_relevant * w_io_contrib
        # split the input and candidate gates into rel/irrel/bias parts
        rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i[:,None], sigmoid)
        rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g[:,None], tanh)
        relevant = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
        irrelevant = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (rel_contrib_i + bias_contrib_i) * irrel_contrib_g
        bias_contrib =bias_contrib_i * bias_contrib_g
        # NOTE(review): the bias mask uses i < stop while the gate mask above
        # uses i <= stop -- mirrors cd_batch_text, but worth confirming
        is_in_relevant_bias = ((start <= i) * (i < stop)).cuda().float()
        is_not_in_relevant_bias = 1- is_in_relevant_bias
        relevant =relevant + is_in_relevant_bias*bias_contrib
        irrelevant =irrelevant + is_not_in_relevant_bias*bias_contrib
        if i > 0:
            # forget gate carries the previous cell-state decomposition forward
            rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f[:,None], sigmoid)
            relevant = relevant +(rel_contrib_f + bias_contrib_f) * prev_rel
            irrelevant = irrelevant+(rel_contrib_f + irrel_contrib_f + bias_contrib_f) * prev_irrel + irrel_contrib_f * prev_rel
        # output gate is applied undecomposed to both streams
        o = sigmoid(torch.matmul(W_io, word_vecs[i]) + torch.matmul(W_ho, prev_rel_h + prev_irrel_h) + b_o[:,None])
        new_rel_h, new_irrel_h = propagate_tanh_two(relevant, irrelevant)
        relevant_h = o * new_rel_h
        irrelevant_h = o * new_irrel_h
        prev_rel = relevant
        prev_irrel = irrelevant
    W_out = model.hidden_to_label.weight
    # Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
    scores = torch.matmul(W_out, relevant_h)
    irrel_scores = torch.matmul(W_out, irrelevant_h)
    return scores, irrel_scores
def cd_text(batch, model, start, stop, batch_id = 0,my_device = 0):
    """Single-example CD through the LSTM, keeping per-timestep decompositions.

    Decomposes the hidden/cell state at every timestep for batch element
    ``batch_id`` and returns only the relevant class scores. An assert checks
    that rel + irrel reproduces the model's output (minus the final bias)
    within a tolerance.
    """
    # rework for
    weights = model.lstm.state_dict()
    # Index one = word vector (i) or hidden state (h), index two = gate
    W_ii, W_if, W_ig, W_io = torch.chunk(weights['weight_ih_l0'], 4, 0)
    W_hi, W_hf, W_hg, W_ho = torch.chunk(weights['weight_hh_l0'], 4, 0)
    b_i, b_f, b_g, b_o = torch.chunk(weights['bias_ih_l0'] + weights['bias_hh_l0'], 4)
    word_vecs = model.embed(batch.text)[:, batch_id].data
    T = word_vecs.shape[0]
    # per-timestep rel/irrel decompositions of the cell state and hidden state
    relevant = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
    irrelevant = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
    relevant_h = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
    irrelevant_h = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
    for i in range(T):
        if i > 0:
            prev_rel_h = relevant_h[i - 1]
            prev_irrel_h = irrelevant_h[i - 1]
        else:
            prev_rel_h = torch.zeros(model.hidden_dim, device =torch.device(my_device))
            prev_irrel_h = torch.zeros(model.hidden_dim, device =torch.device(my_device))
        # hidden-to-hidden gate pre-activations for both streams
        rel_i = torch.matmul(W_hi, prev_rel_h)
        rel_g = torch.matmul(W_hg, prev_rel_h)
        rel_f = torch.matmul(W_hf, prev_rel_h)
        rel_o = torch.matmul(W_ho, prev_rel_h)
        irrel_i = torch.matmul(W_hi, prev_irrel_h)
        irrel_g = torch.matmul(W_hg, prev_irrel_h)
        irrel_f = torch.matmul(W_hf, prev_irrel_h)
        irrel_o = torch.matmul(W_ho, prev_irrel_h)
        # the word at position i feeds whichever stream the span dictates
        if start <= i <= stop:
            rel_i = rel_i + torch.matmul(W_ii, word_vecs[i])
            rel_g = rel_g + torch.matmul(W_ig, word_vecs[i])
            rel_f = rel_f + torch.matmul(W_if, word_vecs[i])
            rel_o = rel_o + torch.matmul(W_io, word_vecs[i])
        else:
            irrel_i = irrel_i + torch.matmul(W_ii, word_vecs[i])
            irrel_g = irrel_g + torch.matmul(W_ig, word_vecs[i])
            irrel_f = irrel_f + torch.matmul(W_if, word_vecs[i])
            irrel_o = irrel_o + torch.matmul(W_io, word_vecs[i])
        # split the input and candidate gates into rel/irrel/bias parts
        rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i, sigmoid)
        rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g, tanh)
        relevant[i] = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
        irrelevant[i] = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (
                rel_contrib_i + bias_contrib_i) * irrel_contrib_g
        if start <= i <= stop:
            relevant[i] += bias_contrib_i * bias_contrib_g
        else:
            irrelevant[i] += bias_contrib_i * bias_contrib_g
        if i > 0:
            # forget gate carries the previous cell-state decomposition forward
            rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f, sigmoid)
            relevant[i] += (rel_contrib_f + bias_contrib_f) * relevant[i - 1]
            irrelevant[i] += (rel_contrib_f + irrel_contrib_f + bias_contrib_f) * irrelevant[i - 1] + irrel_contrib_f * \
                             relevant[i - 1]
        # output gate is applied undecomposed to both streams
        o = sigmoid(torch.matmul(W_io, word_vecs[i]) + torch.matmul(W_ho, prev_rel_h + prev_irrel_h) + b_o)
        #rel_contrib_o, irrel_contrib_o, bias_contrib_o = propagate_three(rel_o, irrel_o, b_o, sigmoid)
        new_rel_h, new_irrel_h = propagate_tanh_two(relevant[i], irrelevant[i])
        relevant_h[i] = o * new_rel_h
        irrelevant_h[i] = o * new_irrel_h
    W_out = model.hidden_to_label.weight.data
    # Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
    scores = torch.matmul(W_out, relevant_h[T - 1])
    irrel_scores = torch.matmul(W_out, irrelevant_h[T - 1])
    tolerance = 0.001
    assert torch.sum(torch.abs((model.forward(batch) -model.hidden_to_label.bias.data) - (scores+irrel_scores))).cpu().detach().numpy() < tolerance
    return scores
def softmax_out(output):
    """Row-wise softmax over the (relevant, irrelevant) score pair."""
    stacked = torch.stack((output[0].reshape(-1), output[1].reshape(-1)), 1)
    return torch.nn.functional.softmax(stacked, dim = 1)
def is_in_relevant_toy(batch, start, stop, class_rules):
    """Per example, count rule digits inside [start, stop) vs. elsewhere.

    A position "matches" when it holds the rule digit belonging to the
    example's label (class_rules[0] for label 0, class_rules[1] for label 1).
    Returns a (batch, 2) tensor of [inside_count, outside_count].
    """
    matches_cls0 = (batch.label == 0)[None, :] * (batch.text == class_rules[0])
    matches_cls1 = (batch.label == 1)[None, :] * (batch.text == class_rules[1])
    rel_digits = matches_cls0 + matches_cls1
    inside = rel_digits[start:stop].sum(dim=0)
    outside = rel_digits.sum(dim=0) - inside
    return torch.cat((inside[:, None], outside[:, None]), 1)
def cd_penalty_for_one_toy(batch, model1, start, stop,class_rules):
    """Cross-entropy between the CD relevance softmax and the toy ground truth."""
    rel_scores, irrel_scores = cd_batch_text(batch, model1, start, stop)
    # restrict to each example's correct class
    correct_idx = (batch.label, torch.arange(batch.label.shape[0]))
    cd_softmax = softmax_out((rel_scores[correct_idx], irrel_scores[correct_idx]))
    target = is_in_relevant_toy(batch, start, stop, class_rules).cuda().float()
    return -(torch.log(cd_softmax) * target).mean()
def is_in_relevant_decoy(batch, start, stop, class_rules):
    """Count decoy digits (the label's rule digit) inside [start, stop) per example."""
    window = batch.text[start:stop]
    decoy_cls0 = (batch.label == 0) * (window == class_rules[0])
    decoy_cls1 = (batch.label == 1) * (window == class_rules[1])
    return (decoy_cls0 + decoy_cls1).sum(dim=0)
def cd_penalty_for_one_decoy(batch, model1, start, stop,class_rules):
    """Penalize CD relevance assigned to decoy digits inside [start, stop)."""
    rel_scores, irrel_scores = cd_batch_text(batch, model1, start, stop)
    correct_idx = (batch.label, torch.arange(batch.label.shape[0]))  # only use the correct class
    cd_softmax = softmax_out((rel_scores[correct_idx], irrel_scores[correct_idx]))
    decoy_mask = is_in_relevant_decoy(batch, start, stop, class_rules).cuda()
    if not decoy_mask.byte().any():
        # nothing to penalize in this batch
        return torch.zeros(1).cuda()
    penalized = cd_softmax[:, 1].masked_select(decoy_mask.byte())
    return -(torch.log(penalized)).mean()
def cd_penalty_annotated(batch, model1, start, stop, scores):
    """CD penalty restricted to examples that carry a human annotation.

    Examples with start == -1 have no annotation and are skipped; for the
    rest, the CD relevant-score softmax of the correct class is weighted by
    the per-example agreement ``scores``.
    """
    # get index where annotation present:
    idx_nonzero = (start != -1).nonzero()[:,0] # find the ones where annotation exists
    model_output = cd_text_irreg_scores(batch.text[:, idx_nonzero], model1, start[ idx_nonzero], stop[idx_nonzero])[0] #get the output and focus on relevant scores for class 0 vs 1
    model_softmax = torch.nn.functional.softmax(model_output, dim =0)[batch.label[idx_nonzero],np.arange(len(idx_nonzero))] #take softmax of class 0 vs 1 and take the correct digit
    output = -(torch.log(model_softmax)*scores[ idx_nonzero].float()).mean() #-(torch.log(1-model_softmax)*(1- scores[ idx_nonzero]).float() ).mean() #if it agrees, maximize - if it dis, min
    return output
# def cd_penalty_annotated(batch, model1, start, stop, scores):
# # get index where annotation present:
# idx_nonzero = (start != -1).nonzero()[:,0]
# model_output = cd_text_irreg_scores(batch.text[:, idx_nonzero], model1, start[ idx_nonzero], stop[idx_nonzero])[0]
# correct_idx = (batch.label[ idx_nonzero], torch.arange(batch.label[ idx_nonzero].shape[0]) )
# model_softmax = torch.nn.functional.softmax(model_output, dim =0)[correct_idx]
# output = -(torch.log(model_softmax)*scores[ idx_nonzero].float()).mean() -(torch.log(model_softmax)*(1- scores[ idx_nonzero]).float() ).mean() #next thing to try
# print(output, torch.log(model_softmax).mean())
# return output
# def cd_penalty_annotated(batch, model1, start, stop, agrees):
# model1_output = cd_text_irreg_scores(batch.text, model1, start, stop)
# correct_idx = (batch.label, torch.arange(batch.label.shape[0])) # only use the correct class
# model1_softmax = softmax_out((model1_output[0][0],model1_output[0][1]))[correct_idx]
# output = -(torch.log(model1_softmax) * agrees.float()).mean() #+ (torch.log(model1_softmax) * (1-agrees).float()).mean()
# return output
def cd_penalty_for_one_decoy_all(batch, model1, start, stop):
    """CD penalty over every example that carries an annotation (start != -1).

    Penalizes the CD softmax mass that the correct class places on the
    irrelevant column for annotated examples; returns a zero tensor when no
    example is annotated.
    """
    mask_exists = (start != -1).byte().cuda()
    if mask_exists.any():
        model1_output = cd_text_irreg_scores(batch.text, model1, start, stop)
        correct_idx = (batch.label, torch.arange(batch.label.shape[0]))  # only use the correct class
        # (fix: removed an unused `wrong_idx` local that was computed but never read)
        model1_softmax = softmax_out((model1_output[0][correct_idx], model1_output[1][correct_idx]))
        output = (torch.log(model1_softmax[:, 1])).masked_select(mask_exists)
        return -output.mean()
    else:
        return torch.zeros(1).cuda()
def cd_penalty(batch, model1, model2, start, stop):
    """Symmetrized KL-style divergence between the CD softmaxes of two models."""
    out1 = cd_batch_text(batch, model1, start, stop)
    out2 = cd_batch_text(batch, model2, start, stop)
    sm1 = softmax_out(out1)
    sm2 = softmax_out(out2)
    diff = sm1 - sm2
    log_ratio = torch.log(sm1) - torch.log(sm2)
    return (diff * log_ratio).sum(dim=1).reshape((2, -1)).sum(dim=0)
# this implementation of cd is very long so that we can view CD at intermediate layers
# in reality, this should be a loop which uses the above functions
def cd_vgg_features(blob,im_torch, model, model_type='vgg'):
    """CD through the VGG feature extractor, written out layer by layer.

    The hard-coded indices into ``mods`` follow the layer sequence listed in
    the inline comments (torchvision VGG16-style features -- TODO confirm the
    exact model this was written against). Returns the flattened
    (relevant, irrelevant) feature maps ready for ``cd_vgg_classifier``.
    """
    # set up model
    model.eval()
    # set up blobs
    blob = torch.cuda.FloatTensor(blob)
    relevant = blob * im_torch
    irrelevant = (1 - blob) * im_torch
    mods = list(model.modules())[2:]
    # (0): Conv2d (3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (1): ReLU(inplace)
    # (2): Conv2d (64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (3): ReLU(inplace)
    # (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[1])
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[2])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[3])
    relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[4], model_type=model_type)
    # (5): Conv2d (64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (6): ReLU(inplace)
    # (7): Conv2d (128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (8): ReLU(inplace)
    # (9): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[5])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[6])
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[7])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[8])
    relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[9], model_type=model_type)
    # (10): Conv2d (128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (11): ReLU(inplace)
    # (12): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (13): ReLU(inplace)
    # (14): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (15): ReLU(inplace)
    # (16): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[10])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[11])
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[12])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[13])
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[14])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[15])
    relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[16], model_type=model_type)
    # fourth conv block (512 channels)
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[17])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[18])
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[19])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[20])
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[21])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[22])
    relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[23], model_type=model_type)
    # scores.append((relevant.clone(), irrelevant.clone()))
    # (24): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (25): ReLU(inplace)
    # (26): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (27): ReLU(inplace)
    # (28): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (29): ReLU(inplace)
    # (30): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[24])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[25])
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[26])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[27])
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[28])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[29])
    relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[30], model_type=model_type)
    relevant, irrelevant = propagate_AdaptiveAvgPool2d(relevant, irrelevant, mods[31])
    # scores.append((relevant.clone(), irrelevant.clone()))
    # return relevant, irrelevant
    # flatten for the classifier head
    relevant = relevant.view(relevant.size(0), -1)
    irrelevant = irrelevant.view(irrelevant.size(0), -1)
    return relevant, irrelevant
def cd_vgg_classifier(relevant, irrelevant, im_torch, model, model_type='vgg'):
    """CD through the VGG classifier head (fc-relu-dropout twice, then final fc).

    NOTE(review): ``im_torch`` and ``model_type`` are accepted but never used
    here -- kept for signature symmetry with the other cd_* helpers, confirm.
    """
    # set up model
    model.eval()
    mods = list(model.modules())[1:]
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
    # print(relevant.shape)
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[1])
    relevant, irrelevant = propagate_dropout(relevant, irrelevant, mods[2])
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[3])
    relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[4])
    relevant, irrelevant = propagate_dropout(relevant, irrelevant, mods[5])
    relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[6])
    # only interested in not cancer, which is class 0
    #model.train()
    return relevant, irrelevant
def cd_track_vgg(blob, im_torch, model, model_type='vgg'):
    """Contextual-decomposition pass through a full VGG16 network.

    Splits the input image into a relevant part (inside ``blob``) and an
    irrelevant part (outside ``blob``) and propagates both halves through
    every feature, pooling and classifier layer of the model.

    Parameters:
    - blob -- binary mask (array-like) selecting the relevant image region
    - im_torch -- input image tensor; its device decides where the mask lives
    - model -- torchvision-style VGG16 module (layer indices below assume
      that exact layout)
    - model_type -- forwarded to ``propagate_pooling``

    Returns the final ``(relevant, irrelevant)`` classifier score tensors.
    """
    model.eval()
    # Build the mask on the same device as the image instead of hard-coding
    # CUDA: the original `torch.cuda.FloatTensor(blob)` crashed on CPU-only
    # machines. Same float32 dtype as before.
    blob = torch.as_tensor(blob, dtype=torch.float32, device=im_torch.device)
    relevant = blob * im_torch
    irrelevant = (1 - blob) * im_torch
    # Skip the first two entries (the VGG module itself and its `features`
    # Sequential wrapper) so mods[0] is the first Conv2d.
    mods = list(model.modules())[2:]
    # VGG16 feature extractor: five stages of (conv, relu) pairs followed by
    # a max-pool -- 2, 2, 3, 3, 3 convs per stage -- occupying mods[0:31]
    # consecutively (matches the hard-coded indices of the original code).
    layer_ops = []
    for stage_convs in (2, 2, 3, 3, 3):
        layer_ops += ['conv', 'relu'] * stage_convs + ['pool']
    for idx, op in enumerate(layer_ops):
        if op == 'conv':
            relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[idx])
        elif op == 'relu':
            relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[idx])
        else:  # 'pool'
            relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[idx],
                                                     model_type=model_type)
    relevant, irrelevant = propagate_AdaptiveAvgPool2d(relevant, irrelevant, mods[31])
    # Flatten before the fully connected classifier.
    relevant = relevant.view(relevant.size(0), -1)
    irrelevant = irrelevant.view(irrelevant.size(0), -1)
    # mods[32] is the classifier Sequential wrapper itself; its layers start
    # at mods[33]: (Linear, ReLU, Dropout) twice, then the final Linear.
    classifier_plan = (
        (propagate_conv_linear, 33),
        (propagate_relu, 34),
        (propagate_dropout, 35),
        (propagate_conv_linear, 36),
        (propagate_relu, 37),
        (propagate_dropout, 38),
        (propagate_conv_linear, 39),
    )
    for prop_fn, idx in classifier_plan:
        relevant, irrelevant = prop_fn(relevant, irrelevant, mods[idx])
    return relevant, irrelevant
| 49.599132
| 190
| 0.664663
| 4,745
| 34,273
| 4.547313
| 0.069547
| 0.165176
| 0.113871
| 0.047551
| 0.80901
| 0.768179
| 0.730639
| 0.705149
| 0.673171
| 0.651203
| 0
| 0.029009
| 0.211449
| 34,273
| 690
| 191
| 49.671014
| 0.76937
| 0.193213
| 0
| 0.55711
| 0
| 0
| 0.005088
| 0
| 0
| 0
| 0
| 0
| 0.002331
| 1
| 0.051282
| false
| 0.002331
| 0.013986
| 0.006993
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
775334a35368377b6411b76e0cda684325c797b3
| 119
|
py
|
Python
|
Python/ComplexPaths02/src/main/MainModule01.py
|
tduoth/JsObjects
|
eb3e2a8b1f47d0da53c8b1a85a7949269711932f
|
[
"MIT"
] | 22
|
2015-02-26T09:07:18.000Z
|
2020-05-10T16:22:05.000Z
|
Python/ComplexPaths02/src/main/MainModule01.py
|
tduoth/JsObjects
|
eb3e2a8b1f47d0da53c8b1a85a7949269711932f
|
[
"MIT"
] | 123
|
2016-04-05T18:32:41.000Z
|
2022-03-13T21:09:21.000Z
|
Python/ComplexPaths02/src/main/MainModule01.py
|
tduoth/JsObjects
|
eb3e2a8b1f47d0da53c8b1a85a7949269711932f
|
[
"MIT"
] | 56
|
2015-03-19T22:26:37.000Z
|
2021-12-06T02:52:02.000Z
|
'''
Created on May 26, 2012
@author: Charlie
'''
class MainModule01(object):
    """Minimal placeholder class; constructing it performs no work."""

    def __init__(self):
        # Nothing to initialize.
        pass
| 13.222222
| 27
| 0.613445
| 14
| 119
| 4.928571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.260504
| 119
| 9
| 28
| 13.222222
| 0.693182
| 0.344538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
91f3e934e2bf21d69c8e84878b0f0bb1bc0e52af
| 104
|
py
|
Python
|
Courses/HSEPython/8 week/5.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
Courses/HSEPython/8 week/5.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
Courses/HSEPython/8 week/5.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
from functools import reduce


def product_of_fifth_powers(numbers):
    """Return the product of x**5 over *numbers* (1 for empty input)."""
    return reduce(lambda acc, x: acc * x ** 5, numbers, 1)


if __name__ == "__main__":
    # Read whitespace-separated integers from stdin; guard the I/O so the
    # module can be imported without blocking on input().
    print(product_of_fifth_powers(map(int, input().split())))
| 34.666667
| 74
| 0.653846
| 18
| 104
| 3.777778
| 0.833333
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0.125
| 104
| 2
| 75
| 52
| 0.725275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
620552e0f37628fdaf507905b2e507f52f6149a8
| 158
|
py
|
Python
|
pyblaze/nn/data/__init__.py
|
Greenroom-Robotics/pyblaze
|
e45e27fbd400b6ae2365ad2347165c7b5154ac51
|
[
"MIT"
] | 20
|
2020-03-29T08:43:15.000Z
|
2021-12-17T21:38:17.000Z
|
pyblaze/nn/data/__init__.py
|
borchero/bxtorch
|
8d01568c8ee9fc05f5b3c84ca3ec68ea74eef9eb
|
[
"MIT"
] | 4
|
2020-10-27T20:43:40.000Z
|
2021-04-29T12:19:39.000Z
|
pyblaze/nn/data/__init__.py
|
borchero/bxtorch
|
8d01568c8ee9fc05f5b3c84ca3ec68ea74eef9eb
|
[
"MIT"
] | 2
|
2020-08-16T18:10:49.000Z
|
2021-03-31T23:17:28.000Z
|
import pyblaze.nn.data.extensions
from .noise import NoiseDataset, LabeledNoiseDataset
from .zip import ZipDataLoader
from .transform import TransformDataset
| 31.6
| 52
| 0.860759
| 18
| 158
| 7.555556
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094937
| 158
| 4
| 53
| 39.5
| 0.951049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
620eed4cbd2619972703ee779c16c8a7ab6c7ba9
| 54
|
py
|
Python
|
src/apps/startposes/models/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 27
|
2020-05-03T11:01:27.000Z
|
2022-03-17T05:33:10.000Z
|
src/apps/startposes/models/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 54
|
2020-05-09T01:18:41.000Z
|
2022-01-22T10:31:15.000Z
|
src/apps/startposes/models/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 9
|
2020-09-29T11:31:32.000Z
|
2022-03-09T01:37:50.000Z
|
from .startpos import StartPos, StartPosCumWeightOnly
| 27
| 53
| 0.87037
| 5
| 54
| 9.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 54
| 1
| 54
| 54
| 0.959184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6217c3865432b1a663db3913c183c3b2bdd9e8cf
| 53
|
py
|
Python
|
src/algorithm/__init__.py
|
ShogoAkiyama/metaworld.pytorch
|
6b08163d2c0d73b6d1d9b4b513d18f0a308e92c4
|
[
"MIT"
] | null | null | null |
src/algorithm/__init__.py
|
ShogoAkiyama/metaworld.pytorch
|
6b08163d2c0d73b6d1d9b4b513d18f0a308e92c4
|
[
"MIT"
] | null | null | null |
src/algorithm/__init__.py
|
ShogoAkiyama/metaworld.pytorch
|
6b08163d2c0d73b6d1d9b4b513d18f0a308e92c4
|
[
"MIT"
] | null | null | null |
from .sac import SAC
from .eval import EvalAlgorithm
| 17.666667
| 31
| 0.811321
| 8
| 53
| 5.375
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150943
| 53
| 2
| 32
| 26.5
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6242dfa1c761870f2a85f43957247c13b7b53277
| 173
|
py
|
Python
|
cosypose/simulator/__init__.py
|
ompugao/cosypose
|
4e471c16f19d5ee632668cd52eaa57b562f287d6
|
[
"MIT"
] | 202
|
2020-08-19T19:28:03.000Z
|
2022-03-29T07:10:47.000Z
|
cosypose/simulator/__init__.py
|
ompugao/cosypose
|
4e471c16f19d5ee632668cd52eaa57b562f287d6
|
[
"MIT"
] | 66
|
2020-08-24T09:28:05.000Z
|
2022-03-31T07:11:06.000Z
|
cosypose/simulator/__init__.py
|
ompugao/cosypose
|
4e471c16f19d5ee632668cd52eaa57b562f287d6
|
[
"MIT"
] | 66
|
2020-08-19T19:28:05.000Z
|
2022-03-18T20:47:55.000Z
|
from .body import Body
from .camera import Camera
from .base_scene import BaseScene
from .caching import BodyCache, TextureCache
from .textures import apply_random_textures
| 28.833333
| 44
| 0.843931
| 24
| 173
| 5.958333
| 0.541667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121387
| 173
| 5
| 45
| 34.6
| 0.940789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6269ffcac7da3b6435494d0d70dbe0aa69f6f55f
| 324
|
py
|
Python
|
conjur_api/__init__.py
|
cyberark/conjur-api-python
|
7dd1819bf68042620a06f38e395c3eb2989202a9
|
[
"Apache-2.0"
] | 1
|
2022-03-09T18:25:29.000Z
|
2022-03-09T18:25:29.000Z
|
conjur_api/__init__.py
|
cyberark/conjur-api-python
|
7dd1819bf68042620a06f38e395c3eb2989202a9
|
[
"Apache-2.0"
] | null | null | null |
conjur_api/__init__.py
|
cyberark/conjur-api-python
|
7dd1819bf68042620a06f38e395c3eb2989202a9
|
[
"Apache-2.0"
] | null | null | null |
"""
conjur_api
Package containing classes that are responsible for communicating with the Conjur server
"""
__version__ = "0.0.5"
from conjur_api.client import Client
from conjur_api.interface import CredentialsProviderInterface
from conjur_api import models
from conjur_api import errors
from conjur_api import providers
| 24.923077
| 88
| 0.83642
| 45
| 324
| 5.8
| 0.533333
| 0.206897
| 0.249042
| 0.218391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010563
| 0.123457
| 324
| 12
| 89
| 27
| 0.908451
| 0.308642
| 0
| 0
| 0
| 0
| 0.023148
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
62898b405054c48d9122f893fe9282577a49a61e
| 55
|
py
|
Python
|
enthought/traits/ui/wx/button_editor.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/traits/ui/wx/button_editor.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/traits/ui/wx/button_editor.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from traitsui.wx.button_editor import *
| 18.333333
| 39
| 0.8
| 8
| 55
| 5.375
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 55
| 2
| 40
| 27.5
| 0.895833
| 0.218182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
656a96b402f3415f23db4722e5168fd52c75cff5
| 832
|
py
|
Python
|
preml/showtime/showie.py
|
5amron/pre-ml
|
3dff146d89468f4db0b7a9d92f3b0a26854efaf8
|
[
"MIT"
] | 3
|
2017-09-03T17:55:54.000Z
|
2018-11-24T13:11:19.000Z
|
preml/showtime/showie.py
|
5amron/pre-ml
|
3dff146d89468f4db0b7a9d92f3b0a26854efaf8
|
[
"MIT"
] | 2
|
2021-12-08T14:51:24.000Z
|
2021-12-09T15:42:09.000Z
|
preml/showtime/showie.py
|
5amron/pre-ml
|
3dff146d89468f4db0b7a9d92f3b0a26854efaf8
|
[
"MIT"
] | 2
|
2019-07-16T01:28:48.000Z
|
2020-04-12T21:23:08.000Z
|
from . import baco_show
# solution === (new_dataset, best_ant_road, acc_before_run, best_fit_so_far, total_feature_num, best_selected_features_num, best_fitnesses_each_iter, average_fitnesses_each_iter ,num_of_features_selected_by_best_ant_each_iter, time_temp, sample_num)
def draw_baco(solution):
    """Plot the results of a BACO run.

    ``solution`` must be the 11-field tuple produced by a run:
    (new_dataset, best_ant_road, acc_before_run, best_fit_so_far,
     total_feature_num, best_selected_features_num,
     best_fitnesses_each_iter, average_fitnesses_each_iter,
     num_of_features_selected_by_best_ant_each_iter, time_temp, sample_num).
    Anything else is reported and ignored.
    """
    if len(solution) != 11:
        print("+++ can't draw the solution due to problem with it! +++")
        return
    # Pull out only the fields the plotting helper needs.
    total_features = solution[4]
    best_fits = solution[6]
    avg_fits = solution[7]
    feats_per_iter = solution[8]
    baco_show.show_res_for_this_run(best_fits, avg_fits, feats_per_iter, total_features)
| 48.941176
| 249
| 0.824519
| 133
| 832
| 4.548872
| 0.360902
| 0.119008
| 0.168595
| 0.104132
| 0.742149
| 0.742149
| 0.742149
| 0.742149
| 0.742149
| 0.742149
| 0
| 0.002688
| 0.105769
| 832
| 16
| 250
| 52
| 0.810484
| 0.296875
| 0
| 0
| 0
| 0
| 0.09434
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.428571
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
65c19e6d0f4a645a3e85871f601e50a70618990c
| 215
|
py
|
Python
|
component/model/dmp_model.py
|
12rambau/damage_proxy_maps
|
98a004bf4420c6ce1b7ecd77e426e8fe7d512f52
|
[
"MIT"
] | 1
|
2021-09-01T18:27:19.000Z
|
2021-09-01T18:27:19.000Z
|
component/model/dmp_model.py
|
12rambau/damage_proxy_maps
|
98a004bf4420c6ce1b7ecd77e426e8fe7d512f52
|
[
"MIT"
] | 3
|
2021-06-01T10:15:36.000Z
|
2021-10-07T10:00:16.000Z
|
component/model/dmp_model.py
|
12rambau/damage_proxy_maps
|
98a004bf4420c6ce1b7ecd77e426e8fe7d512f52
|
[
"MIT"
] | 2
|
2021-06-01T10:16:03.000Z
|
2021-06-10T12:43:47.000Z
|
from sepal_ui import model
from traitlets import Any
class DmpModel(model.Model):
    """Traitlets-backed model holding the user inputs of the DMP workflow."""

    # inputs
    # Untyped traits defaulting to None; tag(sync=True) marks them for
    # synchronization (presumably with sepal_ui widgets bound to this
    # model -- TODO confirm against the callers).
    event = Any(None).tag(sync=True)
    username = Any(None).tag(sync=True)
    password = Any(None).tag(sync=True)
| 19.545455
| 39
| 0.693023
| 32
| 215
| 4.625
| 0.53125
| 0.141892
| 0.202703
| 0.283784
| 0.364865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 215
| 10
| 40
| 21.5
| 0.845714
| 0.027907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
65e2a0c64857964a543fdd7ce72cd8eee8d2cbac
| 165
|
py
|
Python
|
farms2face/subscriptions/views.py
|
dev1farms2face/f2f
|
54e58187a68574bf2bd0dfb7e58a2b416336106a
|
[
"MIT"
] | null | null | null |
farms2face/subscriptions/views.py
|
dev1farms2face/f2f
|
54e58187a68574bf2bd0dfb7e58a2b416336106a
|
[
"MIT"
] | null | null | null |
farms2face/subscriptions/views.py
|
dev1farms2face/f2f
|
54e58187a68574bf2bd0dfb7e58a2b416336106a
|
[
"MIT"
] | 2
|
2018-06-19T12:12:08.000Z
|
2018-06-25T18:45:36.000Z
|
from django.shortcuts import render
# Create your views here.
def subscribe(request):
    """Render the subscription page with an (currently empty) data context."""
    context = {'data': {}}
    return render(request, "subscribe.html", context)
| 20.625
| 44
| 0.636364
| 18
| 165
| 5.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242424
| 165
| 7
| 45
| 23.571429
| 0.84
| 0.139394
| 0
| 0
| 0
| 0
| 0.128571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
02b1a17a92fa82b80990e63f3f0d4e50c1738b1c
| 5,106
|
py
|
Python
|
takedown/__init__.py
|
zsxing99/Takedown-script
|
fcd0533ab71a1198651a6e53cd1d58039d4fa7fd
|
[
"MIT"
] | 1
|
2021-01-06T00:23:03.000Z
|
2021-01-06T00:23:03.000Z
|
takedown/__init__.py
|
zsxing99/Takedown-script
|
fcd0533ab71a1198651a6e53cd1d58039d4fa7fd
|
[
"MIT"
] | 4
|
2020-11-09T06:01:25.000Z
|
2020-12-17T06:39:30.000Z
|
takedown/__init__.py
|
zsxing99/Takedown-script
|
fcd0533ab71a1198651a6e53cd1d58039d4fa7fd
|
[
"MIT"
] | null | null | null |
"""
TakeDown v0.0.1
===============
author: Zesheng Xing
email: [email protected]
This Python project is to help people search on some client hosting contents that potential violate the their copyright.
"""
VERSION = "0.1.0"
DESCRIPTION = "A python script that allows users to search potential copyright violated information on GitHub and " \
"send emails taking down those."
CONTRIBUTORS_INFO = "The project is developed by Zesheng Xing and supervised by Joël Porquet-Lupine at UC Davis, 2020."
USAGE = \
"""
Usage: takedown.py command [args...]
where commands include:
find search repositories
python takedown.py find [search_query] [GitHub_token] [-options]
with following args:
[search_query]: required. The text used to search.
[Github_token]: required. The Github token used to raise the rate limit and enable broader search.
[-t target]: optional. The target of the search query. It could be “repo”, “code”. It is “code” by default.
Concatenate them by “+”, eg. “-t code+repo”.
[-i input]: optional. The file path of previous output of takedown find. By providing this path, the output
this time will be compared against the previous one.
[-o output]: optional. The output file path. The result will be printed to the console by default.
[-f format]: optional. The output format. It could be “yaml” or “json”. It is “yaml” by default
or using a configuration file:
python takedown.py find -c <path_to_config_file>
config file args:
required args:
[search_query]: required. The text used to search.
[Github_token]: required. The Github token used to raise the rate limit and enable broader search.
optional args:
[target]: optional. The target of the search query. It could be “repo”, “code”. It is “code” by default.
Concatenate them by “+”, eg. “-t code+repo”.
[input]: optional. The file path of previous output of takedown find. By providing this path,
the output this time will be compared against the previous one.
[output]: optional. The output file path. The result will be printed to the console by default.
[format]: optional. The output format. It could be “yaml” or “json”. It is “yaml” by default
send send emails based on records
python takedown send [domain] [port] [inputs] [-options]
with following args:
[domain]: required. The domain address to connect
[port]: required. port of domain to connect
[inputs]: required. Input files to send email
[-u username]: optional. username of the account. or ask
[-p password]: optional. password of the account. or ask
[-s secure method]: optional. It could be “TLS” or “SSL”, depending on the domain and port connected.
Confirm before using this option.
[-t tags]: optional. Only the records that matches the tag will be sent with an email
[-o output]: optional. The output file path. The result will be printed to the console by default.
[-f format]: optional. The output format. It could be “yaml” or “json”. It is “yaml” by default
[-en email name]: optional. name used to send email. Otherwise username will be used
[-es email subject]: optional. subject of the email. Otherwise default email subject is used
[-ep email preface]: optional. preface of the email. Otherwise default email preface is used
[-ee email ending]: optional. preface of the email. Otherwise default email preface is used
or using a configuration file:
python takedown.py send -c <path_to_config_file>
config file args:
required parameters:
[domain]: required. Domain used to connect smtp service
[port]: required. Port of domain to connect smtp service
[inputs]: required. Records based to send emails
optional parameters:
[username]: optional. username of the account. or ask
[password]: optional. password of the account. or ask
[secure method]: optional. It could be “TLS” or “SSL”, depending on the domain and port connected.
Confirm before using this option.
[tags]: optional. Only the records that matches the tag will be sent with an email
[output]: optional. The output file path. The result will be printed to the console by default.
[format]: optional. The output format. It could be “yaml” or “json”. It is “yaml” by default
[emai_name]: optional. name used to send email. Otherwise username will be used
[email_subject]: optional. subject of the email. Otherwise default email subject is used
[email_preface]: optional. preface of the email. Otherwise default email preface is used
[email_ending]: optional. preface of the email. Otherwise default email preface is used
help show instructions and list of options
"""
| 60.785714
| 120
| 0.665883
| 708
| 5,106
| 4.778249
| 0.230226
| 0.039019
| 0.021283
| 0.033698
| 0.742536
| 0.742536
| 0.742536
| 0.723027
| 0.65031
| 0.627254
| 0
| 0.002646
| 0.25989
| 5,106
| 83
| 121
| 61.518072
| 0.892564
| 0.03917
| 0
| 0
| 0
| 0
| 0.728707
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b898de5e2e4348a76809bd0da7631a2cc93a7b25
| 3,757
|
py
|
Python
|
pyaz/billing/invoice/section/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/billing/invoice/section/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/billing/invoice/section/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
billing invoice section
'''
from .... pyaz_utils import _call_az
def list(account_name, profile_name):
    '''
    Return the invoice sections the user has access to.

    Only supported for billing accounts whose agreement type is
    Microsoft Customer Agreement.

    Required Parameters:
    - account_name -- ID uniquely identifying a billing account.
    - profile_name -- ID uniquely identifying a billing profile.
    '''
    cli_args = {
        "account_name": account_name,
        "profile_name": profile_name,
    }
    return _call_az("az billing invoice section list", cli_args)
def show(account_name, name, profile_name):
    '''
    Fetch a single invoice section by its ID.

    Only supported for billing accounts whose agreement type is
    Microsoft Customer Agreement.

    Required Parameters:
    - account_name -- ID uniquely identifying a billing account.
    - name -- ID uniquely identifying an invoice section.
    - profile_name -- ID uniquely identifying a billing profile.
    '''
    cli_args = {
        "account_name": account_name,
        "name": name,
        "profile_name": profile_name,
    }
    return _call_az("az billing invoice section show", cli_args)
def create(account_name, name, profile_name, display_name=None, labels=None, no_wait=None):
    '''
    Create or update an invoice section.

    Only supported for billing accounts whose agreement type is
    Microsoft Customer Agreement.

    Required Parameters:
    - account_name -- ID uniquely identifying a billing account.
    - name -- ID uniquely identifying an invoice section.
    - profile_name -- ID uniquely identifying a billing profile.
    Optional Parameters:
    - display_name -- display name of the invoice section.
    - labels -- metadata dictionary for the section (KEY1=VALUE1 KEY2=VALUE2 ...).
    - no_wait -- do not wait for the long-running operation to finish.
    '''
    cli_args = {
        "account_name": account_name,
        "name": name,
        "profile_name": profile_name,
        "display_name": display_name,
        "labels": labels,
        "no_wait": no_wait,
    }
    return _call_az("az billing invoice section create", cli_args)
def update(account_name, name, profile_name, display_name=None, labels=None, no_wait=None):
    '''
    Create or update an invoice section.

    Only supported for billing accounts whose agreement type is
    Microsoft Customer Agreement.

    Required Parameters:
    - account_name -- ID uniquely identifying a billing account.
    - name -- ID uniquely identifying an invoice section.
    - profile_name -- ID uniquely identifying a billing profile.
    Optional Parameters:
    - display_name -- display name of the invoice section.
    - labels -- metadata dictionary for the section (KEY1=VALUE1 KEY2=VALUE2 ...).
    - no_wait -- do not wait for the long-running operation to finish.
    '''
    cli_args = {
        "account_name": account_name,
        "name": name,
        "profile_name": profile_name,
        "display_name": display_name,
        "labels": labels,
        "no_wait": no_wait,
    }
    return _call_az("az billing invoice section update", cli_args)
def wait(account_name, name, profile_name, created=None, custom=None, deleted=None, exists=None, interval=None, timeout=None, updated=None):
    '''
    Block the CLI until a condition on the billing invoice section is met.

    Required Parameters:
    - account_name -- ID uniquely identifying a billing account.
    - name -- ID uniquely identifying an invoice section.
    - profile_name -- ID uniquely identifying a billing profile.
    Optional Parameters:
    - created -- wait until created with 'provisioningState' at 'Succeeded'.
    - custom -- wait until a custom JMESPath query is satisfied, e.g.
      provisioningState!='InProgress', instanceView.statuses[?code=='PowerState/running'].
    - deleted -- wait until deleted.
    - exists -- wait until the resource exists.
    - interval -- polling interval in seconds.
    - timeout -- maximum wait in seconds.
    - updated -- wait until updated with provisioningState at 'Succeeded'.
    '''
    cli_args = {
        "account_name": account_name,
        "name": name,
        "profile_name": profile_name,
        "created": created,
        "custom": custom,
        "deleted": deleted,
        "exists": exists,
        "interval": interval,
        "timeout": timeout,
        "updated": updated,
    }
    return _call_az("az billing invoice section wait", cli_args)
| 45.26506
| 164
| 0.730104
| 497
| 3,757
| 5.43662
| 0.207243
| 0.093264
| 0.046632
| 0.067358
| 0.73094
| 0.711695
| 0.711695
| 0.698742
| 0.698742
| 0.698742
| 0
| 0.002646
| 0.195369
| 3,757
| 82
| 165
| 45.817073
| 0.891168
| 0.725313
| 0
| 0
| 0
| 0
| 0.196782
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.454545
| false
| 0
| 0.090909
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
b8c18579e2101b06416f377ffa427b6e165dcba7
| 53
|
py
|
Python
|
agency/memory/__init__.py
|
jackharmer/agency
|
5a78dd23e14c44c4076e49ea44b83ab1697e51c8
|
[
"MIT"
] | 2
|
2022-03-30T19:51:42.000Z
|
2022-03-30T20:05:39.000Z
|
agency/memory/__init__.py
|
jackharmer/agency
|
5a78dd23e14c44c4076e49ea44b83ab1697e51c8
|
[
"MIT"
] | null | null | null |
agency/memory/__init__.py
|
jackharmer/agency
|
5a78dd23e14c44c4076e49ea44b83ab1697e51c8
|
[
"MIT"
] | null | null | null |
from .episodic import EpisodicMemory, EpisodicBuffer
| 26.5
| 52
| 0.867925
| 5
| 53
| 9.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09434
| 53
| 1
| 53
| 53
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b218434a962715f2504f0272b199565a159dcf7b
| 115
|
py
|
Python
|
aim/pytorch.py
|
avkudr/aim
|
5961f31d358929287986ace09c73310886a94704
|
[
"Apache-2.0"
] | 2,195
|
2020-01-23T03:08:11.000Z
|
2022-03-31T14:32:19.000Z
|
aim/pytorch.py
|
deepanprabhu/aim
|
c00d8ec7bb2d9fd230a9430b516ca90cdb8072cb
|
[
"Apache-2.0"
] | 696
|
2020-02-08T21:55:45.000Z
|
2022-03-31T16:52:22.000Z
|
aim/pytorch.py
|
deepanprabhu/aim
|
c00d8ec7bb2d9fd230a9430b516ca90cdb8072cb
|
[
"Apache-2.0"
] | 150
|
2020-03-27T10:44:25.000Z
|
2022-03-21T21:29:41.000Z
|
# Alias to SDK PyTorch utils
from aim.sdk.adapters.pytorch import track_params_dists, track_gradients_dists # noqa
| 38.333333
| 85
| 0.834783
| 18
| 115
| 5.111111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113043
| 115
| 2
| 86
| 57.5
| 0.901961
| 0.269565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.