Schema of the preview (113 columns; dtypes as reported by the viewer). Each row below is presented as: repository metadata, the `content` cell as a fenced code block, then the row's remaining cells one per line in this column order.

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
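If the preview needs to be inspected programmatically, a minimal loading sketch (the parquet file name is an assumption; the column names match the schema above):

```python
# Sketch for inspecting rows like the ones below; assumes the split is
# stored as a parquet file named data.parquet (hypothetical name).
import pandas as pd

df = pd.read_parquet("data.parquet")
print(df[["hexsha", "size", "lang", "max_stars_count"]].head())
# The content column holds the raw source file for each row.
print(df.loc[0, "content"])
```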
**Row 1: `glimix_core/_util/_array.py` in Horta/limix-inference (Python, ext `py`, 157 bytes)**

- hexsha `9ec7841a173dc4c19d7dac5f98e4c9ddedd5460c`, repo head `1ba102fc544f8d307412d361b574da9d4c166f8e`, licenses ["MIT"] (path, repo name, head, and licenses are identical across the max_stars/max_issues/max_forks column groups, here and in every row below)
- max_stars_count 7, star events 2019-06-10T12:27:25.000Z to 2021-07-23T16:36:04.000Z
- max_issues_count 12, issue events 2017-05-28T10:59:31.000Z to 2021-05-17T20:11:00.000Z
- max_forks_count 5, fork events 2017-08-27T20:13:45.000Z to 2022-02-14T06:33:14.000Z

content:

```python
from numpy import reshape
def vec(x):
return reshape(x, (-1,) + x.shape[2:], order="F")
def unvec(x, shape):
return reshape(x, shape, order="F")
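```

A quick usage sketch for `vec`/`unvec` (the helpers are restated so the snippet runs on its own; the array `A` is illustrative):

```python
import numpy as np
from numpy import reshape

def vec(x):
    # Column-major (Fortran-order) flattening of the leading two axes.
    return reshape(x, (-1,) + x.shape[2:], order="F")

def unvec(x, shape):
    return reshape(x, shape, order="F")

A = np.arange(6).reshape(2, 3)
v = vec(A)                                   # array([0, 3, 1, 4, 2, 5])
assert np.array_equal(unvec(v, (2, 3)), A)   # exact round-trip
```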
Row stats: `avg_line_length` 15.7, `max_line_length` 53, `alphanum_fraction` 0.611465. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 26
| 157
| 3.692308
| 0.538462
| 0.1875
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 0.197452
| 157
| 9
| 54
| 17.444444
| 0.746032
| 0
| 0
| 0
| 0
| 0
| 0.012739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
`effective`: 0, `hits`: 6

**Row 2: `tests/bitwiseOperations/__init__.py` in mgorzkowski/abn (Python, ext `py`, 272 bytes)**

- hexsha `9edc4b896c4673af8ba61e91bf9ac87a555fe75f`, repo head `3a9ac6fb0cfe9d497b6d8f26373d2af3b6ff9860`, licenses ["MIT"]
- max_stars_count 4, star events 2018-04-24T15:25:55.000Z to 2022-03-08T15:01:07.000Z
- max_issues_count 2, issue events 2021-05-04T19:44:28.000Z to 2021-05-05T11:51:15.000Z
- max_forks_count null (no fork events recorded)

content:

```python
from . import nand_tests
from . import and_tests
from . import nor_tests
from . import not_tests
from . import or_tests
from . import xor_tests
from . import rotate_left_tests
from . import rotate_right_tests
from . import shift_left_tests
from . import shift_right_tests
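```

Package-level imports like these make every test module load when the package itself is imported. A dynamic equivalent of the same chain (the package path and the abbreviated module list are illustrative assumptions):

```python
import importlib

def load_test_modules(package="tests.bitwiseOperations",
                      names=("nand_tests", "and_tests", "nor_tests")):
    # importlib.import_module resolves each submodule exactly as the
    # static `from . import ...` lines do, one by one.
    return [importlib.import_module(f"{package}.{name}") for name in names]
```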
Row stats: `avg_line_length` 24.727273, `max_line_length` 32, `alphanum_fraction` 0.816176. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 44
| 272
| 4.727273
| 0.295455
| 0.480769
| 0.649038
| 0.201923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 272
| 10
| 33
| 27.2
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
`effective`: 0, `hits`: 6

**Row 3: `tests/library/test_ceph_volume_simple_activate.py` in u-kosmonaft-u/ceph-ansible (Python, ext `py`, 7,057 bytes)**

- hexsha `9efe36b7df749158058e0d954855a509a9ce6a8b`, repo head `14c472707c165f77def05826b22885480af3e8f9`, licenses ["Apache-2.0"]
- max_stars_count 1,570, star events 2015-01-03T08:38:22.000Z to 2022-03-31T09:24:37.000Z
- max_issues_count 4,964, issue events 2015-01-05T10:41:44.000Z to 2022-03-31T07:59:49.000Z
- max_forks_count 1,231, fork events 2015-01-04T11:48:16.000Z to 2022-03-31T12:15:28.000Z

content:

```python
from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_volume_simple_activate
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_id = '42'
fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)
class TestCephVolumeSimpleActivateModule(object):
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
def test_with_check_mode(self, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'_ansible_check_mode': True
})
m_exit_json.side_effect = ca_test_common.exit_json
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == 0
assert not result['stdout']
assert not result['stderr']
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_with_failure(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = 'error'
rc = 2
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_all_osds(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_all': True
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=True)
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=False)
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_activate_path_not_exists(self, m_fail_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['msg'] == '{} does not exist'.format(fake_path)
assert result['rc'] == 1
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_without_systemd(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'systemd': False
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
@patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_with_container(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == [fake_container_binary,
'run', '--rm', '--privileged',
'--ipc=host', '--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'-v', '/run/lvm/:/run/lvm/',
'-v', '/run/lock/lvm/:/run/lock/lvm/',
'--entrypoint=ceph-volume', fake_container_image,
'--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
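```

`ca_test_common` is a repo-local helper; the usual shape of the `set_module_args`/`exit_json` pattern it implements (a sketch of the well-known Ansible module-testing idiom, not the repo's actual code) is:

```python
import json

from ansible.module_utils import basic
from ansible.module_utils.common.text.converters import to_bytes

class AnsibleExitJson(Exception):
    """Carries the would-be module result instead of calling sys.exit()."""

class AnsibleFailJson(Exception):
    """Same idea for fail_json."""

def set_module_args(args):
    # Serialize test args the way AnsibleModule expects to read them.
    basic._ANSIBLE_ARGS = to_bytes(json.dumps({"ANSIBLE_MODULE_ARGS": args}))

def exit_json(*args, **kwargs):
    kwargs.setdefault("changed", False)
    raise AnsibleExitJson(kwargs)

def fail_json(*args, **kwargs):
    kwargs["failed"] = True
    raise AnsibleFailJson(kwargs)
```

With `exit_json` patched to raise, each test can capture the module's result dict via `pytest.raises`, which is exactly what the tests above do.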
Row stats: `avg_line_length` 40.325714, `max_line_length` 132, `alphanum_fraction` 0.621794. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 857
| 7,057
| 4.816803
| 0.123687
| 0.081395
| 0.063953
| 0.06686
| 0.803537
| 0.796269
| 0.766231
| 0.753634
| 0.741764
| 0.741764
| 0
| 0.006254
| 0.252232
| 7,057
| 174
| 133
| 40.557471
| 0.776009
| 0
| 0
| 0.66
| 0
| 0
| 0.205186
| 0.121581
| 0
| 0
| 0
| 0
| 0.206667
| 1
| 0.046667
| false
| 0
| 0.033333
| 0
| 0.086667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
`effective`: 0, `hits`: 6

**Row 4: `tests/test_infection.py` in chinapnr/covid-19-data (Python, ext `py`, 4,387 bytes)**

- hexsha `73455aa40d8fdaf8fad425f0bc60becf47571215`, repo head `409fa260c16e09b7ef820435c5086207bb5e40ef`, licenses ["MIT"]
- max_stars_count 3, star events 2020-05-27T01:21:50.000Z to 2020-08-20T07:54:42.000Z
- max_issues_count 24, issue events 2020-03-26T10:45:34.000Z to 2020-04-06T06:13:50.000Z
- max_forks_count null (no fork events recorded)

content:

```python
import json
import pytest
@pytest.mark.usefixtures('client', 'headers')
class TestInfection:
def test_infection_region_tc01(self, client, headers):
# db has data BETWEEN 2020-03-22 2020-03-24
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
'end_date': '2020-03-24',
'include_hmt': 'false'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc02(self, client, headers):
# db has no data BETWEEN 2020-03-25 2020-03-26
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-25',
'end_date': '2020-03-26',
'include_hmt': 'false'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc03(self, client, headers):
# db has data BETWEEN 2020-03-22 2020-03-24
# look up detail
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc04(self, client, headers):
# db has data BETWEEN 2020-03-22 2020-03-24
# look up detail
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
# 'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc05(self, client, headers):
# db has data BETWEEN 2020-03-22 2020-03-24
# look up detail
region = 'China'
payload = {
'region': region,
'start_date': '2020-01-22',
# 'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 400
print("response: ", response.text)
response_data = json.loads(response.text)['code']
assert response_data == "30018"
def test_infection_region_detail(self, client, headers):
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region/detail', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
@pytest.mark.skip
def test_infection_area(self, client, headers):
region = 'China'
area = 'Chongqing'
payload = {
'region': region,
'area': area,
'start_date': '2020-03-22',
'end_date': '2020-03-24'
}
response = client.get('/infection/area', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_global(self, client, headers):
response = client.get('/infection/global', headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
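```

The `client` and `headers` fixtures come from the repo's conftest, which is not shown. A plausible shape for it (the web framework, app module, and token header are all assumptions):

```python
import pytest
from fastapi.testclient import TestClient  # assumption: a FastAPI app

from main import app  # hypothetical application module

@pytest.fixture
def client():
    # TestClient exposes the same .get(url, params=..., headers=...) API
    # the tests above call.
    return TestClient(app)

@pytest.fixture
def headers():
    return {"token": "test-token"}  # hypothetical auth header
```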
Row stats: `avg_line_length` 35.096, `max_line_length` 90, `alphanum_fraction` 0.581947. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 487
| 4,387
| 5.106776
| 0.12115
| 0.055489
| 0.052272
| 0.083635
| 0.844391
| 0.817451
| 0.817451
| 0.817451
| 0.817451
| 0.799357
| 0
| 0.074372
| 0.291999
| 4,387
| 124
| 91
| 35.379032
| 0.726336
| 0.070435
| 0
| 0.683168
| 0
| 0
| 0.167937
| 0.005901
| 0
| 0
| 0
| 0
| 0.158416
| 1
| 0.079208
| false
| 0
| 0.019802
| 0
| 0.108911
| 0.079208
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
`effective`: 0, `hits`: 6

**Row 5: `utils.py` in sWizad/HashNeRF-pytorch (Python, ext `py`, 9,921 bytes)**

- hexsha `b40913984e0d9a08276edd74c8a43fc4a6017a70`, repo head `e8fe9b4879fc6ef3cdfa8fd3d268a92c4fa0d910`, licenses ["MIT"]
- max_stars_count, max_issues_count, and max_forks_count are all null (no recorded events)

content:

```python
import json
import numpy as np
import pdb
import torch
from ray_utils import get_rays, get_ray_directions, get_ndc_rays
BOX_OFFSETS = torch.tensor([[[i,j,k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]],
device='cuda')
SQR_OFFSETS = torch.tensor([[[i,j] for i in [0, 1] for j in [0, 1] ]], device='cuda')
def hash(coords, log2_hashmap_size):
'''
coords: 3D coordinates. B x 3
log2T: logarithm of T w.r.t 2
'''
x, y, z = coords[..., 0], coords[..., 1], coords[..., 2]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
#return ((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
def hash2d(coords, log2_hashmap_size):
'''
coords: 2D coordinates. B x 3
log2T: logarithm of T w.r.t 2
'''
x, y = coords[..., 0], coords[..., 1]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663)
def xy2index(xy,resolution):
return xy[...,0]+xy[...,1]*resolution
def get_bbox3d_for_blenderobj(camera_transforms, H, W, near=2.0, far=6.0):
camera_angle_x = float(camera_transforms['camera_angle_x'])
focal = 0.5*W/np.tan(0.5 * camera_angle_x)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
for frame in camera_transforms["frames"]:
c2w = torch.FloatTensor(frame["transform_matrix"])
rays_o, rays_d = get_rays(directions, c2w)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([1.0,1.0,1.0]), torch.tensor(max_bound)+torch.tensor([1.0,1.0,1.0]))
def get_bbox3d_for_llff(poses, hwf, near=0.0, far=1.0):
H, W, focal = hwf
H, W = int(H), int(W)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
poses = torch.FloatTensor(poses)
for pose in poses:
rays_o, rays_d = get_rays(directions, pose)
rays_o, rays_d = get_ndc_rays(H, W, focal, 1.0, rays_o, rays_d)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([0.1,0.1,0.0001]), torch.tensor(max_bound)+torch.tensor([0.1,0.1,0.0001]))
def get_voxel_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int()
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS
hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices_old(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
#pdb.set_trace()
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
#hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xy = xy2index(voxel_indices_xy,resolution) #(B, 4)
hashed_voxel_indices_xz = xy2index(voxel_indices_xz,resolution) #(B, 4)
hashed_voxel_indices_yz = xy2index(voxel_indices_yz,resolution) #(B, 4)
#print(hashed_voxel_indices_yz.shape)
#pdb.set_trace()
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
if __name__=="__main__":
with open("data/nerf_synthetic/chair/transforms_train.json", "r") as f:
camera_transforms = json.load(f)
bounding_box = get_bbox3d_for_blenderobj(camera_transforms, 800, 800)
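```

The spatial hash above can be exercised on CPU with toy coordinates (the module itself pins `BOX_OFFSETS`/`SQR_OFFSETS` to `'cuda'`, so importing it needs a GPU; the expression below is the same as its `hash()`):

```python
import torch

log2_hashmap_size = 19
coords = torch.tensor([[0, 0, 0], [1, 2, 3], [7, 7, 7]])
x, y, z = coords[..., 0], coords[..., 1], coords[..., 2]
# Three large primes XOR-mixed, then masked down to 2**19 buckets.
idx = torch.tensor((1 << log2_hashmap_size) - 1) & (
    x * 73856093 ^ y * 19349663 ^ z * 83492791
)
print(idx)  # tensor of bucket indices in [0, 2**19)
```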
Row stats: `avg_line_length` 40.493878, `max_line_length` 123, `alphanum_fraction` 0.627961. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 1,528
| 9,921
| 3.793194
| 0.102094
| 0.10559
| 0.093168
| 0.008282
| 0.8706
| 0.848516
| 0.819531
| 0.810214
| 0.810214
| 0.791063
| 0
| 0.053681
| 0.241407
| 9,921
| 244
| 124
| 40.659836
| 0.71645
| 0.270235
| 0
| 0.654135
| 0
| 0
| 0.014158
| 0.006654
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090226
| false
| 0
| 0.037594
| 0.007519
| 0.218045
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
`effective`: 0, `hits`: 6

**Row 6: `src/base/admin.py` in dhavall13/Decode (Python, ext `py`, 189 bytes)**

- hexsha `b40c87bef3a1437769ac688f07452b9daed5f901`, repo head `8b9cbec72ade727d62edb90c3a38152e0285fe90`, licenses ["MIT"]
- max_stars_count, max_issues_count, and max_forks_count are all null (no recorded events)

content:

```python
from django.contrib import admin
from .models import Room, Topic, Message, User
admin.site.register(Room)
admin.site.register(Topic)
admin.site.register(Message)
admin.site.register(User)
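```

`admin.site.register(Model)` registers each model with Django's default admin options. The decorator form is an equivalent spelling for one model, shown here as an alternative (the `list_display` columns are hypothetical):

```python
from django.contrib import admin
from .models import Room

@admin.register(Room)  # equivalent to admin.site.register(Room, RoomAdmin)
class RoomAdmin(admin.ModelAdmin):
    list_display = ("id",)  # hypothetical column list
```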
Row stats: `avg_line_length` 23.625, `max_line_length` 46, `alphanum_fraction` 0.804233. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 28
| 189
| 5.428571
| 0.428571
| 0.236842
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084656
| 189
| 7
| 47
| 27
| 0.878613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
`effective`: 0, `hits`: 6

**Row 7: `orca/topology/infra/k8s/__init__.py` in filwie/orca (Python, ext `py`, 8,249 bytes)**

- hexsha `b414e74ae421f14965c6e966091b96bde22167db`, repo head `84cfd53d309d85f7a7fb8649ba4abc8c2df9feac`, licenses ["Apache-2.0"]
- max_stars_count, max_issues_count, and max_forks_count are all null (no recorded events)

content:

```python
# Copyright 2020 OpenRCA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orca.topology import bundle
from orca.topology.infra.istio import linker as istio_linker
from orca.topology.infra.k8s import cluster, linker, probe
def get_probes():
return [
bundle.ProbeBundle(
probe=probe.PodPullProbe,
linkers=[
linker.PodToServiceLinker,
linker.PodToReplicaSetLinker,
linker.PodToStatefulSetLinker,
linker.PodToDaemonSetLinker,
linker.PodToNodeLinker,
linker.ConfigMapToPodLinker,
linker.SecretToPodLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.PodPushProbe,
linkers=[
linker.PodToServiceLinker,
linker.PodToReplicaSetLinker,
linker.PodToStatefulSetLinker,
linker.PodToDaemonSetLinker,
linker.PodToNodeLinker,
linker.ConfigMapToPodLinker,
linker.SecretToPodLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.ServicePullProbe,
linkers=[
linker.PodToServiceLinker,
linker.EndpointsToServiceLinker,
istio_linker.VirtualServiceToServiceLinker,
istio_linker.DestinationRuleToServiceLinker,
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.ServicePushProbe,
linkers=[
linker.PodToServiceLinker,
linker.EndpointsToServiceLinker,
istio_linker.VirtualServiceToServiceLinker,
istio_linker.DestinationRuleToServiceLinker,
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.EndpointsPullProbe,
linkers=[
linker.EndpointsToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.EndpointsPushProbe,
linkers=[
linker.EndpointsToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.DeploymentPullProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToDeploymentLinker
]
),
bundle.ProbeBundle(
probe=probe.DeploymentPushProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToDeploymentLinker
]
),
bundle.ProbeBundle(
probe=probe.ReplicaSetPullProbe,
linkers=[
linker.PodToReplicaSetLinker,
linker.ReplicaSetToDeploymentLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.ReplicaSetPushProbe,
linkers=[
linker.PodToReplicaSetLinker,
linker.ReplicaSetToDeploymentLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.DaemonSetPullProbe,
linkers=[
linker.PodToDaemonSetLinker
]
),
bundle.ProbeBundle(
probe=probe.DaemonSetPushProbe,
linkers=[
linker.PodToDaemonSetLinker
]
),
bundle.ProbeBundle(
probe=probe.StatefulSetPullProbe,
linkers=[
linker.PodToStatefulSetLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.StatefulSetPushProbe,
linkers=[
linker.PodToStatefulSetLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.ConfigMapPullProbe,
linkers=[
linker.ConfigMapToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.ConfigMapPushProbe,
linkers=[
linker.ConfigMapToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.SecretPullProbe,
linkers=[
linker.SecretToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.SecretPushProbe,
linkers=[
linker.SecretToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.StorageClassPullProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker
]
),
bundle.ProbeBundle(
probe=probe.StorageClassPushProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumePullProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker,
linker.PersistentVolumeToPersistentVolumeClaimLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumePushProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker,
linker.PersistentVolumeToPersistentVolumeClaimLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumeClaimPullProbe,
linkers=[
linker.PersistentVolumeToPersistentVolumeClaimLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumeClaimPushProbe,
linkers=[
linker.PersistentVolumeToPersistentVolumeClaimLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.HorizontalPodAutoscalerPullProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.HorizontalPodAutoscalerPushProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.NodePullProbe,
linkers=[
linker.PodToNodeLinker,
linker.NodeToClusterLinker
]
),
bundle.ProbeBundle(
probe=probe.NodePushProbe,
linkers=[
linker.PodToNodeLinker,
linker.NodeToClusterLinker
]
),
bundle.ProbeBundle(
probe=probe.IngressPullProbe,
linkers=[
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.IngressPushProbe,
linkers=[
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=cluster.ClusterProbe,
linkers=[
linker.NodeToClusterLinker
]
)
]
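```

A consumer of `get_probes()` would iterate the bundles and wire each probe to its linkers. A summary sketch (it assumes `ProbeBundle` exposes its constructor arguments as attributes, which the calls above suggest but do not prove):

```python
from orca.topology.infra.k8s import get_probes

for bundle in get_probes():
    # Print each probe class next to the linker classes it feeds.
    print(bundle.probe.__name__, "->", [l.__name__ for l in bundle.linkers])
```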
Row stats: `avg_line_length` 29.566308, `max_line_length` 74, `alphanum_fraction` 0.562856. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 450
| 8,249
| 10.304444
| 0.288889
| 0.113651
| 0.147078
| 0.174682
| 0.730429
| 0.730429
| 0.704766
| 0.56804
| 0.56804
| 0.328661
| 0
| 0.00176
| 0.380046
| 8,249
| 278
| 75
| 29.672662
| 0.904967
| 0.066917
| 0
| 0.702586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00431
| true
| 0
| 0.012931
| 0.00431
| 0.021552
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
`effective`: 0, `hits`: 6

**Row 8: `tests/test_classes/users.py` in dialogs/python-bot-sdk (Python, ext `py`, 282 bytes)**

- hexsha `b44ff9fd50fe2d54276ded1d327434e0e7c23eab`, repo head `737152e5ef8406af0b22600ef7cefa78da9035e8`, licenses ["Apache-2.0"]
- max_stars_count 9, star events 2019-01-22T09:59:12.000Z to 2021-05-08T10:59:00.000Z
- max_issues_count 29, issue events 2018-10-08T17:10:49.000Z to 2021-04-28T18:46:30.000Z
- max_forks_count 8, fork events 2019-01-22T09:49:32.000Z to 2022-01-26T18:55:52.000Z

content:

```python
from dialog_api.users_pb2 import RequestLoadFullUsers, ResponseLoadFullUsers, FullUser
class Users:
def LoadFullUsers(self, request: RequestLoadFullUsers) -> ResponseLoadFullUsers:
return ResponseLoadFullUsers(full_users=[FullUser(id=1, contact_info=[], about=None)])
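```

Exercising the stub is straightforward (this assumes it runs where the `Users` class above is importable; a default-constructed request suffices because the response is hard-coded):

```python
from dialog_api.users_pb2 import RequestLoadFullUsers

users = Users()
response = users.LoadFullUsers(RequestLoadFullUsers())
assert response.full_users[0].id == 1  # the stub always returns user id 1
```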
Row stats: `avg_line_length` 40.285714, `max_line_length` 94, `alphanum_fraction` 0.797872. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 28
| 282
| 7.892857
| 0.785714
| 0.371041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008
| 0.113475
| 282
| 6
| 95
| 47
| 0.876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
`effective`: 0, `hits`: 6

**Row 9: `SVDD/__init__.py` in SolidusAbi/SVDD-Python (Python, ext `py`, 30 bytes)**

- hexsha `b45b6c7e93b004510cd39ca579e1ae1a135f82e4`, repo head `ce2b834bf31cfdbbbebc08c8a1bac8c37b081d0e`, licenses ["MIT"]
- max_stars_count, max_issues_count, and max_forks_count are all null (no recorded events)

content:

```python
from .BaseSVDD import BaseSVDD
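```

The re-export shortens the import path for users of the package:

```python
from SVDD import BaseSVDD          # enabled by the __init__.py above
# instead of: from SVDD.BaseSVDD import BaseSVDD
```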
Row stats: `avg_line_length` 30, `max_line_length` 30, `alphanum_fraction` 0.866667. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 4
| 30
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
`effective`: 0, `hits`: 6

**Row 10: `python/hayate/store/actions.py` in tao12345666333/Talk-Is-Cheap (Python, ext `py`, 258 bytes)**

- hexsha `b4620451d250c59f2d44e900d7695fc1a4e00f84`, repo head `7b2c5959828b6d8bbbad8144b9b97f9b77c6b34c`, licenses ["MIT"]
- max_stars_count 4, star events 2016-04-14T02:11:35.000Z to 2019-05-30T10:18:41.000Z
- max_issues_count 8, issue events 2016-07-21T16:02:17.000Z to 2021-09-23T02:49:34.000Z
- max_forks_count 2, fork events 2017-02-17T05:02:02.000Z to 2017-11-08T12:22:09.000Z

content:

```python
from turbo.flux import Mutation, register, dispatch, register_dispatch
import mutation_types
@register_dispatch('user', mutation_types.INCREASE)
def increase(rank):
pass
def decrease(rank):
return dispatch('user', mutation_types.DECREASE, rank)
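```

How these actions are meant to be driven (a sketch under the assumption of an app with a configured `'user'` store, following turbo's flux conventions; not verified against that framework's docs):

```python
# Assumes a running turbo app with a 'user' store wired up.
decrease(1)  # explicitly dispatches mutation_types.DECREASE to 'user'
# increase() is not called directly: @register_dispatch('user',
# mutation_types.INCREASE) arranges for the framework to dispatch it.
```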
Row stats: `avg_line_length` 19.846154, `max_line_length` 70, `alphanum_fraction` 0.77907. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 32
| 258
| 6.125
| 0.46875
| 0.244898
| 0.204082
| 0.255102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127907
| 258
| 12
| 71
| 21.5
| 0.871111
| 0
| 0
| 0
| 0
| 0
| 0.031008
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0.285714
| 0.142857
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
`effective`: 0, `hits`: 6

**Row 11: `wagtail/wagtailadmin/blocks.py` in patphongs/wagtail (Python, ext `py`, 237 bytes)**

- hexsha `b46ebc3b01df0741b7690606a0b55aac51c6693f`, repo head `32555f7a1c599c139e0f26c22907c9612af2e015`, licenses ["BSD-3-Clause"]
- max_stars_count 3, star events 2016-08-17T13:56:36.000Z to 2019-04-23T19:59:25.000Z
- max_issues_count 11, issue events 2016-08-05T15:43:06.000Z to 2016-12-16T13:32:23.000Z
- max_forks_count 2, fork events 2017-08-08T01:39:02.000Z to 2018-05-06T06:16:10.000Z

content:

```python
from __future__ import absolute_import, unicode_literals
import warnings
from wagtail.wagtailcore.blocks import * # noqa
warnings.warn("wagtail.wagtailadmin.blocks has moved to wagtail.wagtailcore.blocks", UserWarning, stacklevel=2)
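```

This file is a classic moved-module shim: a star re-export plus an import-time warning. The generic pattern it instantiates, with placeholder module names (not wagtail's real layout):

```python
# old_pkg/blocks.py: keep the old import path working while pointing
# users at the new location.
import warnings

from new_pkg.blocks import *  # noqa: F401,F403

warnings.warn(
    "old_pkg.blocks has moved to new_pkg.blocks",
    UserWarning,
    stacklevel=2,  # attribute the warning to the importer, not this shim
)
```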
Row stats: `avg_line_length` 29.625, `max_line_length` 111, `alphanum_fraction` 0.827004. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 29
| 237
| 6.551724
| 0.655172
| 0.189474
| 0.252632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004695
| 0.101266
| 237
| 7
| 112
| 33.857143
| 0.887324
| 0.016878
| 0
| 0
| 0
| 0
| 0.290043
| 0.229437
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
`effective`: 0, `hits`: 6

**Row 12: `pipeline/tests/engine/core/data/test_api.py` in wkma/bk-sops (Python, ext `py`, 15,921 bytes)**

- hexsha `81e7bcf77b3d24a119c0b31470b009787721b442`, repo head `8fb5609c0c4495c28d588fbafa9d9f5f2976929b`, licenses ["Apache-2.0"]
- max_stars_count 2, star events 2021-07-28T01:48:31.000Z to 2021-11-17T11:02:26.000Z
- max_issues_count null; max_forks_count null (no recorded events)

content:

```python
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import sys
from django.test import TestCase
from django.utils.module_loading import import_string
from pipeline.tests.mock import * # noqa
from pipeline.tests.mock_settings import * # noqa
class EngineDataAPITestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.mock_settings = MagicMock()
cls.settings_patch = patch(ENGINE_DATA_API_SETTINGS, cls.mock_settings)
cls.import_backend_patch = patch(ENGINE_DATA_API_IMPORT_BACKEND, MagicMock())
cls.settings_patch.start()
cls.import_backend_patch.start()
cls.api = import_string("pipeline.engine.core.data.api")
cls.write_methods = ["set_object", "del_object", "expire_cache"]
cls.read_methods = ["get_object", "cache_for"]
cls.method_params = {
"set_object": ["key", "obj"],
"del_object": ["key"],
"expire_cache": ["key", "obj", "expires"],
"cache_for": ["key"],
"get_object": ["key"],
}
@classmethod
def tearDownClass(cls):
cls.settings_patch.stop()
cls.import_backend_patch.stop()
def setUp(self):
self.backend = MagicMock()
self.candidate_backend = MagicMock()
self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE = False
def test_write__without_candidate(self):
for method in self.write_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_write__without_candidate test\n".format(method)
)
def test_write__without_candiate_raise_err(self):
for method in self.write_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_write__without_candiate_raise_err test\n".format(method)
)
def test_write__with_candidate(self):
for method in self.write_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write("{} pass test_write__with_candidate test\n".format(method))
def test_write__with_candidate_main_raise_err(self):
for method in self.write_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_main_raise_err test\n".format(
method
)
)
def test_write__with_candidate_raise_err(self):
for method in self.write_methods:
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_raise_err test\n".format(method)
)
def test_write__with_candidate_both_raise_err(self):
for method in self.write_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_both_raise_err test\n".format(
method
)
)
def test_write__with_auto_expire(self):
self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE = True
self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE_SECONDS = 30
for method in self.write_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
if method == "set_object":
getattr(self.backend, "expire_cache").assert_called_once_with(
*self.method_params[method], expires=30
)
self.backend.expire_cache.reset_mock()
else:
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_both_raise_err test\n".format(
method
)
)
def test_read__without_candidate(self):
for method in self.read_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_read__without_candidate test\n".format(method)
)
def test_read__without_candidate_raise_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_read__without_candidate_raise_err test\n".format(method)
)
def test_read__with_candidate_not_use(self):
for method in self.read_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_read__with_candidate_not_use test\n".format(method)
)
def test_read__with_candidate_use(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(return_value=None))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_use test\n".format(method)
)
def test_read__with_candidate_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(return_value=None))
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_err test\n".format(method)
)
def test_read__with_candidate_main_raise_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_main_raise_err test\n".format(method)
)
def test_read__with_candidate_both_raise_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_both_raise_err test\n".format(method)
)
def test_set_schedule_data(self):
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.api.set_schedule_data("key", "data")
self.backend.set_object.assert_called_once_with(
"key_schedule_parent_data", "data"
)
self.candidate_backend.set_object.assert_called_once_with(
"key_schedule_parent_data", "data"
)
def test_delete_parent_data(self):
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.api.delete_parent_data("key")
self.backend.del_object.assert_called_once_with(
"key_schedule_parent_data"
)
self.candidate_backend.del_object.assert_called_once_with(
"key_schedule_parent_data"
)
def test_get_schedule_parent_data(self):
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = self.api.get_schedule_parent_data("key")
self.assertIsNotNone(data)
self.backend.get_object.assert_called_once_with(
"key_schedule_parent_data"
)
self.candidate_backend.get_object.assert_not_called()
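```

The `ENGINE_DATA_API_*` names above are dotted-path string constants (from `pipeline.tests.mock_settings`), which is what lets `patch()` swap the backend objects in and out. A minimal sketch of the same mechanism against one function the tests exercise (assumes `pipeline` is importable):

```python
from unittest.mock import MagicMock, patch

fake_set_object = MagicMock()
# patch() accepts a dotted-path string, exactly like the constants above
# resolve to at patch time.
with patch("pipeline.engine.core.data.api.set_object", fake_set_object):
    from pipeline.engine.core.data import api
    api.set_object("key", "obj")   # routed to the mock inside the block
fake_set_object.assert_called_once_with("key", "obj")
```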
Row stats: `avg_line_length` 43.381471, `max_line_length` 115, `alphanum_fraction` 0.592174. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 1,672
| 15,921
| 5.297847
| 0.098086
| 0.093926
| 0.068639
| 0.094378
| 0.817227
| 0.812034
| 0.80876
| 0.80797
| 0.781553
| 0.771054
| 0
| 0.001402
| 0.328183
| 15,921
| 366
| 116
| 43.5
| 0.826758
| 0.045977
| 0
| 0.573883
| 0
| 0
| 0.067264
| 0.042032
| 0
| 0
| 0
| 0
| 0.154639
| 1
| 0.068729
| false
| 0.04811
| 0.030928
| 0
| 0.103093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
`effective`: 0, `hits`: 6

**Row 13: `test/settings/test_kafka_consumer_config.py` in DebasishMaji/PI (Python, ext `py`, 77 bytes)**

- hexsha `c30a8241bc4eb176e2d35bfc53ddbf79b7ca685f`, repo head `e293982cae8f8755d28d7b3de22966dc74759b90`, licenses ["Apache-2.0"]
- max_stars_count, max_issues_count, and max_forks_count are all null (no recorded events)

content:

```python
import unittest
class TestKafkaConsumerConfig(unittest.TestCase):
pass
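```

The class is an empty placeholder. A first real assertion it might grow into (the config keys are hypothetical):

```python
import unittest

class TestKafkaConsumerConfig(unittest.TestCase):
    def test_has_bootstrap_servers(self):
        # Stand-in for loading the project's real consumer settings.
        config = {"bootstrap.servers": "localhost:9092"}
        self.assertIn("bootstrap.servers", config)

if __name__ == "__main__":
    unittest.main()
```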
Row stats: `avg_line_length` 12.833333, `max_line_length` 49, `alphanum_fraction` 0.805195. The row's remaining cells, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above, follow one cell per line:
| 7
| 77
| 8.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 77
| 5
| 50
| 15.4
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
`effective`: 0, `hits`: 6

**Row 14: `Pycraft/StartupAnimation.py` in demirdogukan/InsiderPycraft (Python, ext `py`, 7,684 bytes)**

- hexsha `c30ea52dd60b15b77f690236c9544837627ac0f7`, repo head `5567107326fbd222a7df6aabf4ab265e0a157636`, licenses ["MIT"]
- max_stars_count 22, star events 2021-03-25T17:47:45.000Z to 2022-03-29T01:56:12.000Z
- max_issues_count 1, single issue event at 2021-12-22T16:12:59.000Z
- max_forks_count 3, fork events 2021-09-05T14:10:05.000Z to 2022-01-10T12:57:34.000Z

content:

```python
if not __name__ == "__main__":
print("Started <Pycraft_StartupAnimation>")
class GenerateStartupScreen:
def __init__(self):
pass
def Start(self):
try:
self.Display.fill(self.BackgroundCol)
self.mod_Pygame__.display.flip()
self.mod_Pygame__.display.set_caption(f"Pycraft: v{self.version}: Welcome")
PresentsFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 35)
PycraftFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 60)
NameFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 45)
NameText = NameFont.render("Tom Jebbo", True, self.FontCol)
NameTextWidth = NameText.get_width()
NameTextHeight = NameText.get_height()
PresentsText = PresentsFont.render("presents", True, self.FontCol)
PycraftText = PycraftFont.render("Pycraft", True, self.FontCol)
PycraftTextWidth = PycraftText.get_width()
PycraftTextHeight = PycraftText.get_height()
iteration = 0
clock = self.mod_Pygame__.time.Clock()
if self.RunFullStartup == True:
while iteration <= (60*3):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(NameText, ((self.realWidth-NameTextWidth)/2, (self.realHeight-NameTextHeight)/2))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
iteration = 0
while iteration <= (60*2):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(NameText, ((self.realWidth-NameTextWidth)/2, (self.realHeight-NameTextHeight)/2))
self.Display.blit(PresentsText, ((((self.realWidth-NameTextWidth)/2)+120), ((self.realHeight-NameTextHeight)/2)+30))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
iteration = 0
while iteration <= (60*3):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(PycraftText, ((self.realWidth-PycraftTextWidth)/2, (self.realHeight-PycraftTextHeight)/2))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
y = 0
while True:
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(PycraftText, ((self.realWidth-PycraftTextWidth)/2, ((self.realHeight-PycraftTextHeight)/2)-y))
y += 2
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
if ((self.realHeight-PycraftTextHeight)/2)-y <= 0:
self.RunFullStartup = False
return None
except Exception as Message:
self.RunFullStartup = False
return Message
else:
print("You need to run this as part of Pycraft")
import tkinter as tk
from tkinter import messagebox
root = tk.Tk()
root.withdraw()
messagebox.showerror("Startup Fail", "You need to run this as part of Pycraft, please run the 'main.py' file")
quit()
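# Note on the timing above: every animation loop is frame-locked with
# clock.tick(60), so the iteration caps of 60*3 and 60*2 correspond to
# roughly three- and two-second phases at 60 frames per second.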
| 53.361111
| 141
| 0.511322
| 679
| 7,684
| 5.533137
| 0.185567
| 0.076391
| 0.089965
| 0.053234
| 0.769497
| 0.755124
| 0.745542
| 0.745542
| 0.745542
| 0.729039
| 0
| 0.022628
| 0.401874
| 7,684
| 144
| 142
| 53.361111
| 0.794822
| 0
| 0
| 0.68595
| 1
| 0
| 0.047865
| 0.003447
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016529
| false
| 0.008264
| 0.016529
| 0
| 0.057851
| 0.016529
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c331cb67fa44126ad7899136fc1a363b37ea7fe2
| 263
|
py
|
Python
|
gdal/swig/python/scripts/gdal2xyz.py
|
Sokigo-GLS/gdal
|
595f74bf60dff89fc5df53f9f4c3e40fc835e909
|
[
"MIT"
] | null | null | null |
gdal/swig/python/scripts/gdal2xyz.py
|
Sokigo-GLS/gdal
|
595f74bf60dff89fc5df53f9f4c3e40fc835e909
|
[
"MIT"
] | null | null | null |
gdal/swig/python/scripts/gdal2xyz.py
|
Sokigo-GLS/gdal
|
595f74bf60dff89fc5df53f9f4c3e40fc835e909
|
[
"MIT"
] | null | null | null |
import sys
# import osgeo.utils.gdal2xyz as a convenience to use as a script
from osgeo.utils.gdal2xyz import * # noqa
from osgeo.utils.gdal2xyz import main
from osgeo.gdal import deprecation_warn
deprecation_warn('gdal2xyz', 'utils')
sys.exit(main(sys.argv))
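# Hypothetical invocation (file names are placeholders); the shim simply
# forwards argv to osgeo.utils.gdal2xyz.main after the deprecation warning:
#
#     python gdal2xyz.py input.tif output.xyz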
| 26.3
| 65
| 0.787072
| 41
| 263
| 5
| 0.463415
| 0.146341
| 0.263415
| 0.214634
| 0.273171
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017391
| 0.125475
| 263
| 9
| 66
| 29.222222
| 0.873913
| 0.258555
| 0
| 0
| 0
| 0
| 0.067708
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c37854af006991db33cfa5319fe951302a09dbf2
| 164
|
py
|
Python
|
segmentation/data/transforms/__init__.py
|
RajasekharChowdary9/panoptic-deeplab
|
7645bc1cf51e3ebc85153666f26f8630a407b52b
|
[
"Apache-2.0"
] | 506
|
2020-06-12T01:07:56.000Z
|
2022-03-26T00:56:52.000Z
|
segmentation/data/transforms/__init__.py
|
RajasekharChowdary9/panoptic-deeplab
|
7645bc1cf51e3ebc85153666f26f8630a407b52b
|
[
"Apache-2.0"
] | 85
|
2020-06-12T04:51:31.000Z
|
2022-03-23T16:19:44.000Z
|
segmentation/data/transforms/__init__.py
|
RajasekharChowdary9/panoptic-deeplab
|
7645bc1cf51e3ebc85153666f26f8630a407b52b
|
[
"Apache-2.0"
] | 102
|
2020-06-12T06:45:44.000Z
|
2022-03-22T14:03:04.000Z
|
from .build import build_transforms
from .pre_augmentation_transforms import Resize
from .target_transforms import PanopticTargetGenerator, SemanticTargetGenerator
| 41
| 79
| 0.896341
| 17
| 164
| 8.411765
| 0.588235
| 0.223776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079268
| 164
| 3
| 80
| 54.666667
| 0.94702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5edecbbe347219a2740ccd3534f648ace677fd24
| 10,232
|
py
|
Python
|
tests/exchanges_tests.py
|
tomwalton78/Crypto-Exchange-API-Aggregator
|
c5b1756eac46274cdbe5c4e49db62450a35b70a6
|
[
"MIT"
] | null | null | null |
tests/exchanges_tests.py
|
tomwalton78/Crypto-Exchange-API-Aggregator
|
c5b1756eac46274cdbe5c4e49db62450a35b70a6
|
[
"MIT"
] | null | null | null |
tests/exchanges_tests.py
|
tomwalton78/Crypto-Exchange-API-Aggregator
|
c5b1756eac46274cdbe5c4e49db62450a35b70a6
|
[
"MIT"
] | 1
|
2019-11-16T07:31:00.000Z
|
2019-11-16T07:31:00.000Z
|
import unittest
from datetime import datetime
import os
import sys
from api.exchanges.exchange import ExchangeAPICallFailedException
from api.exchanges.gdax_exchange import GdaxExchange
from api.exchanges.kraken_exchange import KrakenExchange
from api.exchanges.bitstamp_exchange import BitstampExchange
from api.exchanges.bitfinex_exchange import BitfinexExchange
class HiddenPrints:
"""Class to disable printing for functions run under its scope.
Example:
    with HiddenPrints():
print('hello world')
Nothing will print, since anything under the scope of HiddenPrints has its
printing output suppressed.
"""
def __enter__(self):
"""Disable printing on entering 'with HiddenPrints()' scope
"""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
"""Re-enable printing on exiting 'with HiddenPrints()' scope
"""
sys.stdout.close()
sys.stdout = self._original_stdout
class GdaxExchangeTests(unittest.TestCase):
"""
Tests that functions within GdaxExchange class perform as intended.
"""
def test_initialisation_with_valid_market(self):
try:
g = GdaxExchange('BTC-EUR')
pass
except KeyError:
self.fail(
'Initialising GdaxExchange with BTC-EUR raised KeyError.'
)
def test_initialisation_with_invalid_market(self):
with self.assertRaises(KeyError):
g = GdaxExchange('REDDDDDDDDDD-BLUEEEEEEEEEE')
def test_fetch_l1_quote_on_supported_market(self):
try:
g = GdaxExchange('BTC-EUR')
g.fetch_l1_quote()
pass
except Exception as e:
self.fail(
'Fetch l1 quote on supported market failed: {}'.format(
str(e)
)
)
def test_fetch_l1_quote_on_unsupported_market(self):
with self.assertRaises(ExchangeAPICallFailedException):
g = GdaxExchange('LTC-GBP')
g.fetch_l1_quote()
def test_latest_l1_quote_to_csv(self):
g = GdaxExchange('BTC-EUR')
g.latest_l1_quote = {
"best ask size": 0.65333759,
"best bid price": 5780.1,
"best ask price": 5781.24,
"timestamp": datetime.utcnow(),
"best bid size": 0.001006
}
g.latest_l1_quote_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) + '/gdax_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
def test_fetch_l1_quote_and_write_to_csv(self):
g = GdaxExchange('BTC-EUR')
with HiddenPrints():
g.fetch_l1_quote_and_write_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) + '/gdax_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
class KrakenExchangeTests(unittest.TestCase):
"""
Tests that functions within KrakenExchange class perform as intended.
"""
def test_initialisation_with_valid_market(self):
try:
k = KrakenExchange('BTC-EUR')
pass
except KeyError:
self.fail(
'Initialising KrakenExchange with BTC-EUR raised KeyError.'
)
def test_initialisation_with_invalid_market(self):
with self.assertRaises(KeyError):
k = KrakenExchange('REDDDDDDDDDD-BLUEEEEEEEEEE')
def test_fetch_l1_quote_on_supported_market(self):
try:
k = KrakenExchange('BTC-EUR')
k.fetch_l1_quote()
pass
except Exception as e:
self.fail(
'Fetch l1 quote on supported market failed: {}'.format(
str(e)
)
)
def test_fetch_l1_quote_on_unsupported_market(self):
with self.assertRaises(ExchangeAPICallFailedException):
k = KrakenExchange('LTC-GBP')
k.fetch_l1_quote()
def test_latest_l1_quote_to_csv(self):
k = KrakenExchange('BTC-EUR')
k.latest_l1_quote = {
"best ask size": 0.65333759,
"best bid price": 5780.1,
"best ask price": 5781.24,
"timestamp": datetime.utcnow(),
"best bid size": 0.001006
}
k.latest_l1_quote_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) + '/kraken_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
def test_fetch_l1_quote_and_write_to_csv(self):
k = KrakenExchange('BTC-EUR')
with HiddenPrints():
k.fetch_l1_quote_and_write_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) + '/kraken_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
class BitstampExchangeTests(unittest.TestCase):
"""
Tests that functions within BitstampExchange class perform as intended.
"""
def test_initialisation_with_valid_market(self):
try:
k = BitstampExchange('BTC-EUR')
pass
except KeyError:
self.fail(
'Initialising BitstampExchange with BTC-EUR raised KeyError.'
)
def test_initialisation_with_invalid_market(self):
with self.assertRaises(KeyError):
k = BitstampExchange('REDDDDDDDDDD-BLUEEEEEEEEEE')
def test_fetch_l1_quote_on_supported_market(self):
try:
k = BitstampExchange('BTC-EUR')
k.fetch_l1_quote()
pass
except Exception as e:
self.fail(
'Fetch l1 quote on supported market failed: {}'.format(
str(e)
)
)
def test_fetch_l1_quote_on_unsupported_market(self):
with self.assertRaises(ExchangeAPICallFailedException):
k = BitstampExchange('LTC-GBP')
k.fetch_l1_quote()
def test_latest_l1_quote_to_csv(self):
k = BitstampExchange('BTC-EUR')
k.latest_l1_quote = {
"best ask size": 0.65333759,
"best bid price": 5780.1,
"best ask price": 5781.24,
"timestamp": datetime.utcnow(),
"best bid size": 0.001006
}
k.latest_l1_quote_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) +
'/bitstamp_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
def test_fetch_l1_quote_and_write_to_csv(self):
k = BitstampExchange('BTC-EUR')
with HiddenPrints():
k.fetch_l1_quote_and_write_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) +
'/bitstamp_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
class BitfinexExchangeTests(unittest.TestCase):
"""
Tests that functions within BitfinexExchange class perform as intended.
"""
def test_initialisation_with_valid_market(self):
try:
k = BitfinexExchange('BTC-EUR')
pass
except KeyError:
self.fail(
'Initialising BitfinexExchange with BTC-EUR raised KeyError.'
)
def test_initialisation_with_invalid_market(self):
with self.assertRaises(KeyError):
k = BitfinexExchange('REDDDDDDDDDD-BLUEEEEEEEEEE')
def test_fetch_l1_quote_on_supported_market(self):
try:
k = BitfinexExchange('BTC-EUR')
k.fetch_l1_quote()
pass
except Exception as e:
self.fail(
'Fetch l1 quote on supported market failed: {}'.format(
str(e)
)
)
def test_fetch_l1_quote_on_unsupported_market(self):
with self.assertRaises(ExchangeAPICallFailedException):
k = BitfinexExchange('LTC-GBP')
k.fetch_l1_quote()
def test_latest_l1_quote_to_csv(self):
k = BitfinexExchange('BTC-EUR')
k.latest_l1_quote = {
"best ask size": 0.65333759,
"best bid price": 5780.1,
"best ask price": 5781.24,
"timestamp": datetime.utcnow(),
"best bid size": 0.001006
}
k.latest_l1_quote_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) +
'/bitfinex_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
def test_fetch_l1_quote_and_write_to_csv(self):
k = BitfinexExchange('BTC-EUR')
with HiddenPrints():
k.fetch_l1_quote_and_write_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) +
'/bitfinex_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
if __name__ == '__main__':
unittest.main(exit=False)
| 30.094118
| 79
| 0.583268
| 1,126
| 10,232
| 5.027531
| 0.119005
| 0.049461
| 0.059353
| 0.042395
| 0.804098
| 0.803745
| 0.775481
| 0.730613
| 0.699523
| 0.699523
| 0
| 0.021301
| 0.320954
| 10,232
| 339
| 80
| 30.182891
| 0.793466
| 0.082389
| 0
| 0.724
| 0
| 0
| 0.116272
| 0.020259
| 0
| 0
| 0
| 0
| 0.064
| 1
| 0.104
| false
| 0.032
| 0.036
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f08e7a44962b3d4ce1d67b7f28da022e46eb7fe
| 4,097
|
py
|
Python
|
src/bindings/python/tests/test_ngraph/test_eye.py
|
si-eun-kim/openvino
|
1db4446e2a6ead55d066e0b4e718fa37f509353a
|
[
"Apache-2.0"
] | 2
|
2021-12-14T15:27:46.000Z
|
2021-12-14T15:34:16.000Z
|
src/bindings/python/tests/test_ngraph/test_eye.py
|
si-eun-kim/openvino
|
1db4446e2a6ead55d066e0b4e718fa37f509353a
|
[
"Apache-2.0"
] | 33
|
2021-09-23T04:14:30.000Z
|
2022-01-24T13:21:32.000Z
|
src/bindings/python/tests/test_ngraph/test_eye.py
|
si-eun-kim/openvino
|
1db4446e2a6ead55d066e0b4e718fa37f509353a
|
[
"Apache-2.0"
] | 11
|
2021-11-09T00:51:40.000Z
|
2021-11-10T12:04:16.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import openvino.runtime.opset9 as ov
import numpy as np
import pytest
from tests.runtime import get_runtime
from openvino.runtime.utils.types import get_element_type_str
from openvino.runtime.utils.types import get_element_type
@pytest.mark.parametrize(
"num_rows, num_columns, diagonal_index, out_type",
[
pytest.param(2, 5, 0, np.float32),
pytest.param(5, 3, 2, np.int64),
pytest.param(3, 3, -1, np.float16),
pytest.param(5, 5, -10, np.float32),
],
)
def test_eye_rectangle(num_rows, num_columns, diagonal_index, out_type):
num_rows_array = np.array([num_rows], np.int32)
num_columns_array = np.array([num_columns], np.int32)
diagonal_index_array = np.array([diagonal_index], np.int32)
num_rows_tensor = ov.constant(num_rows_array)
num_columns_tensor = ov.constant(num_columns_array)
diagonal_index_tensor = ov.constant(diagonal_index_array)
# Create with param names
eye_node = ov.eye(num_rows=num_rows_tensor,
num_columns=num_columns_tensor,
diagonal_index=diagonal_index_tensor,
output_type=get_element_type_str(out_type))
    # Create with default argument order
eye_node = ov.eye(num_rows_tensor,
num_columns_tensor,
diagonal_index_tensor,
get_element_type_str(out_type))
expected_results = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)
assert eye_node.get_type_name() == "Eye"
assert eye_node.get_output_size() == 1
assert eye_node.get_output_element_type(0) == get_element_type(out_type)
assert tuple(eye_node.get_output_shape(0)) == expected_results.shape
# TODO: Enable with Eye reference implementation
# runtime = get_runtime()
# computation = runtime.computation(eye_node)
# eye_results = computation()
# assert np.allclose(eye_results, expected_results)
@pytest.mark.parametrize(
"num_rows, num_columns, diagonal_index, batch_shape, out_type",
[
pytest.param(2, 5, 0, [1], np.float32),
pytest.param(5, 3, 2, [2, 2], np.int64),
pytest.param(3, 3, -1, [1, 3, 2], np.float16),
pytest.param(5, 5, -10, [1, 1], np.float32),
],
)
def test_eye_batch_shape(num_rows, num_columns, diagonal_index, batch_shape, out_type):
num_rows_array = np.array([num_rows], np.int32)
num_columns_array = np.array([num_columns], np.int32)
diagonal_index_array = np.array([diagonal_index], np.int32)
batch_shape_array = np.array(batch_shape, np.int32)
num_rows_tensor = ov.constant(num_rows_array)
num_columns_tensor = ov.constant(num_columns_array)
diagonal_index_tensor = ov.constant(diagonal_index_array)
batch_shape_tensor = ov.constant(batch_shape_array)
# Create with param names
eye_node = ov.eye(num_rows=num_rows_tensor,
num_columns=num_columns_tensor,
diagonal_index=diagonal_index_tensor,
batch_shape=batch_shape_tensor,
output_type=get_element_type_str(out_type))
    # Create with default argument order
eye_node = ov.eye(num_rows_tensor,
num_columns_tensor,
diagonal_index_tensor,
get_element_type_str(out_type),
batch_shape_tensor)
output_shape = [*batch_shape, 1, 1]
one_matrix = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)
expected_results = np.tile(one_matrix, output_shape)
assert eye_node.get_type_name() == "Eye"
assert eye_node.get_output_size() == 1
assert eye_node.get_output_element_type(0) == get_element_type(out_type)
assert tuple(eye_node.get_output_shape(0)) == expected_results.shape
# TODO: Enable with Eye reference implementation
# runtime = get_runtime()
# computation = runtime.computation(eye_node)
# eye_results = computation()
# assert np.allclose(eye_results, expected_results)
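# These cases only validate graph construction (type name, output count,
# element type and inferred shape); the numeric comparisons stay commented
# out until an Eye reference implementation is available, per the TODOs above.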
| 39.776699
| 90
| 0.686112
| 567
| 4,097
| 4.619048
| 0.141093
| 0.053456
| 0.042764
| 0.036655
| 0.857197
| 0.842688
| 0.842688
| 0.796487
| 0.765559
| 0.700649
| 0
| 0.027052
| 0.215035
| 4,097
| 102
| 91
| 40.166667
| 0.787313
| 0.137418
| 0
| 0.542857
| 0
| 0
| 0.03213
| 0
| 0
| 0
| 0
| 0.009804
| 0.114286
| 1
| 0.028571
| false
| 0
| 0.085714
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f3bc48d07d6db347089edf80b48b6fd74fd6c76
| 2,108
|
py
|
Python
|
download_cifar100_teacher.py
|
valeoai/QuEST
|
02a23d2d8e0d059b4a30433f92eec5db146467f4
|
[
"Apache-2.0"
] | 3
|
2021-06-03T22:45:47.000Z
|
2022-03-27T18:50:06.000Z
|
download_cifar100_teacher.py
|
valeoai/QuEST
|
02a23d2d8e0d059b4a30433f92eec5db146467f4
|
[
"Apache-2.0"
] | null | null | null |
download_cifar100_teacher.py
|
valeoai/QuEST
|
02a23d2d8e0d059b4a30433f92eec5db146467f4
|
[
"Apache-2.0"
] | 1
|
2021-08-20T15:39:40.000Z
|
2021-08-20T15:39:40.000Z
|
import os
import urllib.request
os.makedirs('saved_models', exist_ok=True)
# The six CIFAR-100 teacher checkpoints all share the same repo layout.
teacher_models = [
    'wrn_40_2_vanilla',
    'resnet56_vanilla',
    'resnet110_vanilla',
    'resnet32x4_vanilla',
    'vgg13_vanilla',
    'ResNet50_vanilla',
]
for model_name in teacher_models:
    model_path = f'http://shape2prog.csail.mit.edu/repo/{model_name}/ckpt_epoch_240.pth'
    model_dir = f'saved_models/{model_name}'
    os.makedirs(model_dir, exist_ok=True)
    urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
    print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
| 51.414634
| 91
| 0.766129
| 328
| 2,108
| 4.64939
| 0.121951
| 0.141639
| 0.110164
| 0.090492
| 0.900984
| 0.900984
| 0.900984
| 0.900984
| 0.876066
| 0.714754
| 0
| 0.033588
| 0.067837
| 2,108
| 40
| 92
| 52.7
| 0.742494
| 0
| 0
| 0.545455
| 0
| 0
| 0.472921
| 0.174081
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.060606
| 0
| 0.060606
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f3d81cff53a00e04f111ddf20aa94a2c2b57bda
| 3,885
|
py
|
Python
|
test/lazy/test_cat_lazy_tensor.py
|
Mehdishishehbor/gpytorch
|
432e537b3f6679ea4ab3acf33b14626b7e161c92
|
[
"MIT"
] | null | null | null |
test/lazy/test_cat_lazy_tensor.py
|
Mehdishishehbor/gpytorch
|
432e537b3f6679ea4ab3acf33b14626b7e161c92
|
[
"MIT"
] | null | null | null |
test/lazy/test_cat_lazy_tensor.py
|
Mehdishishehbor/gpytorch
|
432e537b3f6679ea4ab3acf33b14626b7e161c92
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import unittest
import torch
from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor
from Lgpytorch.test.lazy_tensor_test_case import LazyTensorTestCase
class TestCatLazyTensor(LazyTensorTestCase, unittest.TestCase):
seed = 1
def create_lazy_tensor(self):
root = torch.randn(6, 7)
self.psd_mat = root.matmul(root.t())
slice1_mat = self.psd_mat[:2, :].requires_grad_()
slice2_mat = self.psd_mat[2:4, :].requires_grad_()
slice3_mat = self.psd_mat[4:6, :].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-2)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorColumn(LazyTensorTestCase, unittest.TestCase):
seed = 1
def create_lazy_tensor(self):
root = torch.randn(6, 7)
self.psd_mat = root.matmul(root.t())
slice1_mat = self.psd_mat[:, :2].requires_grad_()
slice2_mat = self.psd_mat[:, 2:4].requires_grad_()
slice3_mat = self.psd_mat[:, 4:6].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-1)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorBatch(LazyTensorTestCase, unittest.TestCase):
seed = 0
def create_lazy_tensor(self):
root = torch.randn(3, 6, 7)
self.psd_mat = root.matmul(root.transpose(-2, -1))
slice1_mat = self.psd_mat[..., :2, :].requires_grad_()
slice2_mat = self.psd_mat[..., 2:4, :].requires_grad_()
slice3_mat = self.psd_mat[..., 4:6, :].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-2)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorMultiBatch(LazyTensorTestCase, unittest.TestCase):
seed = 0
    # Because these LTs are large, we'll skip the big tests
skip_slq_tests = True
def create_lazy_tensor(self):
root = torch.randn(4, 3, 6, 7)
self.psd_mat = root.matmul(root.transpose(-2, -1))
slice1_mat = self.psd_mat[..., :2, :].requires_grad_()
slice2_mat = self.psd_mat[..., 2:4, :].requires_grad_()
slice3_mat = self.psd_mat[..., 4:6, :].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-2)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorBatchCat(LazyTensorTestCase, unittest.TestCase):
seed = 0
    # Because these LTs are large, we'll skip the big tests
skip_slq_tests = True
def create_lazy_tensor(self):
root = torch.randn(5, 3, 6, 7)
self.psd_mat = root.matmul(root.transpose(-2, -1))
slice1_mat = self.psd_mat[:2, ...].requires_grad_()
slice2_mat = self.psd_mat[2:3, ...].requires_grad_()
slice3_mat = self.psd_mat[3:, ...].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=0)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
if __name__ == "__main__":
unittest.main()
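# A minimal sketch of the pattern these suites exercise (values illustrative;
# evaluate() is assumed from the LazyTensor API): concatenating row blocks
# with CatLazyTensor reproduces the original matrix.
#
#     mat = torch.randn(4, 4)
#     cat = CatLazyTensor(NonLazyTensor(mat[:2, :]), NonLazyTensor(mat[2:, :]), dim=-2)
#     assert torch.allclose(cat.evaluate(), mat)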
| 31.844262
| 73
| 0.667954
| 488
| 3,885
| 5.040984
| 0.141393
| 0.071138
| 0.101626
| 0.079268
| 0.879675
| 0.863821
| 0.863821
| 0.85122
| 0.836179
| 0.836179
| 0
| 0.037748
| 0.209009
| 3,885
| 121
| 74
| 32.107438
| 0.762773
| 0.033205
| 0
| 0.679487
| 0
| 0
| 0.002132
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.051282
| 0.064103
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4894cec7ad1d16f91926da91173205b79ee1b463
| 1,620
|
py
|
Python
|
tests/test_compound_where.py
|
WinVector/data_algebra
|
3d6002ddf8231d310e03537a0435df0554b62234
|
[
"BSD-3-Clause"
] | 37
|
2019-08-28T08:16:48.000Z
|
2022-03-14T21:18:39.000Z
|
tests/test_compound_where.py
|
WinVector/data_algebra
|
3d6002ddf8231d310e03537a0435df0554b62234
|
[
"BSD-3-Clause"
] | 1
|
2019-09-02T23:13:29.000Z
|
2019-09-08T01:43:10.000Z
|
tests/test_compound_where.py
|
WinVector/data_algebra
|
3d6002ddf8231d310e03537a0435df0554b62234
|
[
"BSD-3-Clause"
] | 3
|
2019-08-28T12:23:11.000Z
|
2020-02-08T19:22:31.000Z
|
import data_algebra
import data_algebra.test_util
from data_algebra.data_ops import * # https://github.com/WinVector/data_algebra
import data_algebra.util
import data_algebra.SQLite
def test_compound_where_and():
d = data_algebra.default_data_model.pd.DataFrame(
{
"a": ["a", "b", None, None],
"b": ["c", None, "d", None],
"x": [1, 2, None, None],
"y": [3, None, 4, None],
}
)
ops = describe_table(d, table_name="d").select_rows(
'a == "a" and b == "c" and x > 0 and y < 4'
)
db_handle = data_algebra.SQLite.SQLiteModel().db_handle(conn=None)
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
expect = data_algebra.default_data_model.pd.DataFrame(
{"a": ["a"], "b": ["c"], "x": [1.0], "y": [3.0],}
)
data_algebra.test_util.check_transform(ops=ops, data=d, expect=expect)
def test_compound_where_amp():
d = data_algebra.default_data_model.pd.DataFrame(
{
"a": ["a", "b", None, None],
"b": ["c", None, "d", None],
"x": [1, 2, None, None],
"y": [3, None, 4, None],
}
)
ops = describe_table(d, table_name="d").select_rows(
'a == "a" & b == "c" & x > 0 & y < 4'
)
db_handle = data_algebra.SQLite.SQLiteModel().db_handle(conn=None)
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
expect = data_algebra.default_data_model.pd.DataFrame(
{"a": ["a"], "b": ["c"], "x": [1.0], "y": [3.0],}
)
data_algebra.test_util.check_transform(ops=ops, data=d, expect=expect)
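# Both tests pin down the same behaviour: the 'and' and '&' spellings of the
# conjunction yield the same filtered result, with rows containing None in any
# compared column dropped. To inspect the SQL actually emitted:
#
#     print(db_handle.to_sql(ops))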
| 28.421053
| 80
| 0.56358
| 231
| 1,620
| 3.744589
| 0.21645
| 0.178035
| 0.017341
| 0.101734
| 0.824277
| 0.756069
| 0.756069
| 0.756069
| 0.756069
| 0.756069
| 0
| 0.016556
| 0.254321
| 1,620
| 56
| 81
| 28.928571
| 0.699503
| 0.025309
| 0
| 0.55814
| 0
| 0
| 0.067216
| 0
| 0
| 0
| 0
| 0
| 0.046512
| 1
| 0.046512
| false
| 0
| 0.116279
| 0
| 0.162791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4896bd7de479f88113218577909931ad2456610b
| 18,819
|
py
|
Python
|
lshmm/viterbi/vit_diploid_variants_samples.py
|
jeromekelleher/lshmm
|
58e0c3395f222e756bb10a0063f5118b20176a01
|
[
"MIT"
] | null | null | null |
lshmm/viterbi/vit_diploid_variants_samples.py
|
jeromekelleher/lshmm
|
58e0c3395f222e756bb10a0063f5118b20176a01
|
[
"MIT"
] | 9
|
2022-02-24T14:20:09.000Z
|
2022-03-01T17:54:47.000Z
|
lshmm/vit_diploid_variants_samples.py
|
astheeggeggs/ls_hmm
|
11af1eb886ef3db2869cdd50954fba5565fcef51
|
[
"MIT"
] | 1
|
2022-02-28T17:07:36.000Z
|
2022-02-28T17:07:36.000Z
|
"""Collection of functions to run Viterbi algorithms on dipoid genotype data, where the data is structured as variants x samples."""
import numba as nb
import numpy as np
# https://github.com/numba/numba/issues/1269
@nb.njit
def np_apply_along_axis(func1d, axis, arr):
"""Create numpy-like functions for max, sum etc."""
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@nb.njit
def np_amax(array, axis):
"""Numba implementation of numpy vectorised maximum."""
return np_apply_along_axis(np.amax, axis, array)
@nb.njit
def np_sum(array, axis):
"""Numba implementation of numpy vectorised sum."""
return np_apply_along_axis(np.sum, axis, array)
@nb.njit
def np_argmax(array, axis):
"""Numba implementation of numpy vectorised argmax."""
return np_apply_along_axis(np.argmax, axis, array)
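# Illustrative check of the helpers against numpy (values made up):
#
#     arr = np.arange(6.0).reshape(2, 3)
#     np_amax(arr, 0)    # == np.amax(arr, axis=0)   -> array([3., 4., 5.])
#     np_sum(arr, 1)     # == np.sum(arr, axis=1)    -> array([ 3., 12.])
#     np_argmax(arr, 0)  # == np.argmax(arr, axis=0) -> array([1., 1., 1.])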
# def forwards_viterbi_dip_naive(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((m, n, n))
# P = np.zeros((m, n, n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V[0,:,:] = 1/(n**2) * e[0,index]
# r_n = r/n
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# # Get the vector to maximise over
# v = np.zeros((n,n))
# for k1 in range(n):
# for k2 in range(n):
# v[k1, k2] = V[l-1,k1, k2]
# if ((k1 == j1) and (k2 == j2)):
# v[k1, k2] *= ((1 - r[l])**2 + 2*(1-r[l]) * r_n[l] + r_n[l]**2)
# elif ((k1 == j1) or (k2 == j2)):
# v[k1, k2] *= (r_n[l] * (1 - r[l]) + r_n[l]**2)
# else:
# v[k1, k2] *= r_n[l]**2
# V[l,j1,j2] = np.amax(v) * e[l, index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V[l,:,:])
# V[l,:,:] *= 1/c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
@nb.njit
def forwards_viterbi_dip_naive(n, m, G, s, e, r):
"""Naive implementation of LS diploid Viterbi algorithm."""
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V[0, j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
# Get the vector to maximise over
v = np.zeros((n, n))
for k1 in range(n):
for k2 in range(n):
v[k1, k2] = V[l - 1, k1, k2]
if (k1 == j1) and (k2 == j2):
v[k1, k2] *= (
(1 - r[l]) ** 2 + 2 * (1 - r[l]) * r_n[l] + r_n[l] ** 2
)
elif (k1 == j1) or (k2 == j2):
v[k1, k2] *= r_n[l] * (1 - r[l]) + r_n[l] ** 2
else:
v[k1, k2] *= r_n[l] ** 2
V[l, j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
# def forwards_viterbi_dip_naive_low_mem(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((n,n))
# P = np.zeros((m,n,n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V_previous = 1/(n**2) * e[0,index]
# r_n = r/n
# # Take a look at the haploid Viterbi implementation in Jerome's code and see if we can pinch some ideas.
# # Diploid Viterbi, with smaller memory footprint.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# # Get the vector to maximise over
# v = np.zeros((n,n))
# for k1 in range(n):
# for k2 in range(n):
# v[k1, k2] = V_previous[k1, k2]
# if ((k1 == j1) and (k2 == j2)):
# v[k1, k2] *= ((1 - r[l])**2 + 2*(1-r[l]) * r_n[l] + r_n[l]**2)
# elif ((k1 == j1) or (k2 == j2)):
# v[k1, k2] *= (r_n[l] * (1 - r[l]) + r_n[l]**2)
# else:
# v[k1, k2] *= r_n[l]**2
# V[j1,j2] = np.amax(v) * e[l,index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V)
# V_previous = np.copy(V) / c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
@nb.njit
def forwards_viterbi_dip_naive_low_mem(n, m, G, s, e, r):
"""Naive implementation of LS diploid Viterbi algorithm, with reduced memory."""
# Initialise
V = np.zeros((n, n))
V_previous = np.zeros((n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V_previous[j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
    # Take a look at the haploid Viterbi implementation in Jerome's code and see if we can pinch some ideas.
# Diploid Viterbi, with smaller memory footprint.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
# Get the vector to maximise over
v = np.zeros((n, n))
for k1 in range(n):
for k2 in range(n):
v[k1, k2] = V_previous[k1, k2]
if (k1 == j1) and (k2 == j2):
v[k1, k2] *= (
(1 - r[l]) ** 2 + 2 * (1 - r[l]) * r_n[l] + r_n[l] ** 2
)
elif (k1 == j1) or (k2 == j2):
v[k1, k2] *= r_n[l] * (1 - r[l]) + r_n[l] ** 2
else:
v[k1, k2] *= r_n[l] ** 2
V[j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V)
V_previous = np.copy(V) / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
# def forwards_viterbi_dip_low_mem(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((n, n))
# P = np.zeros((m,n,n)).astype(np.int64)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V_previous = 1/(n**2) * e[0,index]
# c = np.ones(m)
# r_n = r/n
# # Diploid Viterbi, with smaller memory footprint, rescaling, and using the structure of the HMM.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# c[l] = np.amax(V_previous)
# argmax = np.argmax(V_previous)
# V_previous *= 1/c[l]
# V_rowcol_max = np_amax(V_previous, 0)
# arg_rowcol_max = np_argmax(V_previous, 0)
# no_switch = (1 - r[l])**2 + 2*(r_n[l]*(1 - r[l])) + r_n[l]**2
# single_switch = r_n[l]*(1 - r[l]) + r_n[l]**2
# double_switch = r_n[l]**2
# j1_j2 = 0
# for j1 in range(n):
# for j2 in range(n):
# V_single_switch = max(V_rowcol_max[j1], V_rowcol_max[j2])
# P_single_switch = np.argmax(np.array([V_rowcol_max[j1], V_rowcol_max[j2]]))
# if P_single_switch == 0:
# template_single_switch = j1*n + arg_rowcol_max[j1]
# else:
# template_single_switch = arg_rowcol_max[j2]*n + j2
# V[j1,j2] = V_previous[j1,j2] * no_switch # No switch in either
# P[l, j1, j2] = j1_j2
# # Single or double switch?
# single_switch_tmp = single_switch * V_single_switch
# if (single_switch_tmp > double_switch):
# # Then single switch is the alternative
# if (V[j1,j2] < single_switch * V_single_switch):
# V[j1,j2] = single_switch * V_single_switch
# P[l, j1, j2] = template_single_switch
# else:
# # Double switch is the alternative
# if V[j1, j2] < double_switch:
# V[j1, j2] = double_switch
# P[l, j1, j2] = argmax
# V[j1,j2] *= e[l, index[j1, j2]]
# j1_j2 += 1
# V_previous = np.copy(V)
# ll = np.sum(np.log10(c)) + np.log10(np.amax(V))
# return V, P, ll
@nb.njit
def forwards_viterbi_dip_low_mem(n, m, G, s, e, r):
"""LS diploid Viterbi algorithm, with reduced memory."""
# Initialise
V = np.zeros((n, n))
V_previous = np.zeros((n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V_previous[j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
# Diploid Viterbi, with smaller memory footprint, rescaling, and using the structure of the HMM.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
c[l] = np.amax(V_previous)
argmax = np.argmax(V_previous)
V_previous *= 1 / c[l]
V_rowcol_max = np_amax(V_previous, 0)
arg_rowcol_max = np_argmax(V_previous, 0)
no_switch = (1 - r[l]) ** 2 + 2 * (r_n[l] * (1 - r[l])) + r_n[l] ** 2
single_switch = r_n[l] * (1 - r[l]) + r_n[l] ** 2
double_switch = r_n[l] ** 2
j1_j2 = 0
for j1 in range(n):
for j2 in range(n):
V_single_switch = max(V_rowcol_max[j1], V_rowcol_max[j2])
P_single_switch = np.argmax(
np.array([V_rowcol_max[j1], V_rowcol_max[j2]])
)
if P_single_switch == 0:
template_single_switch = j1 * n + arg_rowcol_max[j1]
else:
template_single_switch = arg_rowcol_max[j2] * n + j2
V[j1, j2] = V_previous[j1, j2] * no_switch # No switch in either
P[l, j1, j2] = j1_j2
# Single or double switch?
single_switch_tmp = single_switch * V_single_switch
if single_switch_tmp > double_switch:
# Then single switch is the alternative
if V[j1, j2] < single_switch * V_single_switch:
V[j1, j2] = single_switch * V_single_switch
P[l, j1, j2] = template_single_switch
else:
# Double switch is the alternative
if V[j1, j2] < double_switch:
V[j1, j2] = double_switch
P[l, j1, j2] = argmax
V[j1, j2] *= e[l, index[j1, j2]]
j1_j2 += 1
V_previous = np.copy(V)
ll = np.sum(np.log10(c)) + np.log10(np.amax(V))
return V, P, ll
# def forwards_viterbi_dip_naive_vec(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((m,n,n))
# P = np.zeros((m,n,n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V[0,:,:] = 1/(n**2) * e[0,index]
# r_n = r/n
# # Jumped the gun - vectorising.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# v = (r_n[l]**2) * np.ones((n,n))
# v[j1,j2] += (1-r[l])**2
# v[j1, :] += (r_n[l] * (1 - r[l]))
# v[:, j2] += (r_n[l] * (1 - r[l]))
# v *= V[l-1,:,:]
# V[l,j1,j2] = np.amax(v) * e[l,index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V[l,:,:])
# V[l,:,:] *= 1/c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
@nb.jit
def forwards_viterbi_dip_naive_vec(n, m, G, s, e, r):
"""Vectorised LS diploid Viterbi algorithm using numpy."""
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V[0, j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
# Jumped the gun - vectorising.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
v = (r_n[l] ** 2) * np.ones((n, n))
v[j1, j2] += (1 - r[l]) ** 2
v[j1, :] += r_n[l] * (1 - r[l])
v[:, j2] += r_n[l] * (1 - r[l])
v *= V[l - 1, :, :]
V[l, j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
def forwards_viterbi_dip_naive_full_vec(n, m, G, s, e, r):
"""Fully vectorised naive LS diploid Viterbi algorithm using numpy."""
char_both = np.eye(n * n).ravel().reshape((n, n, n, n))
char_col = np.tile(np.sum(np.eye(n * n).reshape((n, n, n, n)), 3), (n, 1, 1, 1))
char_row = np.copy(char_col).T
rows, cols = np.ogrid[:n, :n]
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
index = (
4 * np.equal(G[0, :, :], s[0, 0]).astype(np.int64)
+ 2 * (G[0, :, :] == 1).astype(np.int64)
+ np.int64(s[0, 0] == 1)
)
V[0, :, :] = 1 / (n ** 2) * e[0, index]
r_n = r / n
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
v = (
(r_n[l] ** 2)
+ (1 - r[l]) ** 2 * char_both
+ (r_n[l] * (1 - r[l])) * (char_col + char_row)
)
v *= V[l - 1, :, :]
P[l, :, :] = np.argmax(v.reshape(n, n, -1), 2) # Have to flatten to use argmax
V[l, :, :] = v.reshape(n, n, -1)[rows, cols, P[l, :, :]] * e[l, index]
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
@nb.jit
def backwards_viterbi_dip(m, V_last, P):
"""Run a backwards pass to determine the most likely path."""
assert V_last.ndim == 2
assert V_last.shape[0] == V_last.shape[1]
# Initialisation
path = np.zeros(m).astype(np.int64)
path[m - 1] = np.argmax(V_last)
# Backtrace
for j in range(m - 2, -1, -1):
path[j] = P[j + 1, :, :].ravel()[path[j + 1]]
return path
def get_phased_path(n, path):
"""Obtain the phased path."""
return np.unravel_index(path, (n, n))
@nb.jit
def path_ll_dip(n, m, G, phased_path, s, e, r):
"""Evaluate log-likelihood path through a reference panel which results in sequence s."""
index = (
4 * np.int64(np.equal(G[0, phased_path[0][0], phased_path[1][0]], s[0, 0]))
+ 2 * np.int64(G[0, phased_path[0][0], phased_path[1][0]] == 1)
+ np.int64(s[0, 0] == 1)
)
log_prob_path = np.log10(1 / (n ** 2) * e[0, index])
old_phase = np.array([phased_path[0][0], phased_path[1][0]])
r_n = r / n
for l in range(1, m):
index = (
4 * np.int64(np.equal(G[l, phased_path[0][l], phased_path[1][l]], s[0, l]))
+ 2 * np.int64(G[l, phased_path[0][l], phased_path[1][l]] == 1)
+ np.int64(s[0, l] == 1)
)
current_phase = np.array([phased_path[0][l], phased_path[1][l]])
phase_diff = np.sum(~np.equal(current_phase, old_phase))
if phase_diff == 0:
log_prob_path += np.log10(
(1 - r[l]) ** 2 + 2 * (r_n[l] * (1 - r[l])) + r_n[l] ** 2
)
elif phase_diff == 1:
log_prob_path += np.log10(r_n[l] * (1 - r[l]) + r_n[l] ** 2)
else:
log_prob_path += np.log10(r_n[l] ** 2)
log_prob_path += np.log10(e[l, index])
old_phase = current_phase
return log_prob_path
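# A minimal end-to-end sketch of how these functions chain together (shapes
# are assumptions read off the signatures above: G is (m, n, n), s is (1, m),
# e has 8 emission columns to cover the 4/2/1 index, r is (m,)):
#
#     V, P, ll = forwards_viterbi_dip_low_mem(n, m, G, s, e, r)
#     path = backwards_viterbi_dip(m, V, P)
#     phased_path = get_phased_path(n, path)
#     ll_check = path_ll_dip(n, m, G, phased_path, s, e, r)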
| 33.307965
| 132
| 0.435145
| 2,901
| 18,819
| 2.720786
| 0.064461
| 0.06208
| 0.016344
| 0.012163
| 0.862283
| 0.848347
| 0.813632
| 0.789814
| 0.775877
| 0.775877
| 0
| 0.065352
| 0.380413
| 18,819
| 564
| 133
| 33.367021
| 0.611578
| 0.418035
| 0
| 0.558052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014981
| 1
| 0.044944
| false
| 0
| 0.007491
| 0
| 0.097378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48eca2b30f95acacb8513624eb0235e73603734b
| 183
|
py
|
Python
|
src/c3nav/site/templatetags/route_render.py
|
johnjohndoe/c3nav
|
a17f863a3512e305595c16b0300796b6bae81241
|
[
"Apache-2.0"
] | 132
|
2016-11-12T01:45:23.000Z
|
2022-03-08T15:17:10.000Z
|
src/c3nav/site/templatetags/route_render.py
|
johnjohndoe/c3nav
|
a17f863a3512e305595c16b0300796b6bae81241
|
[
"Apache-2.0"
] | 66
|
2016-09-29T09:46:19.000Z
|
2022-03-11T23:26:18.000Z
|
src/c3nav/site/templatetags/route_render.py
|
johnjohndoe/c3nav
|
a17f863a3512e305595c16b0300796b6bae81241
|
[
"Apache-2.0"
] | 42
|
2016-09-29T08:34:57.000Z
|
2022-03-08T15:17:15.000Z
|
from django import template
register = template.Library()
@register.filter
def negate(value):
return -value
@register.filter
def subtract(value, arg):
return value - arg
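# Example template usage (illustrative), after loading this tag library with
# {% load route_render %}:
#
#     {{ value|negate }}          renders -value
#     {{ value|subtract:offset }} renders value - offset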
| 13.071429
| 29
| 0.726776
| 23
| 183
| 5.782609
| 0.565217
| 0.210526
| 0.255639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180328
| 183
| 13
| 30
| 14.076923
| 0.886667
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0.25
| 0.625
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5b1a7c8341406690f20aa12accdb9fc9001deadc
| 238
|
py
|
Python
|
speechpro/cloud/speech/synthesis/rest/cloud_client/api/__init__.py
|
speechpro/cloud-python
|
dfcfc19a1f008b55c5290599c594fe8de777018b
|
[
"MIT"
] | 15
|
2020-05-27T09:35:32.000Z
|
2022-03-29T18:35:36.000Z
|
speechpro/cloud/speech/synthesis/rest/cloud_client/api/__init__.py
|
speechpro/cloud-python
|
dfcfc19a1f008b55c5290599c594fe8de777018b
|
[
"MIT"
] | null | null | null |
speechpro/cloud/speech/synthesis/rest/cloud_client/api/__init__.py
|
speechpro/cloud-python
|
dfcfc19a1f008b55c5290599c594fe8de777018b
|
[
"MIT"
] | 1
|
2021-04-06T21:39:29.000Z
|
2021-04-06T21:39:29.000Z
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
import speechpro.cloud.speech.synthesis.rest.cloud_client.api.session_api
import speechpro.cloud.speech.synthesis.rest.cloud_client.api.synthesize_api
| 29.75
| 76
| 0.848739
| 34
| 238
| 5.676471
| 0.529412
| 0.15544
| 0.207254
| 0.26943
| 0.549223
| 0.549223
| 0.549223
| 0.549223
| 0.549223
| 0
| 0
| 0.004566
| 0.079832
| 238
| 7
| 77
| 34
| 0.876712
| 0.172269
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d294cefa293f8d84c96bacb7467d9cfe88246372
| 147
|
py
|
Python
|
armageddon/__init__.py
|
acse-ns1321/asteroid-impact-simulator
|
986c12ff1276e5d0547a4f760e1d2cb90fe4ba11
|
[
"MIT"
] | null | null | null |
armageddon/__init__.py
|
acse-ns1321/asteroid-impact-simulator
|
986c12ff1276e5d0547a4f760e1d2cb90fe4ba11
|
[
"MIT"
] | null | null | null |
armageddon/__init__.py
|
acse-ns1321/asteroid-impact-simulator
|
986c12ff1276e5d0547a4f760e1d2cb90fe4ba11
|
[
"MIT"
] | null | null | null |
# flake8:NOQA
"""Python asteroid airburst calculator"""
from .solver import *
from .damage import *
from .locator import *
from .mapping import *
| 18.375
| 41
| 0.734694
| 18
| 147
| 6
| 0.666667
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008065
| 0.156463
| 147
| 7
| 42
| 21
| 0.862903
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d2a2c147c06d327188733c71e9a83b70f75131b1
| 27
|
py
|
Python
|
micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py
|
WenJinfeng/PyCG
|
b45e8e04fe697d8301cf27222a8f37646d69f168
|
[
"Apache-2.0"
] | 121
|
2020-12-16T20:31:37.000Z
|
2022-03-21T20:32:43.000Z
|
micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py
|
WenJinfeng/PyCG
|
b45e8e04fe697d8301cf27222a8f37646d69f168
|
[
"Apache-2.0"
] | 24
|
2021-03-13T00:04:00.000Z
|
2022-03-21T17:28:11.000Z
|
micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py
|
WenJinfeng/PyCG
|
b45e8e04fe697d8301cf27222a8f37646d69f168
|
[
"Apache-2.0"
] | 19
|
2021-03-23T10:58:47.000Z
|
2022-03-24T19:46:50.000Z
|
d = {"1": "a"}
d[1]
d["1"]
| 6.75
| 14
| 0.259259
| 7
| 27
| 1
| 0.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0.222222
| 27
| 3
| 15
| 9
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d2bc823500d7e835a13076bd5554f0f404893ff4
| 243
|
py
|
Python
|
jmeter_api/timers/__init__.py
|
dashawn888/jmeter_api
|
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
|
[
"Apache-2.0"
] | 11
|
2020-03-22T13:30:21.000Z
|
2021-12-25T06:23:44.000Z
|
jmeter_api/timers/__init__.py
|
dashawn888/jmeter_api
|
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
|
[
"Apache-2.0"
] | 2
|
2020-03-23T00:06:42.000Z
|
2021-02-24T21:41:40.000Z
|
jmeter_api/timers/__init__.py
|
dashawn888/jmeter_api
|
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
|
[
"Apache-2.0"
] | 3
|
2020-11-09T14:14:25.000Z
|
2021-05-27T02:54:38.000Z
|
from jmeter_api.timers.constant_throughput_timer.elements import ConstantThroughputTimer, BasedOn
from jmeter_api.timers.constant_timer.elements import ConstantTimer
from jmeter_api.timers.uniform_random_timer.elements import UniformRandTimer
| 60.75
| 97
| 0.90535
| 30
| 243
| 7.066667
| 0.5
| 0.141509
| 0.183962
| 0.268868
| 0.254717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053498
| 243
| 3
| 98
| 81
| 0.921739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d2d3eacc8c8caee95603f50b68c177c406992381
| 83
|
py
|
Python
|
backend/grant/task/__init__.py
|
DSBUGAY2/zcash-grant-system
|
729b9edda13bd1eeb3f445d889264230c6470d7e
|
[
"MIT"
] | 8
|
2019-06-03T16:29:49.000Z
|
2021-05-11T20:38:36.000Z
|
backend/grant/task/__init__.py
|
DSBUGAY2/zcash-grant-system
|
729b9edda13bd1eeb3f445d889264230c6470d7e
|
[
"MIT"
] | 342
|
2019-01-15T19:13:58.000Z
|
2020-03-24T16:38:13.000Z
|
backend/grant/task/__init__.py
|
DSBUGAY2/zcash-grant-system
|
729b9edda13bd1eeb3f445d889264230c6470d7e
|
[
"MIT"
] | 5
|
2019-02-15T09:06:47.000Z
|
2022-01-24T21:38:41.000Z
|
from . import models
from . import views
from . import commands
from . import jobs
| 16.6
| 22
| 0.759036
| 12
| 83
| 5.25
| 0.5
| 0.634921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192771
| 83
| 5
| 23
| 16.6
| 0.940299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d2e9f3e2143b7da446094a72db5befcb7fc0a728
| 54,559
|
py
|
Python
|
autogalaxy/profiles/mass_profiles/stellar_mass_profiles.py
|
Jammy2211/PyAutoModel
|
02f54e71900de9ec12c9070dc00a4bd001b25afa
|
[
"MIT"
] | 4
|
2019-10-29T13:27:23.000Z
|
2020-03-24T11:13:35.000Z
|
autogalaxy/profiles/mass_profiles/stellar_mass_profiles.py
|
Jammy2211/PyAutoModel
|
02f54e71900de9ec12c9070dc00a4bd001b25afa
|
[
"MIT"
] | null | null | null |
autogalaxy/profiles/mass_profiles/stellar_mass_profiles.py
|
Jammy2211/PyAutoModel
|
02f54e71900de9ec12c9070dc00a4bd001b25afa
|
[
"MIT"
] | 3
|
2020-02-12T10:29:59.000Z
|
2020-03-24T11:13:53.000Z
|
import copy
import numpy as np
from scipy.special import wofz
from scipy.integrate import quad
from typing import List, Tuple
import autoarray as aa
from autogalaxy.profiles.mass_profiles import MassProfile
from autogalaxy.profiles.mass_profiles.mass_profiles import (
MassProfileMGE,
MassProfileCSE,
)
from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from
class StellarProfile:
pass
class EllGaussian(MassProfile, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
sigma: float = 0.01,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical Gaussian light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared too, which is expected to be electrons per second).
        sigma
            The sigma value of the Gaussian.
        mass_to_light_ratio
            The mass-to-light ratio of the light profile.
        """
super(EllGaussian, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.sigma = sigma
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
deflections = (
self.mass_to_light_ratio
* self.intensity
* self.sigma
* np.sqrt((2 * np.pi) / (1.0 - self.axis_ratio ** 2.0))
* self.zeta_from(grid=grid)
)
return self.rotate_grid_from_reference_frame(
np.multiply(
1.0, np.vstack((-1.0 * np.imag(deflections), np.real(deflections))).T
)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
Note: sigma is divided by sqrt(q) here.
"""
def calculate_deflection_component(npow, index):
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sigma / np.sqrt(self.axis_ratio),
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(u, y, x, npow, axis_ratio, sigma):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return np.exp(-0.5 * np.square(np.divide(eta_u, sigma))) / (
(1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""Calculate the intensity of the Gaussian light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
Note: sigma is divided by sqrt(q) here.
"""
return np.multiply(
self.intensity,
np.exp(
-0.5
* np.square(
np.divide(grid_radii, self.sigma / np.sqrt(self.axis_ratio))
)
),
)
@property
def axis_ratio(self):
axis_ratio = super().axis_ratio
return axis_ratio if axis_ratio < 0.9999 else 0.9999
def zeta_from(self, grid: aa.type.Grid2DLike):
q2 = self.axis_ratio ** 2.0
ind_pos_y = grid[:, 0] >= 0
shape_grid = np.shape(grid)
output_grid = np.zeros((shape_grid[0]), dtype=np.complex128)
scale_factor = self.axis_ratio / (self.sigma * np.sqrt(2.0 * (1.0 - q2)))
xs_0 = grid[:, 1][ind_pos_y] * scale_factor
ys_0 = grid[:, 0][ind_pos_y] * scale_factor
xs_1 = grid[:, 1][~ind_pos_y] * scale_factor
ys_1 = -grid[:, 0][~ind_pos_y] * scale_factor
output_grid[ind_pos_y] = -1j * (
wofz(xs_0 + 1j * ys_0)
- np.exp(-(xs_0 ** 2.0) * (1.0 - q2) - ys_0 * ys_0 * (1.0 / q2 - 1.0))
* wofz(self.axis_ratio * xs_0 + 1j * ys_0 / self.axis_ratio)
)
output_grid[~ind_pos_y] = np.conj(
-1j
* (
wofz(xs_1 + 1j * ys_1)
- np.exp(-(xs_1 ** 2.0) * (1.0 - q2) - ys_1 * ys_1 * (1.0 / q2 - 1.0))
* wofz(self.axis_ratio * xs_1 + 1j * ys_1 / self.axis_ratio)
)
)
return output_grid
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
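# --- Editor's sketch (not part of the original module). The `zeta_from`
# method above evaluates the complex deflection of an elliptical Gaussian via
# the Faddeeva function wofz(z) = exp(-z**2) * erfc(-1j * z). A standalone
# version for a single (y, x) coordinate with y >= 0, assuming the
# module-level imports used above (numpy as np, scipy.special.wofz); the
# argument values are hypothetical defaults for illustration only.
def _sketch_gaussian_zeta(y=0.3, x=0.5, axis_ratio=0.7, sigma=1.0):
    q2 = axis_ratio ** 2.0
    scale = axis_ratio / (sigma * np.sqrt(2.0 * (1.0 - q2)))
    xs, ys = x * scale, y * scale
    # Mirrors the ind_pos_y branch of `zeta_from` for one coordinate.
    return -1j * (
        wofz(xs + 1j * ys)
        - np.exp(-(xs ** 2.0) * (1.0 - q2) - ys * ys * (1.0 / q2 - 1.0))
        * wofz(axis_ratio * xs + 1j * ys / axis_ratio)
    )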
# noinspection PyAbstractClass
class AbstractEllSersic(MassProfile, MassProfileMGE, MassProfileCSE, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Sersic mass profile, the mass counterpart of the Sersic light profile that is used to fit and subtract \
the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profiles
"""
super(AbstractEllSersic, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfileMGE, self).__init__()
super(MassProfileCSE, self).__init__()
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.effective_radius = effective_radius
self.sersic_index = sersic_index
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_cse_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_mge_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and
summing the deflections of each individual Gaussian used to decompose the mass profile.
The multi-Gaussian expansion (MGE) decomposition of the elliptical Sersic mass profile is performed by
`decompose_convergence_via_mge`.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self._deflections_2d_via_mge_from(
grid=grid, sigmas_factor=np.sqrt(self.axis_ratio)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_cse_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and
summing the deflections of each individual cse used to decompose the mass profile.
The cored steep elliptical (cse) decomposition of the elliptical Sersic mass profile
(see `decompose_convergence_via_cse`) uses equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
return self._deflections_2d_via_cse_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_via_mge_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
eccentric_radii = self.grid_to_eccentric_radii(grid=grid)
return self._convergence_2d_via_mge_from(grid_radii=eccentric_radii)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_via_cse_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D convergence from a grid of (y,x) arc second coordinates, by computing and summing
the convergence of each individual cse used to decompose the mass profile.
The cored steep elliptical (cse) decomposition of the elliptical Sersic mass profile
(see `decompose_convergence_via_cse`) uses equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
elliptical_radii = self.grid_to_elliptical_radii(grid=grid)
return self._convergence_2d_via_cse_from(grid_radii=elliptical_radii)
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, radius: np.ndarray):
"""
Returns the intensity of the profile at a given radius.
Parameters
----------
radius
The distance from the centre of the profile.
"""
return self.intensity * np.exp(
-self.sersic_constant
* (((radius / self.effective_radius) ** (1.0 / self.sersic_index)) - 1)
)
def decompose_convergence_via_mge(self) -> Tuple[List, List]:
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_2d(r):
return (
self.mass_to_light_ratio
* self.intensity
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_2d, radii_min=radii_min, radii_max=radii_max
)
def decompose_convergence_via_cse(self) -> Tuple[List, List]:
"""
Decompose the convergence of the Sersic profile into cored steep elliptical (cse) profiles.
This decomposition uses the standard 2d profile of a Sersic mass profile. The number of cse's, the number
of sample points and the radial fitting range are not arguments of this method; they are set internally
via `cse_settings_from`.
Returns
-------
Tuple[List, List]
A list of amplitudes and core radii of every cored steep elliptical (cse) the mass profile is decomposed
into.
"""
upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
effective_radius=self.effective_radius,
sersic_index=self.sersic_index,
sersic_constant=self.sersic_constant,
mass_to_light_gradient=0.0,
)
scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
radii_min = scaled_effective_radius / 10.0 ** lower_dex
radii_max = scaled_effective_radius * 10.0 ** upper_dex
def sersic_2d(r):
return (
self.mass_to_light_ratio
* self.intensity
* np.exp(
-self.sersic_constant
* (
((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
- 1.0
)
)
)
return self._decompose_convergence_via_cse_from(
func=sersic_2d,
radii_min=radii_min,
radii_max=radii_max,
total_cses=total_cses,
sample_points=sample_points,
)
@property
def sersic_constant(self):
"""A parameter derived from Sersic index which ensures that effective radius contains 50% of the profile's
total integrated light.
"""
return (
(2 * self.sersic_index)
- (1.0 / 3.0)
+ (4.0 / (405.0 * self.sersic_index))
+ (46.0 / (25515.0 * self.sersic_index ** 2))
+ (131.0 / (1148175.0 * self.sersic_index ** 3))
- (2194697.0 / (30690717750.0 * self.sersic_index ** 4))
)
@property
def ellipticity_rescale(self):
return 1.0 - ((1.0 - self.axis_ratio) / 2.0)
@property
def elliptical_effective_radius(self):
"""
The effective_radius of a Sersic light profile is defined as the circular effective radius. This is the \
radius within which a circular aperture contains half the profile's total integrated light. For elliptical \
systems, this won't robustly capture the light profile's elliptical shape.
The elliptical effective radius instead describes the major-axis radius of the ellipse containing \
half the light, and may be more appropriate for highly flattened systems like disk galaxies.
"""
return self.effective_radius / np.sqrt(self.axis_ratio)
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
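# --- Editor's sketch (not part of the original module). Two sanity checks on
# the Sersic quantities defined above: the `sersic_constant` series gives
# b ~ 7.669 for a de Vaucouleurs profile (sersic_index=4.0), and because the
# exponent of `image_2d_via_radii_from` vanishes at r = effective_radius, the
# profile returns exactly `intensity` there.
def _sketch_sersic_constant(sersic_index=4.0):
    # Same truncated series as the `sersic_constant` property above.
    return (
        (2 * sersic_index)
        - (1.0 / 3.0)
        + (4.0 / (405.0 * sersic_index))
        + (46.0 / (25515.0 * sersic_index ** 2))
        + (131.0 / (1148175.0 * sersic_index ** 3))
        - (2194697.0 / (30690717750.0 * sersic_index ** 4))
    )  # ~7.669 for sersic_index=4.0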
class EllSersic(AbstractEllSersic, MassProfileMGE, MassProfileCSE):
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
sersic_constant = self.sersic_constant
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sersic_index,
self.effective_radius,
sersic_constant,
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(
u, y, x, npow, axis_ratio, sersic_index, effective_radius, sersic_constant
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
) / ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
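# --- Editor's sketch (not part of the original module). How one deflection
# component is assembled from `EllSersic.deflection_func`, mirroring
# `calculate_deflection_component` above (npow=1.0 picks the y-component,
# npow=0.0 the x-component); the module code then rotates the result back to
# the sky frame. Assumes numpy as np and scipy.integrate.quad as imported
# above; all argument values are hypothetical.
def _sketch_sersic_deflection_y(
    y=0.2,
    x=0.1,
    axis_ratio=0.8,
    sersic_index=4.0,
    effective_radius=0.6,
    intensity=1.0,
    mass_to_light_ratio=1.0,
    sersic_constant=7.669,
):
    integral = quad(
        EllSersic.deflection_func,
        a=0.0,
        b=1.0,
        args=(y, x, 1.0, axis_ratio, sersic_index, effective_radius, sersic_constant),
    )[0]
    return axis_ratio * y * intensity * mass_to_light_ratio * integral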
class SphSersic(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Sersic mass profile, the mass counterpart of the Sersic light profile that is used to fit and subtract
the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre
intensity
Overall flux intensity normalisation in the light profiles (electrons per second)
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllExponential(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The EllExponential mass profile, the mass counterpart of the exponential light profile that is used to fit
and subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=1.0,
mass_to_light_ratio=mass_to_light_ratio,
)
class SphExponential(EllExponential):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Exponential mass profile, the mass counterpart of the exponential light profile that is used to fit and
subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllDevVaucouleurs(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The EllDevVaucouleurs mass profile, the mass counterpart of the de Vaucouleurs light profile that is used
to fit and subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=4.0,
mass_to_light_ratio=mass_to_light_ratio,
)
class SphDevVaucouleurs(EllDevVaucouleurs):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The DevVaucouleurs mass profile, the mass counterpart of the de Vaucouleurs light profile that is used to
fit and subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllSersicRadialGradient(AbstractEllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
mass_to_light_gradient: float = 0.0,
):
"""
Setup a Sersic mass and light profiles.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
mass_to_light_gradient
The mass-to-light radial gradient.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
self.mass_to_light_gradient = mass_to_light_gradient
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
sersic_constant = self.sersic_constant
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sersic_index,
self.effective_radius,
self.mass_to_light_gradient,
sersic_constant,
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(
u,
y,
x,
npow,
axis_ratio,
sersic_index,
effective_radius,
mass_to_light_gradient,
sersic_constant,
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return (
(((axis_ratio * eta_u) / effective_radius) ** -mass_to_light_gradient)
* np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
)
/ ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return (
self.mass_to_light_ratio
* (
((self.axis_ratio * grid_radius) / self.effective_radius)
** -self.mass_to_light_gradient
)
* self.image_2d_via_radii_from(grid_radius)
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / self.effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_radial_gradient_2D, radii_min=radii_min, radii_max=radii_max
)
def decompose_convergence_via_cse(self) -> Tuple[List, List]:
"""
Decompose the convergence of the Sersic profile into cored steep elliptical (cse) profiles.
This decomposition uses the standard 2d profile of a Sersic mass profile with a radial mass-to-light
gradient. The number of cse's, the number of sample points and the radial fitting range are not arguments
of this method; they are set internally via `cse_settings_from`.
Returns
-------
Tuple[List, List]
A list of amplitudes and core radii of every cored steep elliptical (cse) the mass profile is decomposed
into.
"""
upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
effective_radius=self.effective_radius,
sersic_index=self.sersic_index,
sersic_constant=self.sersic_constant,
mass_to_light_gradient=self.mass_to_light_gradient,
)
scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
radii_min = scaled_effective_radius / 10.0 ** lower_dex
radii_max = scaled_effective_radius * 10.0 ** upper_dex
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / scaled_effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (
((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
- 1.0
)
)
)
return self._decompose_convergence_via_cse_from(
func=sersic_radial_gradient_2D,
radii_min=radii_min,
radii_max=radii_max,
total_cses=total_cses,
sample_points=sample_points,
)
class SphSersicRadialGradient(EllSersicRadialGradient):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
mass_to_light_gradient: float = 0.0,
):
"""
Setup a Sersic mass and light profiles.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
mass_to_light_gradient
The mass-to-light radial gradient.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
mass_to_light_gradient=mass_to_light_gradient,
)
class EllSersicCore(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
radius_break: float = 0.01,
intensity_break: float = 0.05,
gamma: float = 0.25,
alpha: float = 3.0,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical cored-Sersic mass profile, the mass counterpart of the cored-Sersic light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
radius_break
The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
intensity_break
The intensity at the break radius.
gamma
The logarithmic power-law slope of the inner core profile.
alpha
Controls the sharpness of the transition between the inner core / outer Sersic profiles.
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity_break,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
self.radius_break = radius_break
self.intensity_break = intensity_break
self.alpha = alpha
self.gamma = gamma
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_mge_from(grid=grid)
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""
Calculate the intensity of the cored-Sersic light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile for each coordinate on the grid.
"""
return np.multiply(
np.multiply(
self.intensity_prime,
np.power(
np.add(
1,
np.power(np.divide(self.radius_break, grid_radii), self.alpha),
),
(self.gamma / self.alpha),
),
),
np.exp(
np.multiply(
-self.sersic_constant,
(
np.power(
np.divide(
np.add(
np.power(grid_radii, self.alpha),
(self.radius_break ** self.alpha),
),
(self.effective_radius ** self.alpha),
),
(1.0 / (self.alpha * self.sersic_index)),
)
),
)
),
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 50.0
radii_max = self.effective_radius * 20.0
def core_sersic_2D(r):
return (
self.mass_to_light_ratio
* self.intensity_prime
* (1.0 + (self.radius_break / r) ** self.alpha)
** (self.gamma / self.alpha)
* np.exp(
-self.sersic_constant
* (
(r ** self.alpha + self.radius_break ** self.alpha)
/ self.effective_radius ** self.alpha
)
** (1.0 / (self.sersic_index * self.alpha))
)
)
return self._decompose_convergence_via_mge(
func=core_sersic_2D, radii_min=radii_min, radii_max=radii_max
)
@property
def intensity_prime(self):
"""Overall intensity normalisation in the rescaled Core-Sersic light profiles (electrons per second)"""
return (
self.intensity_break
* (2.0 ** (-self.gamma / self.alpha))
* np.exp(
self.sersic_constant
* (
((2.0 ** (1.0 / self.alpha)) * self.radius_break)
/ self.effective_radius
)
** (1.0 / self.sersic_index)
)
)
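# --- Editor's sketch (not part of the original module). The nested numpy
# calls in `EllSersicCore.image_2d_via_radii_from` implement the cored-Sersic
# profile
#
#   I(r) = I' * (1 + (r_b / r)**alpha)**(gamma / alpha)
#          * exp(-b * ((r**alpha + r_b**alpha) / Re**alpha)**(1 / (alpha * n)))
#
# where I' is `intensity_prime` and b is `sersic_constant`. A scalar
# restatement, assuming numpy as np from the module imports:
def _sketch_core_sersic(r, i_prime, r_b, alpha, gamma, b, r_e, n):
    return (
        i_prime
        * (1.0 + (r_b / r) ** alpha) ** (gamma / alpha)
        * np.exp(
            -b * ((r ** alpha + r_b ** alpha) / r_e ** alpha) ** (1.0 / (alpha * n))
        )
    )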
class SphSersicCore(EllSersicCore):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
radius_break: float = 0.01,
intensity_break: float = 0.05,
gamma: float = 0.25,
alpha: float = 3.0,
):
"""
The spherical cored-Sersic mass profile, the mass counterpart of the cored-Sersic light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
radius_break
The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
intensity_break
The intensity at the break radius.
gamma
The logarithmic power-law slope of the inner core profile.
alpha
Controls the sharpness of the transition between the inner core / outer Sersic profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
effective_radius=effective_radius,
sersic_index=sersic_index,
radius_break=radius_break,
intensity_break=intensity_break,
gamma=gamma,
alpha=alpha,
)
self.radius_break = radius_break
self.intensity_break = intensity_break
self.alpha = alpha
self.gamma = gamma
class EllChameleon(MassProfile, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
core_radius_0: float = 0.01,
core_radius_1: float = 0.02,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical Chameleon mass profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared to, which is expected to be electrons per second).
core_radius_0
The core size of the first elliptical cored Isothermal profile.
core_radius_1
core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile;
we use core_radius_1 here to avoid negative values.
Profile form:
mass_to_light_ratio * intensity *\
(1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0))
"""
super(EllChameleon, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.core_radius_0 = core_radius_0
self.core_radius_1 = core_radius_1
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Following Eqs. (15) and (16), but with slightly different parameters.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
factor = (
2.0
* self.mass_to_light_ratio
* self.intensity
/ (1 + self.axis_ratio)
* self.axis_ratio
/ np.sqrt(1.0 - self.axis_ratio ** 2.0)
)
core_radius_0 = np.sqrt(
(4.0 * self.core_radius_0 ** 2.0) / (1.0 + self.axis_ratio) ** 2
)
core_radius_1 = np.sqrt(
(4.0 * self.core_radius_1 ** 2.0) / (1.0 + self.axis_ratio) ** 2
)
psi0 = psi_from(
grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_0
)
psi1 = psi_from(
grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_1
)
deflection_y0 = np.arctanh(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 0]),
np.add(psi0, self.axis_ratio ** 2.0 * core_radius_0),
)
)
deflection_x0 = np.arctan(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 1]),
np.add(psi0, core_radius_0),
)
)
deflection_y1 = np.arctanh(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 0]),
np.add(psi1, self.axis_ratio ** 2.0 * core_radius_1),
)
)
deflection_x1 = np.arctan(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 1]),
np.add(psi1, core_radius_1),
)
)
deflection_y = np.subtract(deflection_y0, deflection_y1)
deflection_x = np.subtract(deflection_x0, deflection_x1)
return self.rotate_grid_from_reference_frame(
np.multiply(factor, np.vstack((deflection_y, deflection_x)).T)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_elliptical_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""Calculate the intensity of the Chamelon light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile for each coordinate on the grid.
"""
axis_ratio_factor = (1.0 + self.axis_ratio) ** 2.0
return np.multiply(
self.intensity / (1 + self.axis_ratio),
np.add(
np.divide(
1.0,
np.sqrt(
np.add(
np.square(grid_radii),
(4.0 * self.core_radius_0 ** 2.0) / axis_ratio_factor,
)
),
),
-np.divide(
1.0,
np.sqrt(
np.add(
np.square(grid_radii),
(4.0 * self.core_radius_1 ** 2.0) / axis_ratio_factor,
)
),
),
),
)
@property
def axis_ratio(self):
axis_ratio = super().axis_ratio
return axis_ratio if axis_ratio < 0.99999 else 0.99999
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
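# --- Editor's sketch (not part of the original module). The Chameleon profile
# above is the difference of two cored isothermal profiles, so its surface
# brightness at elliptical radius r is
#
#   I(r) = intensity / (1 + q) * (1 / sqrt(r**2 + 4 * a0**2 / (1 + q)**2)
#                                 - 1 / sqrt(r**2 + 4 * a1**2 / (1 + q)**2))
#
# with a0 = core_radius_0 and a1 = core_radius_1, matching
# `image_2d_via_radii_from`. A scalar version, assuming numpy as np from the
# module imports; all argument values are hypothetical.
def _sketch_chameleon(r, intensity=0.1, q=0.7, a0=0.01, a1=0.02):
    factor = (1.0 + q) ** 2.0
    return (intensity / (1 + q)) * (
        1.0 / np.sqrt(r ** 2 + 4.0 * a0 ** 2 / factor)
        - 1.0 / np.sqrt(r ** 2 + 4.0 * a1 ** 2 / factor)
    )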
class SphChameleon(EllChameleon):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
core_radius_0: float = 0.01,
core_radius_1: float = 0.02,
mass_to_light_ratio: float = 1.0,
):
"""
The spherical Chameleon mass profile.
Profile form:
mass_to_light_ratio * intensity *\
(1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0))
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared to, which is expected to be electrons per second).
core_radius_0
The core size of the first cored Isothermal profile.
core_radius_1
core_radius_0 + core_radius_1 is the core size of the second cored Isothermal profile;
we use core_radius_1 here to avoid negative values.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
core_radius_0=core_radius_0,
core_radius_1=core_radius_1,
mass_to_light_ratio=mass_to_light_ratio,
)
def cse_settings_from(
effective_radius, sersic_index, sersic_constant, mass_to_light_gradient
):
if mass_to_light_gradient > 0.5:
if effective_radius > 0.2:
lower_dex = 6.0
upper_dex = np.min(
[np.log10((18.0 / sersic_constant) ** sersic_index), 1.1]
)
if sersic_index <= 1.2:
total_cses = 50
sample_points = 80
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 6.5
else:
total_cses = 30
sample_points = 50
else:
if sersic_index <= 1.2:
upper_dex = 1.0
total_cses = 50
sample_points = 80
lower_dex = 4.5
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 6.0
upper_dex = 1.5
else:
upper_dex = 1.1
lower_dex = 6.0
total_cses = 30
sample_points = 50
else:
upper_dex = np.min(
[
np.log10((23.0 / sersic_constant) ** sersic_index),
0.85 - np.log10(effective_radius),
]
)
if (sersic_index <= 0.9) and (sersic_index > 0.8):
total_cses = 50
sample_points = 80
upper_dex = np.log10((18.0 / sersic_constant) ** sersic_index)
lower_dex = 4.3 + np.log10(effective_radius)
elif sersic_index <= 0.8:
total_cses = 50
sample_points = 80
upper_dex = np.log10((16.0 / sersic_constant) ** sersic_index)
lower_dex = 4.0 + np.log10(effective_radius)
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 4.5 + np.log10(effective_radius)
else:
lower_dex = 3.5 + np.log10(effective_radius)
total_cses = 30
sample_points = 50
return upper_dex, lower_dex, total_cses, sample_points
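# --- Editor's sketch (not part of the original module). Example of how the
# empirically tuned settings above are consumed, with hypothetical inputs:
# for a de Vaucouleurs-like profile (sersic_index=4.0, sersic_constant~7.669)
# with no mass-to-light gradient, the function returns the radial fitting
# range in dex plus the number of cse's and sample points used by
# `decompose_convergence_via_cse`.
def _sketch_cse_settings():
    upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
        effective_radius=0.6,
        sersic_index=4.0,
        sersic_constant=7.669,
        mass_to_light_gradient=0.0,
    )
    return upper_dex, lower_dex, total_cses, sample_points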
| 36.372667 | 129 | 0.560604 | 6,276 | 54,559 | 4.647068 | 0.058158 | 0.018927 | 0.034699 | 0.038951 | 0.890588 | 0.870701 | 0.85246 | 0.835522 | 0.801166 | 0.789782 | 0 | 0.026702 | 0.357503 | 54,559 | 1,499 | 130 | 36.396931 | 0.8053 | 0.291813 | 0 | 0.647399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078613 | false | 0.001156 | 0.010405 | 0.018497 | 0.169942 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d2f89e6b57c9a1b93947576a30ec79f4c0bc634e | 88 | py | Python | Workflow/packages/__init__.py | MATS64664-2021-Group-2/Hydride-Connect-Group-2 | fa95d38174ffd85461bf66f923c38a3908a469a7 | ["MIT"] | null | null | null | Workflow/packages/__init__.py | MATS64664-2021-Group-2/Hydride-Connect-Group-2 | fa95d38174ffd85461bf66f923c38a3908a469a7 | ["MIT"] | 2 | 2021-04-12T20:30:48.000Z | 2021-05-24T14:07:24.000Z | Workflow/packages/__init__.py | MATS64664-2021-Group-2/Hydride_Connection | fa95d38174ffd85461bf66f923c38a3908a469a7 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 11:31:06 2021
@author: a77510jm
"""
| 11 | 35 | 0.579545 | 14 | 88 | 3.642857 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.257143 | 0.204545 | 88 | 7 | 36 | 12.571429 | 0.471429 | 0.875 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d2ff009598eedc70cbe497c5d19827bdffd07954 | 144,055 | py | Python | test/test_parameters.py | HubukiNinten/imgaug | 2570c5651ed1c90addbaffc0f8be226646c55334 | ["MIT"] | 1 | 2019-10-25T17:43:20.000Z | 2019-10-25T17:43:20.000Z | test/test_parameters.py | HubukiNinten/imgaug | 2570c5651ed1c90addbaffc0f8be226646c55334 | ["MIT"] | null | null | null | test/test_parameters.py | HubukiNinten/imgaug | 2570c5651ed1c90addbaffc0f8be226646c55334 | ["MIT"] | null | null | null |
from __future__ import print_function, division, absolute_import
import itertools
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage
import skimage.data
import skimage.morphology
import scipy
import scipy.special
import imgaug as ia
import imgaug.random as iarandom
from imgaug import parameters as iap
from imgaug.testutils import reseed
def _eps(arr):
if ia.is_np_array(arr) and arr.dtype.kind == "f":
return np.finfo(arr.dtype).eps
return 1e-4
class Test_handle_continuous_param(unittest.TestCase):
def test_value_range_is_none(self):
result = iap.handle_continuous_param(
1, "[test1]",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_nones(self):
result = iap.handle_continuous_param(
1, "[test1b]",
value_range=(None, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_stochastic_parameter(self):
result = iap.handle_continuous_param(
iap.Deterministic(1), "[test2]",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_integers(self):
result = iap.handle_continuous_param(
1, "[test3]",
value_range=(0, 10),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range(self):
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test4]",
value_range=(2, 12),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test4]" in str(context.exception))
def test_param_is_inside_value_range_and_no_lower_bound(self):
# value within value range (without lower bound)
result = iap.handle_continuous_param(
1, "[test5]",
value_range=(None, 12),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range_and_no_lower_bound(self):
# value outside of value range (without lower bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test6]",
value_range=(None, 0),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test6]" in str(context.exception))
def test_param_is_inside_value_range_and_no_upper_bound(self):
# value within value range (without upper bound)
result = iap.handle_continuous_param(
1, "[test7]",
value_range=(-1, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range_and_no_upper_bound(self):
# value outside of value range (without upper bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test8]",
value_range=(2, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test8]" in str(context.exception))
def test_tuple_as_value_but_no_tuples_allowed(self):
# tuple as value, but no tuples allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test9]",
value_range=None,
tuple_to_uniform=False,
list_to_choice=True)
self.assertTrue("[test9]" in str(context.exception))
def test_tuple_as_value_and_tuples_allowed(self):
# tuple as value and tuple allowed
result = iap.handle_continuous_param(
(1, 2), "[test10]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Uniform))
def test_tuple_as_value_and_tuples_allowed_and_inside_value_range(self):
# tuple as value and tuple allowed and tuple within value range
result = iap.handle_continuous_param(
(1, 2), "[test11]",
value_range=(0, 10),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Uniform))
def test_tuple_value_and_allowed_and_partially_outside_value_range(self):
# tuple as value and tuple allowed and tuple partially outside of
# value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test12]",
value_range=(1.5, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test12]" in str(context.exception))
def test_tuple_value_and_allowed_and_fully_outside_value_range(self):
# tuple as value and tuple allowed and tuple fully outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test13]",
value_range=(3, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test13]" in str(context.exception))
def test_list_as_value_but_no_lists_allowed(self):
# list as value, but no list allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2, 3], "[test14]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=False)
self.assertTrue("[test14]" in str(context.exception))
def test_list_as_value_and_lists_allowed(self):
# list as value and list allowed
result = iap.handle_continuous_param(
[1, 2, 3], "[test15]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Choice))
def test_list_value_and_allowed_and_partially_outside_value_range(self):
# list as value and list allowed and list partially outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2], "[test16]",
value_range=(1.5, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test16]" in str(context.exception))
def test_list_value_and_allowed_and_fully_outside_of_value_range(self):
# list as value and list allowed and list fully outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2], "[test17]",
value_range=(3, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test17]" in str(context.exception))
def test_value_inside_value_range_and_value_range_given_as_callable(self):
# single value within value range given as callable
def _value_range(x):
return -1 < x < 1
result = iap.handle_continuous_param(
1, "[test18]",
value_range=_value_range,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_bad_datatype_as_value_range(self):
# bad datatype for value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test19]",
value_range=False,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(
"Unexpected input for value_range" in str(context.exception))
class Test_handle_discrete_param(unittest.TestCase):
def test_float_value_inside_value_range_but_no_floats_allowed(self):
# float value without value range when no float value is allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1.5, "[test0]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.assertTrue("[test0]" in str(context.exception))
def test_value_range_is_none(self):
# value without value range
result = iap.handle_discrete_param(
1, "[test1]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_nones(self):
# value without value range as (None, None)
result = iap.handle_discrete_param(
1, "[test1b]", value_range=(None, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_is_stochastic_parameter(self):
# stochastic parameter
result = iap.handle_discrete_param(
iap.Deterministic(1), "[test2]", value_range=None,
tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_inside_value_range(self):
# value within value range
result = iap.handle_discrete_param(
1, "[test3]", value_range=(0, 10), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range(self):
# value outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test4]", value_range=(2, 12), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test4]" in str(context.exception))
def test_value_inside_value_range_no_lower_bound(self):
# value within value range (without lower bound)
result = iap.handle_discrete_param(
1, "[test5]", value_range=(None, 12), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range_no_lower_bound(self):
# value outside of value range (without lower bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test6]", value_range=(None, 0), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test6]" in str(context.exception))
def test_value_inside_value_range_no_upper_bound(self):
# value within value range (without upper bound)
result = iap.handle_discrete_param(
1, "[test7]", value_range=(-1, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range_no_upper_bound(self):
# value outside of value range (without upper bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test8]", value_range=(2, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test8]" in str(context.exception))
def test_value_is_tuple_but_no_tuples_allowed(self):
# tuple as value, but no tuples allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 2), "[test9]", value_range=None, tuple_to_uniform=False,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test9]" in str(context.exception))
def test_value_is_tuple_and_tuples_allowed(self):
# tuple as value and tuple allowed
result = iap.handle_discrete_param(
(1, 2), "[test10]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_inside_value_range(self):
# tuple as value and tuple allowed and tuple within value range
result = iap.handle_discrete_param(
(1, 2), "[test11]", value_range=(0, 10), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_inside_vr_allow_floats_false(self):
# tuple as value and tuple allowed and tuple within value range with
# allow_floats=False
result = iap.handle_discrete_param(
(1, 2), "[test11b]", value_range=(0, 10),
tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_partially_outside_value_range(self):
# tuple as value and tuple allowed and tuple partially outside of
# value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 3), "[test12]", value_range=(2, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test12]" in str(context.exception))
def test_value_tuple_and_allowed_and_fully_outside_value_range(self):
# tuple as value and tuple allowed and tuple fully outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 2), "[test13]", value_range=(3, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test13]" in str(context.exception))
def test_value_list_but_not_allowed(self):
# list as value, but no list allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 2, 3], "[test14]", value_range=None, tuple_to_uniform=True,
list_to_choice=False, allow_floats=True)
self.assertTrue("[test14]" in str(context.exception))
def test_value_list_and_allowed(self):
# list as value and list allowed
result = iap.handle_discrete_param(
[1, 2, 3], "[test15]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Choice))
def test_value_list_and_allowed_and_partially_outside_value_range(self):
# list as value and list allowed and list partially outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 3], "[test16]", value_range=(2, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test16]" in str(context.exception))
def test_value_list_and_allowed_and_fully_outside_value_range(self):
# list as value and list allowed and list fully outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 2], "[test17]", value_range=(3, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test17]" in str(context.exception))
def test_value_inside_value_range_given_as_callable(self):
# single value within value range given as callable
def _value_range(x):
return -1 < x < 1
result = iap.handle_discrete_param(
1, "[test18]",
value_range=_value_range,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_bad_datatype_as_value_range(self):
# bad datatype for value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test19]", value_range=False, tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(
"Unexpected input for value_range" in str(context.exception))
class Test_handle_categorical_string_param(unittest.TestCase):
def test_arg_is_all(self):
valid_values = ["class1", "class2"]
param = iap.handle_categorical_string_param(
ia.ALL, "foo", valid_values)
assert isinstance(param, iap.Choice)
assert param.a == valid_values
def test_arg_is_valid_str(self):
valid_values = ["class1", "class2"]
param = iap.handle_categorical_string_param(
"class1", "foo", valid_values)
assert isinstance(param, iap.Deterministic)
assert param.value == "class1"
def test_arg_is_invalid_str(self):
valid_values = ["class1", "class2"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
"class3", "foo", valid_values)
expected = (
"Expected parameter 'foo' to be one of: class1, class2. "
"Got: class3.")
assert expected == str(ctx.exception)
def test_arg_is_valid_list(self):
valid_values = ["class1", "class2", "class3"]
param = iap.handle_categorical_string_param(
["class1", "class3"], "foo", valid_values)
assert isinstance(param, iap.Choice)
assert param.a == ["class1", "class3"]
def test_arg_is_list_with_invalid_types(self):
valid_values = ["class1", "class2", "class3"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
["class1", False], "foo", valid_values)
expected = (
"Expected list provided for parameter 'foo' to only contain "
"strings, got types: str, bool."
)
assert expected in str(ctx.exception)
def test_arg_is_invalid_list(self):
valid_values = ["class1", "class2", "class3"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
["class1", "class4"], "foo", valid_values)
expected = (
"Expected list provided for parameter 'foo' to only contain "
"the following allowed strings: class1, class2, class3. "
"Got strings: class1, class4."
)
assert expected in str(ctx.exception)
def test_arg_is_stochastic_param(self):
param = iap.Deterministic("class1")
param_out = iap.handle_categorical_string_param(
param, "foo", ["class1"])
assert param_out is param
def test_arg_is_invalid_datatype(self):
with self.assertRaises(Exception) as ctx:
_ = iap.handle_categorical_string_param(
False, "foo", ["class1"])
expected = "Expected parameter 'foo' to be imgaug.ALL"
assert expected in str(ctx.exception)
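# --- Editor's note (not part of the original test module). In summary, the
# tests above show `iap.handle_categorical_string_param` mapping:
#   * ia.ALL                -> iap.Choice(valid_values)
#   * a valid string        -> iap.Deterministic(string)
#   * list of valid strings -> iap.Choice(list)
#   * StochasticParameter   -> passed through unchanged
# while invalid strings, lists containing non-string entries and other
# datatypes raise assertion errors.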
class Test_handle_probability_param(unittest.TestCase):
def test_bool_like_values(self):
for val in [True, False, 0, 1, 0.0, 1.0]:
with self.subTest(param=val):
p = iap.handle_probability_param(val, "[test1]")
assert isinstance(p, iap.Deterministic)
assert p.value == int(val)
def test_float_probabilities(self):
for val in [0.0001, 0.001, 0.01, 0.1, 0.9, 0.99, 0.999, 0.9999]:
with self.subTest(param=val):
p = iap.handle_probability_param(val, "[test2]")
assert isinstance(p, iap.Binomial)
assert isinstance(p.p, iap.Deterministic)
assert val-1e-8 < p.p.value < val+1e-8
def test_probability_is_stochastic_parameter(self):
det = iap.Deterministic(1)
p = iap.handle_probability_param(det, "[test3]")
assert p == det
def test_probability_has_bad_datatype(self):
with self.assertRaises(Exception) as context:
_p = iap.handle_probability_param("test", "[test4]")
self.assertTrue("Expected " in str(context.exception))
def test_probability_is_negative(self):
with self.assertRaises(AssertionError):
_p = iap.handle_probability_param(-0.01, "[test5]")
def test_probability_is_above_100_percent(self):
with self.assertRaises(AssertionError):
_p = iap.handle_probability_param(1.01, "[test6]")
class Test_force_np_float_dtype(unittest.TestCase):
def test_common_dtypes(self):
dtypes = [
("float16", "float16"),
("float32", "float32"),
("float64", "float64"),
("uint8", "float64"),
("int32", "float64")
]
for dtype_in, expected in dtypes:
with self.subTest(dtype_in=dtype_in):
arr = np.zeros((1,), dtype=dtype_in)
observed = iap.force_np_float_dtype(arr).dtype
assert observed.name == expected
class Test_both_np_float_if_one_is_float(unittest.TestCase):
def test_float16_float32(self):
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.float32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float16"
assert b2.dtype.name == "float32"
def test_float16_int32(self):
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.int32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float16"
assert b2.dtype.name == "float64"
def test_int32_float16(self):
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.float16)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float64"
assert b2.dtype.name == "float16"
def test_int32_uint8(self):
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.uint8)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float64"
assert b2.dtype.name == "float64"
class Test_draw_distributions_grid(unittest.TestCase):
def setUp(self):
reseed()
def test_basic_functionality(self):
params = [mock.Mock(), mock.Mock()]
params[0].draw_distribution_graph.return_value = \
np.zeros((1, 1, 3), dtype=np.uint8)
params[1].draw_distribution_graph.return_value = \
np.zeros((1, 1, 3), dtype=np.uint8)
draw_grid_mock = mock.Mock()
draw_grid_mock.return_value = np.zeros((4, 3, 2), dtype=np.uint8)
with mock.patch('imgaug.imgaug.draw_grid', draw_grid_mock):
grid_observed = iap.draw_distributions_grid(
params, rows=2, cols=3, graph_sizes=(20, 21),
sample_sizes=[(1, 2), (3, 4)], titles=["A", "B"])
assert grid_observed.shape == (4, 3, 2)
assert params[0].draw_distribution_graph.call_count == 1
assert params[1].draw_distribution_graph.call_count == 1
assert params[0].draw_distribution_graph.call_args[1]["size"] == (1, 2)
assert params[0].draw_distribution_graph.call_args[1]["title"] == "A"
assert params[1].draw_distribution_graph.call_args[1]["size"] == (3, 4)
assert params[1].draw_distribution_graph.call_args[1]["title"] == "B"
assert draw_grid_mock.call_count == 1
assert draw_grid_mock.call_args[0][0][0].shape == (20, 21, 3)
assert draw_grid_mock.call_args[0][0][1].shape == (20, 21, 3)
assert draw_grid_mock.call_args[1]["rows"] == 2
assert draw_grid_mock.call_args[1]["cols"] == 3
class Test_draw_distributions_graph(unittest.TestCase):
def test_basic_functionality(self):
# this test is very rough as we get a not-very-well-defined image out
# of the function
param = iap.Uniform(0.0, 1.0)
graph_img = param.draw_distribution_graph(title=None, size=(10000,),
bins=100)
# at least 10% of the image should be white-ish (background)
nb_white = np.sum(graph_img[..., :] > [200, 200, 200])
nb_all = np.prod(graph_img.shape)
graph_img_title = param.draw_distribution_graph(title="test",
size=(10000,),
bins=100)
assert graph_img.ndim == 3
assert graph_img.shape[2] == 3
assert nb_white > 0.1 * nb_all
assert graph_img_title.ndim == 3
assert graph_img_title.shape[2] == 3
assert not np.array_equal(graph_img_title, graph_img)
class TestStochasticParameter(unittest.TestCase):
def setUp(self):
reseed()
def test_copy(self):
other_param = iap.Uniform(1.0, 10.0)
param = iap.Discretize(other_param)
other_param.a = [1.0]
param_copy = param.copy()
param.other_param.a[0] += 1
assert isinstance(param_copy, iap.Discretize)
assert isinstance(param_copy.other_param, iap.Uniform)
assert param_copy.other_param.a[0] == param.other_param.a[0]
def test_deepcopy(self):
other_param = iap.Uniform(1.0, 10.0)
param = iap.Discretize(other_param)
other_param.a = [1.0]
param_copy = param.deepcopy()
param.other_param.a[0] += 1
assert isinstance(param_copy, iap.Discretize)
assert isinstance(param_copy.other_param, iap.Uniform)
assert param_copy.other_param.a[0] != param.other_param.a[0]
class TestStochasticParameterOperators(unittest.TestCase):
def setUp(self):
reseed()
    def test_multiply_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 * param2
assert isinstance(param3, iap.Multiply)
assert param3.other_param == param1
assert param3.val == param2
def test_multiply_stochastic_param_with_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 * 2
assert isinstance(param3, iap.Multiply)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_multiply_integer_with_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 * param1
assert isinstance(param3, iap.Multiply)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_multiply_string_with_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" * param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_multiply_stochastic_param_with_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 * "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_divide_stochastic_params(self):
# Divide (__truediv__)
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 / param2
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert param3.val == param2
def test_divide_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 / 2
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_divide_integer_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 / param1
assert isinstance(param3, iap.Divide)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_divide_string_by_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" / param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_divide_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 / "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_div_stochastic_params(self):
# Divide (__div__)
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1.__div__(param2)
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert param3.val == param2
def test_div_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1.__div__(2)
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_div_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1.__div__("test")
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_rdiv_stochastic_param_by_integer(self):
# Divide (__rdiv__)
param1 = iap.Normal(0, 1)
param3 = param1.__rdiv__(2)
assert isinstance(param3, iap.Divide)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_rdiv_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1.__rdiv__("test")
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_floordiv_stochastic_params(self):
# Divide (__floordiv__)
param1_int = iap.DiscreteUniform(0, 10)
param2_int = iap.Choice([1, 2])
param3 = param1_int // param2_int
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert param3.other_param.other_param == param1_int
assert param3.other_param.val == param2_int
def test_floordiv_symbol_stochastic_param_by_integer(self):
param1_int = iap.DiscreteUniform(0, 10)
param3 = param1_int // 2
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert param3.other_param.other_param == param1_int
assert isinstance(param3.other_param.val, iap.Deterministic)
assert param3.other_param.val.value == 2
def test_floordiv_symbol_integer_by_stochastic_param(self):
param1_int = iap.DiscreteUniform(0, 10)
param3 = 2 // param1_int
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert isinstance(param3.other_param.other_param, iap.Deterministic)
assert param3.other_param.other_param.value == 2
assert param3.other_param.val == param1_int
def test_floordiv_symbol_string_by_stochastic_should_fail(self):
param1_int = iap.DiscreteUniform(0, 10)
with self.assertRaises(Exception) as context:
_ = "test" // param1_int
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_floordiv_symbol_stochastic_param_by_string_should_fail(self):
param1_int = iap.DiscreteUniform(0, 10)
with self.assertRaises(Exception) as context:
_ = param1_int // "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_add_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 + param2
assert isinstance(param3, iap.Add)
assert param3.other_param == param1
assert param3.val == param2
def test_add_integer_to_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = param1 + 2
assert isinstance(param3, iap.Add)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_add_stochastic_param_to_integer(self):
param1 = iap.Normal(0, 1)
param3 = 2 + param1
assert isinstance(param3, iap.Add)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_add_stochastic_param_to_string(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" + param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_add_string_to_stochastic_param(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 + "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_subtract_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 - param2
assert isinstance(param3, iap.Subtract)
assert param3.other_param == param1
assert param3.val == param2
def test_subtract_integer_from_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = param1 - 2
assert isinstance(param3, iap.Subtract)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_subtract_stochastic_param_from_integer(self):
param1 = iap.Normal(0, 1)
param3 = 2 - param1
assert isinstance(param3, iap.Subtract)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_subtract_stochastic_param_from_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" - param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_subtract_string_from_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 - "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_exponentiate_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 ** param2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert param3.val == param2
def test_exponentiate_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 ** 2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_exponentiate_integer_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 ** param1
assert isinstance(param3, iap.Power)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_exponentiate_string_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" ** param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_exponentiate_stochastic_param_by_string(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 ** "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
class TestBinomial(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p_is_zero(self):
param = iap.Binomial(0)
assert (
param.__str__()
== param.__repr__()
== "Binomial(Deterministic(int 0))"
)
def test___init___p_is_one(self):
param = iap.Binomial(1.0)
assert (
param.__str__()
== param.__repr__()
== "Binomial(Deterministic(float 1.00000000))"
)
def test_p_is_zero(self):
param = iap.Binomial(0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 0
assert np.all(samples == 0)
def test_p_is_one(self):
param = iap.Binomial(1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
def test_p_is_50_percent(self):
param = iap.Binomial(0.5)
sample = param.draw_sample()
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert sample.shape == tuple()
assert samples.shape == (10000,)
assert sample in [0, 1]
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 5000 - 500 < count < 5000 + 500
elif val == 1:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
def test_p_is_list(self):
param = iap.Binomial(iap.Choice([0.25, 0.75]))
for _ in sm.xrange(10):
samples = param.draw_samples((1000,))
p = np.sum(samples) / samples.size
assert (
(0.25 - 0.05 < p < 0.25 + 0.05)
or (0.75 - 0.05 < p < 0.75 + 0.05)
)
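    # a tuple (a, b) is interpreted as a continuous range for p, so the
    # observed rate of 1s should change between draw_samples() calls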
def test_p_is_tuple(self):
param = iap.Binomial((0.0, 1.0))
last_p = 0.5
diffs = []
for _ in sm.xrange(30):
samples = param.draw_samples((1000,))
p = np.sum(samples).astype(np.float32) / samples.size
diffs.append(abs(p - last_p))
last_p = p
nb_p_changed = sum([diff > 0.05 for diff in diffs])
assert nb_p_changed > 15
def test_samples_same_values_for_same_seeds(self):
param = iap.Binomial(0.5)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestChoice(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Choice([0, 1, 2])
assert (
param.__str__()
== param.__repr__()
== "Choice(a=[0, 1, 2], replace=True, p=None)"
)
def test_value_is_list(self):
param = iap.Choice([0, 1, 2])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(
np.logical_or(
np.logical_or(samples == 0, samples == 1),
samples == 2
)
)
def test_sampled_values_match_expected_counts(self):
param = iap.Choice([0, 1, 2])
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert (
expected - expected_tolerance
< count <
expected + expected_tolerance
)
def test_value_is_list_containing_negative_number(self):
param = iap.Choice([-1, 1])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 1]
assert np.all(np.logical_or(samples == -1, samples == 1))
def test_value_is_list_of_floats(self):
param = iap.Choice([-1.2, 1.7])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert (
(
-1.2 - _eps(sample)
< sample <
-1.2 + _eps(sample)
)
or
(
1.7 - _eps(sample)
< sample <
1.7 + _eps(sample)
)
)
assert np.all(
np.logical_or(
np.logical_and(
-1.2 - _eps(sample) < samples,
samples < -1.2 + _eps(sample)
),
np.logical_and(
1.7 - _eps(sample) < samples,
samples < 1.7 + _eps(sample)
)
)
)
def test_value_is_list_of_strings(self):
param = iap.Choice(["first", "second", "third"])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in ["first", "second", "third"]
assert np.all(
np.logical_or(
np.logical_or(
samples == "first",
samples == "second"
),
samples == "third"
)
)
def test_sample_without_replacing(self):
param = iap.Choice([1+i for i in sm.xrange(100)], replace=False)
samples = param.draw_samples((50,))
seen = [0 for _ in sm.xrange(100)]
for sample in samples:
seen[sample-1] += 1
assert all([count in [0, 1] for count in seen])
def test_non_uniform_probabilities_over_elements(self):
param = iap.Choice([0, 1], p=[0.25, 0.75])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 2500 - 500 < count < 2500 + 500
elif val == 1:
assert 7500 - 500 < count < 7500 + 500
else:
assert False
def test_list_contains_stochastic_parameter(self):
param = iap.Choice([iap.Choice([0, 1]), 2])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 3
for val, count in zip(unique, counts):
if val in [0, 1]:
assert 2500 - 500 < count < 2500 + 500
elif val == 2:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
def test_samples_same_values_for_same_seeds(self):
param = iap.Choice([-1, 0, 1, 2, 3])
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
def test_value_is_bad_datatype(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice(123)
self.assertTrue(
"Expected a to be an iterable" in str(context.exception))
def test_p_is_bad_datatype(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice([1, 2], p=123)
self.assertTrue("Expected p to be" in str(context.exception))
def test_value_and_p_have_unequal_lengths(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice([1, 2], p=[1])
self.assertTrue("Expected lengths of" in str(context.exception))
class TestDiscreteUniform(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.DiscreteUniform(0, 2)
assert (
param.__str__()
== param.__repr__()
== "DiscreteUniform(Deterministic(int 0), Deterministic(int 2))"
)
def test_bounds_are_ints(self):
param = iap.DiscreteUniform(0, 2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(
np.logical_or(
np.logical_or(samples == 0, samples == 1),
samples == 2
)
)
def test_samples_match_expected_counts(self):
param = iap.DiscreteUniform(0, 2)
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert (
expected - expected_tolerance
< count <
expected + expected_tolerance
)
def test_lower_bound_is_negative(self):
param = iap.DiscreteUniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(samples == -1, samples == 0),
samples == 1
)
)
def test_bounds_are_floats(self):
param = iap.DiscreteUniform(-1.2, 1.2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(
samples == -1, samples == 0
),
samples == 1
)
)
def test_lower_and_upper_bound_have_wrong_order(self):
param = iap.DiscreteUniform(1, -1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(
samples == -1, samples == 0
),
samples == 1
)
)
def test_lower_and_upper_bound_are_the_same(self):
param = iap.DiscreteUniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((100,))
assert sample == 1
assert np.all(samples == 1)
def test_samples_same_values_for_same_seeds(self):
        param = iap.DiscreteUniform(-1, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestPoisson(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Poisson(1)
assert (
param.__str__()
== param.__repr__()
== "Poisson(Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Poisson(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_poisson(self):
param = iap.Poisson(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).poisson(
lam=1, size=(100, 1000))
assert samples.shape == (100, 1000)
for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
count_direct = int(np.sum(samples_direct == i))
count = np.sum(samples == i)
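            # allow 10% relative deviation, but at least 250 in absolute
            # terms to keep sparsely filled bins from making the test flaky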
tolerance = max(count_direct * 0.1, 250)
assert count_direct - tolerance < count < count_direct + tolerance
def test_samples_same_values_for_same_seeds(self):
param = iap.Poisson(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestNormal(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Normal(0, 1)
assert (
param.__str__()
== param.__repr__()
== "Normal(loc=Deterministic(int 0), scale=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Normal(0, 1)
sample = param.draw_sample()
assert sample.shape == tuple()
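    # clip both sample sets to [-1, 1] and compare their per-bin densities
    # against a direct numpy draw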
def test_via_comparison_to_np_normal(self):
param = iap.Normal(0, 1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).normal(loc=0, scale=1,
size=(100, 1000))
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_loc_is_stochastic_parameter(self):
param = iap.Normal(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_scale(self):
param1 = iap.Normal(0, 1)
param2 = iap.Normal(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.std(samples1) < np.std(samples2)
assert 100 - 10 < np.std(samples2) < 100 + 10
def test_samples_same_values_for_same_seeds(self):
param = iap.Normal(0, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestTruncatedNormal(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.TruncatedNormal(0, 1)
expected = (
"TruncatedNormal("
"loc=Deterministic(int 0), "
"scale=Deterministic(int 1), "
"low=Deterministic(float -inf), "
"high=Deterministic(float inf)"
")"
)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test___init___custom_range(self):
param = iap.TruncatedNormal(0, 1, low=-100, high=50.0)
expected = (
"TruncatedNormal("
"loc=Deterministic(int 0), "
"scale=Deterministic(int 1), "
"low=Deterministic(int -100), "
"high=Deterministic(float 50.00000000)"
")"
)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test_scale_is_zero(self):
param = iap.TruncatedNormal(0.5, 0, low=-10, high=10)
samples = param.draw_samples((100,))
assert np.allclose(samples, 0.5)
def test_scale(self):
param1 = iap.TruncatedNormal(0.0, 0.1, low=-100, high=100)
param2 = iap.TruncatedNormal(0.0, 5.0, low=-100, high=100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.std(samples1) < np.std(samples2)
assert np.isclose(np.std(samples1), 0.1, rtol=0, atol=0.20)
assert np.isclose(np.std(samples2), 5.0, rtol=0, atol=0.40)
def test_loc_is_stochastic_parameter(self):
param = iap.TruncatedNormal(iap.Choice([-100, 100]), 0.01,
low=-1000, high=1000)
seen = [0, 0]
for _ in sm.xrange(200):
samples = param.draw_samples((5,))
observed = np.mean(samples)
dist1 = np.abs(-100 - observed)
dist2 = np.abs(100 - observed)
if dist1 < 1:
seen[0] += 1
elif dist2 < 1:
seen[1] += 1
else:
assert False
assert np.isclose(seen[0], 100, rtol=0, atol=20)
assert np.isclose(seen[1], 100, rtol=0, atol=20)
def test_samples_are_within_bounds(self):
param = iap.TruncatedNormal(0, 10.0, low=-5, high=7.5)
samples = param.draw_samples((1000,))
# are all within bounds
assert np.all(samples >= -5.0 - 1e-4)
assert np.all(samples <= 7.5 + 1e-4)
# at least some samples close to bounds
assert np.any(samples <= -4.5)
assert np.any(samples >= 7.0)
# at least some samples close to loc
assert np.any(np.abs(samples) < 0.5)
def test_samples_same_values_for_same_seeds(self):
param = iap.TruncatedNormal(0, 1)
samples1 = param.draw_samples((10, 5), random_state=1234)
samples2 = param.draw_samples((10, 5), random_state=1234)
assert np.allclose(samples1, samples2)
def test_samples_different_values_for_different_seeds(self):
param = iap.TruncatedNormal(0, 1)
samples1 = param.draw_samples((10, 5), random_state=1234)
samples2 = param.draw_samples((10, 5), random_state=2345)
assert not np.allclose(samples1, samples2)
class TestLaplace(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Laplace(0, 1)
assert (
param.__str__()
== param.__repr__()
== "Laplace(loc=Deterministic(int 0), scale=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Laplace(0, 1)
sample = param.draw_sample()
assert sample.shape == tuple()
def test_via_comparison_to_np_laplace(self):
param = iap.Laplace(0, 1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).laplace(loc=0, scale=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_loc_is_stochastic_parameter(self):
param = iap.Laplace(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_scale(self):
param1 = iap.Laplace(0, 1)
param2 = iap.Laplace(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
def test_scale_is_zero(self):
param1 = iap.Laplace(1, 0)
samples = param1.draw_samples((100,))
assert np.all(np.logical_and(
samples > 1 - _eps(samples),
samples < 1 + _eps(samples)
))
def test_samples_same_values_for_same_seeds(self):
param = iap.Laplace(0, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestChiSquare(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.ChiSquare(1)
assert (
param.__str__()
== param.__repr__()
== "ChiSquare(df=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.ChiSquare(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_chisquare(self):
param = iap.ChiSquare(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).chisquare(df=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
assert np.all(0 <= samples)
samples = np.clip(samples, 0, 3)
samples_direct = np.clip(samples_direct, 0, 3)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 3.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 3.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_df_is_stochastic_parameter(self):
param = iap.ChiSquare(iap.Choice([1, 10]))
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if 1 - 1.0 < exp < 1 + 1.0:
seen[0] += 1
elif 10 - 4.0 < exp < 10 + 4.0:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_larger_df_leads_to_more_variance(self):
param1 = iap.ChiSquare(1)
param2 = iap.ChiSquare(10)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
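        # the variance of a chi-square distribution with df degrees of
        # freedom is 2*df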
assert 2*1 - 1.0 < np.var(samples1) < 2*1 + 1.0
assert 2*10 - 5.0 < np.var(samples2) < 2*10 + 5.0
def test_samples_same_values_for_same_seeds(self):
param = iap.ChiSquare(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestWeibull(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Weibull(1)
assert (
param.__str__()
== param.__repr__()
== "Weibull(a=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Weibull(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_weibull(self):
param = iap.Weibull(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).weibull(a=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
assert np.all(0 <= samples)
samples = np.clip(samples, 0, 2)
samples_direct = np.clip(samples_direct, 0, 2)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 2.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 2.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_argument_is_stochastic_parameter(self):
param = iap.Weibull(iap.Choice([1, 0.5]))
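        # for a Weibull distribution with shape a (scale 1), the mean is
        # gamma(1 + 1/a)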
expected_first = scipy.special.gamma(1 + 1/1)
expected_second = scipy.special.gamma(1 + 1/0.5)
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((50000,))
observed = np.mean(samples)
matches_first = (
expected_first - 0.2 * expected_first
< observed <
expected_first + 0.2 * expected_first
)
matches_second = (
expected_second - 0.2 * expected_second
< observed <
expected_second + 0.2 * expected_second
)
if matches_first:
seen[0] += 1
elif matches_second:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
def test_different_strengths(self):
param1 = iap.Weibull(1)
param2 = iap.Weibull(0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
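        # variance of a Weibull distribution with shape a (scale 1):
        # gamma(1 + 2/a) - gamma(1 + 1/a)^2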
expected_first = (
scipy.special.gamma(1 + 2/1)
- (scipy.special.gamma(1 + 1/1))**2
)
expected_second = (
scipy.special.gamma(1 + 2/0.5)
- (scipy.special.gamma(1 + 1/0.5))**2
)
assert np.var(samples1) < np.var(samples2)
assert (
expected_first - 0.2 * expected_first
< np.var(samples1) <
expected_first + 0.2 * expected_first
)
assert (
expected_second - 0.2 * expected_second
< np.var(samples2) <
expected_second + 0.2 * expected_second
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Weibull(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestUniform(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Uniform(0, 1.0)
assert (
param.__str__()
== param.__repr__()
== "Uniform(Deterministic(int 0), Deterministic(float 1.00000000))"
)
def test_draw_sample(self):
param = iap.Uniform(0, 1.0)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
def test_draw_samples(self):
param = iap.Uniform(0, 1.0)
samples = param.draw_samples((10, 5))
assert samples.shape == (10, 5)
assert np.all(
np.logical_and(
0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_via_density_histogram(self):
param = iap.Uniform(0, 1.0)
samples = param.draw_samples((10000,))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0.0, 1.0),
density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / samples.size
assert (
density_expected - density_tolerance
< density <
density_expected + density_tolerance
)
def test_negative_value(self):
param = iap.Uniform(-1.0, 1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_wrong_argument_order(self):
param = iap.Uniform(1.0, -1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_arguments_are_integers(self):
param = iap.Uniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_arguments_are_identical(self):
param = iap.Uniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Uniform(-1.0, 1.0)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestBeta(unittest.TestCase):
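    # analytic mean and variance of the Beta distribution, used as
    # reference values in the tests below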
@classmethod
def _mean(cls, alpha, beta):
return alpha / (alpha + beta)
@classmethod
def _var(cls, alpha, beta):
return (alpha * beta) / ((alpha + beta)**2 * (alpha + beta + 1))
def setUp(self):
reseed()
def test___init__(self):
param = iap.Beta(0.5, 0.5)
assert (
param.__str__()
== param.__repr__()
== "Beta("
"Deterministic(float 0.50000000), "
"Deterministic(float 0.50000000)"
")"
)
def test_draw_sample(self):
param = iap.Beta(0.5, 0.5)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
def test_draw_samples(self):
param = iap.Beta(0.5, 0.5)
samples = param.draw_samples((100, 1000))
assert samples.shape == (100, 1000)
assert np.all(
np.logical_and(
0 - _eps(samples) <= samples,
samples <= 1.0 + _eps(samples)
)
)
def test_via_comparison_to_np_beta(self):
param = iap.Beta(0.5, 0.5)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).beta(
a=0.5, b=0.5, size=(100, 1000))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_argument_is_stochastic_parameter(self):
param = iap.Beta(iap.Choice([0.5, 2]), 0.5)
expected_first = self._mean(0.5, 0.5)
expected_second = self._mean(2, 0.5)
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((10000,))
observed = np.mean(samples)
if expected_first - 0.05 < observed < expected_first + 0.05:
seen[0] += 1
elif expected_second - 0.05 < observed < expected_second + 0.05:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
def test_compare_curves_of_different_arguments(self):
param1 = iap.Beta(2, 2)
param2 = iap.Beta(0.5, 0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
expected_first = self._var(2, 2)
expected_second = self._var(0.5, 0.5)
assert np.var(samples1) < np.var(samples2)
assert (
expected_first - 0.1 * expected_first
< np.var(samples1) <
expected_first + 0.1 * expected_first
)
assert (
expected_second - 0.1 * expected_second
< np.var(samples2) <
expected_second + 0.1 * expected_second
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Beta(0.5, 0.5)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestDeterministic(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
pairs = [
(0, "Deterministic(int 0)"),
(1.0, "Deterministic(float 1.00000000)"),
("test", "Deterministic(test)")
]
for value, expected in pairs:
with self.subTest(value=value):
param = iap.Deterministic(value)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test_samples_same_values_for_same_seeds(self):
values = [
-100, -54, -1, 0, 1, 54, 100,
            -100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0
]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
rs1 = iarandom.RNG(123456)
rs2 = iarandom.RNG(123456)
samples1 = param.draw_samples(20, random_state=rs1)
samples2 = param.draw_samples(20, random_state=rs2)
assert np.array_equal(samples1, samples2)
def test_draw_sample_int(self):
values = [-100, -54, -1, 0, 1, 54, 100]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
sample1 = param.draw_sample()
sample2 = param.draw_sample()
assert sample1.shape == tuple()
assert sample1 == sample2
def test_draw_sample_float(self):
        values = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
sample1 = param.draw_sample()
sample2 = param.draw_sample()
assert sample1.shape == tuple()
assert np.isclose(
sample1, sample2, rtol=0, atol=_eps(sample1))
def test_draw_samples_int(self):
values = [-100, -54, -1, 0, 1, 54, 100]
shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
for value, shape in itertools.product(values, shapes):
with self.subTest(value=value, shape=shape):
param = iap.Deterministic(value)
samples = param.draw_samples(shape)
shape_expected = (
shape
if isinstance(shape, tuple)
else tuple([shape]))
assert samples.shape == shape_expected
assert np.all(samples == value)
def test_draw_samples_float(self):
        values = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
for value, shape in itertools.product(values, shapes):
with self.subTest(value=value, shape=shape):
param = iap.Deterministic(value)
samples = param.draw_samples(shape)
shape_expected = (
shape
if isinstance(shape, tuple)
else tuple([shape]))
assert samples.shape == shape_expected
assert np.allclose(samples, value, rtol=0, atol=_eps(samples))
def test_argument_is_stochastic_parameter(self):
seen = [0, 0]
for _ in sm.xrange(200):
param = iap.Deterministic(iap.Choice([0, 1]))
seen[param.value] += 1
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_argument_has_invalid_type(self):
with self.assertRaises(Exception) as context:
_ = iap.Deterministic([1, 2, 3])
self.assertTrue(
"Expected StochasticParameter object or number or string"
in str(context.exception))
class TestFromLowerResolution(unittest.TestCase):
def setUp(self):
reseed()
def test___init___size_percent(self):
param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
size_percent=1, method="nearest")
assert (
param.__str__()
== param.__repr__()
== "FromLowerResolution("
"size_percent=Deterministic(int 1), "
"method=Deterministic(nearest), "
"other_param=Deterministic(int 0)"
")"
)
def test___init___size_px(self):
param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
size_px=1, method="nearest")
assert (
param.__str__()
== param.__repr__()
== "FromLowerResolution("
"size_px=Deterministic(int 1), "
"method=Deterministic(nearest), "
"other_param=Deterministic(int 0)"
")"
)
def test_binomial_hwc(self):
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples = param.draw_samples((8, 8, 1))
uq = np.unique(samples)
assert samples.shape == (8, 8, 1)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_binomial_nhwc(self):
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples_nhwc = param.draw_samples((1, 8, 8, 1))
uq = np.unique(samples_nhwc)
assert samples_nhwc.shape == (1, 8, 8, 1)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_draw_samples_with_too_many_dimensions(self):
# (N, H, W, C, something) causing error
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
with self.assertRaises(Exception) as context:
_ = param.draw_samples((1, 8, 8, 1, 1))
self.assertTrue(
"FromLowerResolution can only generate samples of shape"
in str(context.exception)
)
def test_binomial_hw3(self):
# C=3
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples = param.draw_samples((8, 8, 3))
uq = np.unique(samples)
assert samples.shape == (8, 8, 3)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
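    # a smaller size_px leads to a coarser low-resolution grid and hence,
    # after upscaling, to fewer but larger connected components; the tests
    # below verify this via skimage.morphology.label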
def test_different_size_px_arguments(self):
# different sizes in px
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_different_size_px_arguments_with_tuple(self):
# different sizes in px, one given as tuple (a, b)
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=(2, 16))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(400):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_different_size_px_argument_with_stochastic_parameters(self):
# different sizes in px, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5),
size_px=iap.Deterministic(1))
param2 = iap.FromLowerResolution(iap.Binomial(0.5),
size_px=iap.Choice([8, 16]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_px_has_invalid_datatype(self):
# bad datatype for size_px
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=False)
self.assertTrue("Expected " in str(context.exception))
def test_min_size(self):
# min_size
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=1,
min_size=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent(self):
# different sizes in percent
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.01)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.8)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent_as_stochastic_parameters(self):
# different sizes in percent, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5),
size_percent=iap.Deterministic(0.01))
param2 = iap.FromLowerResolution(iap.Binomial(0.5),
size_percent=iap.Choice([0.4, 0.8]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent_has_invalid_datatype(self):
# bad datatype for size_percent
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=False)
self.assertTrue("Expected " in str(context.exception))
def test_method(self):
# method given as StochasticParameter
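        # "nearest" interpolation keeps the samples binary, while "linear"
        # produces intermediate values; both methods should be picked
        # roughly equally often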
param = iap.FromLowerResolution(
iap.Binomial(0.5), size_px=4,
method=iap.Choice(["nearest", "linear"]))
seen = [0, 0]
for _ in sm.xrange(200):
samples = param.draw_samples((16, 16, 1))
nb_in_between = np.sum(
np.logical_and(0.05 < samples, samples < 0.95))
if nb_in_between == 0:
seen[0] += 1
else:
seen[1] += 1
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_method_has_invalid_datatype(self):
# bad datatype for method
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4,
method=False)
self.assertTrue("Expected " in str(context.exception))
def test_samples_same_values_for_same_seeds(self):
# multiple calls with same random_state
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
samples1 = param.draw_samples((10, 5, 1),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5, 1),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestClip(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Clip(iap.Deterministic(0), -1, 1)
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), -1.000000, 1.000000)"
)
def test_value_within_bounds(self):
param = iap.Clip(iap.Deterministic(0), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 0
assert np.all(samples == 0)
def test_value_exactly_at_upper_bound(self):
param = iap.Clip(iap.Deterministic(1), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
def test_value_exactly_at_lower_bound(self):
param = iap.Clip(iap.Deterministic(-1), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == -1
assert np.all(samples == -1)
def test_value_is_within_bounds_and_float(self):
param = iap.Clip(iap.Deterministic(0.5), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 0.5 - _eps(sample) < sample < 0.5 + _eps(sample)
assert np.all(
np.logical_and(
0.5 - _eps(sample) <= samples,
samples <= 0.5 + _eps(sample)
)
)
def test_value_is_above_upper_bound(self):
param = iap.Clip(iap.Deterministic(2), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
def test_value_is_below_lower_bound(self):
param = iap.Clip(iap.Deterministic(-2), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == -1
assert np.all(samples == -1)
    def test_value_is_sometimes_within_bounds_sometimes_beyond(self):
param = iap.Clip(iap.Choice([0, 2]), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1]
assert np.all(np.logical_or(samples == 0, samples == 1))
def test_samples_same_values_for_same_seeds(self):
param = iap.Clip(iap.Choice([0, 2]), -1, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
def test_lower_bound_is_none(self):
param = iap.Clip(iap.Deterministic(0), None, 1)
sample = param.draw_sample()
assert sample == 0
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), None, 1.000000)"
)
def test_upper_bound_is_none(self):
param = iap.Clip(iap.Deterministic(0), 0, None)
sample = param.draw_sample()
assert sample == 0
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), 0.000000, None)"
)
def test_both_bounds_are_none(self):
param = iap.Clip(iap.Deterministic(0), None, None)
sample = param.draw_sample()
assert sample == 0
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), None, None)"
)
class TestDiscretize(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Discretize(iap.Deterministic(0))
assert (
param.__str__()
== param.__repr__()
== "Discretize(Deterministic(int 0))"
)
def test_applied_to_deterministic(self):
values = [-100.2, -54.3, -1.0, -1, -0.7, -0.00043,
0,
0.00043, 0.7, 1.0, 1, 54.3, 100.2]
for value in values:
with self.subTest(value=value):
param = iap.Discretize(iap.Deterministic(value))
value_expected = np.round(
np.float64([value])
).astype(np.int32)[0]
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == value_expected
assert np.all(samples == value_expected)
# TODO why are these tests applied to DiscreteUniform instead of Uniform?
def test_applied_to_discrete_uniform(self):
param_orig = iap.DiscreteUniform(0, 1)
param = iap.Discretize(param_orig)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1]
assert np.all(np.logical_or(samples == 0, samples == 1))
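    # discretizing an already discrete distribution should roughly
    # preserve its value distribution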
def test_applied_to_discrete_uniform_with_wider_range(self):
param_orig = iap.DiscreteUniform(0, 2)
param = iap.Discretize(param_orig)
samples1 = param_orig.draw_samples((10000,))
samples2 = param.draw_samples((10000,))
assert np.all(np.abs(samples1 - samples2) < 0.2*(10000/3))
def test_samples_same_values_for_same_seeds(self):
param_orig = iap.DiscreteUniform(0, 2)
param = iap.Discretize(param_orig)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Multiply(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Multiply(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_multiply_example_integer_values(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), v2)
samples = p.draw_samples((2, 3))
assert p.draw_sample() == v1 * v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int64) + v1 * v2
)
def test_multiply_example_integer_values_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
samples = p.draw_samples((2, 3))
assert p.draw_sample() == v1 * v2
assert samples.dtype.name == "int32"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int32) + v1 * v2
)
def test_multiply_example_float_values(self):
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 * v2
)
def test_multiply_example_float_values_both_deterministic(self):
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 * v2
)
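    # with elementwise=False, the multiplier sampled from the (1.0, 2.0)
    # range is drawn only once per draw_samples() call, so all values of
    # one call are identical; with elementwise=True it is drawn per element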
def test_multiply_by_stochastic_parameter(self):
param = iap.Multiply(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 1.0 * 2.0 + _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_multiply_by_stochastic_parameter_elementwise(self):
param = iap.Multiply(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 1.0 * 2.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_multiply_stochastic_parameter_by_fixed_value(self):
param = iap.Multiply(iap.Uniform(1.0, 2.0),
1.0,
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 2.0 * 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_multiply_stochastic_parameter_by_fixed_value_elementwise(self):
param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 2.0 * 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
class TestDivide(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Divide(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Divide(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_divide_integers(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
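            # avoid division by zero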
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == (v1 / v2)
assert samples.dtype.kind == "f"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_integers_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == (v1 / v2)
assert samples.dtype.kind == "f"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_floats(self):
values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
(v1 / v2) - _eps(sample)
<= sample <=
(v1 / v2) + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_floats_both_deterministic(self):
values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
(v1 / v2) - _eps(sample)
<= sample <=
(v1 / v2) + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_by_stochastic_parameter(self):
param = iap.Divide(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 2.0) - _eps(samples))
assert np.all(samples < (1.0 / 1.0) + _eps(samples))
assert (
samples_sorted[0] - _eps(samples)
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples)
)
def test_divide_by_stochastic_parameter_elementwise(self):
param = iap.Divide(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 2.0) - _eps(samples))
assert np.all(samples < (1.0 / 1.0) + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples)
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples)
)
def test_divide_stochastic_parameter_by_float(self):
param = iap.Divide(iap.Uniform(1.0, 2.0),
1.0,
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 1.0) - _eps(samples))
assert np.all(samples < (2.0 / 1.0) + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples)
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples)
)
def test_divide_stochastic_parameter_by_float_elementwise(self):
param = iap.Divide(iap.Uniform(1.0, 2.0),
1.0,
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 1.0) - _eps(samples))
assert np.all(samples < (2.0 / 1.0) + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted)
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted)
)
def test_divide_by_stochastic_parameter_that_can_be_zero(self):
# test division by zero automatically being converted to division by 1
param = iap.Divide(2,
iap.Choice([0, 2]),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_unique = np.sort(np.unique(samples.flatten()))
assert samples_unique[0] == 1 and samples_unique[1] == 2
def test_divide_by_zero(self):
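# As in the test above, a divisor of zero appears to be replaced by 1,
# so 1 / 0 degenerates to 1 / 1 == 1.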
param = iap.Divide(iap.Deterministic(1), 0, elementwise=False)
sample = param.draw_sample()
assert sample == 1
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Add(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Add(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_add_integers(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 + v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int32) + v1 + v2
)
def test_add_integers_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 + v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int32) + v1 + v2
)
def test_add_floats(self):
values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 + v2
)
def test_add_floats_both_deterministic(self):
values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 + v2
)
def test_add_stochastic_parameter(self):
param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 1.0 + 2.0 + _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
def test_add_stochastic_parameter_elementwise(self):
param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 1.0 + 2.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
def test_add_to_stochastic_parameter(self):
param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 2.0 + 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
def test_add_to_stochastic_parameter_elementwise(self):
param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 2.0 + 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
class TestSubtract(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Subtract(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Subtract(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_subtract_integers(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 - v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int64) + v1 - v2
)
def test_subtract_integers_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 - v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int64) + v1 - v2
)
def test_subtract_floats(self):
values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + v1 - v2
)
def test_subtract_floats_both_deterministic(self):
values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + v1 - v2
)
def test_subtract_stochastic_parameter(self):
param = iap.Subtract(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 2.0 - _eps(samples))
assert np.all(samples < 1.0 - 1.0 + _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_subtract_stochastic_parameter_elementwise(self):
param = iap.Subtract(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 2.0 - _eps(samples))
assert np.all(samples < 1.0 - 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_subtract_from_stochastic_parameter(self):
param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 1.0 - _eps(samples))
assert np.all(samples < 2.0 - 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_subtract_from_stochastic_parameter_elementwise(self):
param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 1.0 - _eps(samples))
assert np.all(samples < 2.0 - 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
class TestPower(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Power(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Power(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_pairs(self):
values = [
-100, -54, -1, 0, 1, 54, 100,
-100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0
]
exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
for base, exponent in itertools.product(values, exponents):
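# Skip mathematically problematic pairs: a negative base with a float
# exponent would give a complex result, and 0 ** negative_exponent would
# divide by zero.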
if base < 0 and ia.is_single_float(exponent):
continue
if base == 0 and exponent < 0:
continue
with self.subTest(base=base, exponent=exponent):
p = iap.Power(iap.Deterministic(base), exponent)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
base ** exponent - _eps(sample)
< sample <
base ** exponent + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + base ** exponent
)
def test_pairs_both_deterministic(self):
values = [
-100, -54, -1, 0, 1, 54, 100,
-100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0
]
exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
for base, exponent in itertools.product(values, exponents):
if base < 0 and ia.is_single_float(exponent):
continue
if base == 0 and exponent < 0:
continue
with self.subTest(base=base, exponent=exponent):
p = iap.Power(iap.Deterministic(base), iap.Deterministic(exponent))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
base ** exponent - _eps(sample)
< sample <
base ** exponent + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + base ** exponent
)
def test_exponent_is_stochastic_parameter(self):
param = iap.Power(iap.Deterministic(1.5),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_exponent_is_stochastic_parameter_elementwise(self):
param = iap.Power(iap.Deterministic(1.5),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_value_is_uniform(self):
param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_value_is_uniform_elementwise(self):
param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
class TestAbsolute(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Absolute(iap.Deterministic(0))
assert (
param.__str__()
== param.__repr__()
== "Absolute(Deterministic(int 0))"
)
def test_fixed_values(self):
simple_values = [-1.5, -1, -1.0, -0.1, 0, 0.0, 0.1, 1, 1.0, 1.5]
for value in simple_values:
with self.subTest(value=value):
param = iap.Absolute(iap.Deterministic(value))
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
if ia.is_single_float(value):
assert (
abs(value) - _eps(sample)
< sample <
abs(value) + _eps(sample)
)
assert np.all(abs(value) - _eps(samples) < samples)
assert np.all(samples < abs(value) + _eps(samples))
else:
assert sample == abs(value)
assert np.all(samples == abs(value))
def test_value_is_stochastic_parameter(self):
param = iap.Absolute(iap.Choice([-3, -1, 1, 3]))
sample = param.draw_sample()
samples = param.draw_samples((10, 10))
samples_uq = np.sort(np.unique(samples))
assert sample.shape == tuple()
assert sample in [3, 1]
assert samples.shape == (10, 10)
assert len(samples_uq) == 2
assert samples_uq[0] == 1 and samples_uq[1] == 3
class TestRandomSign(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.RandomSign(iap.Deterministic(0), 0.5)
assert (
param.__str__()
== param.__repr__()
== "RandomSign(Deterministic(int 0), 0.50)"
)
def test_value_is_deterministic(self):
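# RandomSign presumably flips the sign with probability 0.5 by default, so
# the counts of +1 and -1 should be roughly balanced; the 350..750 bounds
# are deliberately loose to keep the test stable.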
param = iap.RandomSign(iap.Deterministic(1))
samples = param.draw_samples((1000,))
n_positive = np.sum(samples == 1)
n_negative = np.sum(samples == -1)
assert samples.shape == (1000,)
assert n_positive + n_negative == 1000
assert 350 < n_positive < 750
def test_value_is_deterministic_many_samples(self):
param = iap.RandomSign(iap.Deterministic(1))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
assert sample.shape == tuple()
if sample == 1:
seen[1] += 1
else:
seen[0] += 1
n_negative, n_positive = seen
assert n_positive + n_negative == 1000
assert 350 < n_positive < 750
def test_value_is_stochastic_parameter(self):
param = iap.RandomSign(iap.Choice([1, 2]))
samples = param.draw_samples((4000,))
seen = [0, 0, 0, 0]
seen[0] = np.sum(samples == -2)
seen[1] = np.sum(samples == -1)
seen[2] = np.sum(samples == 1)
seen[3] = np.sum(samples == 2)
assert np.sum(seen) == 4000
assert all([700 < v < 1300 for v in seen])
def test_samples_same_values_for_same_seeds(self):
param = iap.RandomSign(iap.Choice([1, 2]))
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
assert np.sum(samples1 == -2) > 50
assert np.sum(samples1 == -1) > 50
assert np.sum(samples1 == 1) > 50
assert np.sum(samples1 == 2) > 50
class TestForceSign(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.ForceSign(iap.Deterministic(0), True, "invert", 1)
assert (
param.__str__()
== param.__repr__()
== "ForceSign(Deterministic(int 0), True, invert, 1)"
)
def test_single_sample_positive(self):
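# mode="invert" presumably flips the sign of any sample that violates the
# requested sign, whereas mode="reroll" (tested further below) redraws until
# the sign matches.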
param = iap.ForceSign(iap.Deterministic(1), positive=True,
mode="invert")
sample = param.draw_sample()
assert sample.shape == tuple()
assert sample == 1
def test_single_sample_negative(self):
param = iap.ForceSign(iap.Deterministic(1), positive=False,
mode="invert")
sample = param.draw_sample()
assert sample.shape == tuple()
assert sample == -1
def test_many_samples_positive(self):
param = iap.ForceSign(iap.Deterministic(1), positive=True,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == 1)
def test_many_samples_negative(self):
param = iap.ForceSign(iap.Deterministic(1), positive=False,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == -1)
def test_many_samples_negative_value_to_positive(self):
param = iap.ForceSign(iap.Deterministic(-1), positive=True,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == 1)
def test_many_samples_negative_value_to_negative(self):
param = iap.ForceSign(iap.Deterministic(-1), positive=False,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == -1)
def test_many_samples_stochastic_value_to_positive(self):
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
mode="invert")
samples = param.draw_samples(1000)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert samples.shape == (1000,)
assert n_twos + n_ones == 1000
assert 200 < n_twos < 700
assert 200 < n_ones < 700
def test_many_samples_stochastic_value_to_positive_reroll(self):
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
mode="reroll")
samples = param.draw_samples(1000)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert samples.shape == (1000,)
assert n_twos + n_ones == 1000
assert n_twos > 0
assert n_ones > 0
def test_many_samples_stochastic_value_to_positive_reroll_max_count(self):
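# With a reroll budget, a still-negative sample is presumably sign-inverted
# once reroll_count_max is exhausted; drawing -2 one hundred times in a row
# is so unlikely that almost no 2s (inverted -2s) should appear.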
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
mode="reroll", reroll_count_max=100)
samples = param.draw_samples(100)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert samples.shape == (100,)
assert n_twos + n_ones == 100
assert n_twos < 5
def test_samples_same_values_for_same_seeds(self):
param = iap.ForceSign(iap.Choice([-2, 1]),
positive=True,
mode="invert")
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
class TestPositive(unittest.TestCase):
def setUp(self):
reseed()
def test_many_samples_reroll(self):
param = iap.Positive(iap.Deterministic(-1),
mode="reroll",
reroll_count_max=1)
samples = param.draw_samples((100,))
assert samples.shape == (100,)
assert np.all(samples == 1)
class TestNegative(unittest.TestCase):
def setUp(self):
reseed()
def test_many_samples_reroll(self):
param = iap.Negative(iap.Deterministic(1),
mode="reroll",
reroll_count_max=1)
samples = param.draw_samples((100,))
assert samples.shape == (100,)
assert np.all(samples == -1)
class TestIterativeNoiseAggregator(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.IterativeNoiseAggregator(iap.Deterministic(0),
iterations=(1, 3),
aggregation_method="max")
assert (
param.__str__()
== param.__repr__()
== (
"IterativeNoiseAggregator("
"Deterministic(int 0), "
"DiscreteUniform(Deterministic(int 1), "
"Deterministic(int 3)"
"), "
"Deterministic(max)"
")"
)
)
def test_value_is_deterministic_max_1_iter(self):
param = iap.IterativeNoiseAggregator(iap.Deterministic(1),
iterations=1,
aggregation_method="max")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 1
assert np.all(samples == 1)
def test_value_is_stochastic_avg_200_iter(self):
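# The average of 200 draws from Choice([0, 50]) should concentrate near the
# expected value 25; the +/-10 band keeps the test robust to sampling noise.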
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
iterations=200,
aggregation_method="avg")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert 25 - 10 < sample < 25 + 10
assert np.all(np.logical_and(25 - 10 < samples, samples < 25 + 10))
def test_value_is_stochastic_max_100_iter(self):
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
iterations=100,
aggregation_method="max")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 50
assert np.all(samples == 50)
def test_value_is_stochastic_min_100_iter(self):
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
iterations=100,
aggregation_method="min")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 0
assert np.all(samples == 0)
def test_value_is_stochastic_avg_or_max_100_iter_evaluate_counts(self):
seen = [0, 0, 0, 0]
for _ in sm.xrange(100):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=100,
aggregation_method=["avg", "max"])
samples = param.draw_samples((1, 1))
diff_0 = abs(0 - samples[0, 0])
diff_25 = abs(25 - samples[0, 0])
diff_50 = abs(50 - samples[0, 0])
if diff_25 < 10.0:
seen[0] += 1
elif diff_50 < _eps(samples):
seen[1] += 1
elif diff_0 < _eps(samples):
seen[2] += 1
else:
seen[3] += 1
assert seen[2] <= 2 # around 0.0
assert seen[3] <= 2 # 0.0+eps <= x < 15.0 or 35.0 < x < 50.0 or >50.0
assert 50 - 20 < seen[0] < 50 + 20
assert 50 - 20 < seen[1] < 50 + 20
def test_value_is_stochastic_avg_tuple_as_iter_evaluate_histograms(self):
# iterations as tuple
param = iap.IterativeNoiseAggregator(
iap.Uniform(-1.0, 1.0),
iterations=(1, 100),
aggregation_method="avg")
diffs = []
for _ in sm.xrange(100):
samples = param.draw_samples((1, 1))
diff = abs(samples[0, 0] - 0.0)
diffs.append(diff)
nb_bins = 3
hist, _ = np.histogram(diffs, bins=nb_bins, range=(-1.0, 1.0),
density=False)
assert hist[1] > hist[0]
assert hist[1] > hist[2]
def test_value_is_stochastic_max_list_as_iter_evaluate_counts(self):
# iterations as list
seen = [0, 0]
for _ in sm.xrange(400):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=[1, 100],
aggregation_method=["max"])
samples = param.draw_samples((1, 1))
diff_0 = abs(0 - samples[0, 0])
diff_50 = abs(50 - samples[0, 0])
if diff_50 < _eps(samples):
seen[0] += 1
elif diff_0 < _eps(samples):
seen[1] += 1
else:
assert False
assert 300 - 50 < seen[0] < 300 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_value_is_stochastic_all_100_iter(self):
# test ia.ALL as aggregation_method
# note that each individual method and lists of methods are already
# tested, so no in-depth test is needed here
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=100, aggregation_method=ia.ALL)
assert isinstance(param.aggregation_method, iap.Choice)
assert len(param.aggregation_method.a) == 3
assert all([v in param.aggregation_method.a for v in ["min", "avg", "max"]])
def test_value_is_stochastic_max_2_iter(self):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=2, aggregation_method="max")
samples = param.draw_samples((2, 1000))
nb_0 = np.sum(samples == 0)
nb_50 = np.sum(samples == 50)
assert nb_0 + nb_50 == 2 * 1000
assert 0.25 - 0.05 < nb_0 / (2 * 1000) < 0.25 + 0.05
def test_samples_same_values_for_same_seeds(self):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=5, aggregation_method="avg")
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.allclose(samples1, samples2)
def test_stochastic_param_as_aggregation_method(self):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=5,
aggregation_method=iap.Deterministic("max"))
assert isinstance(param.aggregation_method, iap.Deterministic)
assert param.aggregation_method.value == "max"
def test_bad_datatype_for_aggregation_method(self):
with self.assertRaises(Exception) as context:
_ = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=5, aggregation_method=False)
self.assertTrue(
"Expected aggregation_method to be" in str(context.exception))
def test_bad_datatype_for_iterations(self):
with self.assertRaises(Exception) as context:
_ = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=False,
aggregation_method="max")
self.assertTrue("Expected iterations to be" in str(context.exception))
class TestSigmoid(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Sigmoid(
iap.Deterministic(0),
threshold=(-10, 10),
activated=True,
mul=1,
add=0)
assert (
param.__str__()
== param.__repr__()
== (
"Sigmoid("
"Deterministic(int 0), "
"Uniform("
"Deterministic(int -10), "
"Deterministic(int 10)"
"), "
"Deterministic(int 1), "
"1, "
"0)"
)
)
def test_activated_is_true(self):
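# With activated=True the output is presumably sigmoid(value * mul + add - threshold);
# for value=5, mul=1, add=0, threshold=0.5 that is 1 / (1 + exp(-4.5)).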
param = iap.Sigmoid(
iap.Deterministic(5),
add=0,
mul=1,
threshold=0.5,
activated=True)
expected = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
sample = param.draw_sample()
samples = param.draw_samples((5, 10))
assert sample.shape == tuple()
assert samples.shape == (5, 10)
assert expected - _eps(sample) < sample < expected + _eps(sample)
assert np.all(
np.logical_and(
expected - _eps(samples) < samples,
samples < expected + _eps(samples)
)
)
def test_activated_is_false(self):
param = iap.Sigmoid(
iap.Deterministic(5),
add=0,
mul=1,
threshold=0.5,
activated=False)
expected = 5
sample = param.draw_sample()
samples = param.draw_samples((5, 10))
assert sample.shape == tuple()
assert samples.shape == (5, 10)
assert expected - _eps(sample) < sample < expected + _eps(sample)
assert np.all(
np.logical_and(
expected - _eps(sample) < samples,
samples < expected + _eps(sample)
)
)
def test_activated_is_probabilistic(self):
param = iap.Sigmoid(
iap.Deterministic(5),
add=0,
mul=1,
threshold=0.5,
activated=0.5)
expected_first = 5
expected_second = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
diff_first = abs(sample - expected_first)
diff_second = abs(sample - expected_second)
if diff_first < _eps(sample):
seen[0] += 1
elif diff_second < _eps(sample):
seen[1] += 1
else:
assert False
assert 500 - 150 < seen[0] < 500 + 150
assert 500 - 150 < seen[1] < 500 + 150
def test_value_is_stochastic_param(self):
param = iap.Sigmoid(
iap.Choice([1, 10]),
add=0,
mul=1,
threshold=0.5,
activated=True)
expected_first = 1 / (1 + np.exp(-(1 * 1 + 0 - 0.5)))
expected_second = 1 / (1 + np.exp(-(10 * 1 + 0 - 0.5)))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
diff_first = abs(sample - expected_first)
diff_second = abs(sample - expected_second)
if diff_first < _eps(sample):
seen[0] += 1
elif diff_second < _eps(sample):
seen[1] += 1
else:
assert False
assert 500 - 150 < seen[0] < 500 + 150
assert 500 - 150 < seen[1] < 500 + 150
def test_mul_add_threshold_with_various_fixed_values(self):
muls = [0.1, 1, 10.3]
adds = [-5.7, -0.0734, 0, 0.0734, 5.7]
vals = [-1, -0.7, 0, 0.7, 1]
threshs = [-5.7, -0.0734, 0, 0.0734, 5.7]
for mul, add, val, thresh in itertools.product(muls, adds, vals,
threshs):
with self.subTest(mul=mul, add=add, val=val, threshold=thresh):
param = iap.Sigmoid(
iap.Deterministic(val),
add=add,
mul=mul,
threshold=thresh)
sample = param.draw_sample()
samples = param.draw_samples((2, 3))
dt = sample.dtype
val_ = np.array([val], dtype=dt)
mul_ = np.array([mul], dtype=dt)
add_ = np.array([add], dtype=dt)
thresh_ = np.array([thresh], dtype=dt)
expected = (
1 / (
1 + np.exp(
-(val_ * mul_ + add_ - thresh_)
)
)
)
assert sample.shape == tuple()
assert samples.shape == (2, 3)
assert (
expected - 5*_eps(sample)
< sample <
expected + 5*_eps(sample)
)
assert np.all(
np.logical_and(
expected - 5*_eps(sample) < samples,
samples < expected + 5*_eps(sample)
)
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Sigmoid(
iap.Choice([1, 10]),
add=0,
mul=1,
threshold=0.5,
activated=True)
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
| 34.712048
| 83
| 0.557204
| 17,098
| 144,055
| 4.486022
| 0.033747
| 0.028383
| 0.023467
| 0.029686
| 0.861881
| 0.827514
| 0.785156
| 0.762001
| 0.737882
| 0.709929
| 0
| 0.060091
| 0.330672
| 144,055
| 4,149
| 84
| 34.720415
| 0.735408
| 0.02152
| 0
| 0.656814
| 0
| 0
| 0.028436
| 0.007439
| 0
| 0
| 0
| 0.000241
| 0.238955
| 1
| 0.107654
| false
| 0
| 0.006223
| 0.001245
| 0.127256
| 0.000311
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
827ac159b8342adeb18a832d9a86cfcb0600fb29
| 62
|
py
|
Python
|
modules/vqvc/__init__.py
|
reppy4620/VCon
|
cac3441443cb9b28ffbaa0646ed1826d71cb16e0
|
[
"MIT"
] | 4
|
2021-05-22T03:14:44.000Z
|
2022-01-03T04:32:54.000Z
|
modules/vqvc/__init__.py
|
reppy4620/VCon
|
cac3441443cb9b28ffbaa0646ed1826d71cb16e0
|
[
"MIT"
] | null | null | null |
modules/vqvc/__init__.py
|
reppy4620/VCon
|
cac3441443cb9b28ffbaa0646ed1826d71cb16e0
|
[
"MIT"
] | null | null | null |
from .model import VQVCModel
from .pl_model import VQVCModule
| 20.666667
| 32
| 0.83871
| 9
| 62
| 5.666667
| 0.666667
| 0.431373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 62
| 2
| 33
| 31
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
827fdac046ac07902d8fa5e1aeb478e27e40e24c
| 11,538
|
py
|
Python
|
integration_tests/test_router.py
|
madfish-solutions/quipuswap-token2token-core
|
41fd4293029e2094a564141fb389fd9a1ef19185
|
[
"MIT"
] | null | null | null |
integration_tests/test_router.py
|
madfish-solutions/quipuswap-token2token-core
|
41fd4293029e2094a564141fb389fd9a1ef19185
|
[
"MIT"
] | null | null | null |
integration_tests/test_router.py
|
madfish-solutions/quipuswap-token2token-core
|
41fd4293029e2094a564141fb389fd9a1ef19185
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import json
from helpers import *
from pytezos import ContractInterface, pytezos, MichelsonRuntimeError
from pytezos.context.mixin import ExecutionContext
token_a = "KT1AxaBxkFLCUi3f8rdDAAxBKHfzY8LfKDRA"
token_b = "KT1PgHxzUXruWG5XAahQzJAjkk4c2sPcM3Ca"
token_c = "KT1RJ6PbjHpwc3M5rw5s2Nbmefwbuwbdxton"
token_d = "KT1Wz32jY2WEwWq8ZaA2C6cYFHGchFYVVczC"
pair_ab = {
"token_a_type" : {
"fa2": {
"token_address": token_a,
"token_id": 0
}
},
"token_b_type": {
"fa2": {
"token_address": token_b,
"token_id": 1
}
},
}
pair_bc = {
"token_a_type": {
"fa2": {
"token_address": token_b,
"token_id": 1
}
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
pair_ac = {
"token_a_type" : {
"fa2": {
"token_address": token_a,
"token_id": 0
}
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
pair_cd = {
"token_a_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
},
"token_b_type" : {
"fa2": {
"token_address": token_d,
"token_id": 3
}
}
}
class TokenToTokenRouterTest(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
dex_code = open("./integration_tests/compiled/Dex.tz", 'r').read()
cls.dex = ContractInterface.from_michelson(dex_code)
initial_storage_michelson = json.load(open("./integration_tests/compiled/storage.json", 'r'))
cls.init_storage = cls.dex.storage.decode(initial_storage_michelson)
def test_tt_token_to_token_router(self):
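# Route amount_in through pair 0 (A->B) and then pair 1 (B->C) in a single
# call, then redo the route as two single-hop swaps and check that both
# paths deliver the same amount of token C.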
amount_in = 10_000
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000, 300_000))
res = chain.execute(self.dex.addPair(pair_bc, 500_000, 700_000))
# interpret the call without applying it
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : amount_in,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
contract_in = next(v for v in transfers if v["destination"] == contract_self_address)
self.assertEqual(contract_in["token_address"], token_a)
self.assertEqual(contract_in["amount"], 10_000)
routed_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(routed_out["token_address"], token_c)
# same swap but one by one
res = chain.interpret(self.dex.swap(
swaps=[{
"pair_id": 0,
"operation": "a_to_b",
}],
amount_in=amount_in,
min_amount_out=1,
receiver=julian,
deadline=100_000
))
transfers = parse_token_transfers(res)
token_b_out = next(v for v in transfers if v["destination"] == julian)
res = chain.interpret(self.dex.swap(
swaps=[{
"pair_id": 1,
"operation": "a_to_b",
}],
amount_in=token_b_out["amount"],
min_amount_out=1,
receiver=julian,
deadline=100_000,
))
transfers = parse_token_transfers(res)
token_c_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(routed_out["amount"], token_c_out["amount"])
def test_tt_router_triangle(self):
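# A->B->C->A across three equal pools; each hop appears to charge a ~0.3%
# fee, so 10_000 in should return roughly 0.997**3 * 10_000 ~= 9910
# (the assert below accepts 9909 due to rounding).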
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000_000_000, 100_000_000_000))
res = chain.execute(self.dex.addPair(pair_bc, 100_000_000_000, 100_000_000_000))
res = chain.execute(self.dex.addPair(pair_ac, 100_000_000_000, 100_000_000_000))
# interpret the call without applying it
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
},
{
"pair_id": 2,
"operation": "b_to_a",
}
],
"amount_in" : 10_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_c_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(token_c_out["amount"], 9909) # ~ 9910 by compound interest formula
def test_tt_router_ab_ba(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000_000_000, 100_000_000_000))
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 0,
"operation": "b_to_a",
}
],
"amount_in" : 10_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(token_out["amount"], 9939)
def test_tt_router_impossible_path(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 1111, 3333))
res = chain.execute(self.dex.addPair(pair_cd, 5555, 7777))
# can't find path
with self.assertRaises(MichelsonRuntimeError):
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : 334,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
with self.assertRaises(MichelsonRuntimeError):
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 0,
"operation": "a_to_b",
}
],
"amount_in" : 334,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
def test_tt_router_cant_overbuy(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000, 100_000))
res = chain.execute(self.dex.addPair(pair_bc, 10_000, 10_000))
res = chain.execute(self.dex.addPair(pair_ac, 1_000_000, 1_000_000))
# overbuy at the very beginning
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
}
],
"amount_in" : 100_000_000_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(token_out["amount"], 99_999)
# overbuy at the end
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : 100_000_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertLess(token_out["amount"], 9_999)
# overbuy in the middle
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
},
{
"pair_id": 2,
"operation": "b_to_a",
}
],
"amount_in" : 10_000_000_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertLess(token_out["amount"], 9_999)
def test_tt_router_mixed_fa2_fa12(self):
pair_ab = {
"token_a_type" : {
"fa12": token_b,
},
"token_b_type": {
"fa2": {
"token_address": token_a,
"token_id": 1
}
},
}
pair_bc = {
"token_a_type" : {
"fa12": token_b,
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
amount_in = 10_000
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000, 300_000))
res = chain.execute(self.dex.addPair(pair_bc, 500_000, 700_000))
# interpret the call without applying it
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "b_to_a",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : amount_in,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
contract_in = next(v for v in transfers if v["destination"] == contract_self_address)
self.assertEqual(contract_in["token_address"], token_a)
self.assertEqual(contract_in["amount"], 10_000)
routed_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(routed_out["token_address"], token_c)
| 30.68617
| 101
| 0.477639
| 1,172
| 11,538
| 4.403584
| 0.116041
| 0.029064
| 0.039527
| 0.042821
| 0.804108
| 0.801783
| 0.801783
| 0.787638
| 0.778338
| 0.735323
| 0
| 0.06408
| 0.410296
| 11,538
| 375
| 102
| 30.768
| 0.694444
| 0.022881
| 0
| 0.654321
| 0
| 0
| 0.151367
| 0.019531
| 0
| 0
| 0
| 0
| 0.04321
| 1
| 0.021605
| false
| 0
| 0.015432
| 0
| 0.040123
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7d57cb53958a854e64b6d878a9826f34dbca7a63
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_internal/operations/install/editable_legacy.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pip/_internal/operations/install/editable_legacy.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pip/_internal/operations/install/editable_legacy.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/6e/30/4e/6df13ab33dd498623bcb8f860a029ad969938275a514553b6fe8b4b10b
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4375
| 0
| 96
| 1
| 96
| 96
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7d58f75c60cd92e49b8842d06b9c5d9c9a1f2ca8
| 91
|
py
|
Python
|
skfda/exploratory/__init__.py
|
jiduque/scikit-fda
|
5ea71e78854801b259aa3a01eb6b154aa63bf54b
|
[
"BSD-3-Clause"
] | 147
|
2019-05-10T20:46:42.000Z
|
2022-03-25T17:23:19.000Z
|
skfda/exploratory/__init__.py
|
jiduque/scikit-fda
|
5ea71e78854801b259aa3a01eb6b154aa63bf54b
|
[
"BSD-3-Clause"
] | 306
|
2019-04-26T08:56:05.000Z
|
2022-03-30T11:12:48.000Z
|
skfda/exploratory/__init__.py
|
jiduque/scikit-fda
|
5ea71e78854801b259aa3a01eb6b154aa63bf54b
|
[
"BSD-3-Clause"
] | 38
|
2019-09-03T17:24:04.000Z
|
2022-01-06T05:09:18.000Z
|
from . import depth
from . import outliers
from . import stats
from . import visualization
| 18.2
| 27
| 0.78022
| 12
| 91
| 5.916667
| 0.5
| 0.56338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175824
| 91
| 4
| 28
| 22.75
| 0.946667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7d6a9fc0ae2c18fcc1e9420cc0d5c546fe26cbe4
| 1,267
|
py
|
Python
|
Home_Work_2_B_Naychuk_Anastasiya/Task1.py
|
NaychukAnastasiya/goiteens-python3-naychuk
|
a79d0af238a15f58a822bb5d8e4d48227d4a7bc1
|
[
"MIT"
] | null | null | null |
Home_Work_2_B_Naychuk_Anastasiya/Task1.py
|
NaychukAnastasiya/goiteens-python3-naychuk
|
a79d0af238a15f58a822bb5d8e4d48227d4a7bc1
|
[
"MIT"
] | null | null | null |
Home_Work_2_B_Naychuk_Anastasiya/Task1.py
|
NaychukAnastasiya/goiteens-python3-naychuk
|
a79d0af238a15f58a822bb5d8e4d48227d4a7bc1
|
[
"MIT"
] | null | null | null |
# Which of the 3 numbers is closest to the average
print("Enter the first number")
var1 = float(input())
print("Enter the second number")
var2 = float(input())
print("Enter the third number")
var3 = float(input())
# Avg = (var1+var2+var3)/3 # Alternative solution: compare the numbers with the arithmetic mean:
if ((var1 > var2) and (var1 < var3)) or ((var1 < var2) and (var1 > var3)):
print ("The number closest to the average is ",var1)
elif ((var2 > var1) and (var2 < var3)) or ((var2 < var1) and (var2 > var3)):
print ("The number closest to the average is ",var2)
else:
print ("The number closest to the average is ",var3)
# # Alternative solution: compare the numbers with the arithmetic mean:
# if (abs(var1-Avg))>(abs(var2-Avg)):
# if (abs(var2-Avg))>(abs(var3-Avg)):
# print ("The number closest to the average is ",var3)
# else: #(abs(var2-Avg))<(abs(var3-Avg))
# print ("The number closest to the average is ",var2)
# else: #(abs(var1-Avg))<(abs(var2-Avg))
# if (abs(var1-Avg))>(abs(var3-Avg)):
# print ("The number closest to the average is ",var3)
# else: #(abs(var1-Avg))<(abs(var3-Avg))
# print ("The number closest to the average is ",var1)
| 45.25
| 93
| 0.648777
| 175
| 1,267
| 4.697143
| 0.211429
| 0.116788
| 0.195864
| 0.246959
| 0.749392
| 0.708029
| 0.708029
| 0.708029
| 0.527981
| 0.457421
| 0
| 0.043265
| 0.197317
| 1,267
| 28
| 94
| 45.25
| 0.764995
| 0.558011
| 0
| 0
| 0
| 0
| 0.361165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
7d72c0bcd96eb18d89e4b84f9f4aa4228039c607
| 102
|
py
|
Python
|
urlmiddleware/base.py
|
dbramwell/django-urlmiddleware
|
8f7f4a571730805cdd04f321548c8d1dc7751ec7
|
[
"MIT"
] | 4
|
2015-04-10T10:41:18.000Z
|
2016-06-16T01:19:15.000Z
|
urlmiddleware/base.py
|
dbramwell/django-urlmiddleware
|
8f7f4a571730805cdd04f321548c8d1dc7751ec7
|
[
"MIT"
] | 2
|
2015-12-18T12:24:05.000Z
|
2015-12-18T17:00:27.000Z
|
urlmiddleware/base.py
|
dbramwell/django-urlmiddleware
|
8f7f4a571730805cdd04f321548c8d1dc7751ec7
|
[
"MIT"
] | 7
|
2015-11-17T17:53:37.000Z
|
2016-03-29T06:21:17.000Z
|
from django.core.urlresolvers import Resolver404
class MiddlewareResolver404(Resolver404):
pass
| 17
| 48
| 0.823529
| 10
| 102
| 8.4
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101124
| 0.127451
| 102
| 5
| 49
| 20.4
| 0.842697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
7db8db74363fb05b1c46621fca683280e13e4190
| 67
|
py
|
Python
|
Solutions/Python/Posix command(7 kyu).py
|
collenirwin/Codewars-Solutions
|
14bad3878d3fc37c7e73cbaaaa24cd28f759ce3b
|
[
"MIT"
] | null | null | null |
Solutions/Python/Posix command(7 kyu).py
|
collenirwin/Codewars-Solutions
|
14bad3878d3fc37c7e73cbaaaa24cd28f759ce3b
|
[
"MIT"
] | null | null | null |
Solutions/Python/Posix command(7 kyu).py
|
collenirwin/Codewars-Solutions
|
14bad3878d3fc37c7e73cbaaaa24cd28f759ce3b
|
[
"MIT"
] | null | null | null |
from os import popen
def get_output(s):
return popen(s).read()
| 16.75
| 26
| 0.701493
| 12
| 67
| 3.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179104
| 67
| 4
| 26
| 16.75
| 0.836364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8196db5a9a3e9b1ef0fc71ca07363d90aa3c3237
| 4,386
|
py
|
Python
|
aindex/demo.py
|
ad3002/Lyrebird
|
8c0a186e32d61189f073401152c52a89bfed46ed
|
[
"MIT"
] | null | null | null |
aindex/demo.py
|
ad3002/Lyrebird
|
8c0a186e32d61189f073401152c52a89bfed46ed
|
[
"MIT"
] | null | null | null |
aindex/demo.py
|
ad3002/Lyrebird
|
8c0a186e32d61189f073401152c52a89bfed46ed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#@created: 07.01.2018
#@author: Aleksey Komissarov
#@contact: [email protected]
from aindex import *
settings = {
"index_prefix": "tests/kmers.23",
"aindex_prefix": "tests/kmers.23",
"reads_file": "tests/reads.reads",
}
index = load_aindex(settings)
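# The loaded aindex presumably maps every 23-mer to its frequency
# (index[kmer]) and to the reads/positions it occurs in; the helpers below
# query it in different ways.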
k = 23
sequence = "TAAGTTATTATTTAGTTAATACTTTTAACAATATTATTAAGGTATTTAAAAAATACTATTATAGTATTTAACATAGTTAAATACCTTCCTTAATACTGTTAAATTATATTCAATCAATACATATATAATATTATTAAAATACTTGATAAGTATTATTTAGATATTAGACAAATACTAATTTTATATTGCTTTAATACTTAATAAATACTACTTATGTATTAAGTAAATATTACTGTAATACTAATAACAATATTATTACAATATGCTAGAATAATATTGCTAGTATCAATAATTACTAATATAGTATTAGGAAAATACCATAATAATATTTCTACATAATACTAAGTTAATACTATGTGTAGAATAATAAATAATCAGATTAAAAAAATTTTATTTATCTGAAACATATTTAATCAATTGAACTGATTATTTTCAGCAGTAATAATTACATATGTACATAGTACATATGTAAAATATCATTAATTTCTGTTATATATAATAGTATCTATTTTAGAGAGTATTAATTATTACTATAATTAAGCATTTATGCTTAATTATAAGCTTTTTATGAACAAAATTATAGACATTTTAGTTCTTATAATAAATAATAGATATTAAAGAAAATAAAAAAATAGAAATAAATATCATAACCCTTGATAACCCAGAAATTAATACTTAATCAAAAATGAAAATATTAATTAATAAAAGTGAATTGAATAAAATTTTGAAAAAAATGAATAACGTTATTATTTCCAATAACAAAATAAAACCACATCATTCATATTTTTTAATAGAGGCAAAAGAAAAAGAAATAAACTTTTATGCTAACAATGAATACTTTTCTGTCAAATGTAATTTAAATAAAAATATTGATATTCTTGAACAAGGCTCCTTAATTGTTAAAGGAAAAATTTTTAACGATCTTATTAATGGCATAAAAGAAGAGATTATTACTATTCAAGAAAAAGATCAAACACTTTTGGTTAAAACAAAAAAAACAAGTATTAATTTAAACACAATTAATGTGAATGAATTTCCAAGAATAAGGTTTAATGAAAAAAACGATTTAAGTGAATTTAATCAATTCAAAATAAATTATTCACTTTTAGTAAAAGGCATTAAAAAAATTTTTCACTCAGTTTCAAATAATCGTGAAATATCTTCTAAATTTAATGGAGTAAATTTCAATGGATCCAATGGAAAAGAAATATTTTTAGAAGCTTCTGACACTTATAAACTATCTGTTTTTGAGATAAAGCAAGAAACAGAACCATTTGATTTCATTTTGGAGAGTAATTTACTTAGTTTCATTAATTCTTTTAATCCTGAAGAAGATAAATCTATTGTTTTTTATTACAGAAAAGATAATAAAGATAGCTTTAGTACAGAAATGTTGATTTCAATGGATAACTTTATGATTAGTTACACATCGGTTAATGAAAAATTTCCAGAGGTAAACTACTTTTTTGAATTTGAACCTGAAACTAAAATAGTTGTTCAAAAAAATGAATTAAAAGATGCACTTCAAAGAATTCAAACTTTGGCTCAAAATGAAAGAACTTTTTTATGCGATATGCAAATTAACAGTTCTGAATTAAAAATAAGAGCTATTGTTAATAATATCGGAAATTCTCTTGAGGAAATTTCTTGTCTTAAATTTGAAGGTTATAAACTTAATATTTCTTTTAACCCAAGTTCTCTATTAGATCACATAGAGTCTTTTGAATCAAATGAAATAAATTTTGATTTCCAAGGAAATAGTAAGTATTTTTTGATAACCTCTAAAAGTGAACCTGAACTTAAGCAAATATTGGTTCCTTCAAGATAATGAATCTTTACGATCTTTTAGAACTACCAACTACAGCATCAATAAAAGAAATAAAAATTGCTTATAAAAGATTAGCAAAGCGTTATCACCCTGATGTAAATAAATTAGGTTCGCAAACTTTTGTTGAAATTAATAATGCTTATTCAATATTAAGTGATCCTAACCAAAAGGAAAAATATGATTCAATGCTGAAAGTTAATGATTTTCAAAATCGCATCAAAAATTTAGATATTAGTGTTAGATGACATGAAAATTTCATGGAAGAACTCGAACTTCGTAAGAACTGAGAATTTGATTTTTTTTCATCTGATGAAGATTTCTTTTATTCTCCATTTACAAAAA"
test_kmer = "TAAGTTATTATTTAGTTAATACT"
right_kmer = "AGTTAATACTTTTAACAATATTA"
print("Task 1. Get kmer frequency")
# raw_input("\nReady?")
for i in range(len(sequence)-k+1):
kmer = sequence[i:i+k]
print("Position %s kmer %s freq = %s" % (i, kmer, index[kmer]))
print("Task 2. Iter read by read, print the first 20 reads")
# raw_input("\nReady?")
for i, read in enumerate(index.iter_reads()):
if i == 20:
break
print(i, read)
print("Task 3. Iter reads by kmer, returns (start, next_read_start, read, pos_if_uniq|None, all_poses)")
# raw_input("\nReady?")
for read in iter_reads_by_kmer(test_kmer, index):
print(read)
print("Task 4. Get distances in reads for two kmers, returns a list of (rid, left_kmer_pos, right_kmer_pos) tuples.")
# raw_input("\nReady?")
print(get_left_right_distances(test_kmer, right_kmer, index))
print("Task 5. Get layout for kmer, returns (max_pos, reads, lefts, rights, rids, starts), for details see source code")
# raw_input("\nReady?")
max_pos, reads, lefts, rights, rids, starts = get_layout_for_kmer(right_kmer, index)
print("Central layout:")
for read in reads:
print(read)
print("Left flanks:")
print(lefts)
print("Right flanks:")
print(rights)
print("Task 6. Iter reads by sequence, returns (start, next_read_start, read, pos_if_uniq|None, all_poses)")
# raw_input("\nReady?")
sequence = "AATATTATTAAGGTATTTAAAAAATACTATTATAGTATTTAACATA"
for read in iter_reads_by_sequence(sequence, index):
print(read)
print("Task 7. Iter reads by kmer with reads as SE, returns (start, next_read_start, subread, kmere_pos, -1|0|1 for spring_pos, was_reversed, poses_in_read)")
# raw_input("\nReady?")
user_reads = set()
sequence = "AATATTATTAAGGTATTTAAAAAATACTATTATAGTATTTAACATA"
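# Note: 'kmer' below presumably still holds the last 23-mer from the Task 1
# loop; the freshly assigned 'sequence' is not passed to this call.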
for rid, nextrid, read, pos, spring_pos, was_reversed, poses in get_reads_se_by_kmer(kmer, index, user_reads, k=23):
print(rid, read, pos)
| 58.48
| 2,183
| 0.858413
| 338
| 4,386
| 10.95858
| 0.331361
| 0.017009
| 0.026458
| 0.013769
| 0.107991
| 0.073434
| 0.048056
| 0.030778
| 0.030778
| 0.030778
| 0
| 0.00883
| 0.070451
| 4,386
| 74
| 2,184
| 59.27027
| 0.899681
| 0.061332
| 0
| 0.116279
| 0
| 0.116279
| 0.754694
| 0.56279
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.023256
| 0
| 0.023256
| 0.44186
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
819a475b581f4721e5c8b8ee781500a5749d808c
| 8,054
|
py
|
Python
|
transformation_fnc.py
|
usrmaia/transformation-fnc
|
37ef77708892417ac985bb6f1cf62285834560d8
|
[
"MIT"
] | null | null | null |
transformation_fnc.py
|
usrmaia/transformation-fnc
|
37ef77708892417ac985bb6f1cf62285834560d8
|
[
"MIT"
] | null | null | null |
transformation_fnc.py
|
usrmaia/transformation-fnc
|
37ef77708892417ac985bb6f1cf62285834560d8
|
[
"MIT"
] | null | null | null |
from useful import *
from os import system
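# Operator symbols, inferred from usage below: '>' implication,
# '#' disjunction (or), '&' conjunction (and), '-'/'¬' negation.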
def remove_implication(formula):
while ">" in formula:
operator = formula.find(">")
print(formula, operator)
subform_left = get_subform_left(formula, operator)
subform_right = get_subform_right(formula, operator)
formula = get_remove_implication(formula, subform_left, subform_right, operator)
return formula
def get_remove_implication(formula, subform_left, subform_right, operator):
# ...(A>B)... |-> ...(-A#B)...
no_modification_right = formula[operator + len(subform_right) + 1:]
no_modification_left = formula[:operator - len(subform_left)]
return f"{no_modification_left}-{subform_left}#{subform_right}{no_modification_right}"
def morgan_law(formula):
while "-(" in formula:
index = formula.find("-(")
print(formula, index)
operator = get_operator(formula, index + 1)
subform_left = get_subform_left(formula, operator)
subform_right = get_subform_right(formula, operator)
formula = get_morgan_law(formula, subform_left, subform_right, operator)
return formula
def get_morgan_law(formula, subform_left, subform_right, operator):
# ...-(A&B)... |-> ...(-A#-B)...
# ...-(A#B)... |-> ...(-A&-B)...
match formula[operator]:
case "#":
new_operator = "&"
case "&":
new_operator = "#"
no_modification_right = formula[operator + len(subform_right) + 1:]
no_modification_left = formula[:operator - len(subform_left) - 1 - 1]
return f"{no_modification_left}(-{subform_left}{new_operator}-{subform_right}{no_modification_right}"
def remove_double_negation(formula):
# --A |-> A
formula = formula.replace("--", "")
return formula
def distributivity(formula):
index = 0
while index < len(formula):
# Having "#(" or ")#" is only the first condition for applying distributivity
# The second condition is that "#(A&B)" or "(A&B)#" occurs
if "#(" in formula[index:index + 2]: # "#("
operator_and = get_operator(formula, index + 1)
if formula[operator_and] == "&": # "#(A&B)"
print(formula, index, operator_and)
formula, index = get_distributivity_lr(formula, index, operator_and)
if ")#" in formula[index:index + 2]: # ")#"
len_subform_left = len(get_subform_left(formula, index + 1))
operator_and = get_operator(formula, index + 1 - len_subform_left)
if formula[operator_and] == "&": # "(A&B)#"
print(formula, index + 1, operator_and)
formula, index = get_distributivity_rl(formula, index + 1, operator_and)
index += 1
return formula
def get_distributivity_lr(formula, operator_or, operator_and):
# ...(A#(B&C))... |-> ...((A#B)&(A#C))...
# Outer parentheses of the formula
subform_left = get_subform_left(formula, operator_or)
no_modification_left = formula[:operator_or - len(subform_left)]
subform_right = get_subform_right(formula, operator_or)
no_modification_right = formula[operator_or + len(subform_right) + 1:]
# Inner parentheses of the formula
subform_middle = get_subform_left(formula, operator_and)
subform_right = get_subform_right(formula, operator_and)
return f"{no_modification_left}({subform_left}#{subform_middle})&({subform_left}#{subform_right}){no_modification_right}", 0
def get_distributivity_rl(formula, operator_or, operator_and):
# ...((A&B)#C)... |-> ...((A#C)&(B#C))...
# Outer parentheses of the formula
subform_left = get_subform_left(formula, operator_or)
no_modification_left = formula[:operator_or - len(subform_left)]
subform_right = get_subform_right(formula, operator_or)
no_modification_right = formula[operator_or + len(subform_right) + 1:]
# Inner parentheses of the formula
subform_left = get_subform_left(formula, operator_and)
subform_middle = get_subform_right(formula, operator_and)
return f"{no_modification_left}({subform_left}#{subform_right})&({subform_middle}#{subform_right}){no_modification_right}", 0
def distributivity_new_atom(formula):
index = 0
while index < len(formula):
# Existir "#(" ou ")#" é apenas a primeira condição para se aplicar a distributividade
# A segunda condição é existir "#(A&B)" ou "(A&B)#"
if "#(" in formula[index:index + 2]: # "#("
operator_and = get_operator(formula, index + 1)
if formula[operator_and] == "&": # "#(A&B)"
print(formula, index, operator_and)
formula, index = get_distributivity_new_atom_lr(formula, index, operator_and)
if ")#" in formula[index:index + 2]: # "(#"
len_subform_left = len(get_subform_left(formula, index + 1))
operator_and = get_operator(formula, index + 1 - len_subform_left)
if formula[operator_and] == "&": # "(A&B)#"
print(formula, index + 1, operator_and)
formula, index = get_distributivity_new_atom_rl(formula, index + 1, operator_and)
index += 1
return formula
def get_distributivity_new_atom_lr(formula, operator_or, operator_and):
# ...(A#(B&C))... |-> ...(((A#p)&((¬p#B)&(¬p#C)))&((¬B#¬C)#p))...
# Outer parentheses of the formula
subform_left = get_subform_left(formula, operator_or)
no_modification_left = formula[:operator_or - len(subform_left)]
subform_right = get_subform_right(formula, operator_or)
no_modification_right = formula[operator_or + len(subform_right) + 1:]
# Inner parentheses of the formula
subform_middle = get_subform_left(formula, operator_and)
subform_right = get_subform_right(formula, operator_and)
new_operator = get_unprecedented(formula)
return f"{no_modification_left}(({subform_left}#{new_operator})&((¬{new_operator}#{subform_middle})&(¬{new_operator}#{subform_right})))&((¬{subform_middle}#¬{subform_right})#{new_operator}){no_modification_right}", 0
#return f"{no_modification_left}({subform_left}#{new_operator})&(¬{new_operator}#{subform_middle})&(¬{new_operator}#{subform_right})&(¬{subform_middle}#¬{subform_right}#{new_operator}){no_modification_right}", 0
def get_distributivity_new_atom_rl(formula, operator_or, operator_and):
# ...((A&B)#C)... |-> ...(((C#p)&((¬p#A)&(¬p#B)))&((¬A#¬B)#p))...
# Outer parentheses of the formula
subform_left = get_subform_left(formula, operator_or)
no_modification_left = formula[:operator_or - len(subform_left)]
subform_right = get_subform_right(formula, operator_or)
no_modification_right = formula[operator_or + len(subform_right) + 1:]
# Inner parentheses of the formula
subform_left = get_subform_left(formula, operator_and)
subform_middle = get_subform_right(formula, operator_and)
new_operator = get_unprecedented(formula)
return f"{no_modification_left}(({subform_right}#{new_operator})&((¬{new_operator}#{subform_left})&(¬{new_operator}#{subform_middle})))&((¬{subform_left}#¬{subform_middle})#{new_operator}){no_modification_right}", 0
#return f"{no_modification_left}({subform_right}#{new_operator})&(¬{new_operator}#{subform_left})&(¬{new_operator}#{subform_middle})&(¬{subform_left}#¬{subform_middle}#{new_operator}){no_modification_right}", 0
if __name__ == "__main__":
system("cls")
#system("clear")
while True:
formula = input("Formula: ")
if formula == 'q': break
print(formula)
print("Removing implications: ")
A1 = remove_implication(formula)
print(A1)
print("Applying De Morgan's law: ")
A2 = morgan_law(A1)
print(A2)
print("Removing double negation: ")
A3 = remove_double_negation(A2)
print(A3)
print("Applying distributivity: ")
A4 = distributivity(A3)
print(A4)
print("Applying distributivity with a new atom: ")
A5 = distributivity_new_atom(A3)
print(A5)
system("pause")
| 47.099415
| 220
| 0.661286
| 1,006
| 8,054
| 5.011928
| 0.089463
| 0.098175
| 0.067434
| 0.04998
| 0.849663
| 0.842721
| 0.831813
| 0.804641
| 0.801864
| 0.758429
| 0
| 0.007252
| 0.195307
| 8,054
| 171
| 221
| 47.099415
| 0.767011
| 0.157561
| 0
| 0.495935
| 0
| 0.01626
| 0.147458
| 0.118533
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089431
| false
| 0
| 0.01626
| 0
| 0.195122
| 0.138211
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4b5547f1e3ecbc952e52b926351b009c451edf6
| 22
|
py
|
Python
|
celestial/client/system/__init__.py
|
ams-tech/celestial
|
0c4c264563fe79d6838a1c40a1d114c1d6fcf23f
|
[
"MIT"
] | null | null | null |
celestial/client/system/__init__.py
|
ams-tech/celestial
|
0c4c264563fe79d6838a1c40a1d114c1d6fcf23f
|
[
"MIT"
] | null | null | null |
celestial/client/system/__init__.py
|
ams-tech/celestial
|
0c4c264563fe79d6838a1c40a1d114c1d6fcf23f
|
[
"MIT"
] | null | null | null |
from . import cmdline
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c4fbbf35cb97942fd780038b58bdfd3ad398e637
| 248
|
py
|
Python
|
w1data/metadata.py
|
swork/w1-datalogger
|
26191d57ff1c05e5c6e9de90870c5c63916f9a8c
|
[
"MIT"
] | null | null | null |
w1data/metadata.py
|
swork/w1-datalogger
|
26191d57ff1c05e5c6e9de90870c5c63916f9a8c
|
[
"MIT"
] | null | null | null |
w1data/metadata.py
|
swork/w1-datalogger
|
26191d57ff1c05e5c6e9de90870c5c63916f9a8c
|
[
"MIT"
] | null | null | null |
import logging, sys
logger = logging.getLogger(__name__)
def measurement_for_skey(sensor_key, metadata):
# logger.debug("sensor_key:{} metadata:{}".format(sensor_key, metadata))
return metadata['collector']['sensors'][sensor_key]['name']
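# Illustrative example (hypothetical metadata shape matching the lookup above):
# measurement_for_skey("28-000005e2fdc3",
# {"collector": {"sensors": {"28-000005e2fdc3": {"name": "attic"}}}})
# would return "attic".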
| 31
| 76
| 0.741935
| 30
| 248
| 5.8
| 0.6
| 0.206897
| 0.293103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100806
| 248
| 7
| 77
| 35.428571
| 0.780269
| 0.282258
| 0
| 0
| 0
| 0
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
482318efaad6f890a578bab42ca3ad7a7b532213
| 27
|
py
|
Python
|
src/euler_python_package/euler_python/medium/p207.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
src/euler_python_package/euler_python/medium/p207.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
src/euler_python_package/euler_python/medium/p207.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
def problem207():
pass
| 9
| 17
| 0.62963
| 3
| 27
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.259259
| 27
| 2
| 18
| 13.5
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
48246329c18e90c00165cc92ef48bb7d9a328558
| 5,200
|
py
|
Python
|
tests/unit_tests/prepare_email/test_mail_segmenting.py
|
farthur/melusine
|
121fbb17da221b12186a275d5843b466ce65d954
|
[
"Apache-2.0"
] | null | null | null |
tests/unit_tests/prepare_email/test_mail_segmenting.py
|
farthur/melusine
|
121fbb17da221b12186a275d5843b466ce65d954
|
[
"Apache-2.0"
] | null | null | null |
tests/unit_tests/prepare_email/test_mail_segmenting.py
|
farthur/melusine
|
121fbb17da221b12186a275d5843b466ce65d954
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from melusine.prepare_email.mail_segmenting import structure_email, tag_signature
structured_historic = [
{
"text": " \n \n \n Bonjours, \n \n Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la \n somme que je vous \
dois afin d'd'être en régularisation . \n \n Merci bonne journée",
"meta": "",
},
{
"text": " \n Bonjour. \n \n Merci de bien vouloir prendre connaissance \
du document ci-joint : \n 1 - Relevé d'identité postal MUTUELLE \
(contrats) \n \n Sentiments mutualistes. \n \n La Mutuelle \n \n \
La visualisation des fichiers PDF nécessite Adobe Reader. \n ",
"meta": " \n \n Le mar. 22 mai 2018 à 10:20, \
<[email protected]> a écrit\xa0:",
},
]
output = [
{
"meta": {"date": None, "from": None, "to": None},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjours, ", "tags": "HELLO"},
{
"part": " Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la somme que je vous dois \
afin d'd'être en régularisation . \n \n ",
"tags": "BODY",
},
{"part": "Merci bonne journée", "tags": "GREETINGS"},
],
},
},
{
"meta": {
"date": " mar. 22 mai 2018 à 10:20",
"from": " <[email protected]> ",
"to": None,
},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjour. \n \n ", "tags": "HELLO"},
{
"part": "Merci de bien vouloir prendre \
connaissance du document ci-joint : 1 - Relevé d'identité postal MUTUELLE \
(contrats) ",
"tags": "BODY",
},
{"part": " Sentiments mutualistes. ", "tags": "GREETINGS"},
{"part": " La Mutuelle ", "tags": "BODY"},
{
"part": " La visualisation des fichiers \
PDF nécessite Adobe Reader. \n",
"tags": "FOOTER",
},
],
},
},
]
def test_structure_email():
input_df = pd.DataFrame({"structured_historic": [structured_historic]})
output_df = pd.Series([output])
result = input_df.apply(structure_email, axis=1)
pd.testing.assert_series_equal(result, output_df)
structured_historic_signature = [
{
"text": " \n \n \n Bonjours, \n \n Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la \n somme que je vous \
dois afin d'd'être en régularisation . \n \n Merci bonne journée\nJean Dupont",
"meta": "",
},
{
"text": " \n Bonjour. \n \n Merci de bien vouloir prendre connaissance \
du document ci-joint : \n 1 - Relevé d'identité postal MUTUELLE \
(contrats) \n \n Sentiments mutualistes. \n \n La Mutuelle \n \n \
La visualisation des fichiers PDF nécessite Adobe Reader. \n ",
"meta": " \n \n Le mar. 22 mai 2018 à 10:20, \
<[email protected]> a écrit\xa0:",
},
]
output_signature = [
{
"meta": {"date": None, "from": None, "to": None},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjours, ", "tags": "HELLO"},
{
"part": " Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la somme que je vous dois \
afin d'd'être en régularisation . \n \n ",
"tags": "BODY",
},
{"part": "Merci bonne journée", "tags": "GREETINGS"},
{"part": "Jean Dupont", "tags": "SIGNATURE"},
],
},
},
{
"meta": {
"date": " mar. 22 mai 2018 à 10:20",
"from": " <[email protected]> ",
"to": None,
},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjour. \n \n ", "tags": "HELLO"},
{
"part": "Merci de bien vouloir prendre \
connaissance du document ci-joint : 1 - Relevé d'identité postal MUTUELLE \
(contrats) ",
"tags": "BODY",
},
{"part": " Sentiments mutualistes. ", "tags": "GREETINGS"},
{"part": " La Mutuelle ", "tags": "BODY"},
{
"part": " La visualisation des fichiers PDF nécessite Adobe Reader. \n",
"tags": "FOOTER",
},
],
},
},
]
def test_tag_signature():
input_df = pd.DataFrame({"structured_historic": [structured_historic_signature]})
output_df = pd.Series([output_signature])
input_df["structured_body"] = input_df.apply(structure_email, axis=1)
result = input_df.apply(tag_signature, axis=1)
pd.testing.assert_series_equal(result, output_df)
| 35.616438
| 93
| 0.497308
| 537
| 5,200
| 4.741155
| 0.195531
| 0.017282
| 0.02828
| 0.036135
| 0.890416
| 0.873134
| 0.873134
| 0.850746
| 0.808327
| 0.808327
| 0
| 0.014889
| 0.367115
| 5,200
| 145
| 94
| 35.862069
| 0.758736
| 0
| 0
| 0.556391
| 0
| 0.015038
| 0.170524
| 0.012265
| 0
| 0
| 0
| 0
| 0.015038
| 1
| 0.015038
| false
| 0
| 0.015038
| 0
| 0.030075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48285ffa4d4045b7cf655571731a70ba6854e4b3
| 19,519
|
py
|
Python
|
cogv3/admin/managecommands.py
|
XFazze/discordbot
|
6b4201a6d6ff1bed5f65de4b4d30738b4d51e223
|
[
"MIT"
] | 2
|
2021-07-29T02:39:36.000Z
|
2021-07-29T02:39:38.000Z
|
cogv3/admin/managecommands.py
|
XFazze/discordbot
|
6b4201a6d6ff1bed5f65de4b4d30738b4d51e223
|
[
"MIT"
] | 2
|
2021-08-16T08:31:24.000Z
|
2021-09-20T16:34:58.000Z
|
cogv3/admin/managecommands.py
|
XFazze/discordbot
|
6b4201a6d6ff1bed5f65de4b4d30738b4d51e223
|
[
"MIT"
] | null | null | null |
import discord
from discord import embeds
from discord.ext import commands
from discord.ext.commands.core import command
from pymongo import MongoClient, collation
from discord_components import Button, Select, SelectOption, ComponentsBot
from discord.utils import get
class managecommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Enable/disable command
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def disable(self, ctx, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role is None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if role.id not in settings[command]['disabled_guild']:
settings[command]['disabled_guild'].append(role.id)
else:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
if role.id in settings[command]['guild']:
settings[command]['guild'].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Disabled "+command+" on server for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def disablecategory(self, ctx, category: discord.CategoryChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role is None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(category.id) not in settings[command]['disabled_category'].keys():
settings[command]['disabled_category'][str(category.id)] = [
role.id]
else:
if role.id in settings[command]['disabled_category'][str(category.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['disabled_category'][str(
category.id)].append(role.id)
if str(category.id) in settings[command]['category'].keys():
if role.id in settings[command]['category'][str(category.id)]:
settings[command]['category'][str(category.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Disabled "+command+" in category " + category.name+" for "+role.name + category.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def disablechannel(self, ctx, channel: discord.TextChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role is None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(channel.id) not in settings[command]['disabled_channel'].keys():
settings[command]['disabled_channel'][str(channel.id)] = [role.id]
else:
if role.id in settings[command]['disabled_channel'][str(channel.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['disabled_channel'][str(
channel.id)].append(role.id)
if str(channel.id) in settings[command]['channel'].keys():
if role.id in settings[command]['channel'][str(channel.id)]:
settings[command]['channel'][str(channel.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Disabled "+command+" in channel " + channel.name+" for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def enable(self, ctx, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role is None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if role.id not in settings[command]['guild']:
settings[command]['guild'].append(role.id)
else:
await ctx.reply(embed=discord.Embed(title="Command is already enabled", color=0xFD3333))
return
if role.id in settings[command]['disabled_guild']:
settings[command]['disabled_guild'].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Enabled "+command+" on server for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def enablecategory(self, ctx, category: discord.CategoryChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role is None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(category.id) not in settings[command]['category'].keys():
settings[command]['category'][str(category.id)] = [role.id]
else:
if role.id in settings[command]['category'][str(category.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['category'][str(category.id)].append(role.id)
if str(category.id) in settings[command]['disabled_category'].keys():
if role.id in settings[command]['disabled_category'][str(category.id)]:
settings[command]['disabled_category'][str(
category.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Enabled "+command+" in category " + category.name + " for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def enablechannel(self, ctx, channel: discord.TextChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role is None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(channel.id) not in settings[command]['channel'].keys():
settings[command]['channel'][str(channel.id)] = [role.id]
else:
if role.id in settings[command]['channel'][str(channel.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['channel'][str(channel.id)].append(role.id)
if str(channel.id) in settings[command]['disabled_channel'].keys():
if role.id in settings[command]['disabled_channel'][str(channel.id)]:
settings[command]['disabled_channel'][str(
channel.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Enabled "+command+" in channel " + channel.name + " for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def resetperms(self, ctx, command: str = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}}
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Reset command permissions", color=0x00FF42))
@commands.command(pass_context=True)
async def showperms(self, ctx):
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
options=[]
for setting in settings.keys():
options.append(SelectOption(label=setting, value=setting))
message = await ctx.reply("The lower in the hiearchy will go over the other. So channel enable will go over guild disable.", components=[Select(placeholder="Select something!", options=options, custom_id="commandperms",)])
while True:
interaction = await self.bot.wait_for("select_option")
embed = discord.Embed(name="Command permissions for ", value=interaction.values[0], color=0xFFFFFF)
if len(settings[interaction.values[0]]["guild"]) > 0:
msg = ""
for roleid in settings[interaction.values[0]]["guild"]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
else:
msg="None"
embed.add_field(name="Guild wide allowed", value=msg)
if len(settings[interaction.values[0]]["guild"]) > 0:
msg = ""
for roleid in settings[interaction.values[0]]["disabled_guild"]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
else:
msg="None"
embed.add_field(name="Guild wide denied", value=msg)
# the "category"/"channel" entries are no longer lists;
# they are dictionaries keyed by category/channel id
embed.add_field(name="Category wide allowed", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["category"].keys()) > 0:
for key in settings[interaction.values[0]]["category"].keys():
if len(settings[interaction.values[0]]["category"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["category"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.categories, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "None"
embed.add_field(name="Category wide denied", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["disabled_category"].keys()) > 0:
for key in settings[interaction.values[0]]["disabled_category"].keys():
if len(settings[interaction.values[0]]["disabled_category"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["disabled_category"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.categories, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "None"
embed.add_field(name="Channel wide allowed", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["channel"].keys()) > 0:
for key in settings[interaction.values[0]]["channel"].keys():
if len(settings[interaction.values[0]]["channel"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["channel"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.text_channels, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "None"
embed.add_field(name="Channel wide denied", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["disabled_channel"].keys()) > 0:
for key in settings[interaction.values[0]]["disabled_channel"].keys():
if len(settings[interaction.values[0]]["disabled_channel"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["disabled_channel"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.text_channels, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "There "
await message.edit(embed=embed,components=[Select(placeholder="Select something!", options=options, custom_id="commandperms",)])
def setup(bot):
bot.add_cog(managecommands(bot))
def perms(context):
command = context.command.name #str
guild_id = context.guild.id
channel_id = str(context.message.channel.id)
category_id = str(context.message.channel.category_id)
roles = []
for role in context.author.roles:
roles.append(role.id)
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": guild_id}
settings = collection.find_one(myquery)["settings"]
if command in settings.keys():
if channel_id in settings[command]["channel"].keys():
print("channels exist")
if bool(set(roles) & set(settings[command]["channel"][channel_id])):
return True
elif channel_id in settings[command]["disabled_channel"].keys():
if bool(set(roles) & set(settings[command]["disabled_channel"][channel_id])):
return False
elif category_id in settings[command]["category"].keys():
if bool(set(roles) & set(settings[command]["category"][category_id])):
return True
elif category_id in settings[command]["disabled_category"].keys():
if bool(set(roles) & set(settings[command]["disabled_category"][category_id])):
return False
elif bool(set(roles) & set(settings[command]["disabled_guild"])):
return False
elif bool(set(roles) & set(settings[command]["guild"])):
return True
return True
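# Hedged usage note: perms(context) resolves the most specific scope first
# (channel, then category, then guild-wide), so e.g. a role enabled for a
# command in a channel overrides a guild-wide disable of that command.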
| 42.06681
| 230
| 0.56217
| 2,046
| 19,519
| 5.292766
| 0.074291
| 0.073414
| 0.037677
| 0.033244
| 0.890479
| 0.881984
| 0.869055
| 0.845784
| 0.81448
| 0.768861
| 0
| 0.014043
| 0.310467
| 19,519
| 464
| 231
| 42.06681
| 0.790549
| 0.003433
| 0
| 0.687831
| 0
| 0
| 0.120886
| 0
| 0
| 0
| 0.008638
| 0
| 0
| 1
| 0.007937
| false
| 0.021164
| 0.018519
| 0
| 0.082011
| 0.002646
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f9177f95c9276da027118820c1944dc489b0063
| 137
|
py
|
Python
|
backend/elasticsurgery/views/__init__.py
|
EDITD/ElasticSurgery
|
458571d48541d1ddbbfeb20e04703592e5f869e0
|
[
"MIT"
] | null | null | null |
backend/elasticsurgery/views/__init__.py
|
EDITD/ElasticSurgery
|
458571d48541d1ddbbfeb20e04703592e5f869e0
|
[
"MIT"
] | 27
|
2019-09-25T14:19:44.000Z
|
2022-02-12T21:39:17.000Z
|
backend/elasticsurgery/views/__init__.py
|
EDITD/ElasticSurgery
|
458571d48541d1ddbbfeb20e04703592e5f869e0
|
[
"MIT"
] | null | null | null |
from flask import jsonify
from ..app import app
@app.route('/ping', methods=('GET',))
def get_ping():
return jsonify(ping='pong')
| 15.222222
| 37
| 0.671533
| 20
| 137
| 4.55
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153285
| 137
| 8
| 38
| 17.125
| 0.784483
| 0
| 0
| 0
| 0
| 0
| 0.087591
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
6fa6de489d3ecbdc05135c1a882460c438344d63
| 149
|
py
|
Python
|
tests/node_test.py
|
allenai/beaker-py
|
99c8d7f6e9938807ca5405964ef35633a19e8d68
|
[
"Apache-2.0"
] | null | null | null |
tests/node_test.py
|
allenai/beaker-py
|
99c8d7f6e9938807ca5405964ef35633a19e8d68
|
[
"Apache-2.0"
] | 20
|
2021-12-16T13:23:07.000Z
|
2022-03-31T16:40:02.000Z
|
tests/node_test.py
|
allenai/beaker-py
|
99c8d7f6e9938807ca5405964ef35633a19e8d68
|
[
"Apache-2.0"
] | null | null | null |
from beaker import Beaker
def test_node_get(client: Beaker, beaker_node_id: str):
assert client.node.get(beaker_node_id).limits.gpu_count == 8
| 24.833333
| 64
| 0.778523
| 25
| 149
| 4.36
| 0.6
| 0.12844
| 0.220183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007692
| 0.127517
| 149
| 5
| 65
| 29.8
| 0.830769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6fe3cc82a26ac5744b2544116ad6a32d14b35afa
| 30
|
py
|
Python
|
sigal/plugins/encrypt/__init__.py
|
fidergo-stephane-gourichon/sigal
|
b1f2e947700e618425e170e8758b1fbb82c91acb
|
[
"MIT"
] | null | null | null |
sigal/plugins/encrypt/__init__.py
|
fidergo-stephane-gourichon/sigal
|
b1f2e947700e618425e170e8758b1fbb82c91acb
|
[
"MIT"
] | null | null | null |
sigal/plugins/encrypt/__init__.py
|
fidergo-stephane-gourichon/sigal
|
b1f2e947700e618425e170e8758b1fbb82c91acb
|
[
"MIT"
] | null | null | null |
from .encrypt import register
| 15
| 29
| 0.833333
| 4
| 30
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6ffae9b25573e5f7348c89e03b62b498cbca2ea9
| 184
|
py
|
Python
|
reikna/core/__init__.py
|
ringw/reikna
|
0f27f86e35a9f06405de2d99580f766a1b504562
|
[
"MIT"
] | 122
|
2015-05-01T12:42:34.000Z
|
2021-09-30T22:47:59.000Z
|
lib/python/reikna-0.7.5/reikna/core/__init__.py
|
voxie-viewer/voxie
|
d2b5e6760519782e9ef2e51f5322a3baa0cb1198
|
[
"MIT"
] | 42
|
2015-05-04T16:55:47.000Z
|
2021-09-18T04:53:34.000Z
|
lib/python/reikna-0.7.5/reikna/core/__init__.py
|
voxie-viewer/voxie
|
d2b5e6760519782e9ef2e51f5322a3baa0cb1198
|
[
"MIT"
] | 14
|
2015-05-01T19:22:52.000Z
|
2021-09-30T22:48:03.000Z
|
from reikna.core.signature import Type, Annotation, Parameter, Signature
from reikna.core.computation import Computation
from reikna.core.transformation import Transformation, Indices
| 46
| 72
| 0.858696
| 22
| 184
| 7.181818
| 0.5
| 0.189873
| 0.265823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 184
| 3
| 73
| 61.333333
| 0.940476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d230b8b07301d92ab203c4ea79e6dcb73031cdf8
| 36
|
py
|
Python
|
deepleaps/workspace/src/ipc/CustomCommand.py
|
Longseabear/deep-leaps-pytorch
|
abcb87f3079c0612bde4a4f94c75d7c05d5aee3a
|
[
"MIT"
] | 1
|
2021-02-27T18:00:39.000Z
|
2021-02-27T18:00:39.000Z
|
deepleaps/workspace/src/ipc/CustomCommand.py
|
Longseabear/deep-leaps-pytorch
|
abcb87f3079c0612bde4a4f94c75d7c05d5aee3a
|
[
"MIT"
] | null | null | null |
deepleaps/workspace/src/ipc/CustomCommand.py
|
Longseabear/deep-leaps-pytorch
|
abcb87f3079c0612bde4a4f94c75d7c05d5aee3a
|
[
"MIT"
] | null | null | null |
import deepleaps.ipc.RunningCommand
| 18
| 35
| 0.888889
| 4
| 36
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 36
| 1
| 36
| 36
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d24ca4e55e2ea29a960fa8ecd6a05a6ef87a0584
| 8,346
|
py
|
Python
|
network.py
|
tonyhu20116543/Playing-20-Question-Game-with-Policy-Based-Reinforcement-Learning
|
fb9b20181dd3e3273fcbc28144d60f01185ceffd
|
[
"MIT"
] | 12
|
2020-07-24T13:21:35.000Z
|
2021-11-08T10:13:24.000Z
|
network.py
|
tonyhu20116543/Playing-20-Question-Game-with-Policy-Based-Reinforcement-Learning
|
fb9b20181dd3e3273fcbc28144d60f01185ceffd
|
[
"MIT"
] | null | null | null |
network.py
|
tonyhu20116543/Playing-20-Question-Game-with-Policy-Based-Reinforcement-Learning
|
fb9b20181dd3e3273fcbc28144d60f01185ceffd
|
[
"MIT"
] | 7
|
2020-07-24T13:28:44.000Z
|
2021-11-08T10:13:25.000Z
|
import os
import tensorflow as tf
from util import masked_softmax
class PolicyNetwork(object):
""" Policy Function approximator. """
def __init__(self, input_size, output_size, learning_rate=0.001, summaries_dir=None, scope="policy_estimator"):
with tf.variable_scope(scope):
# Writes Tensorboard summaries to disk
self.summary_writer = None
if summaries_dir:
summary_dir = os.path.join(summaries_dir, "summaries_{}".format(scope))
if not os.path.exists(summary_dir):
os.makedirs(summary_dir)
self.summary_writer = tf.summary.FileWriter(summary_dir)
self.state = tf.placeholder(dtype=tf.float64, shape=[1, input_size], name="state")
self.action = tf.placeholder(dtype=tf.int32, name="action")
self.target = tf.placeholder(dtype=tf.float64, name="target")
self.mask = tf.placeholder(dtype=tf.float64, shape=[1, output_size], name="mask")
# This is just table lookup estimator
# self.fc_layer1 = tf.contrib.layers.fully_connected(
# inputs=self.state,
# num_outputs=len(env.state),
# activation_fn=tf.nn.relu)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=self.state,
num_outputs=output_size,
activation_fn=None)
# self.action_probs = tf.squeeze(tf.nn.softmax(self.output_layer))
self.action_probs = tf.squeeze(masked_softmax(self.output_layer, self.mask))
self.picked_action_prob = tf.gather(self.action_probs, self.action)
# Loss and train op
self.loss = -tf.log(self.picked_action_prob) * self.target
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, mask, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.action_probs, {self.state: state.reshape(1, -1),
self.mask: mask.reshape(1, -1)})
def update(self, state, target, action, mask, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.target: target,
self.action: action, self.mask: mask.reshape(1, -1)}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
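# Minimal usage sketch (TF1 graph/session style; dimensions are assumptions):
# policy = PolicyNetwork(input_size=state_dim, output_size=num_actions)
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     probs = policy.predict(state, mask, sess)  # masked softmax over actions
#     loss = policy.update(state, target, action, mask, sess)  # policy-gradient step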
class ValueNetwork(object):
""" Value Function approximator. """
def __init__(self, input_size, output_size=1, learning_rate=0.01, scope="value_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(dtype=tf.float64, shape=[1, input_size], name="state")
self.target = tf.placeholder(dtype=tf.float64, name="target")
# This is just table lookup estimator
# self.fc_layer1 = tf.contrib.layers.fully_connected(
# inputs=self.state,
# num_outputs=input_size,
# activation_fn=tf.nn.relu)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=self.state,
num_outputs=output_size,
activation_fn=None)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1)})
def update(self, state, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.target: target}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
class ObjectAwareRewardNetwork(object):
""" Object-aware Reward Function approximator. """
def __init__(self, input_size, output_size, action_num, learning_rate=0.01, scope="reward_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(shape=[1, input_size], dtype=tf.float64, name="state")
self.action = tf.placeholder(shape=[], dtype=tf.int32, name="question_idx")
self.object = tf.placeholder(shape=[], dtype=tf.int32, name="person_idx")
self.target = tf.placeholder(dtype=tf.float64, name="target")
object_vec = tf.one_hot(self.object, input_size, dtype=tf.float64)
action_vec = tf.one_hot(self.action, action_num, dtype=tf.float64)
concat_vec = tf.concat([object_vec, action_vec], 0)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=tf.concat([self.state, tf.expand_dims(concat_vec, 0)], 1),
num_outputs=output_size,
activation_fn=tf.nn.sigmoid)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, action, object, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1), self.action: action, self.object: object})
def update(self, state, action, object, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.action: action, self.object: object, self.target: target}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
class RewardNetwork(object):
""" Reward Function approximator. """
def __init__(self, input_size, output_size, action_num, learning_rate=0.01, scope="reward_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(shape=[1, input_size], dtype=tf.float64, name="state")
self.action = tf.placeholder(shape=[], dtype=tf.int32, name="question_idx")
self.target = tf.placeholder(dtype=tf.float64, name="target")
action_vec = tf.one_hot(self.action, action_num, dtype=tf.float64)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=tf.concat([self.state, tf.expand_dims(action_vec, 0)], 1),
num_outputs=output_size,
activation_fn=tf.nn.sigmoid)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, action, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1), self.action: action})
def update(self, state, action, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.action: action, self.target: target}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
| 46.88764
| 122
| 0.642104
| 1,071
| 8,346
| 4.815126
| 0.122316
| 0.045375
| 0.032577
| 0.025596
| 0.858639
| 0.819469
| 0.803762
| 0.785534
| 0.775645
| 0.738802
| 0
| 0.012276
| 0.238677
| 8,346
| 177
| 123
| 47.152542
| 0.799339
| 0.072011
| 0
| 0.638655
| 0
| 0
| 0.02115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12605
| false
| 0
| 0.02521
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96359eac01afe317df5fd3c215b39bdd662a534c
| 14,568
|
py
|
Python
|
test/pdu.py
|
praekelt/python-smpp
|
8a0753fc498ab6bcd6243aed5953cddd69cef2c0
|
[
"BSD-3-Clause"
] | 36
|
2015-01-15T09:38:06.000Z
|
2021-06-14T15:27:34.000Z
|
test/pdu.py
|
komuW/smpp_server
|
10ef5c2ebc09e2ef88bdd62c55a4280a187d1eb2
|
[
"BSD-3-Clause"
] | 8
|
2015-02-12T15:52:53.000Z
|
2017-05-22T12:28:45.000Z
|
test/pdu.py
|
komuW/smpp_server
|
10ef5c2ebc09e2ef88bdd62c55a4280a187d1eb2
|
[
"BSD-3-Clause"
] | 22
|
2015-04-29T15:06:17.000Z
|
2021-05-25T11:19:41.000Z
|
pdu_objects = [
{
'header': {
'command_length': 0,
'command_id': 'bind_transmitter',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
'system_type': '',
'interface_version': '34',
'addr_ton': 1,
'addr_npi': 1,
'address_range': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_transmitter_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_receiver',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
'system_type': '',
'interface_version': '34',
'addr_ton': 1,
'addr_npi': 1,
'address_range': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_receiver_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_transceiver',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
'system_type': '',
'interface_version': '34',
'addr_ton': 1,
'addr_npi': 1,
'address_range': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_transceiver_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'outbind',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'unbind',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'unbind_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'generic_nack',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': 'testing 123',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 0,
'short_message': None,
# 'short_message' can be of zero length
},
'optional_parameters': [
{
'tag': 'message_payload',
'length': 0,
'value': '5666',
},
],
},
},
# ]
# breaker = [
{
'header': {
'command_length': 0,
'command_id': 'submit_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_sm_resp',
'command_status': 'ESME_RSYSERR',
'sequence_number': 0,
},
# submit_sm_resp can have no body for failures
},
{
'header': {
'command_length': 0,
'command_id': 'submit_multi',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'number_of_dests': 0,
'dest_address': [
{
'dest_flag': 1,
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': 'the address'
},
{
'dest_flag': 2,
'dl_name': 'the list',
},
{
'dest_flag': 2,
'dl_name': 'the other list',
},
# {}
],
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': 'testing 123',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_multi_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'no_unsuccess': 5,
'unsuccess_sme': [
{
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'error_status_code': 0,
},
{
'dest_addr_ton': 3,
'dest_addr_npi': 1,
'destination_addr': '555',
'error_status_code': 0,
},
],
},
},
},
# ]
# breaker = [
{
'header': {
'command_length': 0,
'command_id': 'deliver_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'deliver_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'data_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'registered_delivery': 0,
'data_coding': 0,
},
'optional_parameters': [
{
'tag': 'message_payload',
'length': 0,
'value': '',
},
],
},
},
{
'header': {
'command_length': 0,
'command_id': 'data_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'query_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'query_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'final_date': '',
'message_state': 0,
'error_code': 0,
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'cancel_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'message_id': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'cancel_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'replace_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': 'is this an = sign?',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'replace_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'enquire_link',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'enquire_link_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'alert_notification',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'source_addr_ton': 'international',
'source_addr_npi': 1,
'source_addr': '',
'esme_addr_ton': 9,
'esme_addr_npi': '',
'esme_addr': '',
},
},
},
]
| 28.17795
| 57
| 0.376922
| 1,059
| 14,568
| 4.766761
| 0.106704
| 0.044374
| 0.109152
| 0.114897
| 0.918582
| 0.918582
| 0.901347
| 0.890254
| 0.792591
| 0.792591
| 0
| 0.024103
| 0.48737
| 14,568
| 516
| 58
| 28.232558
| 0.651848
| 0.007757
| 0
| 0.617357
| 0
| 0
| 0.363932
| 0.018484
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.00789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96af8b4a48adf5297e31757c90f73a77f6edf704
| 101
|
py
|
Python
|
vault_password.py
|
RMuskovets/empireofcode
|
a2a9cfe2c43c7f28999b426601063dd0af352db5
|
[
"Apache-2.0"
] | 1
|
2018-02-20T12:11:45.000Z
|
2018-02-20T12:11:45.000Z
|
vault_password.py
|
RMuskovets/empireofcode
|
a2a9cfe2c43c7f28999b426601063dd0af352db5
|
[
"Apache-2.0"
] | null | null | null |
vault_password.py
|
RMuskovets/empireofcode
|
a2a9cfe2c43c7f28999b426601063dd0af352db5
|
[
"Apache-2.0"
] | null | null | null |
def golf(p): return len(p)>9 and p!=p.lower() and p!=p.upper() and any('0'<=l and l<='9' for l in p)
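# Examples: golf('Password123') -> True (length > 9, mixed case, has a digit);
# golf('password123') -> False (no uppercase letter, so p != p.lower() is False).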
| 50.5
| 100
| 0.584158
| 26
| 101
| 2.269231
| 0.538462
| 0.135593
| 0.169492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035294
| 0.158416
| 101
| 1
| 101
| 101
| 0.658824
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| false
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
73d14617d94420a3d56d21a483a4a8f9476f65c1
| 170
|
py
|
Python
|
notebooks/container/__init__.py
|
DanieleBaranzini/sktime-tutorial-pydata-amsterdam-2020
|
eb9d76a8dc7fff29e4123b940200d58eed87147c
|
[
"BSD-3-Clause"
] | 114
|
2020-06-16T09:29:30.000Z
|
2022-03-12T09:06:52.000Z
|
notebooks/container/__init__.py
|
DanieleBaranzini/sktime-tutorial-pydata-amsterdam-2020
|
eb9d76a8dc7fff29e4123b940200d58eed87147c
|
[
"BSD-3-Clause"
] | 5
|
2020-11-06T13:02:26.000Z
|
2021-06-10T18:34:37.000Z
|
notebooks/container/__init__.py
|
DanieleBaranzini/sktime-tutorial-pydata-amsterdam-2020
|
eb9d76a8dc7fff29e4123b940200d58eed87147c
|
[
"BSD-3-Clause"
] | 62
|
2020-06-16T09:25:05.000Z
|
2022-03-01T21:02:10.000Z
|
from container.base import TimeBase
from container.array import TimeArray, TimeDtype
from container.timeseries import TimeSeries
from container.timeframe import TimeFrame
| 42.5
| 48
| 0.876471
| 21
| 170
| 7.095238
| 0.47619
| 0.348993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 170
| 4
| 49
| 42.5
| 0.967532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb52ea45a86609e7040cf2f5adb9df43b0bf1496
| 265
|
py
|
Python
|
todo/main.py
|
shuayb/simple-todo
|
7a6c840d38ada098b5cc3458d652c7db02ffd791
|
[
"MIT"
] | null | null | null |
todo/main.py
|
shuayb/simple-todo
|
7a6c840d38ada098b5cc3458d652c7db02ffd791
|
[
"MIT"
] | null | null | null |
todo/main.py
|
shuayb/simple-todo
|
7a6c840d38ada098b5cc3458d652c7db02ffd791
|
[
"MIT"
] | null | null | null |
from app import app, db
import models
import views
if __name__ == '__main__':
app.run()
# No need to pass debug=True here; debug = True is already set in config.py.
# app.run(debug=True)
# app.run(debug=True, use_debugger=False, use_reloader=False)
| 26.5
| 79
| 0.683019
| 43
| 265
| 3.976744
| 0.627907
| 0.210526
| 0.128655
| 0.175439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196226
| 265
| 9
| 80
| 29.444444
| 0.802817
| 0.577358
| 0
| 0
| 0
| 0
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb6262762a9edf203b455a0bed2e167c184ce590
| 1,947
|
py
|
Python
|
Twitter Data Extraction.py
|
scottblender/twitter-covid-19-vaccine-analysis
|
a4d273b8b885fc33db075dfc910fa39645fa3789
|
[
"MIT"
] | null | null | null |
Twitter Data Extraction.py
|
scottblender/twitter-covid-19-vaccine-analysis
|
a4d273b8b885fc33db075dfc910fa39645fa3789
|
[
"MIT"
] | null | null | null |
Twitter Data Extraction.py
|
scottblender/twitter-covid-19-vaccine-analysis
|
a4d273b8b885fc33db075dfc910fa39645fa3789
|
[
"MIT"
] | null | null | null |
import snscrape.modules.twitter as sntwitter
import pandas as pd
# Creating list to append tweet data to
tweets_list2 = []
# Using TwitterSearchScraper to scrape data and append tweets to list
for i,tweet in enumerate(sntwitter.TwitterSearchScraper('covid vaccine until:2021-05-24').get_items()):
if i>100000:
break
tweets_list2.append([tweet.date, tweet.id, tweet.content, tweet.user.username, tweet.user.verified, tweet.user.followersCount, tweet.user.friendsCount, tweet.likeCount, tweet.retweetCount, tweet.quoteCount, tweet.user.created, tweet.user.location, tweet.user.displayname, tweet.lang, tweet.coordinates, tweet.place])
# Creating a dataframe from the tweets list above
tweets_df2 = pd.DataFrame(tweets_list2, columns=['Datetime', 'Tweet Id', 'Text', 'Username', 'Verified', 'Followers Count', 'Friends Count', 'Like Count', 'Retweet Count', 'Quote Count', 'Created','Location','Display Name', 'Language', 'Coordinates', 'Place'])
tweets_df2.to_csv('First Extract.csv')
# Creating list to append tweet data to
tweets_list2 = []
# Using TwitterSearchScraper to scrape data and append tweets to list
for i,tweet in enumerate(sntwitter.TwitterSearchScraper('covid vaccine until:2021-05-13').get_items()):
if i>100000:
break
tweets_list2.append([tweet.date, tweet.id, tweet.content, tweet.user.username, tweet.user.verified, tweet.user.followersCount, tweet.user.friendsCount, tweet.likeCount, tweet.retweetCount, tweet.quoteCount, tweet.user.created, tweet.user.location, tweet.user.displayname, tweet.lang, tweet.coordinates, tweet.place])
# Creating a dataframe from the tweets list above
tweets_df3 = pd.DataFrame(tweets_list2, columns=['Datetime', 'Tweet Id', 'Text', 'Username', 'Verified', 'Followers Count', 'Friends Count', 'Like Count', 'Retweet Count', 'Quote Count', 'Created','Location','Display Name', 'Language', 'Coordinates', 'Place'])
tweets_df3.to_csv('Second Extract.csv')
| 69.535714
| 320
| 0.757062
| 260
| 1,947
| 5.615385
| 0.292308
| 0.086301
| 0.019178
| 0.027397
| 0.923288
| 0.923288
| 0.923288
| 0.923288
| 0.923288
| 0.923288
| 0
| 0.021965
| 0.111454
| 1,947
| 27
| 321
| 72.111111
| 0.821965
| 0.157678
| 0
| 0.5
| 0
| 0
| 0.240661
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fb7dc85f21a97ece3e0b036a3c4e6d6962f9001a
| 49
|
py
|
Python
|
netvisor_api_client/schemas/sales_payments/__init__.py
|
kiuru/netvisor-api-client
|
2af3e4ca400497ace5a86d0a1807ec3b9c530cf4
|
[
"MIT"
] | 5
|
2019-04-17T08:10:47.000Z
|
2021-11-27T12:26:15.000Z
|
netvisor_api_client/schemas/sales_payments/__init__.py
|
kiuru/netvisor-api-client
|
2af3e4ca400497ace5a86d0a1807ec3b9c530cf4
|
[
"MIT"
] | 7
|
2019-06-25T17:02:50.000Z
|
2021-07-21T10:14:38.000Z
|
netvisor_api_client/schemas/sales_payments/__init__.py
|
kiuru/netvisor-api-client
|
2af3e4ca400497ace5a86d0a1807ec3b9c530cf4
|
[
"MIT"
] | 10
|
2019-06-25T15:37:33.000Z
|
2021-10-16T19:40:37.000Z
|
from .list import SalesPaymentListSchema # noqa
| 24.5
| 48
| 0.816327
| 5
| 49
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 49
| 1
| 49
| 49
| 0.952381
| 0.081633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb8b63ad2ffbee810610ac48848eca279fdeb691
| 47
|
py
|
Python
|
primeiro programa/primeiro_programa.py
|
Cesario115/Ola-mundo
|
2949ff2c9dc1b5f8bc70825072751b19920019af
|
[
"MIT"
] | null | null | null |
primeiro programa/primeiro_programa.py
|
Cesario115/Ola-mundo
|
2949ff2c9dc1b5f8bc70825072751b19920019af
|
[
"MIT"
] | null | null | null |
primeiro programa/primeiro_programa.py
|
Cesario115/Ola-mundo
|
2949ff2c9dc1b5f8bc70825072751b19920019af
|
[
"MIT"
] | null | null | null |
print('='*50)
print("Olá mundo!")
print('='*50)
| 15.666667
| 19
| 0.574468
| 7
| 47
| 3.857143
| 0.571429
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.06383
| 47
| 3
| 20
| 15.666667
| 0.522727
| 0
| 0
| 0.666667
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
838511c8e3372a6ae2d5fbb109dbbc9156779d54
| 171
|
py
|
Python
|
stdlib/getpass_qs.py
|
bpuderer/python-snippets27
|
8d51ff34c48bee1247575536d8ed506eafde8631
|
[
"MIT"
] | 3
|
2015-11-20T14:30:53.000Z
|
2015-12-19T05:55:19.000Z
|
stdlib/getpass_qs.py
|
bpuderer/python-snippets27
|
8d51ff34c48bee1247575536d8ed506eafde8631
|
[
"MIT"
] | null | null | null |
stdlib/getpass_qs.py
|
bpuderer/python-snippets27
|
8d51ff34c48bee1247575536d8ed506eafde8631
|
[
"MIT"
] | 1
|
2016-01-05T20:54:49.000Z
|
2016-01-05T20:54:49.000Z
|
import getpass
# prompt the user without echoing the typed input (Python 2.7 print-statement syntax below)
print getpass.getpass()
print getpass.getpass(prompt="Custom Prompt:")
print "user login name:", getpass.getuser()
| 17.1
| 46
| 0.766082
| 22
| 171
| 5.954545
| 0.545455
| 0.198473
| 0.290076
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128655
| 171
| 9
| 47
| 19
| 0.879195
| 0.19883
| 0
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 1
| 0.25
| null | null | 0.75
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 6
|
f7d56596394f7bfd79f8b0a1466fae7aaa135fac
| 2,104
|
py
|
Python
|
test/torch/mpc/test_fss.py
|
NicoSerranoP/PySyft
|
87fcd566c46fce4c16d363c94396dd26bd82a016
|
[
"Apache-2.0"
] | 3
|
2020-11-24T05:15:57.000Z
|
2020-12-07T09:52:45.000Z
|
test/torch/mpc/test_fss.py
|
NicoSerranoP/PySyft
|
87fcd566c46fce4c16d363c94396dd26bd82a016
|
[
"Apache-2.0"
] | 1
|
2020-09-29T00:24:31.000Z
|
2020-09-29T00:24:31.000Z
|
test/torch/mpc/test_fss.py
|
NicoSerranoP/PySyft
|
87fcd566c46fce4c16d363c94396dd26bd82a016
|
[
"Apache-2.0"
] | 1
|
2021-09-04T16:27:41.000Z
|
2021-09-04T16:27:41.000Z
|
import pytest
import torch as th
from syft.frameworks.torch.mpc.fss import DPF, DIF, n
@pytest.mark.parametrize("op", ["eq", "le"])
def test_fss_class(op):
class_ = {"eq": DPF, "le": DIF}[op]
th_op = {"eq": th.eq, "le": th.le}[op]
gather_op = {"eq": "__add__", "le": "__xor__"}[op]
# single value
primitive = class_.keygen(n_values=1)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([0])
x_masked = x + k0[0] + k1[0]
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
# 1D tensor
primitive = class_.keygen(n_values=3)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([0, 2, -2])
x_masked = x + k0[0] + k1[0]
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
# 2D tensor
primitive = class_.keygen(n_values=4)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([[0, 2], [-2, 0]])
x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
# 3D tensor
primitive = class_.keygen(n_values=8)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([[[0, 2], [-2, 0]], [[0, 2], [-2, 0]]])
x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
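Each of the four shape cases above repeats the same keygen/mask/eval round-trip. A sketch of that shared pattern as a helper, using only the DPF/DIF interface exercised by the test (the helper name is illustrative):

import torch as th
from syft.frameworks.torch.mpc.fss import n

def check_fss(class_, th_op, gather_op, x):
    # One keygen/mask/eval round-trip, compared against the plaintext op.
    alpha, s_00, s_01, *CW = class_.keygen(n_values=x.numel())
    mask = th.randint(0, 2 ** n, alpha.shape)
    k0, k1 = ((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)
    x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)
    y0 = class_.eval(0, x_masked, *k0[1:])
    y1 = class_.eval(1, x_masked, *k1[1:])
    assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()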
| 32.875
| 70
| 0.551331
| 369
| 2,104
| 2.96748
| 0.157182
| 0.076712
| 0.03653
| 0.076712
| 0.827397
| 0.80274
| 0.712329
| 0.712329
| 0.712329
| 0.712329
| 0
| 0.082822
| 0.225285
| 2,104
| 63
| 71
| 33.396825
| 0.588957
| 0.019962
| 0
| 0.636364
| 0
| 0
| 0.015557
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.022727
| false
| 0
| 0.068182
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
790a31602a2e6231958a1ed23fbe61a5ef5fd6fa
| 23
|
py
|
Python
|
examples/ndfd/ndfd.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 123
|
2015-01-12T06:43:22.000Z
|
2022-03-20T18:06:46.000Z
|
examples/ndfd/ndfd.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 103
|
2015-01-08T18:35:57.000Z
|
2022-01-18T01:44:14.000Z
|
examples/ndfd/ndfd.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 54
|
2015-02-15T17:12:00.000Z
|
2022-03-07T23:02:32.000Z
|
from raw.ndfd import *
| 11.5
| 22
| 0.73913
| 4
| 23
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
790a4f9b1ca5315576470030e7218150601d0818
| 56
|
py
|
Python
|
pandoc_mustache/__init__.py
|
copart/pandoc-mustache
|
f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa
|
[
"CC0-1.0"
] | 43
|
2017-12-27T05:57:00.000Z
|
2022-03-18T10:07:28.000Z
|
pandoc_mustache/__init__.py
|
copart/pandoc-mustache
|
f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa
|
[
"CC0-1.0"
] | 10
|
2018-02-07T11:20:37.000Z
|
2021-04-22T21:44:19.000Z
|
pandoc_mustache/__init__.py
|
copart/pandoc-mustache
|
f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa
|
[
"CC0-1.0"
] | 8
|
2018-11-05T13:10:35.000Z
|
2021-08-30T18:14:02.000Z
|
from .version import __version__
import pandoc_mustache
| 18.666667
| 32
| 0.875
| 7
| 56
| 6.285714
| 0.714286
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 56
| 2
| 33
| 28
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7911efa6a596e02ff81a8a1e7aa08e6a17b34751
| 721
|
py
|
Python
|
tests/validation/test_is_subnational1.py
|
StuartMacKay/ebird-api
|
14b5c777548416a58abec05e25cd4b9a8e22f210
|
[
"MIT"
] | 9
|
2020-05-16T20:26:33.000Z
|
2021-11-02T06:24:46.000Z
|
tests/validation/test_is_subnational1.py
|
StuartMacKay/ebird-api
|
14b5c777548416a58abec05e25cd4b9a8e22f210
|
[
"MIT"
] | 17
|
2019-06-22T09:41:22.000Z
|
2020-09-11T06:25:21.000Z
|
tests/validation/test_is_subnational1.py
|
ProjectBabbler/ebird-api
|
14b5c777548416a58abec05e25cd4b9a8e22f210
|
[
"MIT"
] | null | null | null |
import unittest
from ebird.api.validation import is_subnational1
class IsSubnational1Tests(unittest.TestCase):
"""Tests for the is_subnational1 validation function."""
def test_is_subnational1(self):
self.assertTrue(is_subnational1("US-NV"))
def test_invalid_code_is_not_subnational1(self):
self.assertFalse(is_subnational1("U"))
self.assertFalse(is_subnational1("US-"))
def test_country_is_not_subnational1(self):
self.assertFalse(is_subnational1("US"))
def test_subnational2_is_not_subnational1(self):
self.assertFalse(is_subnational1("US-NV-VMT"))
def test_location_is_not_subnational1(self):
self.assertFalse(is_subnational1("L123456"))
| 30.041667
| 60
| 0.744799
| 87
| 721
| 5.862069
| 0.356322
| 0.247059
| 0.196078
| 0.284314
| 0.488235
| 0.488235
| 0.488235
| 0.4
| 0.203922
| 0
| 0
| 0.03437
| 0.152566
| 721
| 23
| 61
| 31.347826
| 0.800327
| 0.069348
| 0
| 0
| 0
| 0
| 0.040602
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.357143
| false
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
f70e1eec634ed0c89cd786687c6b726187e816d5
| 11,426
|
py
|
Python
|
src/train.py
|
Gordonbuck/ml-oov-we
|
ce28cd8b556a16125ba36cd41781a3e60bb26422
|
[
"MIT"
] | null | null | null |
src/train.py
|
Gordonbuck/ml-oov-we
|
ce28cd8b556a16125ba36cd41781a3e60bb26422
|
[
"MIT"
] | null | null | null |
src/train.py
|
Gordonbuck/ml-oov-we
|
ce28cd8b556a16125ba36cd41781a3e60bb26422
|
[
"MIT"
] | null | null | null |
import higher
from leap import Leap
import numpy as np
import os
import torch
import torch.nn as nn
import gc
def train(model, source_corpus, char2idx, args, device):
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.patience,
threshold=args.threshold)
best_valid_cosine = 1
for epoch in np.arange(args.n_epochs):
valid_cosine = []
valid_ce = []
model.train()
for batch in np.arange(args.n_batch):
train_contexts, train_targets, train_vocabs, train_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
fixed=args.fixed_shot,
return_inds=True)
optimizer.zero_grad()
if args.lang_model:
pred_emb, pred_ind = model.forward(train_contexts, train_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, train_inds)
loss += -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
else:
pred_emb = model.forward(train_contexts, train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
loss.backward()
optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
valid_contexts, valid_targets, valid_vocabs, valid_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
use_valid=True,
fixed=args.fixed_shot,
return_inds=True)
if args.lang_model:
pred_emb, pred_ind = model.forward(valid_contexts, valid_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, valid_inds).mean()
valid_ce += [loss.cpu().numpy()]
else:
pred_emb = model.forward(valid_contexts, valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, valid_targets).mean()
valid_cosine += [loss.cpu().numpy()]
avg_valid = np.average(valid_cosine)
lr_scheduler.step(avg_valid)
if args.lang_model:
avg_ce = np.average(valid_ce)
print(f"Average cosine loss: {avg_valid}; Average cross entropy loss: {avg_ce}")
else:
print(f"Average cosine loss: {avg_valid}")
if avg_valid < best_valid_cosine:
best_valid_cosine = avg_valid
torch.save(model.state_dict(), os.path.join(args.save_dir, 'model.pt'))
if optimizer.param_groups[0]['lr'] < args.lr_early_stop:
print('LR early stop')
break
def maml_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
meta_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
gc.collect()
source_valid_cosine = []
target_valid_cosine = []
model.train()
with torch.backends.cudnn.flags(benchmark=True):
for meta_batch in np.arange(args.n_meta_batch):
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_inner_lr_init)
meta_optimizer.zero_grad()
with higher.innerloop_ctx(model, inner_optimizer, copy_initial_weights=False) as (fmodel, diffopt):
for inner_batch in np.arange(args.n_inner_batch):
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = fmodel.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
diffopt.step(loss)
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = fmodel.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
meta_optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'maml_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.maml_lr_early_stop:
print('LR early stop')
break
def leap_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
leap = Leap(model)
meta_optimizer = torch.optim.Adam(leap.parameters(), lr=args.leap_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
source_valid_cosine = []
target_valid_cosine = []
model.train()
for meta_batch in np.arange(args.n_meta_batch):
meta_optimizer.zero_grad()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = model.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.normalize()
meta_optimizer.step()
leap.to(model)
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'leap_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.leap_lr_early_stop:
print('LR early stop')
break
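train() reads every hyperparameter from a single args object. A hedged sketch of a minimal invocation; all values below are placeholders, and model, source_corpus and char2idx stand in for objects built elsewhere in the project:

from types import SimpleNamespace
import torch

# Placeholder hyperparameters; each field is read somewhere inside train().
args = SimpleNamespace(
    lr_init=1e-3, lr_decay=0.5, patience=2, threshold=1e-4,
    n_epochs=10, n_batch=100, batch_size=64, n_shot=4, fixed_shot=True,
    lang_model=False, save_dir='checkpoints', lr_early_stop=1e-6,
)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# model, source_corpus and char2idx come from the rest of the project:
# train(model, source_corpus, char2idx, args, device)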
| 52.412844
| 120
| 0.591896
| 1,304
| 11,426
| 4.858129
| 0.092025
| 0.041673
| 0.032833
| 0.026519
| 0.86614
| 0.85588
| 0.830781
| 0.816575
| 0.781847
| 0.735754
| 0
| 0.002718
| 0.323823
| 11,426
| 217
| 121
| 52.654378
| 0.81724
| 0
| 0
| 0.668508
| 0
| 0
| 0.032295
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016575
| false
| 0
| 0.038674
| 0
| 0.055249
| 0.038674
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f72ddd7241194452b55a3968e1f8f4807cdc48eb
| 1,166
|
py
|
Python
|
pact/test/test_constants.py
|
dwang7/pact-python
|
da03551e812508652e062fc4ba6071f1119e5bf2
|
[
"MIT"
] | null | null | null |
pact/test/test_constants.py
|
dwang7/pact-python
|
da03551e812508652e062fc4ba6071f1119e5bf2
|
[
"MIT"
] | null | null | null |
pact/test/test_constants.py
|
dwang7/pact-python
|
da03551e812508652e062fc4ba6071f1119e5bf2
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from mock import patch
from .. import constants
class mock_service_exeTestCase(TestCase):
def setUp(self):
super(mock_service_exeTestCase, self).setUp()
self.addCleanup(patch.stopall)
self.mock_os = patch.object(constants, 'os', autospec=True).start()
def test_other(self):
self.mock_os.name = 'posix'
self.assertEqual(constants.mock_service_exe(), 'pact-mock-service')
def test_windows(self):
self.mock_os.name = 'nt'
self.assertEqual(constants.mock_service_exe(), 'pact-mock-service.bat')
class provider_verifier_exeTestCase(TestCase):
def setUp(self):
super(provider_verifier_exeTestCase, self).setUp()
self.addCleanup(patch.stopall)
self.mock_os = patch.object(constants, 'os', autospec=True).start()
def test_other(self):
self.mock_os.name = 'posix'
self.assertEqual(
constants.provider_verifier_exe(), 'pact-provider-verifier')
def test_windows(self):
self.mock_os.name = 'nt'
self.assertEqual(
constants.provider_verifier_exe(), 'pact-provider-verifier.bat')
| 30.684211
| 79
| 0.679245
| 141
| 1,166
| 5.432624
| 0.234043
| 0.086162
| 0.078329
| 0.073107
| 0.825065
| 0.825065
| 0.731071
| 0.731071
| 0.731071
| 0.553525
| 0
| 0
| 0.202401
| 1,166
| 37
| 80
| 31.513514
| 0.823656
| 0
| 0
| 0.592593
| 0
| 0
| 0.089194
| 0.059177
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.222222
| false
| 0
| 0.111111
| 0
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f740e9188e23989d7d8cb429eceb0134b86a65bd
| 194
|
py
|
Python
|
hallucinate/api.py
|
SySS-Research/hallucinate
|
f6dbeea0599e232707e6cf27c3fe592edba92f6f
|
[
"MIT"
] | 199
|
2021-07-27T13:47:14.000Z
|
2022-03-05T09:18:56.000Z
|
hallucinate/api.py
|
avineshwar/hallucinate
|
f6dbeea0599e232707e6cf27c3fe592edba92f6f
|
[
"MIT"
] | 1
|
2021-12-08T19:32:29.000Z
|
2021-12-08T19:32:29.000Z
|
hallucinate/api.py
|
avineshwar/hallucinate
|
f6dbeea0599e232707e6cf27c3fe592edba92f6f
|
[
"MIT"
] | 13
|
2021-07-27T18:55:03.000Z
|
2021-08-09T06:15:35.000Z
|
class BaseHandler:
def send(self, data, p):
pass
def recv(self, data, p):
pass
def shutdown(self, p, direction=2):
pass
def close(self):
pass
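Since BaseHandler's hooks are all no-ops, subclasses override only what they need. A hypothetical subclass that logs payload sizes (the meaning of data and p is assumed from the signatures, not documented here):

class LoggingHandler(BaseHandler):
    # Example subclass: log payload sizes, pass data through untouched.
    def send(self, data, p):
        print('send: %d bytes' % len(data))

    def recv(self, data, p):
        print('recv: %d bytes' % len(data))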
| 13.857143
| 39
| 0.525773
| 25
| 194
| 4.08
| 0.52
| 0.205882
| 0.176471
| 0.254902
| 0.313725
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008197
| 0.371134
| 194
| 13
| 40
| 14.923077
| 0.827869
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0.444444
| 0
| 0
| 0.555556
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
f783069506127a9b55df9ae0fb7a072477dcbc3b
| 32
|
py
|
Python
|
tests/unit/cli/test_repo.py
|
tehlingchu/anchore-cli
|
b0df36337f443749991a49263227c1d40989debb
|
[
"Apache-2.0"
] | 110
|
2017-09-14T02:15:15.000Z
|
2022-03-30T20:14:21.000Z
|
tests/unit/cli/test_repo.py
|
tehlingchu/anchore-cli
|
b0df36337f443749991a49263227c1d40989debb
|
[
"Apache-2.0"
] | 115
|
2017-09-22T12:15:30.000Z
|
2022-01-17T12:31:21.000Z
|
tests/unit/cli/test_repo.py
|
tehlingchu/anchore-cli
|
b0df36337f443749991a49263227c1d40989debb
|
[
"Apache-2.0"
] | 56
|
2017-09-22T11:26:25.000Z
|
2022-03-03T14:14:58.000Z
|
from anchorecli.cli import repo
| 16
| 31
| 0.84375
| 5
| 32
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5421bfc32b86a8ee54dfb925ef8eac6e4d16b3b0
| 212
|
py
|
Python
|
pycache/__init__.py
|
HuiiBuh/pycache
|
300bd51f9e575fd77014d6c86497dd58f313f752
|
[
"MIT"
] | 1
|
2021-09-04T05:34:26.000Z
|
2021-09-04T05:34:26.000Z
|
pycache/__init__.py
|
HuiiBuh/pycache
|
300bd51f9e575fd77014d6c86497dd58f313f752
|
[
"MIT"
] | 1
|
2021-03-14T19:26:01.000Z
|
2021-03-16T18:46:38.000Z
|
pycache/__init__.py
|
HuiiBuh/pycache
|
300bd51f9e575fd77014d6c86497dd58f313f752
|
[
"MIT"
] | null | null | null |
__version__ = '0.3.2'
# noinspection PyUnresolvedReferences
from ._cache._cache import cache
# noinspection PyUnresolvedReferences
from ._scheduler._scheduler import add_schedule, schedule, ScheduleSubscription
| 30.285714
| 79
| 0.84434
| 21
| 212
| 8.095238
| 0.619048
| 0.4
| 0.447059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 0.09434
| 212
| 6
| 80
| 35.333333
| 0.869792
| 0.334906
| 0
| 0
| 0
| 0
| 0.036232
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
583a1302a3f7562a97c1476d70bc500c24d60c4f
| 174
|
py
|
Python
|
glanceclient/common/exceptions.py
|
citrix-openstack-build/python-glanceclient
|
32d9c42816b608220ae5692e573142dab6534604
|
[
"Apache-2.0"
] | 1
|
2019-09-11T11:56:19.000Z
|
2019-09-11T11:56:19.000Z
|
tools/dockerize/webportal/usr/lib/python2.7/site-packages/glanceclient/common/exceptions.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
tools/dockerize/webportal/usr/lib/python2.7/site-packages/glanceclient/common/exceptions.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
# This is here for compatibility purposes. Once all known OpenStack clients
# are updated to use glanceclient.exc, this file should be removed
from glanceclient.exc import *
| 43.5
| 75
| 0.804598
| 26
| 174
| 5.384615
| 0.884615
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155172
| 174
| 3
| 76
| 58
| 0.952381
| 0.793103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
587f22d6d391706fced03d26fcfcf342a5722cf3
| 1,394
|
py
|
Python
|
deepmedic_config.py
|
farrokhkarimi/deepmedic_project
|
b0c916171673ce3259d2458146f2db941f0bf270
|
[
"MIT"
] | 2
|
2021-07-15T18:40:18.000Z
|
2021-08-03T17:10:12.000Z
|
deepmedic_config.py
|
farrokhkarimi/deepmedic_project
|
b0c916171673ce3259d2458146f2db941f0bf270
|
[
"MIT"
] | null | null | null |
deepmedic_config.py
|
farrokhkarimi/deepmedic_project
|
b0c916171673ce3259d2458146f2db941f0bf270
|
[
"MIT"
] | 1
|
2022-01-17T12:11:51.000Z
|
2022-01-17T12:11:51.000Z
|
import os
def deepmedic_config(config_files_path, niftis_path, test_flair_file_name, test_t1c_file_name, mask, prediction_file_name, output_path):
with open(os.path.join(config_files_path, 'model', 'modelConfig.cfg'), 'r') as f:
lines = f.readlines()
lines[8] = 'folderForOutput = "%s"\n' % output_path
with open(os.path.join(config_files_path, 'model', 'modelConfig.cfg'), 'w') as f:
f.writelines(lines)
with open(os.path.join(config_files_path, 'test', 'testConfig.cfg'), 'r') as f:
lines = f.readlines()
lines[8] = 'folderForOutput = "%s"\n' % output_path
with open(os.path.join(config_files_path, 'test', 'testConfig.cfg'), 'w') as f:
f.writelines(lines)
with open(os.path.join(config_files_path, 'test', 'testChannels_flair.cfg'), 'w') as f:
f.write(os.path.join(niftis_path, test_flair_file_name))
with open(os.path.join(config_files_path, 'test', 'testChannels_t1c.cfg'), 'w') as f:
f.write(os.path.join(niftis_path, test_t1c_file_name))
with open(os.path.join(config_files_path, 'test', 'testRoiMasks.cfg'), 'w') as f:
f.write(os.path.join(niftis_path, mask))
with open(os.path.join(config_files_path, 'test', 'testNamesOfPredictions.cfg'), 'w') as f:
f.write(prediction_file_name)
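A hedged usage sketch of deepmedic_config; every path and file name below is a placeholder:

deepmedic_config(
    config_files_path='/path/to/deepmedic/configFiles',
    niftis_path='/path/to/niftis',
    test_flair_file_name='case01_flair.nii.gz',
    test_t1c_file_name='case01_t1c.nii.gz',
    mask='case01_mask.nii.gz',
    prediction_file_name='case01_prediction',
    output_path='/path/to/output',
)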
| 48.068966
| 137
| 0.636298
| 199
| 1,394
| 4.236181
| 0.190955
| 0.078292
| 0.130486
| 0.132859
| 0.827995
| 0.827995
| 0.774614
| 0.774614
| 0.774614
| 0.730724
| 0
| 0.00457
| 0.215208
| 1,394
| 29
| 138
| 48.068966
| 0.765996
| 0
| 0
| 0.3
| 0
| 0
| 0.169715
| 0.035113
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5439f19ce894429f825edd092b433b960bae49d4
| 9,411
|
py
|
Python
|
src/peering/azext_peering/custom.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2022-03-22T15:02:32.000Z
|
2022-03-22T15:02:32.000Z
|
src/peering/azext_peering/custom.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2021-02-10T22:04:59.000Z
|
2021-02-10T22:04:59.000Z
|
src/peering/azext_peering/custom.py
|
michimune/azure-cli-extensions
|
697e2c674e5c0825d44c72d714542fe01331e107
|
[
"MIT"
] | 1
|
2021-06-03T19:31:10.000Z
|
2021-06-03T19:31:10.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=unused-argument
import json
def list_peering_legacy(cmd, client,
peering_location=None,
kind=None):
return client.list(peering_location=peering_location, kind=kind)
def create_peering_asn(cmd, client,
name,
peer_asn=None,
emails=None,
phone=None,
peer_name=None,
validation_state=None):
body = {}
body['peer_asn'] = peer_asn # number
body.setdefault('peer_contact_info', {})['emails'] = None if emails is None else emails.split(',')
body.setdefault('peer_contact_info', {})['phone'] = None if phone is None else phone.split(',')
body['peer_name'] = peer_name # str
body['validation_state'] = validation_state # str
return client.create_or_update(peer_asn_name=name, peer_asn=body)
def update_peering_asn(cmd, client,
name,
peer_asn=None,
emails=None,
phone=None,
peer_name=None,
validation_state=None):
body = client.get(peer_asn_name=name).as_dict()
    # as_dict() returns a plain dict, so update it with key access
    body['peer_asn'] = peer_asn  # number
    body.setdefault('peer_contact_info', {})['emails'] = None if emails is None else emails.split(',')
    body.setdefault('peer_contact_info', {})['phone'] = None if phone is None else phone.split(',')
    body['peer_name'] = peer_name  # str
    body['validation_state'] = validation_state  # str
return client.create_or_update(peer_asn_name=name, peer_asn=body)
def delete_peering_asn(cmd, client,
name):
return client.delete(peer_asn_name=name)
def list_peering_asn(cmd, client):
return client.list_by_subscription()
def list_peering_location(cmd, client,
kind=None,
direct_peering_type=None):
return client.list(kind=kind, direct_peering_type=direct_peering_type)
def create_peering(cmd, client,
resource_group,
name,
kind,
location,
sku_name=None,
sku_tier=None,
sku_family=None,
sku_size=None,
direct_connections=None,
direct_peer_asn=None,
direct_direct_peering_type=None,
exchange_connections=None,
exchange_peer_asn=None,
peering_location=None,
tags=None):
body = {}
body.setdefault('sku', {})['name'] = sku_name # str
body.setdefault('sku', {})['tier'] = sku_tier # str
body.setdefault('sku', {})['family'] = sku_family # str
body.setdefault('sku', {})['size'] = sku_size # str
body['kind'] = kind # str
body.setdefault('direct', {})['connections'] = json.loads(direct_connections) if isinstance(direct_connections, str) else direct_connections
body.setdefault('direct', {}).setdefault('peer_asn', {})['id'] = direct_peer_asn
body.setdefault('direct', {})['direct_peering_type'] = direct_direct_peering_type # str
# body.setdefault('exchange', {})['connections'] = json.loads(exchange_connections) if isinstance(exchange_connections, str) else exchange_connections
# body.setdefault('exchange', {}).setdefault('peer_asn', {})['id'] = exchange_peer_asn
body['peering_location'] = peering_location # str
body['location'] = location # str
body['tags'] = tags # dictionary
return client.create_or_update(resource_group_name=resource_group, peering_name=name, peering=body)
def update_peering(cmd, client,
resource_group,
name,
sku_name=None,
sku_tier=None,
sku_family=None,
sku_size=None,
kind=None,
direct_connections=None,
direct_peer_asn=None,
direct_direct_peering_type=None,
exchange_connections=None,
exchange_peer_asn=None,
peering_location=None,
location=None,
tags=None):
body = client.get(resource_group_name=resource_group, peering_name=name).as_dict()
    # as_dict() returns a plain dict, so update nested fields with key access
    body.setdefault('sku', {})['name'] = sku_name  # str
    body.setdefault('sku', {})['tier'] = sku_tier  # str
    body.setdefault('sku', {})['family'] = sku_family  # str
    body.setdefault('sku', {})['size'] = sku_size  # str
    body['kind'] = kind  # str
    body.setdefault('direct', {})['connections'] = json.loads(direct_connections) if isinstance(direct_connections, str) else direct_connections
    body.setdefault('direct', {})['peer_asn'] = direct_peer_asn
    body.setdefault('direct', {})['direct_peering_type'] = direct_direct_peering_type  # str
    body.setdefault('exchange', {})['connections'] = json.loads(exchange_connections) if isinstance(exchange_connections, str) else exchange_connections
    body.setdefault('exchange', {})['peer_asn'] = exchange_peer_asn
    body['peering_location'] = peering_location  # str
    body['location'] = location  # str
    body['tags'] = tags  # dictionary
return client.create_or_update(resource_group_name=resource_group, peering_name=name, peering=body)
def delete_peering(cmd, client,
resource_group,
name):
return client.delete(resource_group_name=resource_group, peering_name=name)
def list_peering(cmd, client,
resource_group):
if resource_group is not None:
return client.list_by_resource_group(resource_group_name=resource_group)
return client.list_by_subscription()
def list_peering_service_location(cmd, client):
return client.list()
def create_peering_service_prefix(cmd, client,
resource_group,
peering_service_name,
name,
prefix=None):
return client.create_or_update(resource_group_name=resource_group, peering_service_name=peering_service_name, prefix_name=name, prefix=prefix)
def update_peering_service_prefix(cmd, client,
resource_group,
peering_service_name,
name,
prefix=None):
return client.create_or_update(resource_group_name=resource_group, peering_service_name=peering_service_name, prefix_name=name, prefix=prefix)
def delete_peering_service_prefix(cmd, client,
resource_group,
peering_service_name,
name):
return client.delete(resource_group_name=resource_group, peering_service_name=peering_service_name, prefix_name=name)
def list_peering_service_prefix(cmd, client,
resource_group,
peering_service_name):
return client.list_by_peering_service(resource_group_name=resource_group, peering_service_name=peering_service_name)
def list_peering_service_provider(cmd, client):
return client.list()
def create_peering_service(cmd, client,
resource_group,
name,
location,
peering_service_location=None,
peering_service_provider=None,
tags=None):
body = {}
body['peering_service_location'] = peering_service_location # str
body['peering_service_provider'] = peering_service_provider # str
body['location'] = location # str
body['tags'] = tags # dictionary
return client.create_or_update(resource_group_name=resource_group, peering_service_name=name, peering_service=body)
def update_peering_service(cmd, client,
resource_group,
name,
peering_service_location=None,
peering_service_provider=None,
location=None,
tags=None):
body = client.get(resource_group_name=resource_group, peering_service_name=name).as_dict()
    # as_dict() returns a plain dict, so update it with key access
    body['peering_service_location'] = peering_service_location  # str
    body['peering_service_provider'] = peering_service_provider  # str
    body['location'] = location  # str
    body['tags'] = tags  # dictionary
return client.create_or_update(resource_group_name=resource_group, peering_service_name=name, peering_service=body)
def delete_peering_service(cmd, client,
resource_group,
name):
return client.delete(resource_group_name=resource_group, peering_service_name=name)
def list_peering_service(cmd, client,
resource_group):
if resource_group is not None:
return client.list_by_resource_group(resource_group_name=resource_group)
return client.list_by_subscription()
| 42.013393
| 154
| 0.604824
| 1,011
| 9,411
| 5.317507
| 0.086053
| 0.106399
| 0.063244
| 0.065104
| 0.837798
| 0.806734
| 0.763021
| 0.737723
| 0.705357
| 0.68657
| 0
| 0
| 0.294974
| 9,411
| 223
| 155
| 42.201794
| 0.810249
| 0.094889
| 0
| 0.596491
| 0
| 0
| 0.030918
| 0.005664
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116959
| false
| 0
| 0.005848
| 0.070175
| 0.251462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
543c4f51f177e890cbcf4f4101beb26f2ee15486
| 81
|
py
|
Python
|
tests/integration/testdata/buildcmd/PyLayerMake/layer.py
|
renanmontebelo/aws-sam-cli
|
b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 2,959
|
2018-05-08T21:48:56.000Z
|
2020-08-24T14:35:39.000Z
|
tests/integration/testdata/buildcmd/PyLayerMake/layer.py
|
renanmontebelo/aws-sam-cli
|
b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1,469
|
2018-05-08T22:44:28.000Z
|
2020-08-24T20:19:24.000Z
|
tests/integration/testdata/buildcmd/PyLayerMake/layer.py
|
renanmontebelo/aws-sam-cli
|
b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 642
|
2018-05-08T22:09:19.000Z
|
2020-08-17T09:04:37.000Z
|
import numpy
def layer_method():
return {"pi": "{0:.2f}".format(numpy.pi)}
| 13.5
| 45
| 0.617284
| 12
| 81
| 4.083333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.160494
| 81
| 5
| 46
| 16.2
| 0.691176
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
54562608a59ce9476a71d70e032f5d5bf8f6d75b
| 138
|
py
|
Python
|
datx/base_station.py
|
ipipdotnet/datx-python
|
68d6e99363abc6ae48714be38aa90a5ae6e20fd4
|
[
"Apache-2.0"
] | 39
|
2018-03-13T02:48:36.000Z
|
2021-03-18T07:51:54.000Z
|
datx/base_station.py
|
ipipdotnet/datx-python
|
68d6e99363abc6ae48714be38aa90a5ae6e20fd4
|
[
"Apache-2.0"
] | 1
|
2018-11-06T08:30:31.000Z
|
2018-11-06T08:30:31.000Z
|
datx/base_station.py
|
ipipdotnet/datx-python
|
68d6e99363abc6ae48714be38aa90a5ae6e20fd4
|
[
"Apache-2.0"
] | 10
|
2018-04-28T02:07:08.000Z
|
2020-11-09T04:26:47.000Z
|
# -*- coding: utf-8 -*-
"""
:copyright: ©2018 by IPIP.net
"""
from .district import District
class BaseStation(District):
pass
| 15.333333
| 33
| 0.623188
| 17
| 138
| 5.117647
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045872
| 0.210145
| 138
| 9
| 34
| 15.333333
| 0.743119
| 0.376812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
5468c394ce1fe6e2cc2dd6fce2fd7d4c6e567c44
| 3,494
|
py
|
Python
|
bem/teq_planet.py
|
DanielAndreasen/bem
|
c4cca79322f08b5e9a3f3d39749c11d9f6296aae
|
[
"MIT"
] | null | null | null |
bem/teq_planet.py
|
DanielAndreasen/bem
|
c4cca79322f08b5e9a3f3d39749c11d9f6296aae
|
[
"MIT"
] | null | null | null |
bem/teq_planet.py
|
DanielAndreasen/bem
|
c4cca79322f08b5e9a3f3d39749c11d9f6296aae
|
[
"MIT"
] | null | null | null |
import numpy as np
from uncertainties import umath as um
def getTeqpl(Teffst, aR, ecc, A=0, f=1/4.):
"""Return the planet equilibrium temperature.
Relation adapted from equation 4 page 4 in http://www.mpia.de/homes/ppvi/chapter/madhusudhan.pdf
and https://en.wikipedia.org/wiki/Stefan%E2%80%93Boltzmann_law
    and later updated to include the effect of eccentricity on the average stellar planet distance
according to equation 5 p 25 of Laughlin & Lissauer 2015arXiv150105685L (1501.05685)
Plus Exoplanet atmospheres, physical processes, Sara Seager, p30 eq 3.9 for f contribution.
:param float/np.ndarray Teffst: Effective temperature of the star
    :param float/np.ndarray aR: Ratio of the planetary orbital semi-major axis over the stellar
radius (without unit)
:param float/np.ndarray A: Bond albedo (should be between 0 and 1)
:param float/np.ndarray f: Redistribution factor. If 1/4 the energy is uniformly redistributed
over the planetary surface. If f = 2/3, no redistribution at all, the atmosphere immediately
    reradiates without advection.
:return float/np.ndarray Teqpl: Equilibrium temperature of the planet
"""
return Teffst * (f * (1 - A))**(1 / 4.) * np.sqrt(1 / aR) / (1 - ecc**2)**(1/8.)
def getTeqpl_error(Teffst, aR, ecc, A=0, f=1/4.):
"""Return the planet equilibrium temperature.
Relation adapted from equation 4 page 4 in http://www.mpia.de/homes/ppvi/chapter/madhusudhan.pdf
and https://en.wikipedia.org/wiki/Stefan%E2%80%93Boltzmann_law
    and later updated to include the effect of eccentricity on the average stellar planet distance
according to equation 5 p 25 of Laughlin & Lissauer 2015arXiv150105685L (1501.05685)
Plus Exoplanet atmospheres, physical processes, Sara Seager, p30 eq 3.9 for f contribution.
:param float/np.ndarray Teffst: Effective temperature of the star
    :param float/np.ndarray aR: Ratio of the planetary orbital semi-major axis over the stellar
radius (without unit)
:param float/np.ndarray A: Bond albedo (should be between 0 and 1)
:param float/np.ndarray f: Redistribution factor. If 1/4 the energy is uniformly redistributed
over the planetary surface. If f = 2/3, no redistribution at all, the atmosphere immediately
    reradiates without advection.
:return float/np.ndarray Teqpl: Equilibrium temperature of the planet
"""
return Teffst * (f * (1 - A))**(1 / 4.) * um.sqrt(1 / aR) / (1 - ecc**2)**(1/8.)
def getHtidal(Ms, Rp, a, e):
# a -- in AU, semi major axis
# Teq -- in Kelvins, planetary equilibrium temperature
# M -- in Jupiter masses, planetary mass
# Z -- [Fe/H], stellar metallicity
# Rp -- radius planet
# Ms -- stellar mass
# e -- eccentricity
# G -- gravitational constant
#
#
G = 6.67408 * 10**(-11) # m3 kg-1 s-2
# Equation from Enoch et al. 2012
# Q = 10**5 # Tidal dissipation factor for high mass planets ...?
# k = 0.51 # Love number
# H_tidal = (63/4) * ((G * Ms)**(3/2) * Ms * Rp**5 * a**(-15/2)*e**2) / ((3*Q) / (2*k))
# Equation from Jackson 2008
# Qp' = (3*Qp) / (2*k)
Qp = 500 # with Love number 0.3 for terrestrial planets
    H_tidal = (63 / (16 * np.pi)) * (((G * Ms)**(3 / 2) * Ms * Rp**3) / Qp) * a**(-15 / 2) * e**2  # prefactor 63/(16*pi), per Jackson 2008
return H_tidal
def safronov_nb(Mp, Ms, Rp, a):
# Ozturk 2018, Safronov 1972
return (Mp/Ms) * (a/Rp)
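A quick check of getTeqpl with roughly Earth-like placeholder values (Teff ≈ 5778 K, a/R* ≈ 215, e ≈ 0.0167, Bond albedo 0.3), which should land near the familiar 255 K:

teq = getTeqpl(Teffst=5778., aR=215., ecc=0.0167, A=0.3)
print(teq)  # ~255 K with the default f = 1/4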
| 48.527778
| 100
| 0.660561
| 529
| 3,494
| 4.349716
| 0.334594
| 0.030422
| 0.060843
| 0.066058
| 0.767492
| 0.762277
| 0.754455
| 0.754455
| 0.754455
| 0.739678
| 0
| 0.06338
| 0.227819
| 3,494
| 71
| 101
| 49.211268
| 0.789474
| 0.763022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.153846
| 0.076923
| 0.769231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5480e17b073b3d2de7a418823c0645c307bf4d95
| 183
|
py
|
Python
|
reward/utils/device.py
|
lgvaz/torchrl
|
cfff8acaf70d1fec72169162b95ab5ad3547d17a
|
[
"MIT"
] | 5
|
2018-06-21T14:33:40.000Z
|
2018-08-18T02:26:03.000Z
|
reward/utils/device.py
|
lgvaz/reward
|
cfff8acaf70d1fec72169162b95ab5ad3547d17a
|
[
"MIT"
] | null | null | null |
reward/utils/device.py
|
lgvaz/reward
|
cfff8acaf70d1fec72169162b95ab5ad3547d17a
|
[
"MIT"
] | 2
|
2018-05-08T03:34:49.000Z
|
2018-06-22T15:04:17.000Z
|
import torch
CONFIG = {"device": torch.device("cuda" if torch.cuda.is_available() else "cpu")}
def get(): return CONFIG["device"]
def set_device(device): CONFIG["device"] = device
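Usage is symmetric: set_device stores a torch.device in CONFIG and get reads it back, assuming the module is importable as reward.utils.device:

import torch
from reward.utils import device

device.set_device(torch.device('cpu'))   # override the default
assert device.get() == torch.device('cpu')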
| 22.875
| 81
| 0.704918
| 26
| 183
| 4.884615
| 0.538462
| 0.283465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120219
| 183
| 7
| 82
| 26.142857
| 0.78882
| 0
| 0
| 0
| 0
| 0
| 0.136612
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
54b976c7100ab785c654b0c7ca7597f8b6235530
| 2,979
|
py
|
Python
|
tests/integration/test_labels.py
|
spmistry/crux-python
|
15a6b705d1eec7e789f6f62819429f93e02349c1
|
[
"MIT"
] | null | null | null |
tests/integration/test_labels.py
|
spmistry/crux-python
|
15a6b705d1eec7e789f6f62819429f93e02349c1
|
[
"MIT"
] | null | null | null |
tests/integration/test_labels.py
|
spmistry/crux-python
|
15a6b705d1eec7e789f6f62819429f93e02349c1
|
[
"MIT"
] | null | null | null |
import pytest
@pytest.mark.usefixtures("dataset", "helpers")
def test_add_get_label(dataset, helpers):
file_1 = dataset.create_file(
path="/test_file_" + helpers.generate_random_string(16) + ".csv"
)
label_result = file_1.add_label("label1", "value1")
assert label_result is True
assert file_1.labels.get("label1") == "value1"
@pytest.mark.usefixtures("dataset", "helpers")
def test_add_labels_set_labels(dataset, helpers):
file_1 = dataset.create_file(
path="/test_file_" + helpers.generate_random_string(16) + ".csv"
)
labels = {"label1": "value1", "label2": "value2"}
labels_result = file_1.add_labels(labels)
assert labels_result is True
assert file_1.labels == labels
# Negative test case: verifies label search returns no resources when querying labels that were never set (no pagination).
@pytest.mark.usefixtures("dataset", "helpers")
def test_search_label(dataset, helpers):
file_1 = dataset.create_file(
path="/test_file_" + helpers.generate_random_string(16) + ".csv"
)
file_2 = dataset.create_file(
path="/test_file_" + helpers.generate_random_string(16) + ".csv"
)
label_result_1 = file_1.add_label("label1", "value1")
label_result_2 = file_2.add_label("label1", "value1")
assert label_result_1 is True
assert label_result_2 is True
predicates = [{"op": "eq", "key": "label4", "val": "value4"}]
resources = dataset.find_resources_by_label(predicates=predicates)
resource_ids = [resource.id for resource in resources]
assert len(resource_ids) == 0
# Negative test case: verifies label search returns no resources when querying labels that were never set (with pagination).
@pytest.mark.usefixtures("dataset", "helpers")
def test_search_label_page(dataset, helpers):
file_1 = dataset.create_file(
path="/test_file_" + helpers.generate_random_string(16) + ".csv"
)
file_2 = dataset.create_file(
path="/test_file_" + helpers.generate_random_string(16) + ".csv"
)
label_result_1 = file_1.add_label("label2", "value2")
label_result_2 = file_2.add_label("label2", "value2")
assert label_result_1 is True
assert label_result_2 is True
predicates = [{"op": "eq", "key": "label3", "val": "value3"}]
resources = dataset.find_resources_by_label(predicates=predicates, max_per_page=1)
resource_ids = [resource.id for resource in resources]
assert len(resource_ids) == 0
@pytest.mark.usefixtures("dataset", "helpers")
def test_delete_label(dataset, helpers):
file_1 = dataset.create_file(
path="/test_file_" + helpers.generate_random_string(16) + ".csv"
)
file_2 = dataset.create_file(
path="/test_file_" + helpers.generate_random_string(16) + ".csv"
)
file_1.add_label("label1", "value1")
file_2.add_label("label1", "value1")
d1_result = file_1.delete_label(label_key="label1")
assert d1_result is True
d2_result = file_2.delete_label(label_key="label1")
assert d2_result is True
| 35.891566
| 94
| 0.700906
| 401
| 2,979
| 4.902743
| 0.164589
| 0.033062
| 0.069176
| 0.085453
| 0.865209
| 0.865209
| 0.801628
| 0.703459
| 0.600712
| 0.600712
| 0
| 0.031199
| 0.171534
| 2,979
| 82
| 95
| 36.329268
| 0.765397
| 0.061094
| 0
| 0.453125
| 1
| 0
| 0.130995
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.078125
| false
| 0
| 0.015625
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
49b5f0ea075bbb7b79a2d40b2e4b0bdffec0743f
| 12,388
|
py
|
Python
|
weasyl/test/web/test_site_updates.py
|
sl1-1/weasyl
|
d4f6bf3e33b85a2289a451d95d5b90ff24f5d539
|
[
"Apache-2.0"
] | 1
|
2019-02-15T04:21:48.000Z
|
2019-02-15T04:21:48.000Z
|
weasyl/test/web/test_site_updates.py
|
sl1-1/weasyl
|
d4f6bf3e33b85a2289a451d95d5b90ff24f5d539
|
[
"Apache-2.0"
] | 254
|
2017-12-23T19:36:43.000Z
|
2020-04-14T21:46:13.000Z
|
weasyl/test/web/test_site_updates.py
|
sl1-1/weasyl
|
d4f6bf3e33b85a2289a451d95d5b90ff24f5d539
|
[
"Apache-2.0"
] | 1
|
2017-12-23T18:42:16.000Z
|
2017-12-23T18:42:16.000Z
|
from __future__ import absolute_import, unicode_literals
import pytest
from libweasyl import staff
from libweasyl.legacy import UNIXTIME_OFFSET
from weasyl import errorcode
from weasyl import siteupdate
from weasyl.define import sessionmaker
from weasyl.test import db_utils
_FORM = {
u'title': u'Title',
u'content': u'Content',
}
@pytest.fixture(name='site_updates')
@pytest.mark.usefixtures('db')
def _site_updates():
user = db_utils.create_user(username='test_username')
updates = [
siteupdate.create(user, u'foo', u'content one'),
siteupdate.create(user, u'bar', u'content two'),
siteupdate.create(user, u'baz', u'content three'),
]
for update in updates:
sessionmaker().expunge(update)
return (user, updates)
@pytest.mark.usefixtures('db')
def test_select_last_empty(app):
assert siteupdate.select_last() is None
@pytest.mark.usefixtures('db')
def test_select_last(app, site_updates):
user, updates = site_updates
most_recent = updates[-1]
selected = siteupdate.select_last()
assert 'display_url' in selected.pop('user_media')['avatar'][0]
assert selected == {
'updateid': most_recent.updateid,
'userid': user,
'username': 'test_username',
'title': most_recent.title,
'content': most_recent.content,
'unixtime': most_recent.unixtime.timestamp + UNIXTIME_OFFSET,
'comment_count': 0,
}
@pytest.mark.usefixtures('db', 'cache')
def test_index_empty(app):
resp = app.get('/')
assert resp.html.find(id='home-content') is not None
assert resp.html.find(id='hc-update') is None
@pytest.mark.usefixtures('db', 'cache')
def test_index(app, site_updates):
_, updates = site_updates
resp = app.get('/')
update = resp.html.find(id='hc-update')
assert update is not None
assert update.h3.string == updates[-1].title
assert update.figure.img['alt'] == u'avatar of test_username'
@pytest.mark.usefixtures('db')
def test_list_empty(app):
resp = app.get('/site-updates/')
assert resp.html.find(None, 'content').p.string == u'No site updates to show.'
@pytest.mark.usefixtures('db')
def test_list(app, monkeypatch, site_updates):
_, updates = site_updates
resp = app.get('/site-updates/')
assert len(resp.html.findAll(None, 'text-post-item')) == 3
assert resp.html.find(None, 'text-post-actions') is None
assert len(resp.html.findAll(None, 'text-post-group-header')) == 1
user = db_utils.create_user()
cookie = db_utils.create_session(user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.get('/site-updates/', headers={'Cookie': cookie})
assert len(resp.html.findAll(None, 'text-post-item')) == 3
assert resp.html.find(None, 'text-post-actions').a['href'] == '/site-updates/%d/edit' % (updates[-1].updateid,)
@pytest.mark.usefixtures('db', 'no_csrf')
def test_create(app, monkeypatch):
user = db_utils.create_user()
cookie = db_utils.create_session(user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.post('/admincontrol/siteupdate', _FORM, headers={'Cookie': cookie}).follow()
assert resp.html.find(None, 'content').h3.string == _FORM['title']
@pytest.mark.usefixtures('db', 'no_csrf')
def test_create_strip(app, monkeypatch):
user = db_utils.create_user()
cookie = db_utils.create_session(user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.post(
'/admincontrol/siteupdate',
dict(_FORM, title=' test title \t '),
headers={'Cookie': cookie},
).follow()
assert resp.html.find(None, 'content').h3.string == u'test title'
@pytest.mark.usefixtures('db')
def test_create_csrf(app, monkeypatch):
user = db_utils.create_user()
cookie = db_utils.create_session(user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.post('/admincontrol/siteupdate', _FORM, headers={'Cookie': cookie}, status=403)
assert resp.html.find(id='error_content').p.string == errorcode.token
@pytest.mark.usefixtures('db')
def test_create_restricted(app, monkeypatch):
resp = app.get('/admincontrol/siteupdate')
assert resp.html.find(id='error_content').contents[0].strip() == errorcode.unsigned
resp = app.post('/admincontrol/siteupdate', _FORM)
assert resp.html.find(id='error_content').contents[0].strip() == errorcode.unsigned
user = db_utils.create_user()
cookie = db_utils.create_session(user)
resp = app.get('/admincontrol/siteupdate', headers={'Cookie': cookie})
assert resp.html.find(id='error_content').p.string == errorcode.permission
resp = app.post('/admincontrol/siteupdate', _FORM, headers={'Cookie': cookie})
assert resp.html.find(id='error_content').p.string == errorcode.permission
monkeypatch.setattr(staff, 'TECHNICAL', frozenset([user]))
monkeypatch.setattr(staff, 'MODS', frozenset([user]))
resp = app.get('/admincontrol/siteupdate', headers={'Cookie': cookie})
assert resp.html.find(id='error_content').p.string == errorcode.permission
resp = app.post('/admincontrol/siteupdate', _FORM, headers={'Cookie': cookie})
assert resp.html.find(id='error_content').p.string == errorcode.permission
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.get('/admincontrol/siteupdate', headers={'Cookie': cookie})
assert resp.html.find(id='error_content') is None
@pytest.mark.usefixtures('db', 'no_csrf')
def test_create_validation(app, monkeypatch):
user = db_utils.create_user()
cookie = db_utils.create_session(user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.post('/admincontrol/siteupdate', {'title': u'', 'content': u'Content'}, headers={'Cookie': cookie}, status=422)
assert resp.html.find(id='error_content').p.string == errorcode.error_messages['titleInvalid']
resp = app.post('/admincontrol/siteupdate', {'title': u'Title', 'content': u''}, headers={'Cookie': cookie}, status=422)
assert resp.html.find(id='error_content').p.string == errorcode.error_messages['contentInvalid']
@pytest.mark.usefixtures('db', 'no_csrf')
def test_create_notifications(app, monkeypatch):
admin_user = db_utils.create_user()
normal_user = db_utils.create_user()
admin_cookie = db_utils.create_session(admin_user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([admin_user]))
resp = app.post('/admincontrol/siteupdate', _FORM, headers={'Cookie': admin_cookie}).follow()
assert resp.html.find(None, 'content').h3.string == _FORM['title']
normal_cookie = db_utils.create_session(normal_user)
resp = app.get('/messages/notifications', headers={'Cookie': normal_cookie})
assert list(resp.html.find(id='header-messages').find(title='Notifications').stripped_strings)[1] == '1'
assert resp.html.find(id='site_updates').find(None, 'item').a.string == _FORM['title']
@pytest.mark.usefixtures('db', 'no_csrf')
def test_edit(app, monkeypatch, site_updates):
_, updates = site_updates
user = db_utils.create_user()
cookie = db_utils.create_session(user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.post('/site-updates/%d' % (updates[-1].updateid,), _FORM, headers={'Cookie': cookie}).follow()
assert resp.html.find(None, 'content').h3.string == _FORM['title']
@pytest.mark.usefixtures('db', 'no_csrf')
def test_edit_strip(app, monkeypatch, site_updates):
_, updates = site_updates
user = db_utils.create_user()
cookie = db_utils.create_session(user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.post(
'/site-updates/%d' % (updates[-1].updateid,),
dict(_FORM, title=' test title \t '),
headers={'Cookie': cookie},
).follow()
assert resp.html.find(None, 'content').h3.string == u'test title'
@pytest.mark.usefixtures('db', 'no_csrf')
def test_edit_nonexistent(app, monkeypatch, site_updates):
_, updates = site_updates
user = db_utils.create_user()
cookie = db_utils.create_session(user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
app.post('/site-updates/%d' % (updates[-1].updateid + 1,), _FORM, headers={'Cookie': cookie}, status=404)
@pytest.mark.usefixtures('db')
def test_edit_csrf(app, monkeypatch, site_updates):
_, updates = site_updates
user = db_utils.create_user()
cookie = db_utils.create_session(user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.post('/site-updates/%d' % (updates[-1].updateid,), _FORM, headers={'Cookie': cookie}, status=403)
assert resp.html.find(id='error_content').p.string == errorcode.token
@pytest.mark.usefixtures('db')
def test_edit_restricted(app, monkeypatch, site_updates):
_, updates = site_updates
resp = app.get('/site-updates/%d/edit' % (updates[-1].updateid,))
assert resp.html.find(id='error_content').contents[0].strip() == errorcode.unsigned
resp = app.post('/site-updates/%d' % (updates[-1].updateid,), _FORM)
assert resp.html.find(id='error_content').contents[0].strip() == errorcode.unsigned
user = db_utils.create_user()
cookie = db_utils.create_session(user)
resp = app.get('/site-updates/%d/edit' % (updates[-1].updateid,), headers={'Cookie': cookie})
assert resp.html.find(id='error_content').p.string == errorcode.permission
resp = app.post('/site-updates/%d' % (updates[-1].updateid,), _FORM, headers={'Cookie': cookie})
assert resp.html.find(id='error_content').p.string == errorcode.permission
monkeypatch.setattr(staff, 'TECHNICAL', frozenset([user]))
monkeypatch.setattr(staff, 'MODS', frozenset([user]))
resp = app.get('/site-updates/%d/edit' % (updates[-1].updateid,), headers={'Cookie': cookie})
assert resp.html.find(id='error_content').p.string == errorcode.permission
resp = app.post('/site-updates/%d' % (updates[-1].updateid,), _FORM, headers={'Cookie': cookie})
assert resp.html.find(id='error_content').p.string == errorcode.permission
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.get('/site-updates/%d/edit' % (updates[-1].updateid,), headers={'Cookie': cookie})
assert resp.html.find(id='error_content') is None
@pytest.mark.usefixtures('db', 'no_csrf')
def test_edit_validation(app, monkeypatch, site_updates):
_, updates = site_updates
user = db_utils.create_user()
cookie = db_utils.create_session(user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
resp = app.post('/site-updates/%d' % (updates[-1].updateid,), {'title': u'', 'content': u'Content'}, headers={'Cookie': cookie}, status=422)
assert resp.html.find(id='error_content').p.string == errorcode.error_messages['titleInvalid']
resp = app.post('/site-updates/%d' % (updates[-1].updateid,), {'title': u'Title', 'content': u''}, headers={'Cookie': cookie}, status=422)
assert resp.html.find(id='error_content').p.string == errorcode.error_messages['contentInvalid']
@pytest.mark.usefixtures('db', 'no_csrf')
def test_edit_notifications(app, monkeypatch):
admin_user = db_utils.create_user()
normal_user = db_utils.create_user()
admin_cookie = db_utils.create_session(admin_user)
monkeypatch.setattr(staff, 'ADMINS', frozenset([admin_user]))
resp = app.post('/admincontrol/siteupdate', _FORM, headers={'Cookie': admin_cookie}).follow()
assert resp.html.find(None, 'content').h3.string == _FORM['title']
normal_cookie = db_utils.create_session(normal_user)
resp = app.get('/messages/notifications', headers={'Cookie': normal_cookie})
assert list(resp.html.find(id='header-messages').find(title='Notifications').stripped_strings)[1] == '1'
assert resp.html.find(id='site_updates').find(None, 'item').a.string == _FORM['title']
resp = app.post(
'/site-updates/%d' % (siteupdate.select_last()['updateid'],),
dict(_FORM, title=u'New title'),
headers={'Cookie': admin_cookie},
).follow()
assert resp.html.find(None, 'content').h3.string == u'New title'
resp = app.get('/messages/notifications', headers={'Cookie': normal_cookie})
assert list(resp.html.find(id='header-messages').find(title='Notifications').stripped_strings)[1] == '1'
assert resp.html.find(id='site_updates').find(None, 'item').a.string == u'New title'
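# Editor's note: a hypothetical helper, not part of the original file,
# illustrating the setup pattern the tests above repeat (create a user, open a
# session, then patch that user into the ADMINS staff set):
def _admin_session(monkeypatch):
    user = db_utils.create_user()
    cookie = db_utils.create_session(user)
    monkeypatch.setattr(staff, 'ADMINS', frozenset([user]))
    return user, cookie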
| 39.705128
| 144
| 0.690265
| 1,609
| 12,388
| 5.168428
| 0.085146
| 0.055556
| 0.056277
| 0.075758
| 0.863035
| 0.846922
| 0.823954
| 0.798581
| 0.766114
| 0.757937
| 0
| 0.005712
| 0.137875
| 12,388
| 311
| 145
| 39.832797
| 0.77294
| 0
| 0
| 0.61674
| 0
| 0
| 0.173636
| 0.042945
| 0
| 0
| 0
| 0
| 0.207048
| 1
| 0.088106
| false
| 0
| 0.035242
| 0
| 0.127753
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 49c8b96abc3f198aa66587406ab8b7e9c78fd259
| 31
| py
| Python
| lemkelcp/__init__.py
| pritam-dey3/lemkelcp
| 4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e
| ["MIT"] | 10
| 2019-03-17T19:37:25.000Z
| 2022-01-02T04:29:05.000Z
| lemkelcp/__init__.py
| pritam-dey3/lemkelcp
| 4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e
| ["MIT"] | 1
| 2019-09-25T09:32:49.000Z
| 2021-12-28T05:05:55.000Z
| lemkelcp/__init__.py
| pritam-dey3/lemkelcp
| 4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e
| ["MIT"] | 4
| 2019-02-24T11:49:10.000Z
| 2020-06-06T14:07:11.000Z
|
from .lemkelcp import lemkelcp
| 15.5
| 30
| 0.83871
| 4
| 31
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| b71f0ad71dba6e7fc8ad5d041a1cde7948bbc25f
| 117
| py
| Python
| contacts/views/contact_views.py
| Onlynfk/Freshdesk-CRM-Platform
| 67137af09f7daf6fa2d19a9e70d573548137c9db
| ["MIT"] | null | null | null
| contacts/views/contact_views.py
| Onlynfk/Freshdesk-CRM-Platform
| 67137af09f7daf6fa2d19a9e70d573548137c9db
| ["MIT"] | null | null | null
| contacts/views/contact_views.py
| Onlynfk/Freshdesk-CRM-Platform
| 67137af09f7daf6fa2d19a9e70d573548137c9db
| ["MIT"] | null | null | null
|
from django.shortcuts import render
def contact(request):
return render(request, 'contacts/contact.html')
| 19.5
| 52
| 0.735043
| 14
| 117
| 6.142857
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17094
| 117
| 5
| 53
| 23.4
| 0.886598
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 6
| b771d6a65389f019399e4105e7ca9559208f9b9c
| 271
| py
| Python
| pycon_project/apps/proposals/admin.py
| mitsuhiko/pycon
| 73688a82080539a1c0d575cf3248f55fefb6b9ba
| ["BSD-3-Clause"] | 1
| 2017-09-04T08:19:08.000Z
| 2017-09-04T08:19:08.000Z
| pycon_project/apps/proposals/admin.py
| mitsuhiko/pycon
| 73688a82080539a1c0d575cf3248f55fefb6b9ba
| ["BSD-3-Clause"] | null | null | null
| pycon_project/apps/proposals/admin.py
| mitsuhiko/pycon
| 73688a82080539a1c0d575cf3248f55fefb6b9ba
| ["BSD-3-Clause"] | null | null | null
|
from django.contrib import admin
from proposals.models import Proposal, ProposalSessionType
admin.site.register(ProposalSessionType)
admin.site.register(Proposal,
list_display = ["title", "session_type", "audience_level", "cancelled", "extreme_pycon", "invited"]
)
| 30.111111
| 103
| 0.785978
| 30
| 271
| 6.966667
| 0.733333
| 0.229665
| 0.267943
| 0.344498
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095941
| 271
| 9
| 104
| 30.111111
| 0.853061
| 0
| 0
| 0
| 0
| 0
| 0.220588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| b7a7213417448a10f646593e2af28f99d94c2f47
| 3,144
| py
| Python
| paper_plots/small_vs_large_box.py
| finn-dodgson/DeepHalos
| 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c
| ["MIT"] | null | null | null
| paper_plots/small_vs_large_box.py
| finn-dodgson/DeepHalos
| 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c
| ["MIT"] | null | null | null
| paper_plots/small_vs_large_box.py
| finn-dodgson/DeepHalos
| 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c
| ["MIT"] | null | null | null
|
import numpy as np
from plots import plots_for_predictions as pp
from utilss import distinct_colours as dc
import matplotlib.pyplot as plt
c = dc.get_distinct(4)
path = '/Users/luisals/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/lr5e-5/'
p1 = np.load(path + "seed_20/predicted_sim_6_epoch_09.npy")
t1 = np.load(path + "seed_20/true_sim_6_epoch_09.npy")
p_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/raw/predicted_sim_L200_N1024_genetIC3_epoch_10.npy")
t_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/raw/true_sim_L200_N1024_genetIC3_epoch_10.npy")
path_av = "/Users/luisals/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/averaged_boxes/log_alpha_-4.3/"
p_av = np.load(path_av + "predicted_sim_6_epoch_32.npy")
t_av = np.load(path_av + "true_sim_6_epoch_32.npy")
p_av_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/avg/predicted_sim_L200_N1024_genetIC3_epoch_18.npy")
t_av_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/avg/true_sim_L200_N1024_genetIC3_epoch_18.npy")
# Raw-density case
f1, a, m = pp.plot_histogram_predictions(p1, t1, radius_bins=False, particle_ids=None, errorbars=False,
label=r"$L_\mathrm{box}=50 \, \mathrm{Mpc} \,/ \,h$", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_big, t_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
axes=a, color="C1", label=r"$L_\mathrm{box}=200 \, \mathrm{Mpc} \,/ \,h$")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/lls/Documents/Papers/dlhalos_paper/small_vs_large_box.pdf")
# Averaged-density case
f1, a, m = pp.plot_histogram_predictions(p_av, t_av, radius_bins=False, particle_ids=None, errorbars=False,
label=r"$L_\mathrm{box}=50 \, \mathrm{Mpc} \,/ \,h$", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_av_big, t_av_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
axes=a, color="C1", label=r"$L_\mathrm{box}=200 \, \mathrm{Mpc} \,/ \,h$")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/luisals/Documents/Papers/dlhalos_paper/averaged_small_vs_large_box.pdf")
# Raw vs. averaged density comparison (large box)
f1, a, m = pp.plot_histogram_predictions(p_big, t_big, radius_bins=False, particle_ids=None, errorbars=False,
label="Raw density", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_av_big, t_av_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
axes=a, color="C1", label="Averaged density")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/luisals/Documents/Papers/dlhalos_paper/raw_vs_averaged_large_box.pdf")
| 60.461538
| 126
| 0.699746
| 501
| 3,144
| 4.105788
| 0.227545
| 0.04667
| 0.043753
| 0.075839
| 0.85367
| 0.797278
| 0.783666
| 0.725328
| 0.725328
| 0.659212
| 0
| 0.050558
| 0.144402
| 3,144
| 51
| 127
| 61.647059
| 0.714126
| 0.019084
| 0
| 0.333333
| 0
| 0.027778
| 0.422078
| 0.352922
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| b7ebf597cf4af041d284ceb92dfc3840fcf8cea7
| 146
| py
| Python
| annuaire/commands/__init__.py
| djacomy/layer-annuaire
| b0312534e31dd98d98568a83918cf7dd583aa4c7
| ["MIT"] | null | null | null
| annuaire/commands/__init__.py
| djacomy/layer-annuaire
| b0312534e31dd98d98568a83918cf7dd583aa4c7
| ["MIT"] | null | null | null
| annuaire/commands/__init__.py
| djacomy/layer-annuaire
| b0312534e31dd98d98568a83918cf7dd583aa4c7
| ["MIT"] | null | null | null
|
"""Package groups the different commands modules."""
from annuaire.commands import download, import_lawyers
__all__ = ["download", "import_lawyers"]
| 29.2
| 54
| 0.80137
| 17
| 146
| 6.529412
| 0.705882
| 0.252252
| 0.378378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 146
| 4
| 55
| 36.5
| 0.853846
| 0.315068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 4d0941aea75adaa006d884337e5c4d550547f131
| 6,030
| py
| Python
| updates.py
| knowledgetechnologyuhh/hipss
| 518bf3e6a4d02e234cbe29506b9afda0a6ccb187
| ["MIT"] | null | null | null
| updates.py
| knowledgetechnologyuhh/hipss
| 518bf3e6a4d02e234cbe29506b9afda0a6ccb187
| ["MIT"] | null | null | null
| updates.py
| knowledgetechnologyuhh/hipss
| 518bf3e6a4d02e234cbe29506b9afda0a6ccb187
| ["MIT"] | null | null | null
|
import torch
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.clip_grad import clip_grad_norm_
from mpi_utils.mpi_utils import sync_grads
def update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg):
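    # SAC-style automatic temperature tuning: nudge alpha so the policy's
    # entropy tracks target_entropy; when tuning is disabled the loss is zero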
if cfg.automatic_entropy_tuning:
alpha_loss = -(log_alpha * (log_pi + target_entropy).detach()).mean()
alpha_optim.zero_grad()
alpha_loss.backward()
alpha_optim.step()
alpha = log_alpha.exp()
alpha_tlogs = alpha.clone()
else:
alpha_loss = torch.tensor(0.)
alpha_tlogs = torch.tensor(alpha)
return alpha_loss, alpha_tlogs
def update_flat(actor_network, critic_network, critic_target_network, policy_optim, critic_optim, alpha, log_alpha,
target_entropy, alpha_optim, obs_norm, ag_norm, g_norm, obs_next_norm, actions, rewards, cfg):
inputs_norm = np.concatenate([obs_norm, ag_norm, g_norm], axis=1)
inputs_next_norm = np.concatenate([obs_next_norm, ag_norm, g_norm], axis=1)
inputs_norm_tensor = torch.tensor(inputs_norm, dtype=torch.float32)
inputs_next_norm_tensor = torch.tensor(inputs_next_norm, dtype=torch.float32)
actions_tensor = torch.tensor(actions, dtype=torch.float32)
r_tensor = torch.tensor(rewards, dtype=torch.float32).reshape(rewards.shape[0], 1)
if cfg.cuda:
inputs_norm_tensor = inputs_norm_tensor.cuda()
inputs_next_norm_tensor = inputs_next_norm_tensor.cuda()
actions_tensor = actions_tensor.cuda()
r_tensor = r_tensor.cuda()
with torch.no_grad():
actions_next, log_pi_next, _ = actor_network.sample(inputs_next_norm_tensor)
qf_next_target = critic_target_network(inputs_next_norm_tensor, actions_next)
min_qf_next_target = torch.min(qf_next_target, dim=0).values - alpha * log_pi_next
next_q_value = r_tensor + cfg.gamma * min_qf_next_target
# the q loss
qf = critic_network(inputs_norm_tensor, actions_tensor)
qf_loss = torch.stack([F.mse_loss(_qf, next_q_value) for _qf in qf]).mean()
# the actor loss
pi, log_pi, _ = actor_network.sample(inputs_norm_tensor)
qf_pi = critic_network(inputs_norm_tensor, pi)
min_qf_pi = torch.min(qf_pi, dim=0).values
policy_loss = ((alpha * log_pi) - min_qf_pi).mean()
# update actor network
policy_optim.zero_grad()
policy_loss.backward()
sync_grads(actor_network)
policy_optim.step()
# update the critic_network
critic_optim.zero_grad()
qf_loss.backward()
if cfg.clip_grad_norm:
clip_grad_norm_(critic_network.parameters(), cfg.max_norm)
sync_grads(critic_network)
critic_optim.step()
alpha_loss, alpha_tlogs = update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg)
train_metrics = dict(q_loss=qf_loss.item(),
next_q=next_q_value.mean().item(),
policy_loss=policy_loss.item(),
alpha_loss=alpha_loss.item(),
alpha_tlogs=alpha_tlogs.item())
for idx, (_qf, _qtarget) in enumerate(zip(qf, qf_next_target)):
train_metrics[f'q_{idx}'] = _qf.mean().item()
train_metrics[f'q_target_{idx}'] = _qtarget.mean().item()
return train_metrics
def update_language(actor_network, critic_network, critic_target_network, policy_optim, critic_optim, alpha, log_alpha,
target_entropy, alpha_optim, obs_norm, instruction, obs_next_norm, actions, rewards, cfg):
inputs_norm = obs_norm
inputs_next_norm = obs_next_norm
inputs_norm_tensor = torch.tensor(inputs_norm, dtype=torch.float32)
inputs_next_norm_tensor = torch.tensor(inputs_next_norm, dtype=torch.float32)
actions_tensor = torch.tensor(actions, dtype=torch.float32)
r_tensor = torch.tensor(rewards, dtype=torch.float32).reshape(rewards.shape[0], 1)
instruction_tensor = torch.tensor(instruction, dtype=torch.long)
if cfg.cuda:
inputs_norm_tensor = inputs_norm_tensor.cuda()
inputs_next_norm_tensor = inputs_next_norm_tensor.cuda()
actions_tensor = actions_tensor.cuda()
r_tensor = r_tensor.cuda()
instruction_tensor = instruction_tensor.cuda()
with torch.no_grad():
actions_next, log_pi_next, _ = actor_network.sample(inputs_next_norm_tensor, instruction_tensor)
qf_next_target = critic_target_network(inputs_next_norm_tensor, actions_next, instruction_tensor)
min_qf_next_target = torch.min(qf_next_target, dim=0).values - alpha * log_pi_next
next_q_value = r_tensor + cfg.gamma * min_qf_next_target
# the q loss
qf = critic_network(inputs_norm_tensor, actions_tensor, instruction_tensor)
qf_loss = torch.stack([F.mse_loss(_qf, next_q_value) for _qf in qf]).mean()
# the actor loss
pi, log_pi, _ = actor_network.sample(inputs_norm_tensor, instruction_tensor)
qf_pi = critic_network(inputs_norm_tensor, pi, instruction_tensor)
min_qf_pi = torch.min(qf_pi, dim=0).values
policy_loss = ((alpha * log_pi) - min_qf_pi).mean()
# update actor network
policy_optim.zero_grad()
policy_loss.backward()
sync_grads(actor_network)
policy_optim.step()
# update the critic_network
critic_optim.zero_grad()
qf_loss.backward()
if cfg.clip_grad_norm:
clip_grad_norm_(critic_network.parameters(), cfg.max_norm)
sync_grads(critic_network)
critic_optim.step()
alpha_loss, alpha_tlogs = update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg)
train_metrics = dict(q_loss=qf_loss.item(),
next_q=next_q_value.mean().item(),
policy_loss=policy_loss.item(),
alpha_loss=alpha_loss.item(),
alpha_tlogs=alpha_tlogs.item())
for idx, (_qf, _qtarget) in enumerate(zip(qf, qf_next_target)):
train_metrics[f'q_{idx}'] = _qf.mean().item()
train_metrics[f'q_target_{idx}'] = _qtarget.mean().item()
return train_metrics
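# Editor's note: a minimal, hypothetical illustration (not part of the
# original file) of the clipped double-Q step used by both updates above.
# The critic returns a stacked ensemble of Q-values; taking the element-wise
# minimum over the ensemble dimension counteracts Q overestimation, as in
# TD3/SAC:
#
#     q_ensemble = torch.stack([torch.randn(8, 1), torch.randn(8, 1)])
#     min_q = torch.min(q_ensemble, dim=0).values  # shape (8, 1)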
| 42.167832
| 119
| 0.703814
| 852
| 6,030
| 4.579812
| 0.105634
| 0.056381
| 0.050231
| 0.051256
| 0.844951
| 0.839313
| 0.835725
| 0.835725
| 0.804459
| 0.784982
| 0
| 0.005575
| 0.196849
| 6,030
| 142
| 120
| 42.464789
| 0.800124
| 0.024046
| 0
| 0.654206
| 0
| 0
| 0.007148
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028037
| false
| 0
| 0.046729
| 0
| 0.102804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 4d240f3eb85f0adcecd00489cbe4d3ad31ec57c5
| 27
| py
| Python
| test.py
| justin-th/linux-pasword-protect
| feba8712d5bc25c417cb7297aac9c0d23566378e
| ["MIT"] | null | null | null
| test.py
| justin-th/linux-pasword-protect
| feba8712d5bc25c417cb7297aac9c0d23566378e
| ["MIT"] | null | null | null
| test.py
| justin-th/linux-pasword-protect
| feba8712d5bc25c417cb7297aac9c0d23566378e
| ["MIT"] | null | null | null
|
import os
print(os.curdir)
| 9
| 16
| 0.777778
| 5
| 27
| 4.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 3
| 16
| 9
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 6
| 4d50bed8c76e8e60cc01b8081cea63dca711f207
| 805
| py
| Python
| test/test_vlan_group.py
| nrfta/python-netbox-client
| 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8
| ["MIT"] | null | null | null
| test/test_vlan_group.py
| nrfta/python-netbox-client
| 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8
| ["MIT"] | null | null | null
| test/test_vlan_group.py
| nrfta/python-netbox-client
| 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8
| ["MIT"] | null | null | null
|
# coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import netbox_client
from netbox_client.models.vlan_group import VLANGroup # noqa: E501
from netbox_client.rest import ApiException
class TestVLANGroup(unittest.TestCase):
"""VLANGroup unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testVLANGroup(self):
"""Test VLANGroup"""
# FIXME: construct object with mandatory attributes with example values
# model = netbox_client.models.vlan_group.VLANGroup() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 19.634146
| 79
| 0.680745
| 97
| 805
| 5.453608
| 0.57732
| 0.090737
| 0.060491
| 0.083176
| 0.102079
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019355
| 0.229814
| 805
| 40
| 80
| 20.125
| 0.833871
| 0.423602
| 0
| 0.214286
| 1
| 0
| 0.018957
| 0
| 0
| 0
| 0
| 0.025
| 0
| 1
| 0.214286
| false
| 0.214286
| 0.357143
| 0
| 0.642857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 6
| 4d656673d216ce0be4fe64d21204d4348b38598e
| 60
| py
| Python
| pyroombaadapter/__init__.py
| ymollard/PyRoombaAdapter
| a4b63e9b97ac2e27a8b472f596a1111eb3c254b9
| ["MIT"] | null | null | null
| pyroombaadapter/__init__.py
| ymollard/PyRoombaAdapter
| a4b63e9b97ac2e27a8b472f596a1111eb3c254b9
| ["MIT"] | null | null | null
| pyroombaadapter/__init__.py
| ymollard/PyRoombaAdapter
| a4b63e9b97ac2e27a8b472f596a1111eb3c254b9
| ["MIT"] | null | null | null
|
from pyroombaadapter.pyroombaadapter import PyRoombaAdapter
| 30
| 59
| 0.916667
| 5
| 60
| 11
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 60
| 1
| 60
| 60
| 0.982143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 4ddf8f7618bc1ce4a506f069f1a4aa3da6ef6a1b
| 22
| py
| Python
| pefile/__init__.py
| 0x1F9F1/binja-msvc
| be2577c22c8d37fd1e2e211f80b1c9a920705bd2
| ["MIT"] | 9
| 2019-02-08T10:01:39.000Z
| 2021-04-29T12:27:34.000Z
| pefile/__init__.py
| DatBrick/binja-msvc
| 751ffc1450c569bad23ac67a761d0f1fbd4ca4c4
| ["MIT"] | 1
| 2019-07-04T20:09:57.000Z
| 2019-07-12T11:10:15.000Z
| pefile/__init__.py
| DatBrick/binja-msvc
| 751ffc1450c569bad23ac67a761d0f1fbd4ca4c4
| ["MIT"] | 2
| 2019-03-03T13:00:14.000Z
| 2020-05-01T05:35:04.000Z
|
from .pefile import *
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 4dfab55975cccc588661b8464faec98ada96eafa
| 11,800
| py
| Python
| posthog/test/test_update_person_props.py
| csmatar/posthog
| 4587cfe18625f302726c531f06a32c18e9749e9d
| ["MIT"] | 58
| 2020-08-26T16:26:18.000Z
| 2022-03-30T05:32:23.000Z
| posthog/test/test_update_person_props.py
| csmatar/posthog
| 4587cfe18625f302726c531f06a32c18e9749e9d
| ["MIT"] | 15
| 2021-11-09T10:49:34.000Z
| 2021-11-09T16:11:01.000Z
| posthog/test/test_update_person_props.py
| csmatar/posthog
| 4587cfe18625f302726c531f06a32c18e9749e9d
| ["MIT"] | 13
| 2020-09-08T13:27:07.000Z
| 2022-03-19T17:27:10.000Z
|
from datetime import datetime
from django.db import connection
from posthog.models import Person
from posthog.test.base import BaseTest
# How we expect this function to behave:
# | call | value exists | call TS is ___ existing TS | previous fn | write/override
# 1| set | no | N/A | N/A | yes
# 2| set_once | no | N/A | N/A | yes
# 3| set | yes | before | set | no
# 4| set | yes | before | set_once | yes
# 5| set | yes | after | set | yes
# 6| set | yes | after | set_once | yes
# 7| set_once | yes | before | set | no
# 8| set_once | yes | before | set_once | yes
# 9| set_once | yes | after | set | no
# 10| set_once | yes | after | set_once | no
# 11| set | yes | equal | set | no
# 12| set_once | yes | equal | set | no
# 13| set | yes | equal | set_once | yes
# 14| set_once | yes | equal | set_once | no
FUTURE_TIMESTAMP = datetime(2050, 1, 1, 1, 1, 1).isoformat()
PAST_TIMESTAMP = datetime(2000, 1, 1, 1, 1, 1).isoformat()
# Refers to migration 0176_update_person_props_function
# This is a Postgres function we use in the plugin server
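# Editor's note: the SQL body itself lives in the migration named above. As a
# reading aid only, a hypothetical pure-Python sketch of the decision table
# (the function name and signature are illustrative, not part of this repo):
def _should_update_sketch(op, call_ts, existing_ts, existing_op):
    if existing_ts is None:            # cases 1-2: no existing value
        return True
    if op == "set":
        if existing_op == "set_once":  # cases 4, 6, 13: set beats an old set_once
            return True
        return call_ts > existing_ts   # cases 3, 5, 11: only a newer set wins
    if existing_op == "set":           # cases 7, 9, 12: set_once never overrides set
        return False
    return call_ts < existing_ts       # cases 8, 10, 14: set_once keeps the oldest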
class TestShouldUpdatePersonProp(BaseTest):
def test_update_without_properties_last_updated_at(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
        # don't update on the set_once call
self.assertEqual(updated_person.properties, {"a": 1, "b": 0})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertIsNotNone(updated_person.properties_last_updated_at["a"])
def test_update_without_properties_last_operation(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": FUTURE_TIMESTAMP, "b": FUTURE_TIMESTAMP,},
properties_last_operation={},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
        # don't update on the set_once call
self.assertEqual(updated_person.properties, {"a": 1, "b": 0})
self.assertEqual(updated_person.properties_last_operation, {"a": "set"})
self.assertNotEqual(updated_person.properties_last_updated_at["a"], FUTURE_TIMESTAMP)
    # test cases 1 and 2 from the table
def test_update_non_existent_prop(self):
person = Person.objects.create(
team=self.team, properties={}, properties_last_updated_at={}, properties_last_operation={}
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# both updated
self.assertEqual(updated_person.properties, {"a": 1, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertIsNotNone(updated_person.properties_last_updated_at["a"])
self.assertIsNotNone(updated_person.properties_last_updated_at["b"])
    # test cases 3 and 4 from the table
def test_set_operation_with_earlier_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": FUTURE_TIMESTAMP, "b": FUTURE_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# b updated
self.assertEqual(updated_person.properties, {"a": 0, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set"})
self.assertEqual(updated_person.properties_last_updated_at["a"], FUTURE_TIMESTAMP)
self.assertNotEqual(updated_person.properties_last_updated_at["b"], FUTURE_TIMESTAMP)
    # test cases 5 and 6 from the table
def test_set_operation_with_older_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": PAST_TIMESTAMP, "b": PAST_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# both updated
self.assertEqual(updated_person.properties, {"a": 1, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set"})
self.assertNotEqual(updated_person.properties_last_updated_at["a"], PAST_TIMESTAMP)
self.assertNotEqual(updated_person.properties_last_updated_at["b"], PAST_TIMESTAMP)
    # test cases 7 and 8 from the table
def test_set_once_operation_with_earlier_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": FUTURE_TIMESTAMP, "b": FUTURE_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set_once', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# b updated
self.assertEqual(updated_person.properties, {"a": 0, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertEqual(updated_person.properties_last_updated_at["a"], FUTURE_TIMESTAMP)
self.assertNotEqual(updated_person.properties_last_updated_at["b"], FUTURE_TIMESTAMP)
    # test cases 9 and 10 from the table
def test_set_once_operation_with_older_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": PAST_TIMESTAMP, "b": PAST_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set_once', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# neither updated
self.assertEqual(updated_person.properties, {"a": 0, "b": 0})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertEqual(updated_person.properties_last_updated_at["a"], PAST_TIMESTAMP)
self.assertEqual(updated_person.properties_last_updated_at["b"], PAST_TIMESTAMP)
    # test cases 11-14 from the table
def test_equal_timestamps(self):
timestamp = PAST_TIMESTAMP
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0, "c": 0, "d": 0},
properties_last_updated_at={"a": timestamp, "b": timestamp, "c": timestamp, "d": timestamp},
properties_last_operation={"a": "set", "b": "set", "c": "set_once", "d": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
'{timestamp}',
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update,
row('set', 'c', '1'::jsonb)::person_property_update,
row('set_once', 'd', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
        # update only when the current op is set and the last op was set_once, i.e. "c"
self.assertEqual(updated_person.properties, {"a": 0, "b": 0, "c": 1, "d": 0})
self.assertEqual(
updated_person.properties_last_operation, {"a": "set", "b": "set", "c": "set", "d": "set_once"}
) # c changed
self.assertEqual(updated_person.properties_last_updated_at["a"], PAST_TIMESTAMP)
self.assertEqual(updated_person.properties_last_updated_at["b"], PAST_TIMESTAMP)
self.assertEqual(updated_person.properties_last_updated_at["c"], PAST_TIMESTAMP)
        self.assertEqual(updated_person.properties_last_updated_at["d"], PAST_TIMESTAMP)
| 42.446043
| 107
| 0.527203
| 1,250
| 11,800
| 4.7328
| 0.0976
| 0.099391
| 0.124408
| 0.097194
| 0.869337
| 0.839926
| 0.816092
| 0.811021
| 0.78668
| 0.745098
| 0
| 0.014057
| 0.348898
| 11,800
| 277
| 108
| 42.599278
| 0.755955
| 0.145678
| 0
| 0.674641
| 0
| 0
| 0.314317
| 0.06372
| 0
| 0
| 0
| 0
| 0.15311
| 1
| 0.038278
| false
| 0
| 0.019139
| 0
| 0.062201
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 1290da62e7e73de3c4c75ef861a9d5a9bcbe1f4b
| 2,924
| py
| Python
| tests/test_utils.py
| jamesmcclain/pystac
| 993b54f5a10b0d55db18dbda81c5ad7acc06d921
| ["Apache-2.0"] | 1
| 2018-08-04T05:24:58.000Z
| 2018-08-04T05:24:58.000Z
| tests/test_utils.py
| jamesmcclain/pystac
| 993b54f5a10b0d55db18dbda81c5ad7acc06d921
| ["Apache-2.0"] | 4
| 2017-12-11T22:15:44.000Z
| 2018-06-15T15:20:34.000Z
| tests/test_utils.py
| jamesmcclain/pystac
| 993b54f5a10b0d55db18dbda81c5ad7acc06d921
| ["Apache-2.0"] | 5
| 2018-06-15T14:51:50.000Z
| 2019-08-22T05:33:55.000Z
|
import unittest
from pystac.utils import (make_relative_href, make_absolute_href,
is_absolute_href)
class UtilsTest(unittest.TestCase):
def test_make_relative_href(self):
# Test cases of (source_href, start_href, expected)
test_cases = [
('/a/b/c/d/catalog.json', '/a/b/c/catalog.json',
'./d/catalog.json'),
('/a/b/catalog.json', '/a/b/c/catalog.json', '../catalog.json'),
('/a/catalog.json', '/a/b/c/catalog.json', '../../catalog.json'),
('http://stacspec.org/a/b/c/d/catalog.json',
'http://stacspec.org/a/b/c/catalog.json', './d/catalog.json'),
('http://stacspec.org/a/b/catalog.json',
'http://stacspec.org/a/b/c/catalog.json', '../catalog.json'),
('http://stacspec.org/a/catalog.json',
'http://stacspec.org/a/b/c/catalog.json', '../../catalog.json'),
('http://stacspec.org/a/catalog.json',
'http://cogeo.org/a/b/c/catalog.json',
'http://stacspec.org/a/catalog.json'),
('http://stacspec.org/a/catalog.json',
'https://stacspec.org/a/b/c/catalog.json',
'http://stacspec.org/a/catalog.json')
]
for source_href, start_href, expected in test_cases:
actual = make_relative_href(source_href, start_href)
self.assertEqual(actual, expected)
def test_make_absolute_href(self):
# Test cases of (source_href, start_href, expected)
test_cases = [
('item.json', '/a/b/c/catalog.json', '/a/b/c/item.json'),
('./item.json', '/a/b/c/catalog.json', '/a/b/c/item.json'),
('./z/item.json', '/a/b/c/catalog.json', '/a/b/c/z/item.json'),
('../item.json', '/a/b/c/catalog.json', '/a/b/item.json'),
('item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/c/item.json'),
('./item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/c/item.json'),
('./z/item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/c/z/item.json'),
('../item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/item.json')
]
for source_href, start_href, expected in test_cases:
actual = make_absolute_href(source_href, start_href)
self.assertEqual(actual, expected)
def test_is_absolute_href(self):
# Test cases of (href, expected)
test_cases = [('item.json', False), ('./item.json', False),
('../item.json', False), ('/item.json', True),
('http://stacgeo.org/item.json', True)]
for href, expected in test_cases:
actual = is_absolute_href(href)
self.assertEqual(actual, expected)
| 46.412698
| 77
| 0.548906
| 389
| 2,924
| 4.025707
| 0.105398
| 0.224777
| 0.045977
| 0.102171
| 0.863346
| 0.840996
| 0.784163
| 0.772031
| 0.695402
| 0.66539
| 0
| 0
| 0.25171
| 2,924
| 62
| 78
| 47.16129
| 0.715722
| 0.04446
| 0
| 0.235294
| 0
| 0
| 0.442294
| 0.007527
| 0
| 0
| 0
| 0
| 0.058824
| 1
| 0.058824
| false
| 0
| 0.039216
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 12df0714eb5fa8ab8f6068ed158fd58746d6bc32
| 37
| py
| Python
| npd_well_decoder/__init__.py
| fmell/npd-well-name-decoder
| a44ec28a6ef3b32ba38751eeffff479008b53e2d
| ["MIT"] | null | null | null
| npd_well_decoder/__init__.py
| fmell/npd-well-name-decoder
| a44ec28a6ef3b32ba38751eeffff479008b53e2d
| ["MIT"] | null | null | null
| npd_well_decoder/__init__.py
| fmell/npd-well-name-decoder
| a44ec28a6ef3b32ba38751eeffff479008b53e2d
| ["MIT"] | null | null | null
|
from .npd import parse_wellbore_name
| 18.5
| 36
| 0.864865
| 6
| 37
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 12fda5a81fde9ab3c46b39a497e89d5ab29b6639
| 17,673
| py
| Python
| symbols/block.py
| zerofo/sdu-face-alignment
| f4b57fde0576d2327369884fd5d5e9a7765a0790
| ["MIT"] | 192
| 2019-03-27T02:40:41.000Z
| 2022-03-18T15:35:17.000Z
| symbols/block.py
| zerofo/sdu-face-alignment
| f4b57fde0576d2327369884fd5d5e9a7765a0790
| ["MIT"] | 4
| 2019-04-01T14:51:22.000Z
| 2020-11-25T08:22:04.000Z
| symbols/block.py
| zerofo/sdu-face-alignment
| f4b57fde0576d2327369884fd5d5e9a7765a0790
| ["MIT"] | 38
| 2019-03-30T05:33:48.000Z
| 2021-10-01T06:08:17.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
import numpy as np
from config import config
def Conv(**kwargs):
body = mx.sym.Convolution(**kwargs)
return body
def Act(data, act_type, name):
if act_type=='prelu':
body = mx.sym.LeakyReLU(data = data, act_type='prelu', name = name)
else:
body = mx.symbol.Activation(data=data, act_type=act_type, name=name)
return body
def ConvFactory(data, num_filter, kernel, stride=(1, 1), pad=(0, 0), act_type="relu", mirror_attr={}, with_act=True, dcn=False, name=''):
bn_mom = config.bn_mom
workspace = config.workspace
if not dcn:
conv = mx.symbol.Convolution(
data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, workspace=workspace, name=name+'_conv')
else:
conv_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = data,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=data, offset=conv_offset,
num_filter=num_filter, pad=(1,1), kernel=(3,3), num_deformable_group=1, stride=stride, dilate=(1, 1), no_bias=False)
bn = mx.symbol.BatchNorm(data=conv, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name+'_bn')
if with_act:
act = Act(bn, act_type, name=name+'_relu')
#act = mx.symbol.Activation(
# data=bn, act_type=act_type, attr=mirror_attr, name=name+'_relu')
return act
else:
return bn
def conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
bit = 1
ACT_BIT = config.ACT_BIT
bn_mom = config.bn_mom
workspace = config.workspace
memonger = config.memonger
#print('in unit2')
    # the same as https://github.com/facebook/fb.resnet.torch#notes, with a slight difference from the original paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
else:
act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
conv1 = mx.sym.QConvolution(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = Conv(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
else:
act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
conv2 = mx.sym.QConvolution(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = Conv(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
else:
act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
conv3 = mx.sym.QConvolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
#if binarize:
# conv3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
else:
shortcut = mx.sym.QConvolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
def conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilation, **kwargs):
bit = 1
ACT_BIT = config.ACT_BIT
bn_mom = config.bn_mom
workspace = config.workspace
memonger = config.memonger
#print('in unit2')
    # the same as https://github.com/facebook/fb.resnet.torch#notes, with a slight difference from the original paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
if not dcn:
conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv1')
else:
conv1_offset = mx.symbol.Convolution(name=name+'_conv1_offset', data = act1,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv1 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv1', data=act1, offset=conv1_offset,
num_filter=int(num_filter*0.5), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
if not dcn:
conv2 = Conv(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv2')
else:
conv2_offset = mx.symbol.Convolution(name=name+'_conv2_offset', data = act2,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv2 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv2', data=act2, offset=conv2_offset,
num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
conv2 = mx.sym.QConvolution_v1(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
if not dcn:
conv3 = Conv(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv3')
else:
conv3_offset = mx.symbol.Convolution(name=name+'_conv3_offset', data = act3,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv3 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv3', data=act3, offset=conv3_offset,
num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
conv3 = mx.sym.QConvolution_v1(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
conv4 = mx.symbol.Concat(*[conv1, conv2, conv3])
if binarize:
conv4 = mx.sym.BatchNorm(data=conv4, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
else:
#assert(False)
shortcut = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
shortcut = mx.sym.BatchNorm(data=shortcut, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc_bn')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv4 + shortcut
#return bn4 + shortcut
#return act4 + shortcut
def block17(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
tower_conv = ConvFactory(net, 192, (1, 1), name=name+'_conv')
tower_conv1_0 = ConvFactory(net, 129, (1, 1), name=name+'_conv1_0')
tower_conv1_1 = ConvFactory(tower_conv1_0, 160, (1, 7), pad=(1, 2), name=name+'_conv1_1')
tower_conv1_2 = ConvFactory(tower_conv1_1, 192, (7, 1), pad=(2, 1), name=name+'_conv1_2')
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
net = net+scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def block35(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
M = 1.0
tower_conv = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv')
tower_conv1_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv1_0')
tower_conv1_1 = ConvFactory(tower_conv1_0, int(input_num_channels*0.25*M), (3, 3), pad=(1, 1), name=name+'_conv1_1')
tower_conv2_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv2_0')
tower_conv2_1 = ConvFactory(tower_conv2_0, int(input_num_channels*0.375*M), (3, 3), pad=(1, 1), name=name+'_conv2_1')
tower_conv2_2 = ConvFactory(tower_conv2_1, int(input_num_channels*0.5*M), (3, 3), pad=(1, 1), name=name+'_conv2_2')
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_1, tower_conv2_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
net = net+scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
assert not binarize
if stride[0]>1 or not dim_match:
return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
conv4 = block35(data, num_filter, name=name+'_block35')
return conv4
def conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
workspace = config.workspace
if stride[0]>1 or not dim_match:
return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
cab = CAB(data, num_filter, 1, 4, workspace, name, dilate, 1)
return cab.get()
def conv_block(data, num_filter, stride, dim_match, name, binarize, dcn, dilate):
if config.net_block=='resnet':
return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='inception':
return conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='hpm':
return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='cab':
return conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
#def lin(data, num_filter, workspace, name, binarize, dcn):
# bit = 1
# ACT_BIT = config.ACT_BIT
# bn_mom = config.bn_mom
# workspace = config.workspace
# if not binarize:
# if not dcn:
# conv1 = Conv(data=data, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv')
# bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# return act1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = act1,
# num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
# conv1 = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=act1, offset=conv1_offset,
# num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False)
# #conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
# # no_bias=False, workspace=workspace, name=name + '_conv')
# return conv1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv', act_bit=ACT_BIT, weight_bit=bit)
# conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
# return conv1
def lin3(data, num_filter, workspace, name, k, g=1, d=1):
bn_mom = config.bn_mom
workspace = config.workspace
if k!=3:
conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=((k-1)//2,(k-1)//2), num_group=g,
no_bias=True, workspace=workspace, name=name + '_conv')
else:
conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=(d,d), num_group=g, dilate=(d, d),
no_bias=True, workspace=workspace, name=name + '_conv')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
ret = act1
return ret
class CAB:
def __init__(self, data, nFilters, nModules, n, workspace, name, dilate, group):
self.data = data
self.nFilters = nFilters
self.nModules = nModules
self.n = n
self.workspace = workspace
self.name = name
self.dilate = dilate
self.group = group
self.sym_map = {}
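        # cache of already-built (symbol, n_filters) pairs keyed by grid
        # position (w, h); get_output() memoizes its recursion through this map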
def get_output(self, w, h):
key = (w, h)
if key in self.sym_map:
return self.sym_map[key]
ret = None
if h==self.n:
if w==self.n:
ret = (self.data, self.nFilters)
else:
x = self.get_output(w+1, h)
f = int(x[1]*0.5)
if w!=self.n-1:
body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, 1)
else:
body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, self.dilate)
ret = (body,f)
else:
x = self.get_output(w+1, h+1)
y = self.get_output(w, h+1)
if h%2==1 and h!=w:
xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
#xbody = xbody+x[0]
else:
xbody = x[0]
#xbody = x[0]
#xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
if w==0:
ybody = lin3(y[0], y[1], self.workspace, "%s_w%d_h%d_3"%(self.name, w, h), 3, self.group)
else:
ybody = y[0]
ybody = mx.sym.concat(y[0], ybody, dim=1)
body = mx.sym.add_n(xbody,ybody, name="%s_w%d_h%d_add"%(self.name, w, h))
body = body/2
ret = (body, x[1])
self.sym_map[key] = ret
return ret
def get(self):
return self.get_output(1, 1)[0]
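# Editor's note: a hypothetical usage sketch, not part of the original file.
# It assumes mxnet is importable and that config.net_block has been set to one
# of 'resnet' / 'inception' / 'hpm' / 'cab' before conv_block is called:
#
#     data = mx.sym.Variable('data')
#     unit = conv_block(data, num_filter=64, stride=(1, 1), dim_match=True,
#                       name='stage1_unit1', binarize=False, dcn=False, dilate=1)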
| 54.378462
| 148
| 0.62106
| 2,661
| 17,673
| 3.937242
| 0.062758
| 0.06185
| 0.020617
| 0.054596
| 0.806433
| 0.784576
| 0.760619
| 0.737329
| 0.721199
| 0.683593
| 0
| 0.044666
| 0.227239
| 17,673
| 325
| 149
| 54.378462
| 0.722487
| 0.139026
| 0
| 0.486381
| 0
| 0
| 0.035333
| 0
| 0
| 0
| 0
| 0
| 0.003891
| 1
| 0.054475
| false
| 0
| 0.023346
| 0.003891
| 0.167315
| 0.003891
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 42500bb71a15c0815810b37eafb946db4fb96b64
| 3,713
| py
| Python
| Ch2_Linked_Lists/test/test_CTCI_Ch2_Ex6.py
| mtrdazzo/CTCI
| 30a82aed96b05fe21b7d337a138e4ec19950eb9d
| ["MIT"] | null | null | null
| Ch2_Linked_Lists/test/test_CTCI_Ch2_Ex6.py
| mtrdazzo/CTCI
| 30a82aed96b05fe21b7d337a138e4ec19950eb9d
| ["MIT"] | null | null | null
| Ch2_Linked_Lists/test/test_CTCI_Ch2_Ex6.py
| mtrdazzo/CTCI
| 30a82aed96b05fe21b7d337a138e4ec19950eb9d
| ["MIT"] | null | null | null
|
from unittest import TestCase
from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node
from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force
from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import is_palindrome_reverse
class TestPalindromeSinglyLinkedList(TestCase):
def setUp(self):
self.pll = PalindromeSinglyLinkedList()
def tearDown(self):
self.pll = None
def test_empty_list(self):
with self.assertRaises(Empty):
self.pll.is_palindrome()
def test_single_element(self):
self.pll.add(1)
self.assertTrue(self.pll.is_palindrome())
def test_two_elements(self):
self.pll.add(1)
self.pll.add(1)
self.assertTrue(self.pll.is_palindrome())
self.pll.remove(1)
self.pll.add(2)
self.assertFalse(self.pll.is_palindrome())
def test_more_than_two_elements_even(self):
self.pll.add(1)
self.pll.add(2)
self.pll.add(2)
self.pll.add(2)
self.assertFalse(self.pll.is_palindrome())
self.pll.remove(2)
self.pll.add(1)
self.assertTrue(self.pll.is_palindrome())
def test_more_than_two_elements_odd(self):
self.pll.add(1)
self.pll.add(2)
self.pll.add(2)
self.assertFalse(self.pll.is_palindrome())
self.pll.remove(2)
self.pll.add(1)
self.assertTrue(self.pll.is_palindrome())
class TestPalindromeBruteForce(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_linked_list(self):
self.assertIsNone(is_palindrome_brute_force(None))
def test_single_element(self):
list = Node(1)
self.assertTrue(is_palindrome_brute_force(list))
def test_two_elements(self):
list = Node(1)
list.next = Node(2)
self.assertFalse(is_palindrome_brute_force(list))
list.next = Node(1)
self.assertTrue(is_palindrome_brute_force(list))
def test_odd_elements(self):
list = Node(1)
list.next = Node(2)
list.next.next = Node(2)
self.assertFalse(is_palindrome_brute_force(list))
list.next.next = Node(1)
self.assertTrue(is_palindrome_brute_force(list))
def test_even_elements(self):
list = Node(1)
list.next = Node(2)
list.next.next = Node(2)
list.next.next.next = Node(3)
self.assertFalse(is_palindrome_brute_force(list))
list.next.next.next = Node(1)
self.assertTrue(is_palindrome_brute_force(list))
class TestPalindromeReverse(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_node(self):
self.assertIsNone(is_palindrome_reverse(None))
def test_single_node(self):
self.assertTrue(is_palindrome_reverse(Node(1)))
def test_two_nodes(self):
l_list = Node(1)
l_list.next = Node(2)
self.assertFalse(is_palindrome_reverse(l_list))
l_list.next = Node(1)
self.assertTrue(is_palindrome_reverse(l_list))
def test_odd_nodes(self):
l_list = Node(1)
l_list.next = Node(2)
l_list.next.next = Node(3)
self.assertFalse(is_palindrome_reverse(l_list))
l_list.next.next = Node(1)
self.assertTrue(is_palindrome_reverse(l_list))
def test_even_nodes(self):
l_list = Node(1)
l_list.next = Node(2)
        l_list.next.next = Node(2)
        l_list.next.next.next = Node(3)
        self.assertFalse(is_palindrome_reverse(l_list))
        l_list.next.next.next = Node(1)
self.assertTrue(is_palindrome_reverse(l_list))
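# Editor's note: a hypothetical sketch (not part of the original file) of the
# reverse-and-compare technique exercised above: build a reversed copy of the
# chain, then walk both chains in lockstep. It assumes the node's payload sits
# in a `.data` attribute, which is an assumption about SinglyLinkedList.
def _is_palindrome_reverse_sketch(head):
    if head is None:
        return None  # mirrors the tested contract: empty input yields None
    rev, node = None, head
    while node is not None:  # build a reversed copy of the chain
        copy = Node(node.data)
        copy.next = rev
        rev = copy
        node = node.next
    a, b = head, rev
    while a is not None:  # compare original and reversed chains pairwise
        if a.data != b.data:
            return False
        a, b = a.next, b.next
    return True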
| 24.919463
| 110
| 0.649879
| 501
| 3,713
| 4.580838
| 0.107784
| 0.135948
| 0.056645
| 0.086275
| 0.806536
| 0.732462
| 0.715468
| 0.709804
| 0.697603
| 0.682353
| 0
| 0.01745
| 0.243738
| 3,713
| 148
| 111
| 25.087838
| 0.799858
| 0
| 0
| 0.67
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.24
| 1
| 0.21
| false
| 0.04
| 0.04
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 4259a696e067dbb5b562342c586a116816461462
| 29
| py
| Python
| src/svr/tests/__init__.py
| yottaawesome/fsnd-project-2
| 7ed478fa945a561a28af06dc8e4492a9fbea510a
| ["MIT"] | 3
| 2019-05-04T12:30:00.000Z
| 2020-05-14T06:28:51.000Z
| src/svr/tests/__init__.py
| yottaawesome/fsnd-project-2
| 7ed478fa945a561a28af06dc8e4492a9fbea510a
| ["MIT"] | 1
| 2019-05-05T01:30:37.000Z
| 2019-05-16T02:50:04.000Z
| src/svr/tests/__init__.py
| yottaawesome/fsnd-project-2
| 7ed478fa945a561a28af06dc8e4492a9fbea510a
| ["MIT"] | 1
| 2020-03-27T07:12:40.000Z
| 2020-03-27T07:12:40.000Z
|
from .test_db import TestDal
| 14.5 | 28 | 0.827586 | 5 | 29 | 4.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 29 | 1 | 29 | 29 | 0.92 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
428e0c3390f490eb7e09d675c22baad9bedb5ba6 | 171 | py | Python | nndet/evaluator/detection/__init__.py | joeranbosma/nnDetection | 2ebbf1cdc8a8794c73e325f06fea50632c78ae8c | ["BSD-3-Clause"] | 242 | 2021-05-17T12:31:39.000Z | 2022-03-31T11:51:29.000Z | nndet/evaluator/detection/__init__.py | joeranbosma/nnDetection | 2ebbf1cdc8a8794c73e325f06fea50632c78ae8c | ["BSD-3-Clause"] | 59 | 2021-06-02T07:32:10.000Z | 2022-03-31T18:45:52.000Z | nndet/evaluator/detection/__init__.py | joeranbosma/nnDetection | 2ebbf1cdc8a8794c73e325f06fea50632c78ae8c | ["BSD-3-Clause"] | 38 | 2021-05-31T14:01:37.000Z | 2022-03-21T08:24:40.000Z |
from nndet.evaluator.detection.froc import FROCMetric
from nndet.evaluator.detection.coco import COCOMetric
from nndet.evaluator.detection.hist import PredictionHistogram
| 42.75 | 62 | 0.877193 | 21 | 171 | 7.142857 | 0.52381 | 0.18 | 0.36 | 0.54 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070175 | 171 | 3 | 63 | 57 | 0.943396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
42a78f723d388f6c17abd15949a96f2a870ca42a | 1,933 | py | Python | mindhome_alpha/erpnext/stock/doctype/stock_settings/test_stock_settings.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | ["MIT"] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/stock/doctype/stock_settings/test_stock_settings.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | ["MIT"] | null | null | null | mindhome_alpha/erpnext/stock/doctype/stock_settings/test_stock_settings.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | ["MIT"] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals

import frappe
import unittest


class TestStockSettings(unittest.TestCase):
    def setUp(self):
        frappe.db.set_value("Stock Settings", None, "clean_description_html", 0)

    def test_settings(self):
        item = frappe.get_doc(dict(
            doctype='Item',
            item_code='Item for description test',
            item_group='Products',
            description='<p><span style="font-size: 12px;">Drawing No. 07-xxx-PO132<br></span><span style="font-size: 12px;">1800 x 1685 x 750<br></span><span style="font-size: 12px;">All parts made of Marine Ply<br></span><span style="font-size: 12px;">Top w/ Corian dd<br></span><span style="font-size: 12px;">CO, CS, VIP Day Cabin</span></p>'
        )).insert()

        settings = frappe.get_single('Stock Settings')
        settings.clean_description_html = 1
        settings.save()

        item.reload()
        self.assertEqual(item.description, '<p>Drawing No. 07-xxx-PO132<br>1800 x 1685 x 750<br>All parts made of Marine Ply<br>Top w/ Corian dd<br>CO, CS, VIP Day Cabin</p>')
        item.delete()

    def test_clean_html(self):
        settings = frappe.get_single('Stock Settings')
        settings.clean_description_html = 1
        settings.save()

        item = frappe.get_doc(dict(
            doctype='Item',
            item_code='Item for description test',
            item_group='Products',
            description='<p><span style="font-size: 12px;">Drawing No. 07-xxx-PO132<br></span><span style="font-size: 12px;">1800 x 1685 x 750<br></span><span style="font-size: 12px;">All parts made of Marine Ply<br></span><span style="font-size: 12px;">Top w/ Corian dd<br></span><span style="font-size: 12px;">CO, CS, VIP Day Cabin</span></p>'
        )).insert()

        self.assertEqual(item.description, '<p>Drawing No. 07-xxx-PO132<br>1800 x 1685 x 750<br>All parts made of Marine Ply<br>Top w/ Corian dd<br>CO, CS, VIP Day Cabin</p>')
        item.delete()
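For illustration, the transformation asserted in both tests (inline <span style=...> wrappers stripped, structural tags such as <p> and <br> kept) can be reproduced standalone. clean_html below is a hypothetical stand-in for the sanitisation Frappe applies when clean_description_html is enabled, not the actual implementation:

import re

def clean_html(description):
    # Remove <span ...> open/close tags but keep their inner text;
    # <p> and <br> pass through untouched. Hypothetical sketch only.
    return re.sub(r"</?span[^>]*>", "", description)

dirty = '<p><span style="font-size: 12px;">Drawing No. 07-xxx-PO132<br></span></p>'
assert clean_html(dirty) == '<p>Drawing No. 07-xxx-PO132<br></p>'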
| 42.021739 | 338 | 0.698914 | 311 | 1,933 | 4.270096 | 0.273312 | 0.067771 | 0.097892 | 0.128012 | 0.787651 | 0.787651 | 0.787651 | 0.787651 | 0.787651 | 0.787651 | 0 | 0.055057 | 0.135541 | 1,933 | 45 | 339 | 42.955556 | 0.739677 | 0.053802 | 0 | 0.709677 | 0 | 0.129032 | 0.566575 | 0.04274 | 0 | 0 | 0 | 0 | 0.064516 | 1 | 0.096774 | false | 0 | 0.096774 | 0 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
35f85f5cb5fab6226fab7a5a01b0882ca5ca7ca9 | 54 | py | Python | tests/src/import_func.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | ["CC0-1.0"] | null | null | null | tests/src/import_func.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | ["CC0-1.0"] | 1 | 2022-03-12T20:41:21.000Z | 2022-03-13T06:34:30.000Z | tests/src/import_func.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | ["CC0-1.0"] | null | null | null |
from testlib_a.main_a import print_name
print_name()
| 13.5 | 39 | 0.833333 | 10 | 54 | 4.1 | 0.7 | 0.439024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 54 | 3 | 40 | 18 | 0.854167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 1 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
c41f3f30efc1128fe0e35981a452b93b464ce15f | 304 | py | Python | configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_09_10PottedMeatCan.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | ["Apache-2.0"] | 33 | 2021-12-15T07:11:47.000Z | 2022-03-29T08:58:32.000Z | configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_09_10PottedMeatCan.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | ["Apache-2.0"] | 3 | 2021-12-15T11:39:54.000Z | 2022-03-29T07:24:23.000Z | configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_09_10PottedMeatCan.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | ["Apache-2.0"] | null | null | null |
_base_ = "./resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_01_02MasterChefCan.py"
OUTPUT_DIR = (
    "output/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/09_10PottedMeatCan"
)
DATASETS = dict(TRAIN=("ycbv_010_potted_meat_can_train_pbr",))
| 50.666667 | 117 | 0.871711 | 37 | 304 | 6.459459 | 0.72973 | 0.200837 | 0.23431 | 0.292887 | 0.527197 | 0.527197 | 0.527197 | 0.527197 | 0 | 0 | 0 | 0.100346 | 0.049342 | 304 | 5 | 118 | 60.8 | 0.726644 | 0 | 0 | 0 | 0 | 0 | 0.786184 | 0.786184 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c441a8d53ebaea6e35e7d68f0992cf2efeee375b | 2,429 | py | Python | tests/sequence_utils_test.py | rmcolq/genofunk | ffa031fb361fc736e839d0e36d36f8ed7ade30dc | ["MIT"] | 1 | 2021-01-09T23:25:02.000Z | 2021-01-09T23:25:02.000Z | tests/sequence_utils_test.py | rmcolq/genofunk | ffa031fb361fc736e839d0e36d36f8ed7ade30dc | ["MIT"] | null | null | null | tests/sequence_utils_test.py | rmcolq/genofunk | ffa031fb361fc736e839d0e36d36f8ed7ade30dc | ["MIT"] | null | null | null |
import os
import unittest
import json
import filecmp

from genofunk.sequence_utils import *

this_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


class TestSequenceUtils(unittest.TestCase):
    def test_get_coordinates_from_json_simple_pairs(self):
        json_value = {
            "start": 30,
            "end": 40,
            "strand": 1
        }
        coordinates = get_coordinates_from_json(json_value, pairs=True)
        expected = [[30, 40]]
        self.assertEqual(expected, coordinates)

    def test_get_coordinates_from_json_simple_no_pairs(self):
        json_value = {
            "start": 30,
            "end": 40,
            "strand": 1
        }
        coordinates = get_coordinates_from_json(json_value, pairs=False)
        expected = [30, 40]
        self.assertEqual(expected, coordinates)

    def test_get_coordinates_from_json_join_pairs(self):
        json_value = {
            "join": [
                {"start": 0, "end": 11, "strand": 1},
                {"start": 10, "end": 20, "strand": 1}
            ]
        }
        coordinates = get_coordinates_from_json(json_value, pairs=True)
        expected = [[0, 11], [10, 20]]
        self.assertEqual(expected, coordinates)

    def test_get_coordinates_from_json_join_no_pairs(self):
        json_value = {
            "join": [
                {"start": 0, "end": 11, "strand": 1},
                {"start": 10, "end": 20, "strand": 1}
            ]
        }
        coordinates = get_coordinates_from_json(json_value, pairs=False)
        expected = [0, 11, 10, 20]
        self.assertEqual(expected, coordinates)

    def test_is_open_reading_frame_wrong_start(self):
        amino_acid_sequence = "NATIL*"
        result = is_open_reading_frame(amino_acid_sequence)
        self.assertFalse(result)

    def test_is_open_reading_frame_wrong_end(self):
        amino_acid_sequence = "MNATIL*S"
        result = is_open_reading_frame(amino_acid_sequence)
        self.assertFalse(result)

    def test_is_open_reading_frame_stop_in_middle(self):
        amino_acid_sequence = "MNATIL*S*"
        result = is_open_reading_frame(amino_acid_sequence, allow_stop_codons_in_middle=False)
        self.assertFalse(result)

    def test_is_open_reading_frame_stop_in_middle_allowed(self):
        amino_acid_sequence = "MNATIL*S*"
        result = is_open_reading_frame(amino_acid_sequence, allow_stop_codons_in_middle=True)
        self.assertTrue(result)
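The functions under test come from genofunk.sequence_utils, which is not part of this excerpt. As an assumption inferred purely from the four assertions above (starts with 'M', ends with '*', internal stop codons rejected unless explicitly allowed), a minimal sketch of is_open_reading_frame might be; this is not genofunk's actual code:

def is_open_reading_frame(amino_acid_sequence, allow_stop_codons_in_middle=False):
    # Hypothetical reimplementation inferred from the tests above.
    if not amino_acid_sequence.startswith("M"):
        return False
    if not amino_acid_sequence.endswith("*"):
        return False
    # Reject stop codons before the final position unless explicitly allowed.
    if not allow_stop_codons_in_middle and "*" in amino_acid_sequence[:-1]:
        return False
    return True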
| 34.211268 | 94 | 0.645533 | 295 | 2,429 | 4.925424 | 0.20678 | 0.038541 | 0.099105 | 0.121129 | 0.860977 | 0.836201 | 0.836201 | 0.788713 | 0.788713 | 0.788713 | 0 | 0.027518 | 0.251956 | 2,429 | 71 | 95 | 34.211268 | 0.772152 | 0 | 0 | 0.508197 | 0 | 0 | 0.051029 | 0 | 0 | 0 | 0 | 0 | 0.131148 | 1 | 0.131148 | false | 0 | 0.081967 | 0 | 0.229508 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
674628d16822f8d4efcc764dcb583fc1ae5fb351 | 86 | py | Python | tests/syntax/scripts/annotated_comments.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | ["MIT"] | null | null | null | tests/syntax/scripts/annotated_comments.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | ["MIT"] | null | null | null | tests/syntax/scripts/annotated_comments.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | ["MIT"] | null | null | null |
#$ header variable x :: int
#$ acc parallel private(idx)
#$ omp parallel private(idx)
| 21.5 | 28 | 0.697674 | 12 | 86 | 5 | 0.75 | 0.5 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.162791 | 86 | 3 | 29 | 28.666667 | 0.833333 | 0.930233 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
674979db2e403ec19a4fc12df3f2a373c9172b77 | 86 | py | Python | OIL/__init__.py | vjdad4m/OIL | a664fe213723fe354796245632f58f31583bcba0 | ["MIT"] | 1 | 2021-06-22T22:14:16.000Z | 2021-06-22T22:14:16.000Z | OIL/__init__.py | vjdad4m/OIL | a664fe213723fe354796245632f58f31583bcba0 | ["MIT"] | null | null | null | OIL/__init__.py | vjdad4m/OIL | a664fe213723fe354796245632f58f31583bcba0 | ["MIT"] | null | null | null |
import OIL.color
import OIL.label
import OIL.parser
import OIL.tools
import OIL.errors
| 17.2 | 17 | 0.837209 | 15 | 86 | 4.8 | 0.466667 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.104651 | 86 | 5 | 18 | 17.2 | 0.935065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |