| source (string) | points (list) | n_points (int64) | path (string) | repo (string) |
|---|---|---|---|---|
from __future__ import absolute_import
import re
import os
from datetime import datetime
from .data import Data
NAME_REGEX = re.compile(
r'^:(?P<attribute>[a-z\-_]+)'
r'(?:\[(?P<type>[a-z]+)\])?:'
r'\s*(?P<value>.*)$'
)
def _read_file(path):
with open(path) as fp:
return fp.readlines()
def _to_list(value):
if not value:
return []
lines = value.split('\n')
return [x.strip() for x in lines]
def _cast(value, data_type):
value = value.strip()
if data_type == 'list':
value = _to_list(value)
if data_type == 'int':
value = int(value)
return value
class Reader(object):
def __init__(self, path):
self.path = path
def read(self):
output = Data()
attribute, value, data_type = '', '', None
for line in _read_file(self.path):
data = NAME_REGEX.match(line)
if data:
if value:
output.set(
attribute,
_cast(value, data_type),
)
attribute = data.group('attribute')
value = data.group('value')
data_type = data.group('type')
else:
value += line
output.set(
attribute,
_cast(value, data_type),
)
return output
@property
def _created_at(self):
created_at = os.path.getctime(self.path)
return datetime.fromtimestamp(created_at)
@property
def _modified_at(self):
modified_at = os.path.getmtime(self.path)
return datetime.fromtimestamp(modified_at)
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
staticpy/page/reader.py
|
toddsifleet/staticpy
|
import threading
import traceback
from camera.sdk_gige_hikvision.GrabImage import MVS_Cam  # industrial camera SDK stream reader
class hikCamera(threading.Thread):
def __init__(self, ip_name):
threading.Thread.__init__(self)
self.ip_name = ip_name
# Initialize the camera
self.device_camera = MVS_Cam(self.ip_name)
def run(self):
i = 0
while i < 100:
try:
# Get a frame
# This read mode does not buffer, so the frame returned is always the latest one
frame = self.device_camera.Get_Frame()
except:
print(traceback.format_exc())
else:
if type(frame) != type(None):
print(type(frame), self.ip_name, frame.shape[:2])
i += 1
# Close the camera
self.device_camera.Close_Cam()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
hikvision.py
|
simpletask1/video_stream
|
import unittest
from .context import BasicEndpointTestSuite
class EndPointScoring(BasicEndpointTestSuite):
def test_dice(self):
response = self.client.post("/scoring/dice?run_sync=true")
assert response.status_code == 200
def test_sum(self):
response = self.client.post("/scoring/sum?run_sync=true")
assert response.status_code == 200
def test_status(self):
self.client.get("/scoring/")
def test_stop(self):
self.client.delete("/scoring/")
if __name__ == "__main__":
unittest.main()
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
tests/unit/endpoints/test_scoring.py
|
finalelement/MONAILabel
|
import pytest
# integration tests require a nomad Vagrant VM or binary running
def test_get_nodes(nomad_setup):
assert isinstance(nomad_setup.nodes.get_nodes(), list) == True
def test_get_nodes_prefix(nomad_setup):
nodes = nomad_setup.nodes.get_nodes()
prefix = nodes[0]["ID"][:4]
nomad_setup.nodes.get_nodes(prefix=prefix)
def test_dunder_getitem_exist(nomad_setup):
n = nomad_setup.nodes["pynomad1"]
assert isinstance(n, dict)
def test_dunder_getitem_not_exist(nomad_setup):
with pytest.raises(KeyError):
j = nomad_setup.nodes["pynomad2"]
def test_dunder_contain_exists(nomad_setup):
assert "pynomad1" in nomad_setup.nodes
def test_dunder_contain_not_exist(nomad_setup):
assert "real.localdomain" not in nomad_setup.nodes
def test_dunder_str(nomad_setup):
assert isinstance(str(nomad_setup.nodes), str)
def test_dunder_repr(nomad_setup):
assert isinstance(repr(nomad_setup.nodes), str)
def test_dunder_getattr(nomad_setup):
with pytest.raises(AttributeError):
d = nomad_setup.nodes.does_not_exist
def test_dunder_iter(nomad_setup):
assert hasattr(nomad_setup.nodes, '__iter__')
for j in nomad_setup.nodes:
pass
def test_dunder_len(nomad_setup):
assert len(nomad_setup.nodes) >= 0
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
tests/test_nodes.py
|
commarla/python-nomad
|
# Author: Martin McBride
# Created: 2022-01-22
# Copyright (C) 2022, Martin McBride
# License: MIT
from generativepy.color import Color
from generativepy.drawing import make_image, setup
import math
from generativepy.geometry import Polygon, Transform
def create_spiro(a, b, d):
dt = 0.01
t = 0
pts = []
while t < 2*math.pi*b/math.gcd(a, b):
t += dt
x = (a - b) * math.cos(t) + d * math.cos((a - b)/b * t)
y = (a - b) * math.sin(t) - d * math.sin((a - b)/b * t)
pts.append((x, y))
return pts
def draw(ctx, pixel_width, pixel_height, frame_no, frame_count):
width = 32
setup(ctx, pixel_width, pixel_height, width=width, startx=-width/2, starty=-width/2, background=Color(1))
a = 14
b = 6
d = 4
Polygon(ctx).of_points(create_spiro(a, b, d)).stroke(Color('red'), line_width=0.1)
make_image("spirograph.png", draw, 600, 600)
def draw2(ctx, pixel_width, pixel_height, frame_no, frame_count):
width = 32
setup(ctx, pixel_width, pixel_height, width=width, startx=-width/2, starty=-width/2, background=Color(1))
a = 16
b = 13
d = 5
Polygon(ctx).of_points(create_spiro(a, b, d)).stroke(Color('firebrick'), line_width=0.1)
a = 16
b = 9
d = 8
Polygon(ctx).of_points(create_spiro(a, b, d)).stroke(Color('goldenrod'), line_width=0.1)
a = 16
b = 11
d = 6
Polygon(ctx).of_points(create_spiro(a, b, d)).stroke(Color('darkgreen'), line_width=0.1)
make_image("spirograph2.png", draw2, 600, 600)
def draw3(ctx, pixel_width, pixel_height, frame_no, frame_count):
width = 32
setup(ctx, pixel_width, pixel_height, width=width, startx=-width/2, starty=-width/2, background=Color(1))
for i in range(6):
a = 13
b = 7
d = 5
with Transform(ctx).rotate(0.05*i):
Polygon(ctx).of_points(create_spiro(a, b, d)).stroke(Color('dodgerblue').with_l_factor(1.1**i), line_width=0.1)
make_image("spirograph3.png", draw3, 600, 600)
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
blog/geometric/spirograph.py
|
sthagen/martinmcbride-generativepy
|
import requests
import os
class IntegrationDiscordDriver:
_scope = ''
_state = ''
def scopes(self, scopes):
pass
def send(self, request, state='', scopes=('identify',)):
self._scope = scopes
self._state = state
return request.redirect('https://discordapp.com/api/oauth2/authorize?response_type=code&client_id={}&scope={}&state={}&redirect_uri={}'.format(
os.getenv('DISCORD_CLIENT'),
' '.join(self._scope),
self._state,
os.getenv('DISCORD_REDIRECT'),
))
def user(self, request):
data = {
'client_id': os.getenv('DISCORD_CLIENT'),
'client_secret': os.getenv('DISCORD_SECRET'),
'grant_type': 'authorization_code',
'code': request.input('code'),
'redirect_uri': os.getenv('DISCORD_REDIRECT')
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
return requests.post('https://discordapp.com/api/oauth2/token', data, headers).json()
def refresh(self, refresh_token):
data = {
'client_id': os.getenv('DISCORD_CLIENT'),
'client_secret': os.getenv('DISCORD_SECRET'),
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
'redirect_uri': os.getenv('DISCORD_REDIRECT')
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
return requests.post('https://discordapp.com/api/oauth2/token', data, headers).json()
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
app/integrations/IntegrationDiscordDriver.py
|
josephmancuso/gbaleague-masonite2
|
from io import StringIO
from django.core.management import call_command
from django.test import TestCase
class InventoryManagementCommandsTest(TestCase):
def test_cleanup_inventory_history(self):
out = StringIO()
call_command('cleanup_inventory_history', stdout=out)
result = out.getvalue()
self.assertIn('min date', result)
self.assertIn('machine_snapshot_commit', result)
def test_cleanup_inventory_history_quiet(self):
out = StringIO()
call_command('cleanup_inventory_history', '-q', stdout=out)
self.assertEqual("", out.getvalue())
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
tests/inventory/test_management_commands.py
|
janheise/zentral
|
"""
File: 648.py
Title: Replace Words
Difficulty: Medium
URL: https://leetcode.com/problems/replace-words/
"""
import unittest
from typing import List
class Solution:
def replaceWords(self, dict: List[str], sentence: str) -> str:
tree = {}
for root in dict:
current = None
for c in root:
if current is None:
if c not in tree:
tree[c] = {}
current = tree[c]
else:
if c not in current:
current[c] = {}
current = current[c]
current[0] = True
def replace(word: str):
successor = ""
current = None
for c in word:
if current is None:
if c in tree:
current = tree[c]
successor += c
else:
return word
else:
if 0 in current:
return successor
elif c in current:
current = current[c]
successor += c
else:
return word
return successor
return " ".join(list(map(replace, sentence.split())))
class SolutionTestCase(unittest.TestCase):
def test_example1(self):
# Input
dict = ["cat", "bat", "rat"]
sentence = "the cattle was rattled by the battery"
# Output
output = "the cat was rat by the bat"
solution = Solution()
self.assertEqual(solution.replaceWords(dict, sentence), output)
if __name__ == "__main__":
unittest.main()
|
[
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}
] | 3
|
leetcode/648.py
|
GihwanKim/Baekjoon
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""List currently running applications. """
import uuid
import argparse
import empower_core.command as command
def pa_cmd(args, cmd):
"""List applications parser method. """
usage = "%s <options>" % command.USAGE.format(cmd)
desc = command.DESCS[cmd]
parser = argparse.ArgumentParser(usage=usage, description=desc)
required = parser.add_argument_group('required named arguments')
required.add_argument('-p', '--project_id', help='The project id',
required=True, type=uuid.UUID, dest="project_id")
(args, leftovers) = parser.parse_known_args(args)
return args, leftovers
def do_cmd(gargs, args, _):
"""List currently running applications. """
url = '/api/v1/projects/%s/apps' % args.project_id
_, data = command.connect(gargs, ('GET', url), 200)
for entry in data.values():
accum = []
accum.append("app_id ")
accum.append(entry['service_id'])
accum.append(" name ")
accum.append(entry['name'])
print(''.join(accum))
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}
] | 3
|
empower/cli/apps_commands/list_apps.py
|
5g-empower/empower-runtime
|
from typing import List, Any
def transform(
data: List[dict]
) -> List[tuple]:
return [
(key, value)
for dict_item in data
for key, value in dict_item.items()
]
def main():
data = [
{"key": "value", "123": 123},
{"another_key": "another_value", "key": "value"},
{"name": "surname", 123: 123, "hello": "world"}
]
result = transform(data)
print(result)
if __name__ == "__main__":
main()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
Python/pyworkout/comprehensions/ex31_mod2.py
|
honchardev/Fun
|
#!/usr/bin/python3
"""
We are given two sentences A and B. (A sentence is a string of space separated
words. Each word consists only of lowercase letters.)
A word is uncommon if it appears exactly once in one of the sentences, and does
not appear in the other sentence.
Return a list of all uncommon words.
You may return the list in any order.
Example 1:
Input: A = "this apple is sweet", B = "this apple is sour"
Output: ["sweet","sour"]
Example 2:
Input: A = "apple apple", B = "banana"
Output: ["banana"]
Note:
0 <= A.length <= 200
0 <= B.length <= 200
A and B both contain only spaces and lowercase letters.
"""
from typing import List
from collections import Counter
class Solution:
def uncommonFromSentences(self, A: str, B: str) -> List[str]:
"""
need counter, only need to appear once
"""
c = Counter(A.split()) + Counter(B.split())
ret = [
k
for k, v in c.items()
if v == 1
]
return ret
def uncommonFromSentences_complext(self, A: str, B: str) -> List[str]:
"""
need counter
"""
c_A, c_B = Counter(A.split()), Counter(B.split())
ret = []
for k, v in c_A.items():
if v == 1 and k not in c_B:
ret.append(k)
for k, v in c_B.items():
if v == 1 and k not in c_A:
ret.append(k)
return ret
def uncommonFromSentences_error(self, A: str, B: str) -> List[str]:
"""
set difference
"""
s_A, s_B = set(A.split()), set(B.split())
return list(
(s_A - s_B) | (s_B - s_A)
)
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
}
] | 3
|
884 Uncommon Words from Two Sentences.py
|
krishna13052001/LeetCode
|
import os
import pytest
import shutil
import textwrap
from tests.lib.util import rand_str, create_file, execute
root = os.path.dirname(os.path.dirname(__file__))
@pytest.fixture
def repo_dir(tmpdir):
repo_dir = str(tmpdir.mkdir(rand_str()))
# collect coverage data
with open(os.path.join(root, ".coveragerc")) as f:
create_file(repo_dir, ".coveragerc", f.read(), add=False, commit=False)
os.mkdir(os.path.join(repo_dir, "reports"))
yield repo_dir
if os.environ.get("CI", "false").lower() in ["1", "true"]:
# move collected coverage data to the reports directory
for root_path, _dirs, files in os.walk(os.path.join(repo_dir, "reports")):
for file in files:
shutil.move(os.path.join(root_path, file), os.path.join(root, "reports", file))
@pytest.fixture
def repo(repo_dir):
execute(repo_dir, "git init -b master")
execute(repo_dir, "git config --local user.email 'tests@example.com'")
execute(repo_dir, "git config --local user.name 'Tests runner'")
execute(repo_dir, "git add .coveragerc")
create_file(
repo_dir,
".gitignore",
textwrap.dedent(
"""
.eggs
*.egg
*.egg-info/
build
dist
*.py[oc]
reports/
"""
),
)
create_file(repo_dir, "__init__.py", "")
return repo_dir
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}
] | 3
|
tests/conftest.py
|
bruno-fs/setuptools-git-versioning
|
# -*- coding: utf-8 -*-
"""Gtk.ListBox()."""
import gi
gi.require_version(namespace='Gtk', version='3.0')
from gi.repository import Gtk
class Handler:
def __init__(self):
listbox_1 = builder.get_object(name='listbox_1')
listbox_2 = builder.get_object(name='listbox_2')
# Loop to create the widgets.
for n in range(1, 4):
row = Gtk.ListBoxRow.new()
hbox = Gtk.Box.new(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
hbox.set_border_width(border_width=6)
# Add the container to the row
row.add(widget=hbox)
label = Gtk.Label.new(str=f'Linha {n}')
label.set_xalign(xalign=0)
hbox.pack_start(child=label, expand=True, fill=True, padding=0)
switch = Gtk.Switch.new()
hbox.pack_start(child=switch, expand=False, fill=True, padding=0)
listbox_1.add(widget=row)
# Data to be inserted into the rows of listbox_2
self.dados = ['Item 1', 'Item 2', 'Item 3', 'Item 4', 'Item 5']
# Loop to create the rows.
for item in self.dados:
listbox_2.add(widget=Gtk.Label.new(str=item))
def _on_row_clicked(self, listbox, listboxrow):
# Show which item was clicked.
print(f'Clicou no {self.dados[listboxrow.get_index()]}')
if __name__ == '__main__':
builder = Gtk.Builder.new()
builder.add_from_file(filename='MainWindow.glade')
builder.connect_signals(obj_or_map=Handler())
win = builder.get_object(name='MainWindow')
win.connect('destroy', Gtk.main_quit)
win.show_all()
Gtk.main()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
src/gtk3/listbox/glade/MainWindow.py
|
alexandrebarbaruiva/gui-python-gtk
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import pytest
import requests
from datadog_checks.dev import docker_run
from datadog_checks.dev.conditions import CheckDockerLogs, WaitFor
from datadog_checks.dev.utils import load_jmx_config
from datadog_checks.hazelcast import HazelcastCheck
from . import common
@pytest.fixture(scope='session')
def dd_environment():
compose_file = os.path.join(common.HERE, 'docker', 'docker-compose.yaml')
with docker_run(
compose_file,
build=True,
mount_logs=True,
conditions=[
CheckDockerLogs('hazelcast_management_center', ['Hazelcast Management Center successfully started']),
CheckDockerLogs('hazelcast_management_center', ['Started communication with member']),
CheckDockerLogs('hazelcast2', [r'Hazelcast JMX agent enabled']),
CheckDockerLogs('hazelcast2', [r'is STARTED']),
WaitFor(trigger_some_tcp_data),
],
attempts=5,
attempts_wait=5,
):
config = load_jmx_config()
config['instances'] = common.INSTANCE_MEMBERS + [common.INSTANCE_MC_JMX, common.INSTANCE_MC_PYTHON]
yield config, {'use_jmx': True}
def trigger_some_tcp_data():
base_url = 'http://{}:{}'.format(common.HOST, common.MEMBER_REST_PORT)
for i in range(100):
url = "{}/hazelcast/rest/maps/mapName/foo{}".format(base_url, i)
requests.post(url, data='bar')
resp = requests.get(url)
assert resp.content.decode('utf-8') == 'bar'
@pytest.fixture(scope='session')
def hazelcast_check():
return lambda instance: HazelcastCheck('hazelcast', {}, [instance])
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
hazelcast/tests/conftest.py
|
vbarbaresi/integrations-core
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import re
from telemetry.internal.browser import web_contents
def UrlToExtensionId(url):
return re.match(r"(chrome-extension://)([^/]+)", url).group(2)
class ExtensionPage(web_contents.WebContents):
"""Represents an extension page in the browser"""
def __init__(self, inspector_backend):
super(ExtensionPage, self).__init__(inspector_backend)
self.url = inspector_backend.url
self.extension_id = UrlToExtensionId(self.url)
def Reload(self):
"""Reloading an extension page is used as a workaround for an extension
binding bug for old versions of Chrome (crbug.com/263162). After Navigate
returns, we are guaranteed that the inspected page is in the correct state.
"""
self._inspector_backend.Navigate(self.url, None, 10)
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
telemetry/telemetry/internal/browser/extension_page.py
|
Martijnve23/catapult
|
import io
import os
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
import pyasq
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.rst')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
# TODO: Re-enable linting in tests
# when https://github.com/PyCQA/pylint/issues/1113 is fixed
# self.test_args = ['--pylint', '--pylint-error-types=FEW']
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
author=pyasq.__author__,
author_email='mail@jonrshar.pe',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
],
cmdclass={'test': PyTest},
description=pyasq.__doc__,
install_requires=['requests'],
license='License :: OSI Approved :: ISC License (ISCL)',
long_description=long_description,
name='pyasq',
packages=['pyasq'],
platforms='any',
tests_require=[
'pylint',
'pytest',
'pytest-pylint',
'responses',
],
url='http://github.com/textbook/pyasq/',
version=pyasq.__version__,
)
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
setup.py
|
textbook/pyasq
|
import komand
from .schema import ConnectionSchema, Input
# Custom imports below
from google.oauth2 import service_account
import apiclient
import json
class Connection(komand.Connection):
def __init__(self):
super(self.__class__, self).__init__(input=ConnectionSchema())
def connect(self, params):
self.logger.info("Connect: Connecting...")
secret_key = params.get(Input.CREDENTIALS_FILE_CONTENTS)
auth_file_contents = json.loads(secret_key.get("secretKey"))
self.logger.info(f"Contents Loaded: {type(auth_file_contents)}")
self.logger.info(f"Logging in as: {auth_file_contents.get('client_email')}")
# Build a Google credentials object
credentials = service_account.Credentials.from_service_account_info(auth_file_contents)
# Connect to Google Drive
self.drive_service = apiclient.discovery.build("drive", "v3", credentials=credentials)
self.doc_service = apiclient.discovery.build("docs", "v1", credentials=credentials)
def test(self):
self.drive_service.files().list().execute()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
plugins/google_docs/icon_google_docs/connection/connection.py
|
lukaszlaszuk/insightconnect-plugins
|
import heapq
from operator import attrgetter
class Beam(object):
def __init__(self, maxsize, key=attrgetter("score")):
self.key = key
self.maxsize = maxsize
self.beam = []
def push(self, x):
key = self.key(x)
if len(self.beam) < self.maxsize:
heapq.heappush(self.beam, (key, x))
else:
worst_score, worst_state = self.beam[0]
if key > worst_score:
heapq.heapreplace(self.beam, (key, x))
def __len__(self):
return len(self.beam)
def full(self):
return len(self.beam) == self.maxsize
def empty(self):
return not len(self.beam)
def clear(self):
self.beam.clear()
def __iter__(self):
return iter(i[1] for i in self.beam)
def __getitem__(self, item):
return self.beam[item][1]
def best_item(self):
return sorted(self.beam, reverse=True)[0][1]
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
beam.py
|
draplater/empty-parser
|
# Author: Jochen Gast <jochen.gast@visinf.tu-darmstadt.de>
import torch
import torch.nn as nn
from losses import factory
class ClassificationLoss(nn.Module):
def __init__(self, args, topk=(1, 2, 3), reduction='mean'):
super().__init__()
self.args = args
self.cross_entropy = torch.nn.CrossEntropyLoss(reduction=reduction)
self.topk = topk
@staticmethod
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def forward(self, output_dict, target_dict):
output = output_dict["output1"]
target = target_dict["target1"]
# compute actual losses
cross_entropy = self.cross_entropy(output, target)
# create dictionary for losses
loss_dict = {
"xe": cross_entropy,
}
acc_k = ClassificationLoss.accuracy(output, target, topk=self.topk)
for acc, k in zip(acc_k, self.topk):
loss_dict["top%i" % k] = acc
return loss_dict
factory.register("ClassificationLoss", ClassificationLoss)
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
losses/classification_losses.py
|
visinf/deblur-devil
|
import numpy
import scipy.constants as Const
def AtmLayers(hG):
Layers = ([0, 11e3, 25e3, 47e3, 53e3, 79e3, 90e3, 105e3])
FHght = numpy.digitize(hG,Layers)
switcher = {
1: numpy.array([False, -6.5e-3]),
2: numpy.array([True, 216.66]),
3: numpy.array([False, 3e-3]),
4: numpy.array([True, 282.66]),
5: numpy.array([False, -4.5e-3]),
6: numpy.array([True, 165.66]),
7: numpy.array([False, 4e-3])
}
return switcher.get(FHght, 'Ha ocurrido un error en la clasificacion atmosferica de la altitud de vuelo!')
def Temp(Height):
Height = float(Height)
hG = 6.370994e6/(6.370994e6 + Height) * Height
Layer = AtmLayers(hG)
if Layer[0] == False:
Tz = 288 + ((Layer[1]) * hG)
return Tz
elif Layer[0] == True:
return Layer[1]
else:
print('Ha ocurrido un error operacional!')
def Press(Height):
Height = float(Height)
hG = 6.370994e6/(6.370994e6 + Height) * Height
Pz = 101325 * numpy.power((288 + (-6.5e-3) * hG)/288,Const.g * (-1)/(287 * (-6.5e-3)))
return Pz
def Densy(Height):
Height = float(Height)
hG = 6.370994e6/(6.370994e6 + Height) * Height
rhoz = 1.225 * numpy.power((288 + (-6.5e-3) * hG)/288,(Const.g * (-1)/(287 * (-6.5e-3))) - 1)
return rhoz
def Visc(Height):
Height = float(Height)
hG = 6.370994e6/(6.370994e6 + Height) * Height
T = Temp(hG)
T0 = 273.15
u0 = 1.716e-5
uz = u0 * numpy.power(T/T0,1.5) * ((T0 + 110.4)/(T + 110.4))
return uz
def Ss(Height):
import pythermophy as pt
Height = float(Height)
hG = 6.370994e6/(6.370994e6 + Height) * Height
T_0 = Temp(hG)
p_0 = Press(hG)
Air = pt.Fluid.init_from_file('/usr/local/lib/python3.8/dist-packages/pythermophy-0.1-py3.8.egg/fluids/Air.yaml')
ig = pt.IG(Air)
SoundS = ig.get_speed_of_sound(T_0, p_0)
return SoundS
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
}
] | 3
|
ISA.py
|
PabloGomez96/ISAtmosphere
|
from django.core.management.base import BaseCommand, CommandError
from data_ocean.command_progress import CommandProgress
from data_ocean.savepoint import Savepoint
from person.controllers import ConnectorsController, SOURCES
class Command(BaseCommand):
help = '---'
def add_arguments(self, parser):
parser.add_argument('source', type=str, nargs=1, choices=[s.name for s in SOURCES])
def handle(self, *args, **options):
source = options['source'][0]
controller = ConnectorsController(source)
savepoint = Savepoint(f'run_person_connector-{source}')
progress = CommandProgress(self, controller.get_count())
for obj in controller.iter_objects():
if savepoint.has(obj.pk):
progress.next(silent=True)
continue
progress.next()
controller.migrate_object(obj)
savepoint.add(obj.pk)
progress.end()
savepoint.close()
self.stdout.write('Done.')
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": false
}
] | 3
|
person/management/commands/run_person_connector.py
|
AlenaYanish/Data_converter
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import Client
from organization.models import Organization
class AdminSiteTests(TestCase):
def setUp(self):
admin_email = 'admin@pnsn.org'
admin_pass = 'password123'
self.client = Client()
self.organization = Organization.objects.create(name="PNSN")
self.admin_user = get_user_model().objects.create_superuser(
email=admin_email,
password=admin_pass,
organization=self.organization,
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@pnsn.org',
password='password123',
firstname='your',
lastname='mom',
organization=self.organization
)
def test_users_listed(self):
"""Test that users are listed on the user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.firstname)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
'''Test that user edit page works'''
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
'''test that create user page works'''
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
app/core/tests/tests_admin.py
|
pnsn/squac_api
|
import chess.pgn
from models.time_control import TimeControl
DEFAULT_ELO_RATING = 1500
def get_result(game: chess.pgn.Game) -> str:
return game.headers["Result"]
def _get_elo(game: chess.pgn.Game, key: str) -> int:
elo = game.headers.get(key)
if elo == "?" or elo is None:
return DEFAULT_ELO_RATING
return int(elo)
def get_white_elo(game: chess.pgn.Game) -> int:
return _get_elo(game, "WhiteElo")
def get_black_elo(game: chess.pgn.Game) -> int:
return _get_elo(game, "BlackElo")
def get_avg_rating(game: chess.pgn.Game) -> float:
return (get_white_elo(game) + get_black_elo(game)) / 2.0
def get_time_control(game: chess.pgn.Game) -> TimeControl:
return TimeControl(
time_control_header=game.headers["TimeControl"]
)
def get_game_id(game: chess.pgn.Game) -> str:
return game.headers.get('Site').replace('"', '').replace('https://lichess.org/', '')
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
python_chess_utils/header_utils.py
|
kennethgoodman/lichess_downloader_api
|
"""Copyright (c) 2018, Haavard Kvamme
2021, Schrod Stefan"""
import numpy as np
from torch import nn
class DenseVanillaBlock(nn.Module):
def __init__(self, in_features, out_features, bias=True, batch_norm=True, dropout=0., activation=nn.ReLU,
w_init_=lambda w: nn.init.kaiming_normal_(w, nonlinearity='relu')):
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias)
if w_init_:
w_init_(self.linear.weight.data)
self.activation = activation()
self.batch_norm = nn.BatchNorm1d(out_features) if batch_norm else None
self.dropout = nn.Dropout(dropout) if dropout else None
def forward(self, input):
input = self.activation(self.linear(input))
if self.batch_norm:
input = self.batch_norm(input)
if self.dropout:
input = self.dropout(input)
return input
class MLPVanilla(nn.Module):
def __init__(self, in_features, num_nodes, out_features, batch_norm=True, dropout=None, activation=nn.ReLU,
output_activation=None, output_bias=True,
w_init_=lambda w: nn.init.kaiming_normal_(w, nonlinearity='relu')):
super().__init__()
num_nodes=np.append(in_features, num_nodes)
if not hasattr(dropout, '__iter__'):
dropout = [dropout for _ in range(len(num_nodes) - 1)]
net = []
for n_in, n_out, p in zip(num_nodes[:-1], num_nodes[1:], dropout):
net.append(DenseVanillaBlock(n_in, n_out, True, batch_norm, p, activation, w_init_))
net.append(nn.Linear(num_nodes[-1], out_features, output_bias))
if output_activation:
net.append(output_activation)
self.net = nn.Sequential(*net)
def forward(self, input):
return self.net(input)
|
[
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
bites/utils/Simple_Network.py
|
sschrod/BITES
|
from abc import abstractmethod
from typing import List
from selenium.webdriver.chrome.webdriver import WebDriver
from .deafults import default_driver, default_store_path
from .Event import Event
class Source:
"""Abstract class used as super class and to create custom sources classes
Parameters
----------
path_to_driver: `str`
required path to chrome driver (default)
driver: `'WebDriver'`, default `None`
optional driver used instead of the default one
path_to_store: `str`, default `""`
path to file storage (folder storing tokens, driver data...)
"""
def __init__(self, path_to_driver: str, driver: 'WebDriver' = None, path_to_store: str = "") -> None:
if path_to_store == "":
self.path_to_store = default_store_path
else:
self.path_to_store = path_to_store
self.path_to_driver = path_to_driver
if driver == None:
self.driver = default_driver(
path_to_store=self.path_to_store, path_to_driver=self.path_to_driver)
else:
self.driver = driver
@abstractmethod
def load_events(self, *args, **kwargs) -> List['Event']:
"""Load events from external source (an API for instance)
Returns
`List['Event']`
list of parsed events
"""
...
@abstractmethod
def login(self, *args, **kwargs) -> str:
"""Handle authentication to API/External Source.
Returns
-------
str
new url when authenticated
"""
...
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
}
] | 3
|
auto_events/Source.py
|
fedecech/form_automator
|
import pytest
from click.testing import CliRunner
from cpplibhub.cli import main
@pytest.fixture(scope="module")
def runner():
return CliRunner()
def test_main(runner):
# assert main([]) == 0 # run without click
result = runner.invoke(main)
# result = runner.invoke(main, ['--name', 'Amy'])
assert result.exit_code == 0
# assert result.output == 'Hello Amy!\n'
# TODO: test more command line options and args
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
tests/test_cpplibhub.py
|
iotanbo/cpplibhub
|
import pytest
import itertools
import pandas as pd
import numpy as np
from scTenifoldXct.core import null_test
def generate_fake_df_nn(n_ligand=3000, n_receptors=3000, n_cands=200):
gene_names = [f"GENE{i}" for i in range(max(n_ligand, n_receptors))]
iteration = itertools.product(gene_names, gene_names)
inds, ligands, receptors = [], [], []
for i, j in iteration:
inds.append(f"{i}_{j}")
ligands.append(i)
receptors.append(j)
df = pd.DataFrame({"ligand": ligands,
"receptor": receptors,
"dist": np.random.chisquare(1, (n_ligand * n_receptors,)),
"correspondence": np.random.lognormal(0, 4, size=(n_ligand * n_receptors,))},
index=inds)
return df, np.random.choice(df.index, size=(n_cands,), replace=False)
@pytest.mark.parametrize("df_nn,candidates", [
generate_fake_df_nn(3000, 3000, 200),
generate_fake_df_nn(1000, 1000, 200),
])
@pytest.mark.parametrize("filter_zeros", [True])
def test_null_test(df_nn, candidates, filter_zeros):
null_test(df_nn=df_nn, candidates=candidates, filter_zeros=filter_zeros)
def test_chi2_test(xct_skin):
xct_skin.train_nn(n_steps= 1000, lr = 0.001)
xct_skin.chi2_test(dof=3, pval=0.05, cal_FDR=True, plot_result=True)
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
tests/test_stat.py
|
cailab-tamu/scTenifoldXct
|
import pytest
from naturalnets.brains.i_layer_based_brain import ILayerBasedBrainCfg
from tests.pytorch_brains import IPytorchBrainCfg
@pytest.fixture
def torch_config() -> IPytorchBrainCfg:
return IPytorchBrainCfg(type="GRU_PyTorch", num_layers=3,
hidden_size=8,
use_bias=False)
@pytest.fixture
def numpy_config() -> ILayerBasedBrainCfg:
return ILayerBasedBrainCfg(type="GRULayered", hidden_layer_structure=[8, 8, 8], diagonal_hidden_to_hidden=False,
use_bias=False)
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
}
] | 3
|
tests/conftest.py
|
bjuergens/NaturalNets
|
from unittest import TestCase
from unittest.mock import MagicMock, patch
class TestS3BucketObjectFinder(TestCase):
@patch('boto3.client')
@patch('justmltools.s3.aws_credentials.AwsCredentials', autospec=True)
def test_get_matching_s3_objects(
self,
aws_credentials_mock: MagicMock,
client_mock: MagicMock,
):
from types import GeneratorType
from justmltools.s3.s3_bucket_object_finder import S3BucketObjectFinder
sut = S3BucketObjectFinder()
result = sut.get_matching_s3_objects(bucket="")
self.assertIsInstance(result, GeneratorType)
@patch('boto3.client')
@patch('justmltools.s3.aws_credentials.AwsCredentials', autospec=True)
def test_get_matching_s3_keys(
self,
aws_credentials_mock: MagicMock,
client_mock: MagicMock,
):
from types import GeneratorType
from justmltools.s3.s3_bucket_object_finder import S3BucketObjectFinder
sut = S3BucketObjectFinder()
result = sut.get_matching_s3_keys(bucket="")
self.assertIsInstance(result, GeneratorType)
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
tests/s3/test_s3_bucket_object_finder.py
|
BigNerd/justmltools
|
import os
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import matplotlib.pyplot as plt
from PIL import Image
class MultilayerPerceptron(torch.nn.Module):
def __init__(self, num_features):
super(MultilayerPerceptron, self).__init__()
### 1st hidden layer
self.linear_1 = torch.nn.Linear(num_features, 64)
### 2nd hidden layer
self.linear_2 = torch.nn.Linear(64, 1)
def forward(self, x):
out = x.view(x.size(0), -1)
out = self.linear_1(out)
out = F.relu(out)
out = self.linear_2(out)
probas = torch.cat(((out < 0.), (out >= 0.)), 1)
return out, probas
class MyHingeLoss(torch.nn.Module):
def __init__(self):
super(MyHingeLoss, self).__init__()
def forward(self, output, target):
target_new = target.clone()
target_new[target < 1.] = -1.
hinge_loss = 1 - torch.mul(torch.squeeze(output), target_new.float())
hinge_loss[hinge_loss < 0] = 0
return hinge_loss
def fcn(num_features):
model = MultilayerPerceptron(num_features)
return model
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
dlfairness/original_code/FairALM/Experiments-CelebA/label_ablation/fcn.py
|
lin-tan/fairness-variance
|
from flask.ext.bcrypt import generate_password_hash, \
check_password_hash
def authenticate_user(password, user):
return not user.locked and checkpw(password, user.password)
def hashpw(password):
return generate_password_hash(password, 10).decode('utf-8')
def checkpw(password, hashed_password):
return check_password_hash(hashed_password, password)
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
app/encryption.py
|
robot2051/dto-digitalmarketplace-api
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class thepetexpress_spider(BaseSpider):
name = 'thepetexpress.co.uk'
allowed_domains = ['thepetexpress.co.uk', 'www.thepetexpress.co.uk']
start_urls = ('http://www.thepetexpress.co.uk/',)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# categories
categories = hxs.select(u'//nav[@class="cat"]/ul/li/ul/li/a/@href').extract()
for url in categories:
url = urljoin_rfc(get_base_url(response), url + u'?sort=titledesc')
yield Request(url)
# pagination
next_page = hxs.select(u'//a[@class="nxt"]/@href').extract()
if next_page:
next_page = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(next_page)
# products
products = hxs.select(u'//div[@class="products"]//a/@href').extract()
for url in products:
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_product)
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
loader = ProductLoader(item=Product(), response=response)
loader.add_value('url', response.url)
loader.add_xpath('name', u'//div[@id="product"]/h1/text()')
loader.add_xpath('price', u'//p[@class="price"]/span[@class="our_price"]/text()')
if loader.get_output_value('price'):
yield loader.load_item()
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
portfolio/Python/scrapy/seapets/thepetexpress.py
|
0--key/lib
|
from Number_Theory.optimized_gcd import *
import numpy as np
##################################################################
# Function : mod_inv
# Utilizes the extended gcd function defined in optimized_gcd.py
# to find the modular inverse of a mod(n) when a and n are
# relatively prime.
#
# Throws an error if a and n are not relatively prime.
#
# params : a - The number whose mod inverse needs to be found
# : n - The mod number
#
# returns: the modular inverse of a mod n
#################################################################
def mod_inv(a, n):
if a < 0:
a = a + n
if a > n:
a = a % n
g, x, y = inner_ex_gcd(a, n)
if g != 1:
raise Exception('Mod inv does not exist because gcd != 1')
else:
return x % n
##################################################################
# Function : matrix_mod_inv
# Computes the modular inverse of a matrix mod n
#
# Throws an error if the matrix determinant and n are not relatively prime.
#
# params : matrix - The matrix whose mod inverse needs to be found
# : n - The mod number
#
# returns: the matrix inverse mod n
#################################################################
def matrix_mod_inv(matrix, n):
det = np.linalg.det(matrix)
matrix_inv = np.linalg.inv(matrix) * det
det = round(det) % n
with np.nditer(matrix_inv, op_flags=['readwrite']) as it:
for x in it:
x[...] = int((np.matrix.round(x) * mod_inv(det,n)) % n)
matrix_inv.astype(int)
return matrix_inv
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
Number_Theory/mod_inv.py
|
SherwynBraganza31/csc514-crypto
|
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
from Domain.website import Website
from selenium.webdriver.firefox.options import Options
from Repository.file_repository import FileRepository
class WebsiteService:
def __init__(self, website_repository: FileRepository):
self.__website_repository = website_repository
def get_all(self):
return self.__website_repository.get_all()
def add(self,name, url, container_class, classes):
website = Website(name,url, container_class, classes)
self.__website_repository.add(website)
def get_files_from_file(self):
self.__website_repository.read_file()
def scrap(self):
options = Options()
options.headless = True
driver = webdriver.Firefox(options=options)
websites = self.get_all()
for website in websites:
data = {}
df = pd.DataFrame()
driver.get(website.url)
content = driver.page_source
soup = BeautifulSoup(content, features="html.parser")
for div in soup.find_all('div', class_=website.container_class):
for ScrapedClass in website.classes:
try:
data[f"{ScrapedClass}"] = div.find('div', class_=ScrapedClass).text
except:
data[f"{ScrapedClass}"] = "null"
df = df.append(data, ignore_index=True)
df.to_csv(f'{website.name}.csv', index=False, encoding='utf-8')
driver.quit()
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
Service/website_service.py
|
H0R4T1U/SRI
|
import discord, json, requests
from discord.ext import commands
from utils import checks, rpc_module as rpc
class Wallet:
def __init__(self, bot):
self.bot = bot
self.rpc = rpc.Rpc()
@commands.command(hidden=True)
@commands.check(checks.is_owner)
async def wallet(self):
"""Show wallet info [ADMIN ONLY]"""
info = self.rpc.getinfo()
wallet_balance = str(float(info["balance"]))
block_height = info["blocks"]
connection_count = self.rpc.getconnectioncount()
embed = discord.Embed(colour=discord.Colour.red())
embed.add_field(name="Balance", value="{:.8f} MUE".format(float(wallet_balance)))
embed.add_field(name="Connections", value=connection_count)
embed.add_field(name="Block Height", value=block_height)
try:
await self.bot.say(embed=embed)
except discord.HTTPException:
await self.bot.say("I need the `Embed links` permission to send this")
def setup(bot):
bot.add_cog(Wallet(bot))
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
cogs/wallet_info.py
|
MUEDEV/MUE-Discord-Tips
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import pytest
import jax.numpy as jnp
from jaxga.mv import MultiVector
from jaxga.signatures import positive_signature
def _jaxga_mul(a, b):
return a * b
def _mv_ones(num_elements, num_bases):
return MultiVector(
values=jnp.ones([num_bases, num_elements], dtype=jnp.float32),
indices=tuple((i,) for i in range(num_bases)),
signature=positive_signature
)
@pytest.mark.parametrize("num_bases", list(range(1, 10)))
def test_jaxga_mul_mv_mv(num_bases, benchmark):
a = _mv_ones(100, num_bases)
b = _mv_ones(100, num_bases)
_jaxga_mul(a, b)
benchmark(_jaxga_mul, a, b)
if __name__ == '__main__':
pytest.main()
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
benchmarks/test_jaxga.py
|
RobinKa/jaxga
|
from pavo_cristatus.tests.doubles.module_fakes.module_fake_class import ModuleFakeClass
from trochilidae.interoperable_with_metaclass import interoperable_with_metaclass_future
__all__ = ["ModuleFakeClassWithNestedAnnotatedCallables"]
class ModuleFakeClassWithNestedAnnotatedCallables(interoperable_with_metaclass_future(ModuleFakeClass)):
def symbol_of_interest(self, a, b): pass
def non_symbol_of_interest(self, a : int, b : str) -> bool:
def nested(a, b): pass
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
pavo_cristatus/tests/doubles/module_fakes/non_annotated/module_fake_class_with_nested_annotated_callables.py
|
MATTHEWFRAZER/pavo_cristatus
|
import base64
from wptserve.utils import isomorphic_decode
# Use numeric references to let the HTML parser take care of inserting the correct code points
# rather than trying to figure out the necessary bytes for each encoding. (The latter can be
# especially tricky given that Python does not implement the Encoding Standard.)
def numeric_references(input):
output = b""
for cp in input:
output += b"&#x" + format(ord(cp), b"X") + b";"
return output
def main(request, response):
# Undo the "magic" space with + replacement as otherwise base64 decoding will fail.
value = request.GET.first(b"value").replace(" ", "+")
encoding = request.GET.first(b"encoding")
output_value = numeric_references(base64.b64decode(value).decode(b"utf-8"))
return (
[(b"Content-Type", b"text/html;charset=" + encoding)],
b"""<!doctype html>
<a href="https://doesnotmatter.invalid/?%s#%s">test</a>
""" % (output_value, output_value))
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
url/resources/percent-encoding.py
|
xi/wpt
|
#!/usr/bin/env python3
"""generate data"""
import random
class Data():
"""generate data"""
file_name = "./data.csv"
def __init__(self):
pass
def generate(self):
"""generate data"""
with open(self.file_name, "w") as file:
print("pid,hindex,hindex2,hindex3,hindex4,data_type,target", file=file)
for _ in range(1, 10001):
pid = random.randint(1e5, 9e5)
hindex = random.randint(0, 25)
hindex2 = random.randint(0, 25)
hindex3 = random.randint(0, 25)
hindex4 = random.randint(0, 100)
max_hindex = max(hindex, hindex2, hindex3, hindex4)
data_type = random.choice(['test', 'production'])
target = 0
if data_type == 'production' and max_hindex > 20:
target = 1
print("{},{},{},{},{},{},{}".format(pid, hindex, hindex2,
hindex3, hindex4,
data_type, target), file=file)
def main():
"""main"""
data = Data()
data.generate()
if __name__ == "__main__":
main()
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
data.py
|
garethcmurphy/ml-quality
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.op import Op
class ConstantFill(Op):
""" Constant blob generation by broadcasting specified value to a given shape.
It is assumed that there is no equivalent of this op in IE,
so it is usually relevant to constant folding.
"""
op = 'ConstantFill'
enabled = False
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': None,
'op': self.op,
'input_as_shape': 1,
'in_ports_count': 1,
'out_ports_count': 1,
'infer': self.infer
}
super().__init__(graph, mandatory_props, attrs)
def supported_attrs(self):
return [
'input_as_shape',
'fill_value'
]
@staticmethod
def infer(node: Node):
assert len(node.in_nodes()) == 1
assert node.fill_value is not None
assert node.input_as_shape
shape = node.in_port(0).data.get_value()
assert shape is not None
if is_fully_defined(shape):
node.out_port(0).data.set_value(np.full(shape, node.fill_value, np.float32))
else:
node.out_port(0).data.set_shape(shape)
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
tools/mo/openvino/tools/mo/ops/constant_fill.py
|
ryanloney/openvino-1
|
from typing import Any
from allauth.account.adapter import DefaultAccountAdapter
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
from django.conf import settings
from django.http import HttpRequest
class AccountAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request: HttpRequest):
return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
def save_user(self, request, user, form):
if len(user.socialaccount_set.all()) == 0:
name = request.data.get('name', None)
email = request.data.get('email', None)
username = request.data.get('username', None)
password1 = request.data.get('password1', None)
password2 = request.data.get('password2', None)
user.name = name
user.email = email
user.username = username
if(password1 == password2):
user.set_password(password1)
user.save()
class SocialAccountAdapter(DefaultSocialAccountAdapter):
def is_open_for_signup(self, request: HttpRequest, sociallogin: Any):
return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
|
[
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
yoongram/users/adapters.py
|
happyjy/yoonGram
|
import matplotlib.pyplot as plt
import numpy as np
def format_plot(func):
def func_wrapper(*args):
func(*args)
plt.ylabel("Intensity [dBm]")
plt.xlabel("Wavelength [nm]")
plt.tight_layout()
plt.show()
return func
return func_wrapper
def format_ani_plot(func):
def func_wrapper(*args):
func(*args)
plt.ylabel("Intensity [dBm]")
plt.xlabel("Wavelength [nm]")
plt.tight_layout()
return func
return func_wrapper
def interactive_off_on(func):
def func_wrapper(*args):
plt.ioff()
func(*args)
plt.ion()
return func
return func_wrapper
def config_matplotlib(debug_mode: bool) -> None:
plt.style.use("seaborn-whitegrid")
if not debug_mode:
plt.ion()
if __name__ == '__main__':
x = np.random.random(15)
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
handlers/plotting.py
|
manuelprogramming/OSA
|
"""Transfer Out item definition."""
from gaphas.geometry import Rectangle
from gaphor.core import gettext
from gaphor.core.modeling import DrawContext
from gaphor.diagram.presentation import (
Classified,
ElementPresentation,
from_package_str,
)
from gaphor.diagram.shapes import Box, IconBox, Text, stroke
from gaphor.diagram.support import represents
from gaphor.diagram.text import FontStyle, FontWeight
from gaphor.RAAML import raaml
from gaphor.RAAML.fta.constants import DEFAULT_FTA_MAJOR
from gaphor.RAAML.fta.transferin import draw_transfer_in
from gaphor.UML.recipes import stereotypes_str
@represents(raaml.TransferOut)
class TransferOutItem(ElementPresentation, Classified):
def __init__(self, diagram, id=None):
super().__init__(diagram, id, width=DEFAULT_FTA_MAJOR, height=DEFAULT_FTA_MAJOR)
self.watch("subject[NamedElement].name").watch(
"subject[NamedElement].namespace.name"
)
def update_shapes(self, event=None):
self.shape = IconBox(
Box(
draw=draw_transfer_out,
),
Text(
text=lambda: stereotypes_str(self.subject, [gettext("Transfer Out")]),
),
Text(
text=lambda: self.subject.name or "",
width=lambda: self.width - 4,
style={
"font-weight": FontWeight.BOLD,
"font-style": FontStyle.NORMAL,
},
),
Text(
text=lambda: from_package_str(self),
style={"font-size": "x-small"},
),
)
def draw_transfer_out(box, context: DrawContext, bounding_box: Rectangle):
draw_transfer_in(box, context, bounding_box)
cr = context.cairo
cr.move_to(bounding_box.width / 4.0, bounding_box.height / 2.0)
cr.line_to(0, bounding_box.height / 2.0)
stroke(context)
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}
] | 3
|
gaphor/RAAML/fta/transferout.py
|
Texopolis/gaphor
|
import re
def pairup(str):
results=[]
for i in range(len(str)-1):
results.append(str[i]+str[i+1])
return results
def ascending(seq):
for i in range(len(seq)-1):
if (int(seq[i])>int(seq[i+1])):
return False
return True
def check_tripple(seq):
if len(seq) == 1:
return True
elif((re.search(regex, seq[0])) or (re.search(regex, seq[-1]))):
return True
return False
#"""
#def reverse_pair(seq):
# result=""
# for s in seq:
# result+=s[0]
# result+=seq[-1][-1]
# return result
#"""
regex=r"([0-9])\1"
regex2=r"([0-9])\1\1+"
#2365
numbers=[str(i) for i in range(178416,676462)]
passwds=list(filter(lambda x: ascending(x), numbers))
passwds=list(filter(lambda x: re.search(regex,x), passwds))
print(len(numbers))
print(len(passwds))
passwds=list(map(lambda x: re.split(regex2,x), passwds))
passwds=list(filter(lambda x: check_tripple(x), passwds))
print(len(passwds))
with open("results2.txt","w") as f:
for p in passwds:
f.write(p+"\n")
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
Day4/adventofcode4.py
|
oomoepoo/Adventofcode2k19
|
from django.db.models.aggregates import Sum
from django.forms.models import model_to_dict
from .models import LdaSimilarity
from .lda_model_builder import LdaModelManager
from django.db.models import Q
class ContentBasedRecommender():
def __init__(self, min_sim=0.1):
self.min_sim = min_sim
@staticmethod
def recommend_items_by_items(table_name, items_id):
source_records = LdaSimilarity.objects.filter(source=items_id, item_type=table_name)
target_records = LdaSimilarity.objects.filter(target=items_id, item_type=table_name)
records = []
records = records + [{'id': item.target, 'similarity': item.similarity} for item in list(source_records)]
records = records + [{'id': item.source, 'similarity': item.similarity} for item in list(target_records)]
records = sorted(records, key = lambda i: i['similarity'],reverse=True)
return records
@staticmethod
def train_items_by_items(table_name):
manager = LdaModelManager()
manager.train_model(table_name=table_name)
return
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
recommender/dimadb/content_based_recommender.py
|
cnam0203/trivi-backend
|
# third-party
from flask import render_template, url_for, request, jsonify
# locals
from . import warehouse
@warehouse.route('/element_types', methods=['GET'])
def index():
return render_template("warehouse/element_types.html")
@warehouse.route('/element_type', methods=['POST'])
def create_new_element_type():
print(request.__dict__)
print(request.data)
print(request.get_json())
return jsonify({
"success": True
})
# @warehouse.route('/element_type', methods=['GET'])
# @warehouse.route('/element_type/<element_type_id>', methods=['GET'])
# def element_type(element_type_id=None):
# pass
# @warehouse.route('/element_type', methods=['POST'])
# def new_element_type()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
warehouse/views.py
|
thiagolcmelo/dynamic
|
import websocket
try:
import thread
except ImportError: #TODO use Threading instead of _thread in python3
import _thread as thread
import time
import sys
def on_message(ws, message):
print(message)
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
def run(*args):
for i in range(3):
# send the message, then wait
# so thread doesn't exit and socket
# isn't closed
ws.send("Hello %d" % i)
time.sleep(1)
time.sleep(1)
ws.close()
print("Thread terminating...")
thread.start_new_thread(run, ())
if __name__ == "__main__":
websocket.enableTrace(True)
if len(sys.argv) < 2:
host = "ws://echo.websocket.org/"
else:
host = sys.argv[1]
ws = websocket.WebSocketApp(host,
on_message = on_message,
on_error = on_error,
on_close = on_close)
ws.on_open = on_open
ws.run_forever()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
}
] | 3
|
examples/echoapp_client.py
|
yarshure/websocket_client
|
""" Topological Sort
Topological sorting for Directed Acyclic Graph (DAG) is a linear ordering of
vertices such that for every directed edge uv, vertex u comes before v in
the ordering. Topological Sorting for a graph is not possible if the graph is
not a DAG.
"""
def topological_sort(graph):
""" topological sort python implementation """
stack = []
visited = set()
def topological_sort_util(vertex):
""" modified depth-first search recursive algorithm """
visited.add(vertex)
for node in graph[vertex]:
if node not in visited:
topological_sort_util(node)
stack.append(vertex)
for vertex in list(graph):
if vertex not in visited:
topological_sort_util(vertex)
stack.reverse()
return stack
def main():
""" operational function """
graph = {
0: [1, 2, 5],
1: [4],
2: [],
3: [2, 4, 5, 6],
4: [],
5: [2],
6: [0, 4]
}
print(topological_sort(graph))
if __name__ == "__main__":
main()
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}
] | 3
|
graph/topological_sort.py
|
x899/algorithms
|
import pdftables.line_segments as line_segments
from nose.tools import assert_equals, raises
from pdftables.line_segments import LineSegment
def segments(segments):
return [line_segments.LineSegment.make(a, b) for a, b in segments]
def test_segments_generator():
seg1, seg2 = segs = segments([(1, 4), (2, 3)])
values = list(line_segments.segments_generator(segs))
assert_equals(
[(1, seg1, False),
(2, seg2, False),
(3, seg2, True),
(4, seg1, True)],
values
)
def test_histogram_segments():
segs = segments([(1, 4), (2, 3)])
values = list(line_segments.histogram_segments(segs))
assert_equals([((1, 2), 1), ((2, 3), 2), ((3, 4), 1)], values)
def test_segment_histogram():
segs = segments([(1, 4), (2, 3)])
values = list(line_segments.segment_histogram(segs))
assert_equals([(1, 2, 3, 4), (1, 2, 1)], values)
@raises(RuntimeError)
def test_malformed_input_segments_generator():
segs = segments([(1, -1)])
list(line_segments.segments_generator(segs))
def test_hat_point_generator():
segs = segments([(1, 4), (2, 3)])
result = list(line_segments.hat_point_generator(segs))
x = 2.5
expected = [(1, set()),
(2, set([LineSegment(start=1, end=4, object=None)])),
(x, set([LineSegment(start=1, end=4, object=None),
LineSegment(start=2, end=3, object=None)])),
(3, set([LineSegment(start=1, end=4, object=None)])),
(4, set())]
assert_equals(expected, result)
def test_hat_generator():
segs = segments([(0, 4), (1, 3)])
result = list(line_segments.hat_generator(segs))
expected = [(0, 0), (1, 0.75), (2.0, 2.0), (3, 0.75), (4, 0)]
assert_equals(expected, result)
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
test/test_linesegments.py
|
tessact/pdftables
|
from django.conf import settings
from places.models import Place
import requests
PLACES_API_ROOT = "https://maps.googleapis.com/maps/api/place"
PLACES_DETAILS_URL = "{ROOT_URL}/details/json?inputtype=textquery&key={key}&place_id={place_id}&fields={fields}"
PLACES_PHOTO_URL = "{ROOT_URL}/photo?key={key}&photoreference={photo_ref}&maxwidth={width}"
FETCH_FIELDS = "photo,website,formatted_address,geometry,permanently_closed,type,name,rating,user_ratings_total"
def fetch_photo_redirect(photo_ref):
photo_url = PLACES_PHOTO_URL.format(
ROOT_URL=PLACES_API_ROOT,
key=settings.GOOGLE_PLACES_API_KEY,
photo_ref=photo_ref,
width=800
)
photo_req = requests.get(photo_url, allow_redirects=False)
redirect = photo_req.headers.get("Location")
return redirect or None
def fetch_details_for_place_id(place_id):
full_url = PLACES_DETAILS_URL.format(
ROOT_URL=PLACES_API_ROOT,
key=settings.GOOGLE_PLACES_API_KEY,
place_id=place_id,
fields=FETCH_FIELDS)
resp = requests.get(full_url)
data = resp.json()
if not 'result' in data:
return {}, None, None
r = data['result']
photo_url = None
photo_attrib = None
if r.get('photos'):
first_photo = r['photos'][0]
photo_attrib = first_photo['html_attributions']
photo_ref = first_photo['photo_reference']
photo_url = fetch_photo_redirect(photo_ref)
return r, photo_url, photo_attrib
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
backend/places/google_places_helper.py
|
nuwen/saveourfaves-server
|
import importlib
import numpy as np
from game import MTD_Game
from utils import *
__author__ = "Sailik Sengupta"
class Strategy:
def __init__(self, game_to_play=MTD_Game(), gamma=0.5):
self.game = game_to_play
self.DISCOUNT_FACTOR = gamma
self.lib = importlib.import_module('gurobi')
def set_gamma(self, gamma):
self.DISCOUNT_FACTOR = gamma
def initilize_V(self, S):
V = {}
for s in S:
V[s] = 0
return V
def update_Q(self, S, A1, A2, R, T, V):
Q = {}
# Update the Q values for each state
for s in S:
for d in range(len(A1[s])):
for a in range(len(A2[s])):
sda = '{}_{}_{}'.format(s, A1[s][d], A2[s][a])
Q[sda] = R[s][a][d]
for s_new in S:
Q[sda] += T[s][a][d][s_new] * self.DISCOUNT_FACTOR * V[s_new]
return Q
'''
Given the new Q values, updates the optimal values for each state.
Each agent type selects the implementation of this method.
'''
def get_value(self, s, A1, A2, R, T, Q):
raise NotImplementedError
def run(self):
S = self.game.get_S()
A1 = self.game.get_A(1)
A2 = self.game.get_A(0)
R = self.game.get_R()
T = self.game.get_T()
V = self.initilize_V(S)
for k in range(301):
Q = self.update_Q(S, A1, A2, R, T, V)
# Update Value function
V_new = {}
pi = {}
for s in S:
V_new[s], pi[s] = self.get_value(s, A1[s], A2[s], R[s], T[s], Q)
V = V_new
print_iteration_info(k, V, pi)
return (V, pi)
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": false
}
] | 3
|
src/zero_sum/strategy.py
|
sailik1991/MarkovGameSolvers
|
import unittest
import requests_mock
from canvasapi import Canvas
from canvasapi.todo import Todo
from tests import settings
@requests_mock.Mocker()
class TestTodo(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
self.todo = Todo(
self.canvas._Canvas__requester,
{
"type": "grading",
"assignment": {},
"ignore": ".. url ..",
"ignore_permanently": ".. url ..",
"html_url": ".. url ..",
"needs_grading_count": 3,
"context_type": "course",
"course_id": 1,
"group_id": None,
},
)
def test_str(self, m):
test_str = str(self.todo)
self.assertIsInstance(test_str, str)
self.assertEqual(test_str, "Todo Item (grading)")
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
tests/test_todo.py
|
damianfs/canvasapi
|
from rest_framework.views import APIView
class BlindDetail(APIView):
def get(self):
pass
def put(self):
pass
def post(self):
pass
def delete(self):
pass
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
blinds/views.py
|
BoraDowon/BackendBlackberry
|
import time
def execution_time(method, repeat_count=1):
def timed(*args, **kwargs):
for i in range(repeat_count):
ts = time.time()
result = method(*args, **kwargs)
te = time.time()
print('>>> function %r executed in %2.2f ms <<<' % (method.__name__, (te - ts) * 1000))
return timed
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
}
] | 3
|
parchments/core/debug.py
|
idlelosthobo/parchment
|
import yaml
def load_config_data(path: str) -> dict:
with open(path) as f:
cfg: dict = yaml.load(f, Loader=yaml.FullLoader)
return cfg
def save_config_data(data: dict, path: str) -> None:
with open(path, "w") as f:
yaml.dump(data, f)
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
l5kit/l5kit/configs/config.py
|
xiaoxiaoheimei/l5kit
|
#!/usr/bin/env python3
def ceil_div(n, k):
return n//k + (n%k!=0)
def lowbit(x):
return x & (-x)
def sum_until(rs, n):
c = 0
while n > 0:
c += rs[n]
n -= lowbit(n)
return c
def increase(rs, n):
while n < len(rs):
rs[n] += 1
n += lowbit(n)
def method_a(ls, k):
ls = [1+(n-1)//k for n in ls]
rs = [0 for _ in range(1+ceil_div(len(ls), k))]
c = 0
for i, n in enumerate(ls):
c += i - sum_until(rs, n)
increase(rs, n)
return c
def method_b(ls, k):
ls = [(n-1)//k for n in ls]
def aux(i, go_zone):
at_zone = i // k
if go_zone < at_zone:
return (go_zone+1)*k - 1 - i
if go_zone > at_zone:
return go_zone*k - i
return 0
return max(abs(aux(i, go_zone)) for i, go_zone in enumerate(ls))
if __name__ == '__main__':
for t in range(int(input())):
_, k = [int(n) for n in input().split()]
ns = [int(n) for n in input().split()]
print('Case {}: {}'.format(t+1, method_a(ns, k)-method_b(ns, k)))
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
acm/livearchive/6604-airport-sort.py
|
neizod/problems
|
#!/usr/bin/python
################################################################################
# 20de4144-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20de4144-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKLM:\Software\Policies\Microsoft\EMET\SysSettings', 'ASLR')
# Output Lines
self.output = [r'HKLM:\Software\Policies\Microsoft\EMET\SysSettings', ('ASLR=' + str(dword))]
if dword == 3:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\EMET'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\EMET\SysSettings'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Policies\Microsoft\EMET\SysSettings' -name 'ASLR' -value 3 -Type DWord")
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
pcat2py/class/20de4144-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import markdown
from django import template
from portfolio.models import (Artifact, FileArtifact, ImageArtifact,
TextArtifact)
register = template.Library()
@register.assignment_tag()
def get_artifact_list(project, artifact_type=""):
if artifact_type == "":
return Artifact.objects.filter(project=project) \
.filter(published=True)
elif artifact_type == "file":
return FileArtifact.objects.filter(project=project) \
.filter(published=True)
elif artifact_type == "image":
return ImageArtifact.objects.filter(project=project) \
.filter(published=True)
elif artifact_type == "text":
return TextArtifact.objects.filter(project=project) \
.filter(published=True)
else:
return []
@register.filter
def convert_markdown(text):
return markdown.markdown(text, safe_mode='escape')
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
portfolio/templatetags/portfolio_tags.py
|
raymcbride/django-portfolio
|
import numpy as np
import matplotlib.pyplot as plt
from one_a import one_a
from one_b import one_b
from one_c import one_c
from one_d import one_d
from one_e import one_e
def random_generator(seed, m=2 ** 64 - 1, a=2349543, c=913842, a1=21, a2=35, a3=4, a4=4294957665):
"""
Generates pseudorandom numbers with a combination of (M)LCC, 64 bit shift, and MWC
:param seed: Seed to use
:param m: Determines period of the MLCC
:param a: For the MLCC
:param c: For the MLCC
:param a1: For the first bit shift
:param a2: For the second bit shift
:param a3: For the third bit shift
:param a4: For the MWC
:return:
"""
# First linear congruential generator
# While true, so the generator never stops making new numbers
# This is used to make sure the XOR shift is 64 bit
bit_64 = 0xffffffffffffffff
while True:
# This is MLCC part
generated_number = (a * seed + c) % m
# Now bit shift
generated_number = generated_number ^ (generated_number >> a1) & bit_64
generated_number = generated_number ^ (generated_number << a2) & bit_64
generated_number = generated_number ^ (generated_number >> a3) & bit_64
# Now MWC part
mwc_out = a4 * (generated_number & (2 ** 32 - 1)) + (generated_number >> 32)
seed = mwc_out # set the seed to a new number, so a different number is generated next time
mwc_out = mwc_out / m
if mwc_out > 1.:
# Have to make it between 0 and 1, so mod 1. makes sure it's between 0 and 1 now
close_to_final = mwc_out % 1.
else:
close_to_final = mwc_out
yield close_to_final
def all_one(rand_gen):
one_a(rand_gen)
plt.cla()
one_b(rand_gen)
plt.cla()
one_c(rand_gen)
plt.cla()
one_d(rand_gen)
plt.cla()
one_e(rand_gen)
plt.cla()
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}
] | 3
|
one.py
|
jacobbieker/NUR_Handin2
|
import os
import numpy as np
from maskrcnn.lib.data.preprocessing import mold_inputs
from maskrcnn.lib.config import cfg
from maskrcnn.lib.utils import io_utils
def test_mold_inputs_ones():
image = np.ones((cfg.IMAGE.MAX_DIM, cfg.IMAGE.MAX_DIM, 3), dtype=np.uint8) * 255
molded_images, image_metas = mold_inputs([image], cfg)
mean = molded_images[0, 0, :, :].mean()
assert abs(mean - ((1 - 0.485) / 0.229)) < 1e-5
mean = molded_images[0, 1, :, :].mean()
assert abs(mean - ((1 - 0.456) / 0.224)) < 1e-5
mean = molded_images[0, 2, :, :].mean()
assert abs(mean - ((1 - 0.406) / 0.225)) < 1e-5
assert molded_images.shape == (1, 3, cfg.IMAGE.MAX_DIM, cfg.IMAGE.MAX_DIM)
assert image_metas[0][1] == 1
def test_mold_image():
image_path = os.path.join("data", "COCO_val2014_000000018928.jpg")
image = io_utils.read_image(image_path)
molded_images, image_metas = mold_inputs([image], cfg)
print("image_metas", image_metas)
assert image_metas.shape[0] == 1
assert molded_images.shape[1] == 3
assert molded_images.shape == (1, 3, cfg.IMAGE.MAX_DIM, cfg.IMAGE.MAX_DIM)
assert abs(image_metas[0][1] - 2.048) < 1e-10
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
tests/data/test_processing.py
|
quanhua92/maskrcnn-pytorch
|
import json
from database.database import Database, Inspector
class HIT:
def __init__(self, task_id=None, hit_id=None):
super().__init__()
if task_id is not None:
self.task_id = task_id
self.inspector = Inspector(task_id)
self.__dict__.update(self.info)
self.task_group_id = self.inspector.hit_info['task_group_id']
if hit_id is not None:
self.hit_id = hit_id
@property
def info(self):
try:
data = self.inspector.hit_info
del data['task_group_id']
return data
except:
return {}
@property
def complete(self):
try:
return self.inspector.task_info['complete']
except:
return False
@property
def human_speaks_first(self):
try:
return self.inspector.task_info['human_speaks_first']
except:
return None
@property
def dialog(self):
try:
result = {}
for agent in self.inspector.agents:
result[agent] = Database().load_messages(self.task_id, agent)
return result
except:
return {}
@property
def forms(self):
try:
result = {}
for form in self.inspector.forms:
result[form] = self.inspector.form_data(form).replace('""', '"').replace("''", "'").replace('\n', '')
result[form] = json.loads(result[form])
return result
except Exception as error:
print("Error retrieving form data in hit.py:", error)
return {}
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
app/database/hit.py
|
yooli23/MTurk
|
import os,shutil, pyzipper
from datetime import datetime
## getting time object
now = datetime.now()
# mmddyy
dt_string = now.strftime("%m%d%y")
## function to compress file
def compressFile(folder_name):
zip_file_location = f"./{dt_string}.zip"
zipPW = "latenight"
to_zip = list()
with pyzipper.AESZipFile(os.path.abspath(zip_file_location), 'w', compression=pyzipper.ZIP_LZMA) as zf:
zf.setpassword(bytes(zipPW, "utf-8"))
zf.setencryption(pyzipper.WZ_AES, nbits=128)
for root,dirs,files in os.walk(folder_name):
for filed in files:
zf.write(f"{root}/{filed}")
zf.close()
return os.path.abspath(zip_file_location)
## backing the file up by moving it to OneDrive
def backup_da_file(zipfile):
shutil.move(zipfile, "C:/Users/mochi/OneDrive/Backup")
## Compressing the file and getting the absolute path of the zipfile
abs_path_zipfile = compressFile("C:/Users/mochi/Desktop/Documents")
backup_da_file(abs_path_zipfile)
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
blueTeam/backup.py
|
cloudfellows/stuxnet-sandworm
|
""" Module for image operations """
import numpy as np
import tensorflow as tf
def apply_grey_patch(image, top_left_x, top_left_y, patch_size):
"""
Replace a part of the image with a grey patch.
Args:
image (numpy.ndarray): Input image
top_left_x (int): Top Left X position of the applied box
top_left_y (int): Top Left Y position of the applied box
patch_size (int): Size of patch to apply
Returns:
numpy.ndarray: Patched image
"""
patched_image = np.array(image, copy=True)
patched_image[
top_left_y : top_left_y + patch_size, top_left_x : top_left_x + patch_size, :
] = 127.5
return patched_image
@tf.function
def transform_to_normalized_grayscale(tensor):
"""
Transform tensor over RGB axis to grayscale.
Args:
tensor (tf.Tensor): 4D-Tensor with shape (batch_size, H, W, 3)
Returns:
tf.Tensor: 4D-Tensor of grayscale tensor, with shape (batch_size, H, W, 1)
"""
grayscale_tensor = tf.reduce_sum(tensor, axis=-1)
normalized_tensor = tf.cast(
255 * tf.image.per_image_standardization(grayscale_tensor), tf.uint8
)
return normalized_tensor
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}
] | 3
|
tf_explain/utils/image.py
|
sicara/mentat
|
import datetime
import numpy as np
"""
Class to define a 'RiskAssessment' from FHIR.
Currently only produces JSON.
{
"date": date assesment was made in ISO format yyyy-mm-dd,
"results": {
"five_year_abs": Five year Absolute Risk for this patient as decimal
"five_year_ave": Five year Risk for an average patient
"lifetime_abs": Lifetime Absolute Risk for this patient as decimal
"lifetime_ave": Lifetime Risk for an average patient
}
}
"""
class BasicRiskAssessment:
def __init__(self):
self.resourceType = "RiskAssessment"
#self.date = datetime.datetime.now().isoformat()
self.date = datetime.date.today().isoformat()
self.fiveyearABS = np.float64(-1)
self.fiveyearAVE = np.float64(-1)
self.lifetimeABS = np.float64(-1)
self.lifetimeAVE = np.float64(-1)
def setRiskScores(self, fiveABS, fiveAVE, lifeABS, lifeAVE):
self.fiveyearABS = fiveABS
self.fiveyearAVE = fiveAVE
self.lifetimeABS = lifeABS
self.lifetimeAVE = lifeAVE
def getJson(self):
return {"date":self.date,
"results": {
"five_year_abs": self.fiveyearABS,
"five_year_ave": self.fiveyearAVE,
"lifetime_abs": self.lifetimeABS,
"lifetime_ave": self.lifetimeAVE
}}
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
RiskAssessment.py
|
VisExcell/riskmodels
|
import sys
class PrintUtils:
progress = 0
total_progress = 0
@classmethod
def print_progress_bar(cls, iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', print_end = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
print_end - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
cls.progress = iteration
cls.total_progress = total
if prefix != "":
prefix += " "
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'{prefix}[{bar}] {percent}% {suffix}', end = print_end)
if iteration == total:
print()
@classmethod
def smart_print(cls, msg, end = "\x1b[2K\r\n", file = sys.stdout):
if not isinstance(msg, str):
msg = str(msg)
print("\x1b[2K", end = "\r", flush = True)
if file == sys.stderr:
print("\x1b[31m" + msg + "\x1b[37m", file = sys.stderr, flush = True)
else:
print(msg, flush = True)
if cls.total_progress != 0:
cls.print_progress_bar(cls.progress, cls.total_progress)
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
print_utils.py
|
FieryRider/matrix-archive
|
import logging
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class GoogleCloudStorageDownloadOperator(BaseOperator):
"""
Downloads a file from Google Cloud Storage.
"""
template_fields = ('bucket','object',)
template_ext = ('.sql',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(
self,
bucket,
object,
filename,
google_cloud_storage_conn_id='google_cloud_storage_default',
*args,
**kwargs):
"""
Create a new GoogleCloudStorageDownloadOperator.
:param bucket: The Google cloud storage bucket where the object is.
:type bucket: string
:param object: The name of the object to download in the Google cloud
storage bucket.
:type object: string
:param filename: The file path on the local file system (where the
operator is being executed) that the file should be downloaded to.
:type filename: string
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google cloud storage.
:type google_cloud_storage_conn_id: string
"""
super(GoogleCloudStorageDownloadOperator, self).__init__(*args, **kwargs)
self.bucket = bucket
self.object = object
self.filename = filename
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
def execute(self, context):
logging.info('Executing download: %s, %s, %s', self.bucket, self.object, self.filename)
hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id)
print(hook.download(self.bucket, self.object, self.filename))
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}
] | 3
|
airflow/contrib/operators/gcs_download_operator.py
|
djeps/airflow
|
from webapp.user.models import User
from webapp.db import db
class Category(db.Model):
__tablename__ = 'categories'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
name = db.Column(db.String(50), nullable=False)
is_income = db.Column(db.Boolean, nullable=False)
transactions = db.relationship('Transaction', backref='trans_cat', lazy='dynamic')
def __repr__(self):
return (
f"<Category: name = {self.name}, id = {self.id},"
f" is_income = {self.is_income}>"
)
class Transaction(db.Model):
__tablename__ = 'transactions'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
category_id = db.Column(db.Integer, db.ForeignKey(Category.id))
is_actual = db.Column(db.Boolean, nullable=False)
value = db.Column(db.DECIMAL(15, 2), nullable=False)
date = db.Column(db.Date, nullable=False)
comment = db.Column(db.String(255), nullable=False)
def __repr__(self):
return (
f"<Transaction: id = {self.id}, value = {self.value},"
f" date = {self.date}, actual = {self.is_actual}>"
)
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
}
] | 3
|
webapp/main/models.py
|
mign0n/super_budget
|
import uuid
from typing import Any, Dict
from loguru import logger
from analytics.signal import analytic_signal
from users.models import CustomUser
class UserInterface:
@staticmethod
def get_username(*, user_id: uuid.UUID) -> Dict[str, Any]:
return {"username": CustomUser.objects.get(user_uuid=user_id).username}
@staticmethod
def get_user(*, username: str) -> Dict[str, CustomUser]:
return {"username": CustomUser.objects.get(username=username)}
class AnalyticInterface:
@staticmethod
def create_analytic(*, model: Any, instance: Any, request: Any) -> None:
analytic_signal.send(sender=model, instance=instance, request=request)
logger.success(f"analytic data was created for {instance}")
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
e_learning/interfaces.py
|
Mohamed-Kaizen/django_playground
|
from collections import defaultdict
import psutil
from system_monitor.msg import Cpu as CpuMsg
_prev_total = defaultdict(int)
_prev_busy = defaultdict(int)
def collect_all():
"""
parse /proc/stat and calculate total and busy time
(more specifically USER_HZ; see man 5 proc for further information)
"""
msgs = []
running_processes = len(psutil.pids())
timings = _get_cpu_stats()
overall_usage = 0
# iterate per-CPU timing lists without shadowing the timings dict
for cpu, cpu_timings in timings.items():
cpu_total = sum(cpu_timings)
del cpu_timings[3:5]
cpu_busy = sum(cpu_timings)
cpu_usage = _calculate_usage(cpu, cpu_total, cpu_busy)
msgs.append(CpuMsg(
cpu_name=cpu,
cpu_usage=cpu_usage
))
overall_usage += cpu_usage
# compute mean of cpu usages
overall_usage_percentage = overall_usage / len(timings)
return running_processes, msgs, overall_usage_percentage
def _get_cpu_stats():
"""
read and parse /proc/stat
:returns timings which contains accumulative busy and total cpu time
"""
timings = {}
with open('/proc/stat', 'r') as file_obj:
for line in file_obj:
# only evaluate lines like cpu0, cpu1, cpu2, ...
if line.startswith('cpu') and line.strip().split()[0] != 'cpu':
line = line.strip().split()
timings[line[0]] = [int(x) for x in line[1:]]
return timings
def _calculate_usage(cpu_num, total, busy):
"""
calculate usage percentage based on busy/total time
"""
diff_total = total - _prev_total[cpu_num]
diff_busy = busy - _prev_busy[cpu_num]
_prev_total[cpu_num] = total
_prev_busy[cpu_num] = busy
if diff_total == 0:
return 0
else:
return int(diff_busy / diff_total * 100)
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
bitbots_misc/system_monitor/src/system_monitor/cpus.py
|
MosHumanoid/bitbots_thmos_meta
|
import logging
import shelve
from ftplib import FTP
import requests
import requests_cache
from io import BytesIO
_cache_file_path = None
def set_cache_http(cache_file_path):
requests_cache.install_cache(cache_file_path)
def open_url(url):
return requests.get(url).text
def set_cache_ftp(cache_file_path):
global _cache_file_path
_cache_file_path = cache_file_path
def ftp_retrieve(server, path, filename):
logging.info('loading: ftp://%s/%s/%s' % (server, path, filename))
ftp = FTP(server)
ftp.login()
ftp.cwd(path)
buffer = BytesIO()
ftp.retrbinary('RETR %s' % filename, buffer.write)
return buffer
def download_ftp(server, path, filename, refresh_cache=False):
"""
TODO: drop shelve (too unstable) and use a simple filesystem implementation.
:param server:
:param path:
:param filename:
:param refresh_cache:
:return:
"""
if _cache_file_path:
with shelve.open(_cache_file_path) as url_cache:
location = '/'.join([server, path, filename])
if location not in url_cache or refresh_cache:
url_cache[location] = ftp_retrieve(server, path, filename)
try:
output = url_cache[location]
except KeyError:
del url_cache[location]
raise
except EOFError:
del url_cache[location]
raise
else:
output = ftp_retrieve(server, path, filename)
return output
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
src/urlcaching.py
|
chris-ch/sec-edgar
|
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
import argparse
from typing import Callable
import boto3
from mypy_boto3_ec2.service_resource import Instance
from materialize.cli.scratch import check_required_vars
from materialize.scratch import launched_by, print_instances, tags, whoami
def configure_parser(parser: argparse.ArgumentParser) -> None:
check_required_vars()
parser.add_argument(
"who",
nargs="*",
help="Whose instances to show (defaults to yourself)",
default=[whoami()],
)
parser.add_argument("--all", help="Show all instances", action="store_true")
parser.add_argument("--output-format", choices=["table", "csv"], default="table")
def run(args: argparse.Namespace) -> None:
filter: Callable[[Instance], bool] = (
(lambda _i: True) if args.all else (lambda i: launched_by(tags(i)) in args.who)
)
ists = [i for i in boto3.resource("ec2").instances.all() if filter(i)]
print_instances(ists, args.output_format)
|
[
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
misc/python/materialize/cli/scratch/mine.py
|
moyun/materialize
|
import pytest
import numpy as np
from astropy.utils.data import download_file
from jdaviz.app import Application
# This file is originally from
# https://data.sdss.org/sas/dr14/manga/spectro/redux/v2_1_2/7495/stack/manga-7495-12704-LOGCUBE.fits.gz
URL = 'https://stsci.box.com/shared/static/28a88k1qfipo4yxc4p4d40v4axtlal8y.fits'
""" The purpose of this test is to check that both methods:
- app.get_viewer('spectrum-viewer').data()
- app.get_data_from_viewer("spectrum-viewer")
return the same spectrum values.
"""
@pytest.fixture
def jdaviz_app():
return Application(configuration='cubeviz')
@pytest.mark.filterwarnings('ignore')
@pytest.mark.remote_data
def test_data_retrieval(jdaviz_app):
fn = download_file(URL, cache=True)
jdaviz_app.load_data(fn)
# two ways of retrieving data from the viewer.
# They should return the same spectral values
a1 = jdaviz_app.get_viewer('spectrum-viewer').data()
a2 = jdaviz_app.get_data_from_viewer("spectrum-viewer")
test_value_1 = a1[0].data
test_value_2 = list(a2.values())[0].data
assert np.allclose(test_value_1, test_value_2, atol=1e-5)
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
jdaviz/configs/cubeviz/plugins/tests/test_data_retrieval.py
|
check-spelling/jdaviz
|
from django.contrib import admin
class BaseOwnerAdmin(admin.ModelAdmin):
"""
Automatically populate the owner field.
Used to filter the queryset down to the current user's data.
"""
exclude = ('owner',)
def get_queryset(self, request):
qs = super(BaseOwnerAdmin, self).get_queryset(request)
return qs.filter(owner=request.user)
def save_model(self, request, obj, form, change):
obj.owner = request.user
return super(BaseOwnerAdmin, self).save_model(request, obj, form, change)
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
blog/blog/base_admin.py
|
drunkwretch/python_learning
|
import pymc3 as pm
from .helpers import SeededTest
import numpy as np
import theano
class TestShared(SeededTest):
def test_deterministic(self):
with pm.Model() as model:
data_values = np.array([.5, .4, 5, 2])
X = theano.shared(np.asarray(data_values, dtype=theano.config.floatX), borrow=True)
pm.Normal('y', 0, 1, observed=X)
model.logp(model.test_point)
def test_sample(self):
x = np.random.normal(size=100)
y = x + np.random.normal(scale=1e-2, size=100)
x_pred = np.linspace(-3, 3, 200)
x_shared = theano.shared(x)
with pm.Model() as model:
b = pm.Normal('b', 0., 10.)
pm.Normal('obs', b * x_shared, np.sqrt(1e-2), observed=y)
prior_trace0 = pm.sample_prior_predictive(1000)
trace = pm.sample(1000, init=None, progressbar=False)
pp_trace0 = pm.sample_ppc(trace, 1000)
x_shared.set_value(x_pred)
prior_trace1 = pm.sample_prior_predictive(1000)
pp_trace1 = pm.sample_ppc(trace, 1000)
assert prior_trace0['b'].shape == (1000,)
assert prior_trace0['obs'].shape == (1000, 100)
np.testing.assert_allclose(x, pp_trace0['obs'].mean(axis=0), atol=1e-1)
assert prior_trace1['b'].shape == (1000,)
assert prior_trace1['obs'].shape == (1000, 200)
np.testing.assert_allclose(x_pred, pp_trace1['obs'].mean(axis=0), atol=1e-1)
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}
] | 3
|
pymc3/tests/test_shared.py
|
MaximeJumelle/pymc3
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Help Plugin Configuration Page."""
# Third party imports
from qtpy.QtWidgets import QGroupBox, QVBoxLayout
# Local imports
from spyder.config.base import _
from spyder.utils import icon_manager as ima
from spyder.api.preferences import PluginConfigPage
class HistoryConfigPage(PluginConfigPage):
"""Configuration page for History plugin."""
def get_icon(self):
"""Get icon to use in Configurations dialog."""
return ima.icon('history')
def setup_page(self):
"""Setup config page widgets and options."""
sourcecode_group = QGroupBox(_("Display"))
wrap_mode_box = self.create_checkbox(_("Wrap lines"), 'wrap')
linenumbers_mode_box = self.create_checkbox(_("Show line numbers"),
'line_numbers')
go_to_eof_box = self.create_checkbox(
_("Scroll automatically to last entry"), 'go_to_eof')
sourcecode_layout = QVBoxLayout()
sourcecode_layout.addWidget(wrap_mode_box)
sourcecode_layout.addWidget(linenumbers_mode_box)
sourcecode_layout.addWidget(go_to_eof_box)
sourcecode_group.setLayout(sourcecode_layout)
vlayout = QVBoxLayout()
vlayout.addWidget(sourcecode_group)
vlayout.addStretch(1)
self.setLayout(vlayout)
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}
] | 3
|
spyder/plugins/history/confpage.py
|
ximion/spyder
|
# Copyright 2019 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from craedl.core import Profile
__version__ = '0.5.5'
def auth():
return Profile()
def configure(token=None):
from craedl.__main__ import main
main(token)
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
craedl/__init__.py
|
craedl/craedl-sdk-python
|
import tempfile
from subprocess import call
from isbntools.app import get_isbnlike
from isbntools.app import get_canonical_isbn
from . import utils
from .errors import ISBNNotFoundError
def _pdf_to_text_tool(pdf_file, output, first_page, last_page):
first_page, last_page = map(str, (first_page, last_page))
call([
'pdftotext',
pdf_file,
'-f', first_page,
'-l', last_page,
'-q', # Don't print any messages or errors
output,
])
def _get_text_from_pdf(pdf_file, first_page=1, last_page=6):
with tempfile.NamedTemporaryFile() as temp_file:
output = temp_file.name
_pdf_to_text_tool(pdf_file, output, first_page, last_page)
return utils.get_text_from_file(output)
def get_isbn_from_pdf(pdf_file):
pdf_text = _get_text_from_pdf(pdf_file)
for isbn_like in get_isbnlike(pdf_text, level='normal'):
isbn = get_canonical_isbn(isbn_like)
if isbn:
return isbn
else:
raise ISBNNotFoundError(pdf_file)
def handler(file_path):
isbn = get_isbn_from_pdf(file_path)
return isbn
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
bookpy/pdfhandler.py
|
stsewd/bookpy
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 15:39, 20/04/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from opfunu.cec.cec2005.root import Root
from numpy import sum, dot, cos, exp, pi, e, sqrt
class Model(Root):
def __init__(self, f_name="Shifted Rotated Ackley's Function with Global Optimum on Bounds", f_shift_data_file="data_ackley",
f_ext='.txt', f_bias=-140, f_matrix=None):
Root.__init__(self, f_name, f_shift_data_file, f_ext, f_bias)
self.f_matrix = f_matrix
def _main__(self, solution=None):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2005 not support for problem size > 100")
return 1
if problem_size == 10 or problem_size == 30 or problem_size == 50:
self.f_matrix = "ackley_M_D" + str(problem_size)
else:
print("CEC 2005 F8 function only support problem size 10, 30, 50")
return 1
shift_data = self.load_shift_data()[:problem_size]
t1 = int(problem_size/2)
for j in range(0, t1-1):
shift_data[2*(j+1)-1] = -32 * shift_data[2*(j+1)]
matrix = self.load_matrix_data(self.f_matrix)
z = dot((solution - shift_data), matrix)
result = -20 * exp(-0.2 * sum(z ** 2) / problem_size) - exp(sum(cos(2 * pi * z))) + 20 + e
return result + self.f_bias
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
opfunu/cec/cec2005/F8.py
|
ElliottP-13/opfunu
|
"""
Sample Hook to provide helpful message that project generated successfully.
"""
from __future__ import print_function
import os
TERMINATOR = "\x1b[0m"
INFO = "\x1b[1;33m [INFO]: "
SUCCESS = "\x1b[1;32m [SUCCESS]: "
HINT = "\x1b[3;33m"
def remove_optional_files():
filenames = ["event.json"]
for file in filenames:
if os.path.isfile(file):
print(INFO + "Removing {} from project due to chosen options...".
format(file) + TERMINATOR)
os.remove(file)
return True
def main():
project_name = '{{ cookiecutter.project_name }}'
apigw_choice = '{{ cookiecutter.include_apigw }}'.lower()
if apigw_choice == 'n':
remove_optional_files()
print(SUCCESS +
"Project initialized successfully! You can now jump to {} folder".
format(project_name) + TERMINATOR)
print(INFO +
"{}/README.md contains instructions on how to proceed.".
format(project_name) + TERMINATOR)
if __name__ == '__main__':
main()
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
hooks/post_gen_project.py
|
adjogahm/cookiecutter-aws-sam-python
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from unittest import TestCase
from po_localization.strings import escape, unescape, UnescapeError
class EscapeTextCase(TestCase):
def test_empty(self):
self.assertEqual("", escape(""))
def test_simple(self):
self.assertEqual(r"First\nSecond", escape("First\nSecond"))
def test_multiple(self):
self.assertEqual(r"\a \b \f \n \r \t \v \\ \"", escape("\a \b \f \n \r \t \v \\ \""))
class UnescapeTestCase(TestCase):
def test_empty(self):
self.assertEqual("", unescape(""))
def test_simple(self):
self.assertEqual("First\nSecond", unescape(r"First\nSecond"))
def test_multiple(self):
self.assertEqual("\a \b \f \n \r \t \v \\ \"", unescape(r"\a \b \f \n \r \t \v \\ \""))
def test_octal_escape(self):
self.assertEqual("e", unescape(r"\145"))
self.assertEqual("€", unescape(r"\20254"))
def test_hexadecimal_escape(self):
self.assertEqual("e", unescape(r"\x65"))
self.assertEqual("€", unescape(r"\x20ac"))
self.assertEqual("€", unescape(r"\x20AC"))
self.assertEqual("€", unescape(r"\x20aC"))
def test_unfinished_escape(self):
self.assertRaises(UnescapeError, unescape, "\\")
def test_invalid_escape(self):
self.assertRaises(UnescapeError, unescape, "\\FAIL")
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
po_localization/tests/test_strings.py
|
movermeyer/po-localization
|
#!/usr/bin/env python3
# This script was created with the "basic" environment which does not support
# adding dependencies with pip.
# Taken from https://iterm2.com/python-api/examples/theme.html
import asyncio
import iterm2
async def update(connection, theme):
# Themes have space-delimited attributes, one of which will be light or dark.
parts = theme.split(" ")
if "dark" in parts:
preset = await iterm2.ColorPreset.async_get(connection, "base16-summerfruit-dark-256")
else:
preset = await iterm2.ColorPreset.async_get(connection, "base16-summerfruit-light-256")
# Update the list of all profiles and iterate over them.
profiles=await iterm2.PartialProfile.async_query(connection)
for partial in profiles:
# Fetch the full profile and then set the color preset in it.
profile = await partial.async_get_full_profile()
await profile.async_set_color_preset(preset)
async def main(connection):
app = await iterm2.async_get_app(connection)
await update(connection, await app.async_get_variable("effectiveTheme"))
async with iterm2.VariableMonitor(connection, iterm2.VariableScopes.APP, "effectiveTheme", None) as mon:
while True:
# Block until theme changes
theme = await mon.async_get()
await update(connection, theme)
iterm2.run_forever(main)
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
Library/Application Support/iTerm2/Scripts/AutoLaunch/change_color_preset_on_theme_change.py
|
timriley/dotfiles
|
from unittest import TestCase
from dataclass_bakery.generators import defaults
from dataclass_bakery.generators.random_int_generator import RandomIntGenerator
class TestRandomIntGenerator(TestCase):
def setUp(self):
self.random_int_generator = RandomIntGenerator()
def test_generate_int_ok(self):
random_int = self.random_int_generator.generate()
self.assertIsInstance(random_int, int)
def test_generate_int_correct_min_limit_ok(self):
min_limit = defaults.NUMBER_MAX_LIMIT - 1
random_int = self.random_int_generator.generate(
**{defaults.NUMBER_MIN_LIMIT_ARG: min_limit}
)
self.assertIsInstance(random_int, int)
self.assertTrue(min_limit <= random_int <= defaults.NUMBER_MAX_LIMIT)
def test_generate_int_correct_max_limit_ok(self):
max_limit = defaults.NUMBER_MIN_LIMIT + 1
random_int = self.random_int_generator.generate(
**{defaults.NUMBER_MAX_LIMIT_ARG: max_limit}
)
self.assertIsInstance(random_int, int)
self.assertTrue(defaults.NUMBER_MIN_LIMIT <= random_int <= max_limit)
def test_generate_int_incorrect_min_limit_ko(self):
with self.assertRaises(ValueError):
self.random_int_generator.generate(**{defaults.NUMBER_MIN_LIMIT_ARG: "asd"})
def test_generate_int_incorrect_max_limit_ko(self):
with self.assertRaises(TypeError):
self.random_int_generator.generate(**{defaults.NUMBER_MAX_LIMIT_ARG: "asd"})
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
src/tests/dataclass_bakery/generators/test_random_int_generator.py
|
miguelFLG13/dataclass-bakery
|
from django.test import TestCase
from dojo.tools.acunetix.parser import AcunetixParser
from dojo.models import Test
class TestAcunetixParser(TestCase):
def test_parse_without_file(self):
parser = AcunetixParser()
findings = parser.get_findings(None, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_finding(self):
testfile = open("dojo/unittests/scans/acunetix/one_finding.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_finding(self):
testfile = open("dojo/unittests/scans/acunetix/many_findings.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
dojo/unittests/tools/test_acunetix_parser.py
|
brunoduruzup/django-DefectDojo
|
from discord.ext import commands
import os
import discord
import random
token = 'token'
bot = discord.Client()
bot = commands.Bot(command_prefix='!')
bot.remove_command('help')
for file in os.listdir("cogs"):
if file.endswith(".py"):
name = file[:-3]
bot.load_extension(f"cogs.{name}")
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Game(name='with my dick'))
print('We have logged in as {0.user}'.format(bot))
@bot.command()
async def help(ctx):
embed = discord.Embed(title="Help Commands", description="List of commands that you can access from this bot.")
embed.add_field(name="!ping", value="sends a pong bitch message", inline=False)
embed.add_field(name="!say", value="literally repeats whatever argument you state", inline=False)
embed.add_field(name="!roll", value="roll now to get mapotofu", inline=False)
embed.add_field(name="!slap", value="slap someone (e.g !slap @Ghrek for being such a faggot)", inline=False)
embed.add_field(name="!8ball", value="ask a question and maybe you'll get an answer", inline=False)
await ctx.channel.send(embed=embed)
bot.run(token)
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
hat_bot.py
|
Ghrek/Hat
|
from uuid import uuid4
from sqlalchemy_jsonapi.errors import (RelationshipNotFoundError,
ResourceNotFoundError)
def test_200_result_of_to_one(post, client):
response = client.get(
'/api/blog-posts/{}/author/'.format(post.id)).validate(
200)
assert response.json_data['data']['type'] == 'users'
def test_200_collection_of_to_many(comment, client):
response = client.get('/api/blog-posts/{}/comments/'.format(
comment.post.id)).validate(200)
assert len(response.json_data['data']) > 0
def test_404_when_relationship_not_found(post, client):
client.get('/api/blog-posts/{}/last_comment/'.format(
post.id)).validate(404, RelationshipNotFoundError)
def test_404_when_resource_not_found(client):
client.get('/api/blog-posts/{}/comments/'.format(uuid4())).validate(
404, ResourceNotFoundError)
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
sqlalchemy_jsonapi/tests/test_related_get.py
|
jimbobhickville/sqlalchemy-jsonapi
|
import torch
import torch.nn as nn
class MNIST(nn.Module):
def __init__(self):
super(MNIST, self).__init__()
self.shared_encoder = torch.nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Flatten()
)
self.private_encoder = torch.nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Flatten()
)
self.clf = torch.nn.Sequential(
torch.nn.Linear(64*7*7*2, 512), # 乘2因为global_feat和local_feat拼在一起
torch.nn.ReLU(inplace=True),
torch.nn.Linear(512, 10)
)
def forward(self, x):
gFeature = self.shared_encoder(x)
lFeature = self.private_encoder(x)
feature = torch.cat((gFeature, lFeature), dim=-1)
output = self.clf(feature)
return output
if __name__ == '__main__':
model = MNIST()
_x = torch.rand((50, 1, 28, 28))
_output = model(_x)
print(f'{_x.shape}->{_output.shape}')
print("Parameters in total {}".format(sum(x.numel() for x in model.parameters())))
print("Comm.")
total = 0
for key, param in model.named_parameters():
if key.startswith('shared'):
total += param.numel()
print("Comm. Parameters {}".format(total))
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
models/fedsp/mnist/MNIST.py
|
tdye24/LightningFL
|
from crdt import CRDT
class DistributedCounter(CRDT):
def add(self, number):
return self + number
def remove(self, number):
return self - number
def inc(self):
"""
Increase the counters value by one
"""
return self + 1
def dec(self):
"""
Reduce the counters value by one
"""
return self - 1
def __abs__(self):
"""
Do the set operation and return the iterable over the result
"""
if self.state is None:
return self.value
if self.value is None:
return self.state
return self.value + self.state
def __repr__(self):
return "%s" % self.__abs__()
def __add__(self, number):
if isinstance(self.state, (int, long, float, complex)):
self._update(self.state + number)
else:
self._update(number)
return self
def __sub__(self, number):
if isinstance(self.state, (int, long, float, complex)):
self._update(self.state - number)
else:
self._update(-number)
return self
def _parse(self, data):
return data.json()['rows'][0]['value']
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
couchcrdt/counter.py
|
drsm79/couch-crdt
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=9
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.X.on(input_qubit[1])) # number=5
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=4
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=6
c.append(cirq.X.on(input_qubit[1])) # number=7
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=8
c.append(cirq.X.on(input_qubit[1])) # number=3
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq85.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
data/p2DJ/New/R2/benchmark/startCirq85.py
|
UCLA-SEAL/QDiff
|
# web_app/routes/company_routes.py
import pandas as pd
from flask import Blueprint, jsonify, request, render_template #, flash, redirect
from web_app.models import *
company_routes = Blueprint("company_routes", __name__)
@company_routes.route("/div_yield")
def seeDivYield():
return render_template("highest_DivYield_charts.html")
@company_routes.route("/highest_increasing_divs")
def seecompanies_w_highest_dividend_increases():
return render_template("companies_w_highest_dividend_increases.html")
@company_routes.route("/most_affordable_div_payers")
def seemost_affordable_div_payers():
return render_template("most_affordable.html")
@company_routes.route("/companies")
def list_companies_for_humans():
return render_template("All_SP500.html", message="Here's all the companies on the S&P 500",
companies=get_AllCompanies())
@company_routes.route("/test")
def seeTEST():
return render_template("test.html", message="Here's all the companies on the S&P 500")
def get_AllCompanies():
all = Company_Info.query.all()
names = [record.Company_Name for record in all]
return names
def createCompanyInfoTable(): # ran once
SandP500 = pd.read_csv('../DIYInvestmentPrimer/SandP_500_companies.csv')
for x in range(0, len(SandP500)):
db.create_all()
company_entry = Company_Info.query.get
(Company_Info(Company_Name=SandP500['Security'][x],
Company_Ticker=SandP500['Symbol'][x],
Sector=SandP500['GICS Sector'][x],
SubIndustry=SandP500['GICS Sub-Industry'][x],
HQ_Location=SandP500['Headquarters Location'][x],
Date_first_added_to_SP500=SandP500['Date first added'][x],
Founded=SandP500['Founded'][x]))
db.session.add(company_entry)
db.session.commit()
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
web_app/routes/company_routes.py
|
ssbyrne89/DIYInvestmentPrimer
|
# ------ Python standard library imports ---------------------------------------
from typing import Optional
import os
# ------ External imports ------------------------------------------------------
# ------ Imports from own package or module ------------------------------------
from movieverse.movieverse import Movieverse
from movieverse.metadatalib import MetaDataLib
#-------------------------------------------------------------------------------
def _dataset_directory():
fallback = os.path.join(os.path.expanduser("~"), ".movieverse_data")
dir_ = os.environ.get("MOVIEVERSE_DATASET_DIR", fallback)
if not os.path.exists(dir_):
os.makedirs(dir_)
return dir_
def load_movielens(dataset: str = '100k',
movieverse_name: Optional[str] = None,
directory: str = '') -> Movieverse:
path = directory or _dataset_directory()
if dataset == '100k':
url = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
movieverse/public_datasets.py
|
KoenBaak/movieverse
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 22 18:44:02 2017
@author: Tirthajyoti Sarkar
Simple selection sort with counter for total number of operations (finding minimum and swapping)
Accepts user input on minimum and maximum bound of the array and the size of the array.
"""
import random
def find_min(array):
n=len(array)
r = array[0]
count=0
for i in range(1,n):
count+=1
if r>array[i]:
r=array[i]
return(r,count)
def selection_sort(array):
n=len(array)
num_op=0
# Iterate over the length of the array, pushing smaller values to the left
for i in range(n):
# Scan the array from i-th element (where the iterator is currently) to the end for minimum
m,c_min=find_min(array[i:n])
# IMPORTANT: Get the index of the minimum element w.r.t. to the main array
m_index=array[i:n].index(m)+i
# If the first element of the unsorted portion i.e. i-th element> minimum, then SWAP
if (array[i]>m):
# Print statement for examining minimum and its index, Troubleshooting
#print("Minimum found {} at position {}. Swapping positions {} and {}".format(m,m_index,i,m_index))
temp=array[i]
array[i]=m
array[m_index]=temp
num_op+=(c_min+1)
print(array)
else:
pass
return (array,num_op)
# User inputs for generating the random arrays
mini = int(input("Enter the minimum bound:"))
maxi = int(input("Enter the maximum bound:"))
num = int(input("Enter the size:"))
# Create random array based on user-specified minimum/maximum bounds and number of elements
a= []
for i in range(num):
a.append(random.randint(mini,maxi))
print("\nInitial array:",a)
# Get the sorted array back along with the count of # of operations it took to sort
sorted_array,n_op=selection_sort(a)
print("Sorted array: {}\nTook {} operations".format(sorted_array,n_op))
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
Selection_sort.py
|
anantvikram/General_Code
|
"""Simple quantum computations simulation."""
import numpy as np
def I():
"""Identity operator."""
return np.identity(2)
def X():
"""X-rotation, negation operator."""
return np.identity(2)[..., ::-1]
def H():
"""Adamara operator, superposition."""
return np.array([[1, 1], [1, -1]]) / np.sqrt(2)
def SWAP():
"""Swap 2 qubits"""
m = np.identity(4)
m[[1, 2]] = m[[2, 1]]
return m
def CX():
"""Controlled negation."""
m = np.identity(4)
m[[3, 2]] = m[[2, 3]]
return m
def apply(v, *gates):
m = gates[0]
gates = gates[1:]
for gate in gates:
m = np.kron(gate, m)
return m.dot(v)
def observe(v):
v2 = np.absolute(v) ** 2
c = np.random.choice(v.size, 1, p=v2)
return c[0]
# Usage example
# create 3 qubits in state 000, array size 2 ^ n
a = np.array([1, 0, 0, 0, 0, 0, 0, 0])
# transform the 2nd qubit into a superposition of 0 and 1
a = apply(a, I(), H(), I())
# entangle the 1st and 2nd qubit
a = apply(a, CX(), I())
# swap the 2nd and 3rd qubit
a = apply(a, I(), SWAP())
# observe the state
observe(a)
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
quantum.py
|
duboviy/misc
|
import unittest
import user
import task
import project
C_NAME = "Test Name"
t1 = task.Task("t1", 1, 'text')
t2 = task.Task("t2", 2, 'text2')
C_TASKS = [t1, t2]
p1 = project.Project
C_PROJECTS = [p1]
class UserTest(unittest.TestCase):
def setUp(self):
self.u = user.User()
self.u.name = C_NAME
t1.add_user_id(self.u.userid)
t2.add_user_id(self.u.userid)
def test_str(self):
self.assertTrue(str(C_NAME) in str(self.u))
def test_name(self):
self.assertEqual(self.u.name, C_NAME)
self.u.name = "Peter Goosens"
self.assertEqual(self.u.name, "Peter Goosens")
if __name__ == "__main__":
unittest.main()
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
}
] | 3
|
tests/test_user.py
|
MeViMo/TeamRocket
|
"""Helper functions for the various strategies
"""
import structlog
class StrategyUtils():
"""Helper functions for the various strategies
"""
def __init__(self):
"""Initializes Utils class
"""
self.logger = structlog.get_logger()
def get_high_prices(self, historical_data):
"""Returns high prices within a specified time frame for a symbol pair
Args:
historical_data (list): A matrix of historical OHLCV data.
Returns:
list: A list of high prices extracted from the OHLCV data
"""
high_prices = []
for data_point in historical_data:
high_prices.append(data_point[2])
return high_prices
def get_low_prices(self, historical_data):
"""Returns low prices within a specified time frame for a symbol pair
Args:
historical_data (list): A matrix of historical OHLCV data.
Returns:
list: A list of low prices extracted from the OHLCV data
"""
low_prices = []
for data_point in historical_data:
low_prices.append(data_point[3])
return low_prices
def get_closing_prices(self, historical_data):
"""Returns closing prices within a specified time frame for a symbol pair
Args:
historical_data (list): A matrix of historical OHLCV data.
Returns:
list: A list of closing prices extracted from the OHLCV data
"""
closing_prices = []
for data_point in historical_data:
closing_prices.append(data_point[4])
return closing_prices
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
app/indicators/analyzers/utils.py
|
cristian-codorean/crypto-signal
|
from . import commands
from re import match
from logging import debug
from .commandtools import execute
def command_mode(document):
pass
commands.commandmode = command_mode
def publics(obj):
"""Return all objects in __dict__ not starting with '_' as a dict"""
return dict((name, obj) for name, obj in vars(obj).items() if not name.startswith('_'))
def get_scope(document):
scope = publics(commands)
scope.update({'self': document})
return scope
def get_completions(document, string):
"""Get completions given a string."""
yield (string, '')
scope = get_scope(document)
# If string is a completable identifier
if match(r'^[\w.]*$', string) == None:
debug('no identifier')
return
split_result = string.rsplit('.', 1)
debug(split_result)
if len(split_result) == 2:
obj_name, attr = split_result
try:
obj = eval(obj_name, scope)
except NameError:
return
else:
scope = publics(obj)
for name, obj in scope.items():
if name.lower().startswith(attr.lower()):
yield (obj_name + '.' + name, repr(obj))
else:
for name, obj in scope.items():
if name.lower().startswith(string.lower()):
yield (name, repr(obj))
def evaluate(document, command):
"""Evaluate a command."""
scope = get_scope(document)
try:
result = eval(command, scope)
except SyntaxError:
# Probably command is a statement, not an expression
try:
exec(command, scope)
except Exception as e:
return command + ' : ' + str(e)
except Exception as e:
return command + ' : ' + str(e)
else:
return execute(result, document)
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}
] | 3
|
fate/commandmode.py
|
Mattias1/fate
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: owefsad@huoxian.cn
# datetime: 2021/7/16 下午12:17
# project: dongtai-engine
from dongtai.utils import const
class Replay:
"""
封装重放操作为单独的类
"""
def __init__(self, replay):
self.replay = replay
self.vul = None
@staticmethod
def do_replay(replay):
replay_handler = Replay(replay)
status = replay_handler.has_relation_id()
if status is False:
pass
def has_relation_id(self):
return self.replay.relation_id is None
@staticmethod
def replay_failed(replay, timestamp):
"""
当重放请求处理失败时,执行该方法
"""
replay.update_time = timestamp
replay.verify_time = timestamp
replay.state = const.SOLVED
replay.result = const.RECHECK_ERROR
replay.save(update_fields=['update_time', 'verify_time', 'state', 'result'])
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
core/replay.py
|
Maskhe/DongTai-engine
|
"Example extension, also used for testing."
from idlelib.config import idleConf
ztext = idleConf.GetOption('extensions', 'ZzDummy', 'z-text')
class ZzDummy:
## menudefs = [
## ('format', [
## ('Z in', '<<z-in>>'),
## ('Z out', '<<z-out>>'),
## ] )
## ]
def __init__(self, editwin):
self.text = editwin.text
z_in = False
@classmethod
def reload(cls):
cls.ztext = idleConf.GetOption('extensions', 'ZzDummy', 'z-text')
def z_in_event(self, event):
"""
"""
text = self.text
text.undo_block_start()
for line in range(1, text.index('end')):
text.insert('%d.0', ztest)
text.undo_block_stop()
return "break"
def z_out_event(self, event): pass
ZzDummy.reload()
##if __name__ == "__main__":
## import unittest
## unittest.main('idlelib.idle_test.test_zzdummy',
## verbosity=2, exit=False)
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
toolchain/riscv/MSYS/python/Lib/idlelib/zzdummy.py
|
zhiqiang-hu/bl_iot_sdk
|
#!/usr/bin/env python
# encoding: utf-8
"""
Plot distributions of difference pixels.
"""
import os
import numpy as np
import astropy.io.fits
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import matplotlib.gridspec as gridspec
def plot_diffs(mosaic_doc, plot_dir):
"""Make diff pixels histogram plots for all differences in the given
mosaic document.
Parameters
----------
mosaic_doc : dict
The document from MosaicDB for this mosaic.
plot_dir : str
Directory to save plots to.
"""
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
for pair_key, diff in mosaic_doc['couplings']['diff_paths'].iteritems():
median = mosaic_doc['couplings']['diffs'][pair_key]
sigma = mosaic_doc['couplings']['sigmas'][pair_key]
plot_path = os.path.join(plot_dir, pair_key)
plot_diff(diff, median, sigma, plot_path)
def plot_diff(diff_path, median, sigma, plot_path):
"""Plot histogram of the difference image."""
fits = astropy.io.fits.open(diff_path)
pixels = fits[0].data
pixels = pixels[np.isfinite(pixels)].ravel()
fig = Figure(figsize=(3.5, 3.5))
canvas = FigureCanvas(fig)
gs = gridspec.GridSpec(1, 1, left=0.15, right=0.95, bottom=0.15, top=0.95,
wspace=None, hspace=None, width_ratios=None, height_ratios=None)
ax = fig.add_subplot(gs[0])
ax.hist(pixels, 1000, histtype='stepfilled',
edgecolor='None', facecolor='dodgerblue')
ax.axvline(median, ls='-', c='k', lw=2)
ax.axvline(median - sigma, ls='--', c='k', lw=1)
ax.axvline(median + sigma, ls='--', c='k', lw=1)
ax.text(0.1, 0.9, r"$%.2f \pm %.2f$" % (median, sigma),
ha='left', va='top',
transform=ax.transAxes)
ax.set_xlim(median - 3 * sigma, median + 3 * sigma)
gs.tight_layout(fig, pad=1.08, h_pad=None, w_pad=None, rect=None)
canvas.print_figure(plot_path + ".pdf", format="pdf")
fits.close()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
skyoffset/diffplot.py
|
jonathansick/skyoffset
|
from __future__ import division
import unittest
import numpy as np
from wyrm.types import Data, BlockBuffer
from wyrm.processing import append_cnt
from functools import reduce
class TestBlockBuffer(unittest.TestCase):
def setUp(self):
self.empty_dat = Data(np.array([]), [], [], [])
self.dat_1 = Data(np.array([0, 0])[np.newaxis, :], [np.array([0]), np.array(['ch1', 'ch2'])], ['time', 'channel'], ['ms', '#'])
self.dat_1.fs = 1000
self.dat_1.markers = [[0, 'x']]
self.dat_5 = reduce(append_cnt, [self.dat_1 for i in range(5)])
def test_append_empty(self):
"""Appending several emtpy dats must not modify the Block Buffer."""
b = BlockBuffer(5)
b.append(self.empty_dat)
b.append(self.empty_dat)
b.append(self.empty_dat)
b.append(self.empty_dat)
b.append(self.empty_dat)
b.append(self.empty_dat)
self.assertEqual(self.empty_dat, b.get())
def test_append_until_full(self):
"""Appending fractions of block_length, must accumulate in the buffer until block_length is reached."""
b = BlockBuffer(5)
for i in range(4):
b.append(self.dat_1)
ret = b.get()
self.assertEqual(self.empty_dat, ret)
b.append(self.dat_1)
ret = b.get()
self.assertEqual(self.dat_5, ret)
def test_append_with_markers(self):
"""Check if markers are handled correctly."""
markers = [[i, 'x'] for i in range(5)]
b = BlockBuffer(5)
for i in range(4):
b.append(self.dat_1)
ret = b.get()
self.assertEqual(self.empty_dat, ret)
b.append(self.dat_1)
ret = b.get()
self.assertEqual(ret.markers, markers)
if __name__ == '__main__':
unittest.main()
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
test/test_blockbuffer.py
|
jscastanoc/wyrm
|
import boto3
def get_event_client(access_key, secret_key, region):
"""
Returns the client object for AWS Events
Args:
access_key (str): AWS Access Key
secret_key (str): AWS Secret Key
region (str): AWS Region
Returns:
obj: AWS Cloudwatch Event Client Obj
"""
return boto3.client(
"events",
region_name=region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
def check_rule_exists(rule_name, access_key, secret_key, region):
"""
Check whether the given cloudwatch rule already exists in AWS account
Args:
rule_name (str): Cloudwatch rule name
access_key (str): AWS Access Key
secret_key (str): AWS Secret Key
region (str): AWS Region
Returns:
Boolean: True if env exists else False
"""
client = get_event_client(access_key, secret_key, region)
try:
response = client.describe_rule(Name=rule_name)
return True if response else False
except:
return False
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}
] | 3
|
installer/core/providers/aws/boto3/cloudwatch_event.py
|
dabest1/pacbot
|
code-judge-ast-python-15k-it1
15k examples, each pairing a Python source file with yes/no structural-property questions whose ground truth is derived deterministically from the file's AST. Each example carries 3 points (questions), and the questions are balanced so that every property has a 40-60% positive rate.
Source: The Stack v1 (deduplicated Python).
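As a rough illustration of how this kind of ground truth can be computed, the sketch below evaluates two of the property ids that appear in the records above (`every_function_has_docstring` and `has_nested_function_def`) with Python's standard `ast` module. This is only a minimal sketch under the obvious reading of each property id; it is not the dataset's actual labeling code, and the function names are illustrative.

```python
import ast

# Node types counted as "functions" for these checks.
FUNC_NODES = (ast.FunctionDef, ast.AsyncFunctionDef)

def every_function_has_docstring(source: str) -> bool:
    # True if every (sync or async) function in the file has a docstring.
    tree = ast.parse(source)
    funcs = [n for n in ast.walk(tree) if isinstance(n, FUNC_NODES)]
    return all(ast.get_docstring(f) is not None for f in funcs)

def has_nested_function_def(source: str) -> bool:
    # True if any function definition appears inside another function's body.
    tree = ast.parse(source)
    for outer in ast.walk(tree):
        if isinstance(outer, FUNC_NODES):
            for inner in ast.walk(outer):
                if inner is not outer and isinstance(inner, FUNC_NODES):
                    return True
    return False

if __name__ == "__main__":
    sample = "def outer():\n    def inner():\n        pass\n    return inner\n"
    print(every_function_has_docstring(sample))  # False: neither function has a docstring
    print(has_nested_function_def(sample))       # True: inner is defined inside outer
```

Because these checks only parse the source, the labels are reproducible without executing any of the sampled files, which is presumably what "deterministic AST ground truth" refers to.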