source
string | points
list | n_points
int64 | path
string | repo
string |
|---|---|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.models import EventUser, GroupTagValue, TagValue
from sentry.testutils import TestCase
class GroupTagValueSerializerTest(TestCase):
def test_with_user(self):
user = self.create_user()
project = self.create_project()
euser = EventUser.objects.create(
project=project,
email='foo@example.com',
)
tagvalue = TagValue.objects.create(
project=project,
key='sentry:user',
value=euser.tag_value,
)
grouptagvalue = GroupTagValue.objects.create(
project_id=project.id,
group_id=self.create_group(project=project).id,
key=tagvalue.key,
value=tagvalue.value,
)
result = serialize(grouptagvalue, user)
assert result['id'] == six.text_type(grouptagvalue.id)
assert result['key'] == 'user'
assert result['value'] == grouptagvalue.value
assert result['name'] == euser.get_label()
def test_with_no_tagvalue(self):
user = self.create_user()
project = self.create_project()
grouptagvalue = GroupTagValue.objects.create(
project_id=project.id,
group_id=self.create_group(project=project).id,
key='sentry:user',
value='email:foo@example.com',
)
result = serialize(grouptagvalue, user)
assert result['id'] == six.text_type(grouptagvalue.id)
assert result['key'] == 'user'
assert result['value'] == grouptagvalue.value
assert result['name'] == grouptagvalue.value
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
tests/sentry/api/serializers/test_grouptagvalue.py
|
seukjung/sentry-custom
|
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
# importar modelo
from Login.models import Example2
# importar serializer
from Login.serializer import Example2Serializers
class ExampleList2(APIView):
def get(self, request, format=None):
print("Metodo get filter")
queryset = Example2.objects.filter(delete=False)
# many=True Si aplica si retorno multiples objetos
serializer = Example2Serializers(queryset, many=True)
return Response(serializer.data)
def post(self, request, format=None):
serializer = Example2Serializers(data=request.data)
if serializer.is_valid():
serializer.save()
datas = serializer.data
return Response(datas)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CustonAuthToken(ObtainAuthToken):
def post(self, request, * args, **kwars):
serializer = self.serializer_class(data=request.data,
context={
'request': request,
}
)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
return Response({
'token': token.key,
'user_id': user.pk,
'username': user.username
})
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
Login/views.py
|
Carlos-Caballero/cs01
|
import data
import rally_api
from typing import List, Optional
from fastapi import APIRouter, Query
from .models import CoinPrice, CoinPrices
router = APIRouter(prefix="/coins", tags=["coins"])
@router.get("/{coin}/price", response_model=CoinPrice)
async def read_price(coin: str, include_24hr_change: Optional[bool] = False):
price = rally_api.get_current_price(coin)
if not include_24hr_change:
return {"coinKind": coin, "priceInUSD": price["priceInUSD"]}
last_24hr = data.get_last_24h_price(coin)
percentage_24h_change = (
(float(price["priceInUSD"]) - float(last_24hr["priceInUSD"]))
/ float(last_24hr["priceInUSD"])
) * 100
return {
"coinKind": coin,
"priceInUSD": str(price["priceInUSD"]),
"usd_24h_change": str(percentage_24h_change),
}
@router.get("/{coin}/historical_price", response_model=List[CoinPrices])
async def read_prices(
coin: str,
limit: Optional[int] = Query(
None,
title="Query string",
description="Maximum number of data points to return",
),
):
return list(reversed([prices for prices in data.get_coin_prices(coin, limit)]))
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
rallyrolebot/api/price_data.py
|
Ju99ernaut/RallyRoleBot
|
import math
from time import perf_counter
def is_prime(num):
if num == 2:
return True
if num <= 1 or not num % 2:
return False
for div in range(3,int(math.sqrt(num)+1),2):
if not num % div:
return False
return True
def benchtest():
start = perf_counter()
for i in range(10000000):
is_prime(i)
end = perf_counter()
print (start)
print (end)
benchtest()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
Python/pybenchmark.py
|
magnopaz/benchmarks
|
import gc
from adastra_analysis.common.dataset import Dataset
from adastra_analysis.common.run import Run
from adastra_analysis.runs.util import tfidf_utils
from adastra_analysis.runs.util import wordcloud_utils
class Wordcloud(Run):
"""
"""
def __init__(
self,
name,
file,
dataset,
image,
documents_col,
where,
countvectorizer_args,
wordcloud_args,
):
self.name = name
self.file = file
self.dataset = dataset
self.image = image
self.documents_col = documents_col
self.where = where
self.countvectorizer_args = countvectorizer_args
self.wordcloud_args = wordcloud_args
def build(self, datasets):
"""
"""
_data = Dataset(**self.dataset).build_dataset(datasets=datasets)
_term_freqs = tfidf_utils.get_term_freqs(
data=_data.copy(),
doc_col=self.documents_col,
countvectorizer_args=self.countvectorizer_args
)
# The documents are sourced by a subset of rows in the dataset.
# Use these to build TF-IDF word frequencies.
_filtered_term_freqs = tfidf_utils.filter_term_freqs(
_term_freqs,
where=self.where
)
_word_freqs = tfidf_utils.build_filtered_tfidf_word_freqs(
_term_freqs, _filtered_term_freqs
)
return wordcloud_utils.word_freqs_to_wordcloud(
_word_freqs,
image=self.image,
wordcloud_args=self.wordcloud_args
)
def save(self, result):
"""
"""
self.prepare_directories(self.file)
result.to_file(self.file)
# Reset the wordcloud to prevent memory overflows.
result = None
gc.collect()
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
adastra_analysis/runs/wordcloud.py
|
jayckaiser/adastra-analysis
|
import sys
import argparse
import logging
from typing import List
from pacos2.mock.best_effort_actor import BestEffortActor
from pacos2.mock.impulses import PeriodicImpulse, IDiscreteImpulse, IClock
from pacos2.message import Message, Address
from pacos2.impul_discr_evt_engine import (
ImpulsiveDiscreteEventEngine, DiscreteEventEngine)
from pacos2.manual_clock import ManualClock
from pacos2.discr_policies import MsgReadyPolicy
from pacos2.msg_routers import SingleEngineRouter
def _gen_trigger_msgs(_: IDiscreteImpulse, clock: IClock) -> Message:
return [Message(None, Address(actor='a', pin='trigger'), time=clock.time)]
def _gen_data_msgs(_: IDiscreteImpulse, clock: IClock) -> Message:
return [Message(None, Address(actor='a', pin='data'), time=clock.time)]
def run():
print('=== besteffort-dropping ===')
engine = ImpulsiveDiscreteEventEngine(
DiscreteEventEngine(msg_ready_policy=MsgReadyPolicy()))
actor = BestEffortActor('a')
engine.discr_engine.add_actor(actor)
engine.add_impulse(PeriodicImpulse(_gen_data_msgs))
# TODO TBC continue here
engine.add_impulse(PeriodicImpulse(_gen_trigger_msgs, interval=2))
router = SingleEngineRouter(ManualClock(), engine)
for _ in range(5):
interval = engine.step(router)
if interval > 0:
router.clock.advance(interval)
else:
break
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--log", default='WARNING')
args = parser.parse_args(sys.argv[1:])
logging.basicConfig(format='%(levelname)s:%(message)s',
level=logging.getLevelName(args.log.upper()))
run()
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
}
] | 3
|
pacos3/examples/besteffort/dropping.py
|
jadnohra/PaCoS
|
from http import HTTPStatus
from unittest import TestCase
from pyrdf4j.api_graph import APIGraph
from pyrdf4j.errors import URINotReachable
from pyrdf4j.rdf4j import RDF4J
from tests.constants import AUTH, RDF4J_BASE_TEST
class TestEmpty(TestCase):
def setUp(self):
self.rdf4j = RDF4J(RDF4J_BASE_TEST)
self.rdf4j.create_repository('test_bulk_load', auth=AUTH['admin'], overwrite=True, repo_type='native')
self.response_code_ok = HTTPStatus.OK
def tearDown(self):
sparql_endpoint = self.rdf4j.drop_repository('test_bulk_load', auth=AUTH['admin'], accept_not_exist=True)
def test_empty(self):
response = self.rdf4j.bulk_load_from_uri(
'test_bulk_load',
'https://opendata.potsdam.de/api/v2/catalog/exports/ttl',
'application/x-turtle',
auth=AUTH['admin'],
)
response = self.rdf4j.empty_repository('test_bulk_load', auth=AUTH['admin'])
QUERY = "CONSTRUCT {?s ?o ?p} WHERE {?s ?o ?p}"
response = self.rdf4j.get_triple_data_from_query(
'test_bulk_load',
QUERY,
auth=AUTH['viewer'],
)
self.assertTrue('Potsdam' not in response.decode('utf-8'))
class TestEmptyGraph(TestEmpty):
def setUp(self):
self.rdf4j = RDF4J(RDF4J_BASE_TEST, api=APIGraph)
self.rdf4j.create_repository('test_bulk_load', auth=AUTH['admin'], overwrite=True)
self.response_code_ok = HTTPStatus.NO_CONTENT
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
tests/test_empty_repo.py
|
BB-Open/datenadler_rdf4j
|
import json
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class APIClient:
__API_URL = 'https://api.1inch.exchange/v2.0/'
def __init__(self, api_url=__API_URL):
self.api_url = api_url
self.request_timeout = 60
self.session = requests.Session()
retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[502, 503, 504])
self.session.mount('http://', HTTPAdapter(max_retries=retries))
def __request(self, url):
try:
response = self.session.get(url, timeout=self.request_timeout)
response.raise_for_status()
content = json.loads(response.content.decode('utf-8'))
return content
except Exception as e:
try:
content = json.loads(response.content.decode('utf-8'))
raise ValueError(content)
except json.decoder.JSONDecodeError:
pass
raise
def isLife(self):
request_url = f"{self.api_url}healthcheck"
return self.__request(request_url)
# Approve
def getCallData(self):
request_url = f"{self.api_url}approve/calldata"
return self.__request(request_url)
def getSpenderAddress(self):
request_url = f"{self.api_url}approve/spender"
return self.__request(request_url)
# Quote/Swap
def getQuote(self):
request_url = f"{self.api_url}quote"
return self.__request(request_url)
def swap(self):
request_url = f"{self.api_url}swap"
return self.__request(request_url)
# Protocols
def getProtocols(self):
request_url = f"{self.api_url}protocols"
return self.__request(request_url)
# Tokens
def getTokens(self):
request_url = f"{self.api_url}tokens"
return self.__request(request_url)
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
py1inch/py1inch.py
|
hmallen/py1inch
|
import arrow
def getCurrentTime(obj, eng):
obj['data']['currentTime'] = arrow.now().to('Asia/Kolkata').format('DD-MMM-YYYY HH:mm:ss ZZ')
def getZoneTime(obj, eng):
obj['data']['zonedTime'] = arrow.now().to(obj['tz']).format('DD-MMM-YYYY HH:mm:ss ZZ')
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
engine/library/clock.py
|
rudimk/celery-workflow-experiments
|
from appium.webdriver.common.mobileby import MobileBy
from app_APPium_test.src.BasePage import BasePage
from app_APPium_test.src.Manual_add import Manual_add
class Add_Member(BasePage):
def go_to_Manual_add(self):
# self.driver.find_element(MobileBy.XPATH, '//*[@text="手动输入添加"]').click()
self.click(MobileBy.XPATH, '//*[@text="手动输入添加"]')
return Manual_add(self.driver)
def get_add_Toast(self):
# ele = self.driver.find_element(MobileBy.XPATH, '//*[@class="android.widget.Toast"]').text
ele = self.text(MobileBy.XPATH, '//*[@class="android.widget.Toast"]')
return ele
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
app_APPium_test/src/Add_Member.py
|
XuXuClassMate/My_Test_PyProject
|
import aiokafka
from aiokafka.helpers import create_ssl_context
from service import config
from service.entities import Event
async def kafka_producer_factory(config):
if config["ssl_context"]:
config = dict(config, ssl_context=create_ssl_context(**config["ssl_context"]))
producer = aiokafka.AIOKafkaProducer(**config)
await producer.start()
return producer
async def kafka_consumer_factory(topic, config):
if config["ssl_context"]:
config = dict(config, ssl_context=create_ssl_context(**config["ssl_context"]))
consumer = aiokafka.AIOKafkaConsumer(topic, **config)
await consumer.start()
return consumer
async def put_results_to_kafka(producer: aiokafka.AIOKafkaProducer, event: Event):
await producer.send_and_wait(config.KAFKA_TOPIC, event.serialize())
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
service/kafka.py
|
tbicr/sites-availability-checker
|
from queue_interface import QueueInterface
from src.list.node import Node
class LinkedQueueImproved(QueueInterface):
""" implementation of a queue using a linked list """
def __init__(self):
""" create an empty queue """
self.length = 0
self.head = None
self.tail = None
def isEmpty(self):
""" check if the queue is empty """
return (self.length == 0)
def insert(self, cargo):
""" insert a new node a the end of the queue: O(1) """
node = Node(cargo)
node.next = None
if self.length == 0:
self.head = self.tail = node
else:
tail = self.tail
tail.next = node
self.tail = node
self.length = self.length + 1
def remove(self):
""" remove and return the node at the top of the queue: O(1) """
if self.isEmpty(): return
cargo = self.head.cargo
self.head = self.head.next
self.length = self.length - 1
if self.length == 0:
self.tail = None
return cargo
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}
] | 3
|
python/src/queues/linked_queue_improved.py
|
marioluan/abstract-data-types
|
# Python program for Bitonic Sort. Note that this program
# works only when size of input is a power of 2.
# The parameter dir indicates the sorting direction, ASCENDING
# or DESCENDING; if (a[i] > a[j]) agrees with the direction,
# then a[i] and a[j] are interchanged.
def compAndSwap(a, i, j, dire):
if (dire == 1 and a[i] > a[j]) or (dire == 0 and a[i] < a[j]):
a[i], a[j] = a[j], a[i]
# It recursively sorts a bitonic sequence in ascending order,
# if dir = 1, and in descending order otherwise (means dir=0).
# The sequence to be sorted starts at index position low,
# the parameter cnt is the number of elements to be sorted.
def bitonic_merge(a, low, cnt, dire):
if cnt > 1:
k = int(cnt / 2)
for i in range(low, low + k):
compAndSwap(a, i, i + k, dire)
bitonic_merge(a, low, k, dire)
bitonic_merge(a, low + k, k, dire)
# This function first produces a bitonic sequence by recursively
# sorting its two halves in opposite sorting orders, and then
# calls bitonic_merge to make them in the same order
def bitonic_sort(a, low, cnt, dire):
if cnt > 1:
k = int(cnt / 2)
bitonic_sort(a, low, k, 1)
bitonic_sort(a, low + k, k, 0)
bitonic_merge(a, low, cnt, dire)
# Caller of bitonic_sort for sorting the entire array of length N
# in ASCENDING order
def sort(a, N, up):
bitonic_sort(a, 0, N, up)
if __name__ == "__main__":
n = int(input().strip())
a = [int(input().strip()) for _ in range(n)]
up = 1
sort(a, n, up)
print("\n\nSorted array is")
for i in range(n):
print("%d" % a[i])
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
sorts/bitonic_sort.py
|
sourcery-ai-bot/Python
|
from datetime import datetime
from celery.signals import task_postrun, task_prerun
from arcusd.contracts import Contract
from arcusd.data_access.tasks import save_task_info, update_task_info
@task_prerun.connect
def task_before_run(task_id, task, *args, **kwargs):
request_id = task.request.kwargs.get('request_id', task_id)
task_info = dict(
task_id=task_id,
task_sender=task.request.origin,
task_args=task.request.args,
task_kwargs=task.request.kwargs,
task_retries=task.request.retries,
task_start=datetime.utcnow(),
request_id=request_id,
)
save_task_info(task_info)
@task_postrun.connect
def task_after_run(task_id, task, retval, state, *args, **kwargs):
request_id = task.request.kwargs.get('request_id', task_id)
task_info = dict(
task_state=state,
task_eta=task.request.eta,
task_end=datetime.utcnow(),
)
if isinstance(retval, Contract):
task_info['task_retval'] = retval.to_dict()
update_task_info({'request_id': request_id}, task_info)
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
arcusd/daemon/arcusd_signals.py
|
cuenca-mx/arcusd
|
from discord.ext import commands
import datetime
from discord.ext.commands.errors import MissingRequiredArgument, CommandNotFound
class Manager(commands.Cog):
""" Manage the bot """
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print(f'Estamos conectados como {self.bot.user}')
now = datetime.datetime.now()
print(f'Conexão iniciada: {now.strftime("%d/%m/%Y às %H:%M:%S")}')
print('>' + '-'*34 + '<')
@commands.Cog.listener()
async def on_message(self, message):
if message.author == self.bot.user:
return
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if isinstance(error, MissingRequiredArgument):
await ctx.send("Favor enviar todos os Argumentos. Digite \\help para ver os parâmetros de cada comando")
elif isinstance(error, CommandNotFound):
await ctx.send("O comando não existe. Digite \\help para ver todos os comandos")
else:
raise error
def setup(bot):
bot.add_cog(Manager(bot))
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
Zephyrus/manager.py
|
MateusCohuzer/Zephyrus-Discord-Bot
|
import sys
def inputli():
return list(map(int,input().split()))
def inputls():
return input().split()
def inputlf():
return list(map(float,input().split()))
N = 500003
tree = [0] * (2 * N)
def make_tree(arr) :
for i in range(n) :
tree[n + i] = arr[i]
for i in range(n - 1, 0, -1) :
tree[i] = min(tree[i << 1] ,tree[i << 1 | 1])
def update(p, value) :
tree[p + n] = value
p = p + n
i = p
while i > 1 :
tree[i >> 1] = min(tree[i],tree[i ^ 1])
i >>= 1
def query(l, r) :
res = 10e9+1
l += n
r += n
while l < r :
if (l & 1) :
res = min(res,tree[l])
l += 1
if (r & 1) :
r -= 1
res = min(res,tree[r])
l >>= 1
r >>= 1
return res
import sys
import threading
sys.setrecursionlimit(10**6+1)
threading.stack_size(10**8)
n = int(input())
numbers = inputli()
make_tree(numbers)
def solve2():
while(True):
try:
a = input().split()
kind = a[0]
i = int(a[1])
j = int(a[2])
if kind == 'set':
update(i-1,j)
else:
print(query(i-1,j))
except EOFError :
break
t = threading.Thread(target=solve2)
t.start()
t.join()
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
Classes/Class-06/contest-06/B.py
|
CristianLazoQuispe/ITMO-Training-Camp-2021
|
import os
import pytest
import sqlalchemy as sa
from libweasyl.configuration import configure_libweasyl
from libweasyl.models.meta import registry
from libweasyl.models.tables import metadata
from libweasyl.test.common import NotFound
from libweasyl.test.common import media_link_formatter
from libweasyl import cache
engine = sa.create_engine(os.environ.get('WEASYL_TEST_SQLALCHEMY_URL', 'postgresql+psycopg2cffi:///weasyl_test'))
sessionmaker = sa.orm.scoped_session(sa.orm.sessionmaker(bind=engine))
@pytest.fixture(scope='session', autouse=True)
def setup(request):
db = sessionmaker()
db.execute('DROP SCHEMA public CASCADE')
db.execute('CREATE SCHEMA public')
db.execute('CREATE EXTENSION HSTORE')
db.commit()
metadata.create_all(engine)
cache.region.configure('dogpile.cache.memory')
@pytest.fixture(autouse=True)
def staticdir(tmpdir):
tmpdir = tmpdir.join('libweasyl-staticdir')
configure_libweasyl(
dbsession=sessionmaker,
not_found_exception=NotFound,
base_file_path=tmpdir.strpath,
staff_config_dict={},
media_link_formatter_callback=media_link_formatter.format_media_link,
)
return tmpdir
@pytest.fixture
def db(request):
db = sessionmaker()
# If a previous test has failed due to an SQL problem, the session will be
# in a broken state, requiring a rollback. It's not harmful to
# unconditionally rollback, so just do that.
db.rollback()
def tear_down():
"Clears all rows from the test database."
for k, cls in registry.items():
if not k[0].isupper():
continue
db.query(cls).delete()
db.flush()
db.commit()
request.addfinalizer(tear_down)
return db
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
libweasyl/libweasyl/conftest.py
|
hyena/weasyl
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Mojo(recipe_util.Recipe):
"""Basic Recipe class for Mojo."""
@staticmethod
def fetch_spec(props):
url = 'https://github.com/domokit/mojo.git'
solution = {
'name' :'src',
'url' : url,
'deps_file': 'DEPS',
'managed' : False,
'custom_deps': {},
'safesync_url': '',
}
spec = {
'solutions': [solution],
}
if props.get('target_os'):
spec['target_os'] = props['target_os'].split(',')
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'src'
def main(argv=None):
return Mojo().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
recipes/mojo.py
|
azunite/chrome_build
|
#
# Copyright (c) 2019 UAVCAN Development Team
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#
from __future__ import annotations
import abc
import typing
class CRCAlgorithm(abc.ABC):
"""
Implementations are default-constructible.
"""
@abc.abstractmethod
def add(self, data: typing.Union[bytes, bytearray, memoryview]) -> None:
"""
Updates the value with the specified block of data.
"""
raise NotImplementedError
@abc.abstractmethod
def check_residue(self) -> bool:
"""
Checks if the current state matches the algorithm-specific residue.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def value(self) -> int:
"""
The current CRC value, with output XOR applied, if applicable.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def value_as_bytes(self) -> bytes:
"""
The current CRC value serialized in the algorithm-specific byte order.
"""
raise NotImplementedError
@classmethod
def new(cls, *fragments: typing.Union[bytes, bytearray, memoryview]) -> CRCAlgorithm:
"""
A factory that creates the new instance with the value computed over the fragments.
"""
self = cls()
for frag in fragments:
self.add(frag)
return self
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
}
] | 3
|
pyuavcan/transport/commons/crc/_base.py
|
pcarranzav2/pyuavcan
|
from pycontacts.managers import BaseManager
from conftest import ExtenedBaseModel
class ExampleManager(BaseManager):
cls = ExtenedBaseModel
def test_new_manager(address_book):
examples = ExampleManager(address_book)
assert examples.book == address_book
def test_manager_create(address_book):
examples = ExampleManager(address_book)
empty_example = examples.create()
assert isinstance(empty_example, ExtenedBaseModel)
assert not empty_example['test_set']
assert not empty_example['test_not_set']
def test_manager_filter(address_book):
examples = ExampleManager(address_book)
example = examples.create(test_set="Jack")
example.save()
results = examples.filter(test_set="Jack")
assert results.values()[0]['test_set'] == "Jack"
def test_manager_convert_results(address_book):
examples = ExampleManager(address_book)
example = examples.create(test_set="Jack")
example.save()
results = examples.filter(test_set="Jack")
example_objs = examples.convert_results(results)
assert len(example_objs) == 1
assert isinstance(example_objs[0], ExtenedBaseModel)
assert example_objs[0]['test_set'] == "Jack"
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
tests/test_base_manager.py
|
kibernick/pycontacts
|
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from sauce_searcher_server.api import (
get_anime,
get_doujin,
get_light_novel,
get_manga,
get_visual_novel,
)
from sauce_searcher_server.models import Anime, Doujin, LightNovel, Manga, VisualNovel
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*'],
)
@app.get('/')
async def read_status():
"""Check if server is running."""
return {'message': 'Server is running'}
@app.get('/anime/{name:path}', response_model=Anime)
async def read_anime(name: str):
"""Get anime data by name."""
anime = get_anime(name)
return anime
@app.get('/manga/{name:path}', response_model=Manga)
async def read_manga(name: str):
"""Get manga data by name."""
manga = get_manga(name)
return manga
@app.get('/ln/{name:path}', response_model=LightNovel)
@app.get('/light_novel/{name:path}', response_model=LightNovel)
async def read_light_novel(name: str):
"""Get light novel data by name."""
light_novel = get_light_novel(name)
return light_novel
@app.get('/vn/{name:path}', response_model=VisualNovel)
@app.get('/visual_novel/{name:path}', response_model=VisualNovel)
async def read_visual_novel(name: str):
"""Get visual novel data by name."""
visual_novel = get_visual_novel(name)
return visual_novel
@app.get('/doujin/{digits:path}', response_model=Doujin)
async def read_doujin(digits: int):
"""Get doujin data by digits."""
doujin = get_doujin(digits)
return doujin
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
}
] | 3
|
server/sauce_searcher_server/main.py
|
f4str/sauce-searcher
|
"""
https://leetcode.com/problems/average-of-levels-in-binary-tree/description/
Given a non-empty binary tree, return the average value of the nodes on each level in the form of an array.
Example 1:
Input:
3
/ \
9 20
/ \
15 7
Output: [3, 14.5, 11]
Explanation:
The average value of nodes on level 0 is 3, on level 1 is 14.5, and on level 2 is 11. Hence return [3, 14.5, 11].
Note:
The range of node's value is in the range of 32-bit signed integer.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def averageOfLevels(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if root is None:
return []
from collections import defaultdict
level = 0
levels = defaultdict(list)
levels[0] = [root]
ans = []
while len(levels[level]) != 0:
n = 0.0
for node in levels[level]:
n += node.val
if node.left is not None:
levels[level+1].append(node.left)
if node.right is not None:
levels[level+1].append(node.right)
ans.append(n/len(levels[level]))
level += 1
return ans
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
solutions/637.py
|
abawchen/leetcode
|
from node import Node
class LinkedList:
"""
This class is the one you should be modifying!
Don't change the name of the class or any of the methods.
Implement those methods that current raise a NotImplementedError
"""
def __init__(self):
self.__root = None
def get_root(self):
return self.__root
def add_to_list(self, node):
"""
This method should add at the beginning of the linked list.
"""
marker = self.get_root()
self.__root = node
node.set_next(marker)
def print_list(self):
marker = self.__root
while marker:
marker.print_details()
marker = marker.get_next()
def find(self, name):
"""
This method should find a node in the linked list with a given name.
:param name: the name of the node to find in this list.
:return: the node found, or raises a LookupError if not found.
"""
marker = self.__root
while marker:
if marker.name == name:
return marker
marker = marker.get_next()
else:
raise LookupError("Didn't find anyone by the name '{}'".format(name))
'''
nodes = [Node(*x) for x in [['George', '0123x', 1997],
['Sarah', '4567x', 2001],
['John', '8901x', 1984]]]
ll = LinkedList()
for node in nodes:
ll.add_to_list(node)
ll.print_list()
print(ll.get_root())
print('Found {}'.format(ll.find('Sarah')))
print(ll.find('Foo'))
'''
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
1_linked_list/linkedlist.py
|
PythonPostgreSQLDeveloperCourse/Section13
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .feedback import Feedback
class SearchResultFeedback(Feedback):
"""
Database model representing feedback about search results (e.g. empty results).
"""
search_query = models.CharField(max_length=1000, verbose_name=_("search term"))
@property
def object_name(self):
"""
This property returns the name of the object this feedback comments on.
:return: The name of the object this feedback refers to
:rtype: str
"""
return _("Search results for {}").format(self.search_query)
@property
def object_url(self):
"""
This property returns the url to the object this feedback comments on.
:return: The url to the referred object
:rtype: str
"""
return ""
@property
def related_feedback(self):
"""
This property returns all feedback entries which relate to the same object and have the same is_technical value.
:return: The queryset of related feedback
:rtype: ~django.db.models.query.QuerySet [ ~integreat_cms.cms.models.feedback.search_result_feedback.SearchResultFeedback ]
"""
return SearchResultFeedback.objects.filter(
region=self.region,
language=self.language,
search_query=self.search_query,
is_technical=self.is_technical,
)
class Meta:
#: The verbose name of the model
verbose_name = _("search result feedback")
#: The plural verbose name of the model
verbose_name_plural = _("search result feedback")
#: The default permissions for this model
default_permissions = ()
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}
] | 3
|
integreat_cms/cms/models/feedback/search_result_feedback.py
|
Carlosbogo/integreat-cms
|
import textwrap
import pytest
from .common import (
precollected,
researcher,
researchers,
some_other_paper,
some_paper,
)
def test_bibtex(some_paper):
expected = textwrap.dedent(
"""
@inproceedings{merrienboer2018-differentiation97,
author = {Bart van Merrienboer and Olivier Breuleux and Arnaud Bergeron and Pascal Lamblin},
title = {Automatic differentiation in ML: Where we are and where we should be going},
year = {2018},
booktitle = {Advances in Neural Information Processing Systems},
pages = {8757-8767}
}"""[
1:
]
)
assert some_paper.bibtex() == expected
def test_shortcuts(some_paper):
assert (
some_paper.title
== "Automatic differentiation in ML: Where we are and where we should be going"
)
assert some_paper.year == 2018
assert (
some_paper.venue == "Advances in Neural Information Processing Systems"
)
assert some_paper.conference == "neurips2018"
def test_authors(some_paper):
auth = some_paper.authors
assert len(auth) == 4
me = auth[1]
assert me.name == "Olivier Breuleux"
assert me.role == {"old"}
assert me.affiliations == ["Université de Montréal"]
def test_authors_2(some_other_paper):
auth = some_other_paper.authors
assert len(auth) == 3
me = auth[0]
assert me.name == "Olivier Breuleux"
assert me.role == {"young"}
assert me.affiliations == ["Université de Montréal"]
def test_query_author(precollected):
name = "pascal vincent"
results = precollected.query({"author": name})
for result in results:
assert any(auth.name.lower() == name for auth in result.authors)
for paper in precollected:
if paper not in results:
assert all(auth.name.lower() != name for auth in paper.authors)
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
tests/test_papers.py
|
notoraptor/paperoni
|
# SPDX-License-Identifier: MIT
from pytest_mock.plugin import MockerFixture as MockFixture
import pytest
from upkeep import CONFIG_GZ, KernelConfigError, rebuild_kernel
def test_rebuild_kernel_no_config_yes_gz(mocker: MockFixture) -> None:
def isfile(x: str) -> bool:
if x == '.config':
return False
if x == CONFIG_GZ:
return True
raise Exception(x)
mocker.patch('upkeep.isfile', new=isfile)
mocker.patch('upkeep.chdir')
open_f = mocker.patch('upkeep.open')
gzip_open = mocker.patch('upkeep.gzip.open')
with pytest.raises(KernelConfigError):
rebuild_kernel()
assert gzip_open.call_count == 1
assert open_f.call_count == 1
|
[
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
tests/test_rebuild_kernel.py
|
Tatsh/upkeep
|
import re
from collections import defaultdict
from datetime import datetime
from elasticsearch_dsl import Keyword, Text
from protean import BaseAggregate, BaseValueObject
from protean.core.model import BaseModel
from protean.fields import DateTime, Integer, String
from protean.fields import Text as ProteanText
from protean.fields import ValueObject
class Person(BaseAggregate):
first_name = String(max_length=50, required=True)
last_name = String(max_length=50, required=True)
age = Integer(default=21)
created_at = DateTime(default=datetime.now())
class Alien(BaseAggregate):
first_name = String(max_length=50, required=True)
last_name = String(max_length=50, required=True)
age = Integer(default=21)
class User(BaseAggregate):
email = String(max_length=255, required=True, unique=True)
password = String(max_length=3026)
class Email(BaseValueObject):
REGEXP = r"\"?([-a-zA-Z0-9.`?{}]+@\w+\.\w+)\"?"
# This is the external facing data attribute
address = String(max_length=254, required=True)
def clean(self):
"""Business rules of Email address"""
errors = defaultdict(list)
if not bool(re.match(Email.REGEXP, self.address)):
errors["address"].append("is invalid")
return errors
class ComplexUser(BaseAggregate):
email = ValueObject(Email, required=True)
password = String(required=True, max_length=255)
class Provider(BaseAggregate):
name = ProteanText()
about = ProteanText()
class ProviderCustomModel(BaseModel):
id = Keyword()
name = Text(fields={"raw": Keyword()})
about = Text()
class Meta:
schema = "providers"
class Receiver(BaseAggregate):
name = String()
age = Integer()
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
}
] | 3
|
tests/adapters/model/elasticsearch_model/elements.py
|
mpsiva89/protean
|
import pytest
import flopt
from flopt import Variable, Problem, Solver
from flopt.performance import CustomDataset
@pytest.fixture(scope='function')
def a():
return Variable('a', lowBound=2, upBound=4, cat='Continuous')
@pytest.fixture(scope='function')
def b():
return Variable('b', lowBound=2, upBound=4, cat='Continuous')
@pytest.fixture(scope='function')
def prob(a, b):
_prob = Problem()
_prob += a + b
return _prob
def test_compute_nosolver(prob):
logs = flopt.performance.compute(prob, timelimit=0.5, msg=True)
def test_compute_RandomSearch(prob):
rs_solver = Solver('RandomSearch')
logs = flopt.performance.compute(
prob, rs_solver,
timelimit=0.1, msg=True
)
def test_CustomDataset(prob):
cd = CustomDataset(name='user')
cd += prob # add problem
log = flopt.performance.compute(
cd, timelimit=0.1, msg=True
)
def test_compute_permutation(prob):
prob.prob_type = 'permutation'
logs = flopt.performance.compute(prob, timelimit=0.1, msg=True)
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
}
] | 3
|
tests/test_Performance.py
|
nariaki3551/flopt
|
import time
import socket
import random
from subprocess import run, PIPE
test_dir = '"/Users/oliver/Google Drive/Cambridge/CST_II/project/testing/gtspeed"'
def test_git():
with open('test_strings.txt') as f:
for line in f:
p = run(['gitmaildir_cli', 'deliver', '--dir='+test_dir], stdout=PIPE, input=line, encoding='ascii')
def test_non_git():
with open('test_strings.txt') as f:
for line in f:
filename = str(int(time.time() * 1000000)) + "." + str(random.random() * 1000000000) + "." + socket.gethostname()
mail_file = open('gtspeed/'+filename, 'w')
mail_file.write(line)
mail_file.close
test_git()
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
evaluation/scripts/data_generation/deliver_many.py
|
odnh/gitmaildir
|
from operator import itemgetter
from .. import support_utils as sup
def print_stats(log, conformant, traces):
print('complete traces:', str(len(traces)),
', events:', str(len(log.data)), sep=' ')
print('conformance percentage:',
str(sup.ffloat((len(conformant) / len(traces)) * 100, 2)) + '%', sep=' ')
def get_traces(data, one_timestamp):
"""
returns the data splitted by caseid and ordered by start_timestamp
"""
cases = list(set([x['caseid'] for x in data]))
traces = list()
for case in cases:
order_key = 'end_timestamp' if one_timestamp else 'start_timestamp'
trace = sorted(
list(filter(lambda x: (x['caseid'] == case), data)),
key=itemgetter(order_key))
traces.append(trace)
return traces
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
src/simod/log_repairing/conformance_checking.py
|
AdaptiveBProcess/SiMo-Discoverer
|
from flexlmtools import parse_query, is_valid_server_name
import pytest
APP_FEATURES = {'feat1': (2, 0), 'feat2': (5, 1),
'feat6': (3, 3), 'feat-add': (2, 0),
'feat_opt': (1, 1)}
def test_parse_query():
with open('./test/app-features.txt', 'r') as f:
lines = f.read()
def check(feature):
dct = parse_query(lines, features=[feature])
assert feature in dct
assert dct[feature] == APP_FEATURES[feature]
check('feat1')
check('feat2')
check('feat6')
check('feat-add')
check('feat_opt')
dct = parse_query(lines, features=['feat3'])
# Check if dct is empty
assert not dct
dct = parse_query(lines)
assert all(feature in dct for feature in APP_FEATURES)
assert all(dct[feature] == values for feature,
values in APP_FEATURES.items())
def test_is_valid_server_name():
assert is_valid_server_name('6200@test-server.org')
assert not is_valid_server_name('0123@test-serv')
assert not is_valid_server_name('1234@test_serv')
assert not is_valid_server_name('3100@test#serv')
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
}
] | 3
|
test/flexlmtools_test.py
|
shohirose/flexlm-python-scripts
|
# Problem 35: Circular primes
# https://projecteuler.net/problem=35
def prime_sieve(n):
primes = set(range(2, n+1))
for i in range(2, (n+1) // 2):
if i in primes:
m = 2
while i*m <= n:
primes.discard(i*m)
m += 1
return primes
def circulars(n):
result = []
s = str(n)
for i in range(len(s)):
result.append(int(s[i:] + s[0:i]))
return result
def all_in(iterable, primes):
for x in iterable:
if x not in primes:
return False
return True
def cp(max):
primes = prime_sieve(max)
count = 0
for p in primes:
cs = circulars(p)
if all_in(cs, primes):
count += 1
return count
#
def test():
if cp(100) == 13:
return 'Pass'
else:
return 'Fail'
def main():
return cp(1 * 1000 * 1000)
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
print(test())
else:
print(main())
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
p035.py
|
yehnan/project_euler_python
|
import unittest
import score
class Testquadratic_weighted_kappa(unittest.TestCase):
def test_confusion_matrix(self):
conf_mat = score.confusion_matrix([1,2],[1,2])
self.assertEqual(conf_mat,[[1,0],[0,1]])
conf_mat = score.confusion_matrix([1,2],[1,2],0,2)
self.assertEqual(conf_mat,[[0,0,0],[0,1,0],[0,0,1]])
conf_mat = score.confusion_matrix([1,1,2,2,4],[1,1,3,3,5])
self.assertEqual(conf_mat,[[2,0,0,0,0],[0,0,2,0,0],[0,0,0,0,0],
[0,0,0,0,1],[0,0,0,0,0]])
conf_mat = score.confusion_matrix([1,2],[1,2],1,4)
self.assertEqual(conf_mat,[[1,0,0,0],[0,1,0,0],[0,0,0,0],[0,0,0,0]])
def test_quadratic_weighted_kappa(self):
kappa = score.quadratic_weighted_kappa([1,2,3],[1,2,3])
self.assertAlmostEqual(kappa, 1.0)
kappa = score.quadratic_weighted_kappa([1,2,1],[1,2,2],1,2)
self.assertAlmostEqual(kappa, 0.4)
kappa = score.quadratic_weighted_kappa([1,2,3,1,2,2,3],[1,2,3,1,2,3,2])
self.assertAlmostEqual(kappa, 0.75)
# todo: test cases for linear_weighted_kappa
def test_mean_quadratic_weighted_kappa(self):
kappa = score.mean_quadratic_weighted_kappa([1, 1])
self.assertAlmostEqual(kappa, 0.999)
kappa = score.mean_quadratic_weighted_kappa([0.5, 0.8], [1,.5])
self.assertAlmostEqual(kappa, 0.624536446425734)
kappa = score.mean_quadratic_weighted_kappa([-1, 1])
self.assertAlmostEqual(kappa, 0.0)
if __name__ == '__main__':
unittest.main()
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
Evaluation_Metrics/Python/score/test/test_score.py
|
Teeefa/AES-Benchmark
|
# -*- coding: utf-8 -*-
import sys
import os
import re
sys.path.append('../') # noqa
from jinja2 import Template
from cli_bdd.core.steps import (
command,
environment,
file as file_steps,
)
BASE_PATH = os.path.dirname(os.path.normpath(__file__))
TEMPLATES_PATH = os.path.join(BASE_PATH, 'templates')
STEPS_MODULES = [
command,
environment,
file_steps,
]
def _prepare_docstring(value):
if not value:
return ''
remove_spaces = 0
for line in value.split('\n')[1:]:
if line:
for char in line:
if char != ' ':
break
else:
remove_spaces += 1
break
return re.sub(
r'^ {%s}' % remove_spaces,
'',
unicode(value),
flags=re.MULTILINE
).strip()
def _render_and_save_template(path, dest, context):
template_path = os.path.join(TEMPLATES_PATH, path + '.tpl')
destination_path = os.path.join(BASE_PATH, dest + '.md')
with open(destination_path, 'wt') as dest_file:
dest_file.write(
Template(open(template_path).read()).render(context)
)
def generate_api_reference():
generate_steps_reference()
def generate_steps_reference():
steps_by_types = []
for step_module in STEPS_MODULES:
name = step_module.__name__.split('.')[-1]
steps_by_types.append({
'name': name,
'module': step_module.__name__,
'base_steps': step_module.base_steps
})
steps_dir = os.path.join(BASE_PATH, 'steps/')
if not os.path.exists(steps_dir):
os.makedirs(steps_dir)
for step_type in steps_by_types:
_render_and_save_template(
'steps',
'steps/' + step_type['name'],
{
'step_type': step_type,
'prepare_docstring': _prepare_docstring
}
)
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
docs/generator.py
|
chibisov/cli-bdd
|
# -*- coding: utf-8 -*-
import requests
import hashlib
import json
import os
def saveNewData():
with open(fn, 'w') as f:
json.dump(TargetData.json(), f,ensure_ascii=False)
def saveHashvalue():
with open(fnHash, 'w') as hashread:
hashread.write(NewHash)
def CalHashvalue():
data = hashlib.md5()
data.update(TargetData.text.encode('utf-8'))
return data.hexdigest()
url = 'http://opendata.epa.gov.tw/webapi/Data/ATM00679/?$orderby=MonitorDate%20desc&$skip=0&$top=1000&format=json'
try:
TargetData = requests.get(url)
print("下載成功")
except Exception as error:
print("下載失敗")
fn = "/Users/tsaichangyang 1/Desktop/CODING/03302020-ForPy自動化檢測資料更新JSON/NewData.json"
fnHash = "/Users/tsaichangyang 1/Desktop/CODING/03302020-ForPy自動化檢測資料更新JSON/Hashvalue.txt"
NewHash = CalHashvalue()
if os.path.exists(fnHash):
print('新的哈希值 = ', NewHash)
with open(fnHash,'r') as hashread:
OldHash = hashread.read()
print('舊的哈希值 = ', OldHash)
if NewHash == OldHash:
print('目標資料未更新!')
else:
print('目標資料已經更新!')
saveNewData()
saveHashvalue()
else:
print('第一次截取目標資料!')
FirstHash = CalHashvalue()
print('哈希值 = ', FirstHash)
saveNewData()
saveHashvalue()
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
}
] | 3
|
03302020.py
|
igiscy/ForPy-automatic-update-JSON
|
import tkinter as tk
class App(tk.Tk):
def __init__(self):
super().__init__()
self.title("Basic canvas")
self.canvas = tk.Canvas(self, bg="white")
self.label = tk.Label(self)
self.canvas.bind("<Motion>", self.mouse_motion)
self.canvas.pack()
self.label.pack()
def mouse_motion(self, event):
x, y = event.x, event.y
text = "Mouse position: ({}, {})".format(x, y)
self.label.config(text=text)
if __name__ == "__main__":
app = App()
app.mainloop()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
Chapter07/code/chapter7_01.py
|
sTone3/Tkinter-GUI-Application-Development-Cookbook
|
# -*- coding: utf-8 -*-
# Python imports
# 3rd Party imports
# App imports
from .individual import Individual
class Crossover:
parent_a: Individual
parent_b: Individual
individual_class: type
def __init__(self, individual_class: type = Individual):
self.individual_class = individual_class
def set_parents(self, parent_a: Individual, parent_b: Individual):
self.parent_a = parent_a
self.parent_b = parent_b
def create_offspring(self) -> Individual:
raise NotImplementedError
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
src/GenPro/genetic_algorithm/crossover.py
|
Hispar/procedural_generation
|
from werkzeug.security import safe_str_cmp
from user import User
users = [
User(1, 'bob', '1234')
]
username_mapping = {u.username: u for u in users}
userid_mapping = {u.id: u for u in users}
def authenticate(username, password):
user = User.find_by_username(username)
if user and safe_str_cmp(user.password, password):
return user
def identity(payload):
user_id = payload['identity']
return User.find_by_id(user_id)
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
Session 5/security.py
|
valdirsalustino/python-rest-api
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
from __future__ import print_function
from collections import defaultdict
import logging
# validate, , validate things, internal
from yotta.lib import validate
def addOptions(parser):
parser.add_argument('--all', '-a', dest='list_all', default=False, action='store_true',
help='List all licenses, not just each unique license.'
)
def execCommand(args, following_args):
c = validate.currentDirectoryModule()
if not c:
return 1
if not args.target:
logging.error('No target has been set, use "yotta target" to set one.')
return 1
target, errors = c.satisfyTarget(args.target)
if errors:
for error in errors:
logging.error(error)
return 1
dependencies = c.getDependenciesRecursive(
target = target,
available_components = [(c.getName(), c)]
)
errors = []
if args.list_all:
for name, dep in dependencies.items():
if not dep:
errors.append(u'%s is missing: license unknown!' % name)
else:
print(u'%s: %s' % (name, u', '.join(dep.licenses())))
else:
licenses = defaultdict(list)
for name, dep in dependencies.items():
if not dep:
errors.append(u'%s is missing: license unknown!' % name)
else:
for lic in dep.licenses():
licenses[lic].append(name)
for lic in licenses:
print(lic)
if len(errors):
for err in errors:
logging.error(err)
return 1
return 0
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
yotta/licenses.py
|
microbit-foundation/yotta
|
from typing import List, Any
import os
def searchFiles(keyword: str , path: str):
"""Search for files"""
files: List[Any] = []
for root, dirs, files in os.walk(path):
for file in files:
if keyword in file:
files.append(root + '\\' + str(file))
return files
def searchDirs(keyword: str, path: str):
"""Search for folders"""
folders = []
for root, dirs, files in os.walk(path):
for dir in dirs:
if keyword in dir:
folders.append(root + '\\' + str(dir))
return folders
def searchExts(ext: str, path: str):
"""Search for extensions"""
files: List[Any] = []
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(ext):
files.append(root + '\\' + str(file))
return files
def searchList(listOfTerms, query: str, filter='in'):
"""Search within a list"""
matches = []
for item in listOfTerms:
if filter == 'in' and query in item:
matches.append(item)
elif filter == 'start' and item.startswith(query):
matches.append(item)
elif filter == 'end' and item.endswith(query):
matches.append(item)
elif filter == 'exact' and item == query:
matches.append(item)
return matches
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
py_everything/search.py
|
Morgan-Phoenix/py_everything
|
from django.contrib.auth import views as auth_views
from django.urls import reverse_lazy
from django.views.generic import FormView
from .. import forms, mails
from ..tokens import password_reset_token_generator
class PasswordReset(FormView):
form_class = forms.PasswordResetForm
template_name = 'django_auth2/reset_password/form.html'
success_url = reverse_lazy('password_reset_done')
def form_valid(self, form):
response = super().form_valid(form)
user = form.get_user_from_email()
mails.send_reset_password_mail(self.request, user)
return response
password_reset = PasswordReset.as_view()
def password_reset_done(request, **kwargs):
return auth_views.password_reset_done(
request, template_name='django_auth2/reset_password/done.html',
**kwargs
)
def password_reset_confirm(request, **kwargs):
return auth_views.password_reset_confirm(
request,
set_password_form=forms.SetPasswordForm,
token_generator=password_reset_token_generator,
template_name='django_auth2/reset_password/confirm.html',
**kwargs
)
def password_reset_complete(request, **kwargs):
return auth_views.password_reset_done(
request,
template_name='django_auth2/reset_password/complete.html',
**kwargs)
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
django_auth2/views/reset_password.py
|
Nick1994209/django-auth2
|
'''
Created on 01 gen 2018
@author: Andrea Graziani - matricola 0189326
@version: 1.0
'''
from GraphNode import GraphAdjacencyListNode
from AcyclicDirectGraph import AcyclicDirectGraph
class AdjacencyListGraph(AcyclicDirectGraph):
'''
This class represents a graph rappresented with adjacency list.
'''
def __init__(self):
'''
Constructs a newly allocated 'AdjacencyListGraph' object.
'''
super().__init__()
def addNewNode(self, value):
"""
@see: AcyclicDirectGraph.addNewNode
"""
# Creation new 'GraphNode' object...
# ---------------------------------------------------- #
newGraphAdjacencyListNode = GraphAdjacencyListNode(self._nextKey, value)
# Adding it to node set...
# ---------------------------------------------------- #
self._nodeSet[self._nextKey] = newGraphAdjacencyListNode
self._nextKey += 1
def insertNewEdge(self, predecessor, successor):
"""
@see: AcyclicDirectGraph.insertNewEdge
"""
self._nodeSet[predecessor]._adjacencyList.append(successor)
def getSuccessors(self, nodeId):
"""
@see: AcyclicDirectGraph.getSuccessors
"""
return self._nodeSet[nodeId]._adjacencyList
def printAdjacencyList(self):
"""
This function is used to print adjacency lists.
"""
print("\n\n{0}".format("-"*70))
# Check if empty...
# ---------------------------------------------------- #
if (self.isNodeSetEmpty()):
print("Graph is empty!")
else:
for elem in self._nodeSet:
print(" {0} -> {1}".format(elem, self._nodeSet[elem]._adjacencyList))
print("\n\n{0}".format("-"*70))
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
AdjacencyListGraph.py
|
AndreaG93/Visibility-Degree-Graph
|
from django.contrib.auth.base_user import BaseUserManager
from django.utils.translation import ugettext_lazy as _
class CustomUserManager(BaseUserManager):
def create_user(self, nick, email, password, first_name, last_name, **extra_fields):
"""
Create and save a User with the given nickname, email and password.
"""
if not nick:
raise ValueError(_('The Nickname must be provided'))
if not email:
raise ValueError(_('The Email must be provided'))
email = self.normalize_email(email)
user = self.model(nick=nick, first_name=first_name, last_name=last_name, email=email, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, nick, email, password, **extra_fields):
"""
Create and save a SuperUser with the given nickname, email and password.
"""
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_active', True)
if not nick:
raise ValueError(_('The Nickname must be provided'))
if not email:
raise ValueError(_('The Email must be provided'))
if extra_fields.get('is_staff') is not True:
raise ValueError(_('Superuser must have is_staff=True.'))
if extra_fields.get('is_superuser') is not True:
raise ValueError(_('Superuser must have is_superuser=True.'))
return self.create_user(nick, email, password, first_name=None, last_name=None, **extra_fields)
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
calc/managers.py
|
BAcode-X/webCalc
|
from flask import jsonify
from flask_api import status
def ok(json_data: dict) -> tuple:
return json_data, status.HTTP_200_OK
def created(json_data: dict) -> tuple:
return json_data, status.HTTP_201_CREATED
def bad_request(message: dict) -> tuple:
return jsonify(message), status.HTTP_400_BAD_REQUEST
def unauthorized(message: dict) -> tuple:
return message, status.HTTP_401_UNAUTHORIZED
def forbidden(message: dict) -> tuple:
return jsonify(message), status.HTTP_403_FORBIDDEN
def not_found(message: dict) -> tuple:
return jsonify(message), status.HTTP_404_NOT_FOUND
def conflict(message: dict) -> tuple:
return jsonify(message), status.HTTP_409_CONFLICT
def internal_server_error(message: dict):
return jsonify(message), status.HTTP_500_INTERNAL_SERVER_ERROR
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
}
] | 3
|
utils/responses.py
|
pablobascunana/your-locations-flask-mongo
|
"""
Revision ID: 0122_add_service_letter_contact
Revises: 0121_nullable_logos
Create Date: 2017-09-21 12:16:02.975120
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
revision = "0122_add_service_letter_contact"
down_revision = "0121_nullable_logos"
def upgrade():
op.create_table(
"service_letter_contacts",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("service_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("contact_block", sa.Text(), nullable=False),
sa.Column("is_default", sa.Boolean(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["service_id"],
["services.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_service_letter_contact_service_id"),
"service_letter_contacts",
["service_id"],
unique=False,
)
def downgrade():
op.drop_index(
op.f("ix_service_letter_contact_service_id"),
table_name="service_letter_contacts",
)
op.drop_table("service_letter_contacts")
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
migrations/versions/0122_add_service_letter_contact.py
|
cds-snc/notifier-api
|
def destructure(obj, *params):
import operator
return operator.itemgetter(*params)(obj)
def greet(**kwargs):
year, day, puzzle = destructure(kwargs, 'year', 'day', 'puzzle')
print('Advent of Code')
print(f'-> {year}-{day}-{puzzle}')
print('--------------')
def load_data(filename):
with filename.open('r') as handle:
return handle.read()
def start(fn):
import pathlib
base_path = pathlib.Path(__file__).parent.parent / 'data'
def wrapped(*args, **kwargs):
greet(**kwargs)
data = load_data(base_path / f'{kwargs["year"]}.{kwargs["day"]}.txt')
return fn(data, *args, **kwargs)
return wrapped
def flatten_json(nested_json):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
for i, a in enumerate(x):
flatten(a, name + str(i) + '_')
else:
out[name[:-1]] = x
flatten(nested_json)
return out
def sparse_matrix():
from collections import defaultdict
return defaultdict(lambda: 0)
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
core/functions/__init__.py
|
annapoulakos/advent-of-code
|
__author__ = 'Gobin'
from redditcli.api import base
class Account(base.Resource):
resource_name = 'Account'
class AccountManager(base.ResourceManager):
resource_class = Account
def me(self):
return self._get('/api/v1/me')
def getkarma(self):
return self._get('/api/v1/me/karma')
def getfriends(self):
return self._get('/api/v1/me/friends', 'data')
def getprefs(self):
return self._get('/api/v1/me/prefs')
def gettrophies(self):
return self._get('/api/v1/me/trophies')
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
redditcli/api/account.py
|
gobins/python-oauth2
|
#!/usr/bin/env python3
import subprocess
import os
import sys
sys.path.append("../lib/")
import json_parser
import ibofos
import cli
import test_result
import MOUNT_ARRAY_BASIC_1
def clear_result():
if os.path.exists( __file__ + ".result"):
os.remove( __file__ + ".result")
def set_result(detail):
code = json_parser.get_response_code(detail)
result = test_result.expect_false(code)
with open(__file__ + ".result", "w") as result_file:
result_file.write(result + " (" + str(code) + ")" + "\n" + detail)
def execute():
clear_result()
MOUNT_ARRAY_BASIC_1.execute()
out = cli.mount_ibofos()
return out
if __name__ == "__main__":
out = execute()
set_result(out)
ibofos.kill_ibofos()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
test/system/array/MOUNT_ARRAY_ALD_MOUNTED_ERROR.py
|
mjlee34/poseidonos
|
# O(n^2)
def arithmetic_series(n: int) -> int:
if n < 0:
raise ValueError('Argument n is not a natural number')
return int((n + 1) * n * 0.5)
def arithmetic_series_loop(n: int) -> int:
if n < 0:
raise ValueError('Argument n is not a natural number')
s: int = 0
while n > 0:
s += n
n -= 1
return s
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
math/arithmetic-series.py
|
src24/algos
|
import praw
import passwords
import calendar, datetime, time
# Version!
version = "0.1"
with_version = lambda msg: msg + "\n\nI'm using Ploverscript v{}. Learn more [here](https://github.com/codingJWilliams/Ploverscript).".format(version)
# Formatting stuff.
small_indent = " | "
big_indent = " | "
def with_status(status, operation):
print(small_indent + status + "...")
res = operation()
print(big_indent + "...done.")
return res
# Time stuff.
def get_time():
return calendar.timegm(time.gmtime())
def show_delta(second):
return datetime.timedelta(seconds=round(second))
# Reddit stuff.
reddit = praw.Reddit(client_id=passwords.client_id,
client_secret=passwords.client_secret,
password=passwords.password,
user_agent=passwords.user_agent,
username=passwords.username)
def fetch_post(tor_thread):
foreign_thread = reddit.submission(url=tor_thread.url)
# TODO: deal with links to imgur albums -- at least those with a single image
return dict(
tor_thread=tor_thread,
foreign_thread=foreign_thread,
content=foreign_thread.url)
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
util.py
|
Cysioland/Ploverscript
|
from . import Link
def iterate_words(lines):
for line in lines:
words = line.split()
if len(words) == 0:
continue
for word in words[:-1]:
yield word, is_stop_word(word)
yield words[-1], True # EOL is considered a stop word
def is_stop_word(word):
return any(word.endswith(stopchar) for stopchar in '.;?!')
def tokenize(source, link_length):
head = []
end = []
is_start = True
words_iter = iterate_words(source)
while len(head) < link_length - 1:
word, is_end = next(words_iter)
head += [word]
end += [is_end]
for word, is_end in iterate_words(source):
yield Link(head, word, is_start, is_end)
head = head[1:] + [word]
# If the start of the current link is a stop word, the next link
# is a starting link
is_start = end[0]
end = end[1:] + [is_end]
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
pymk/tokenize.py
|
calzoneman/MarkovBot
|
import logging
import requests
import time
import urllib.parse
OSMO_HISTORICAL_NODE = "https://api-osmosis.imperator.co"
class OsmoHistoricalAPI:
@classmethod
def get_symbol(cls, ibc_address):
uri = "/search/v1/symbol?denom={}".format(urllib.parse.quote(ibc_address))
data = cls._query(uri)
if "symbol" in data:
return data["symbol"]
else:
return None
@classmethod
def _query(cls, uri):
url = "{}{}".format(OSMO_HISTORICAL_NODE, uri)
logging.info("Querying url=%s...", url)
response = requests.get(url)
data = response.json()
time.sleep(1)
return data
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
src/osmo/api_historical.py
|
johnny-wang/staketaxcsv
|
# import sharpy.utils.settings as settings
# import sharpy.utils.exceptions as exceptions
# import sharpy.utils.cout_utils as cout
import numpy as np
import importlib
import unittest
import os
import sharpy.utils.cout_utils as cout
class TestCoupledPrescribed(unittest.TestCase):
"""
"""
@classmethod
def setUpClass(cls):
# run all the cases generators
# case = 'smith_2deg_prescribed'
# mod = importlib.import_module('tests.coupled.prescribed.' + case + '.generate_' + case)
# case = 'rotating_wing'
# mod1 = importlib.import_module('tests.coupled.prescribed.' + case + '.generate_' + case)
pass
@classmethod
def tearDownClass(cls):
pass
# def test_smith2deg_prescribed(self):
# import sharpy.sharpy_main
# solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) +
# '/smith_2deg_prescribed/smith_2deg_prescribed.sharpy')
# sharpy.sharpy_main.main(['', solver_path])
#
# # read output and compare
# output_path = os.path.dirname(solver_path) + 'output/aero/'
# forces_data = np.genfromtxt(output_path + 'smith_2deg_prescribed_aeroforces.csv')
# self.assertAlmostEqual(forces_data[-1, 3], -3.728e1, 1)
def test_rotating_wing(self):
# import sharpy.sharpy_main
# solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) +
# '/rotating_wing/rotating_wing.sharpy')
# sharpy.sharpy_main.main(['', solver_path])
cout.cout_wrap('No tests for prescribed dynamic configurations (yet)!', 1)
pass
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
tests/coupled/prescribed/prescribed_test.py
|
ACea15/sharpy
|
# not to use sys and use python packages
from .control_task_base import ControlTaskBase
""" This test control task demonstrates how to set and read info from the SFR"""
class TestControlTask(ControlTaskBase):
def __init__(self):
pass
def default(self):
self.sfr.set("test", True)
def execute(self):
res = self.sfr.get("test")
print(res)
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
src/ControlTasks/test_control_task.py
|
CornellDataScience/self-driving-car
|
import abc
class My_ABC_Class(metaclass=abc.ABCMeta):
@abc.abstractmethod
def set_val(self, val):
return
@abc.abstractmethod
def get_val(self):
return
class MyClass(My_ABC_Class):
def set_val(self, input):
self.val = input
def hello(self):
print("\nCalling the hello() method")
print("I'm *not* part of the Abstract Methods defined in My_ABC_Class()")
if __name__ == '__main__':
my_class = MyClass()
my_class.set_val(10)
print(my_class.get_val())
my_class.hello()
#错误使用的例子
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
PythonAndOop/N35_abstractclasses_2.py
|
jiauy/before_work
|
from BaseModel.BaseModel import BaseModel
from django.db import models
from levels.models import Level
class FilmCategory(models.Model):
"""电影类别表"""
name = models.CharField(max_length=50, verbose_name='名称')
class Meta:
db_table = 'tb_film_category'
verbose_name = '电影类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Film(models.Model):
"""电影表"""
# 外键, 关联广告类别
CHARGE_CHOICES = ((0, '免费'), (1, '会员'), (2, '付费'))
category = models.ForeignKey(FilmCategory, on_delete=models.PROTECT,
verbose_name='类别')
title = models.CharField(max_length=20, verbose_name='片名')
url = models.CharField(max_length=300,
verbose_name='影视链接')
image = models.ImageField(null=True, blank=True, verbose_name='图片')
release_date = models.DateField(max_length=20, verbose_name='上映时间')
grade = models.DecimalField(decimal_places=1, max_digits=2, verbose_name='豆瓣评分')
director = models.CharField(max_length=30, verbose_name='导演', blank=True)
protagonist = models.CharField(max_length=50, verbose_name='主演', blank=True)
collection_num = models.IntegerField(default=0, verbose_name='收藏量')
play_times = models.IntegerField(default=0, verbose_name='点播次数')
charge = models.SmallIntegerField(choices=CHARGE_CHOICES, default=0, verbose_name='费用')
fcomment = models.CharField(max_length=200, null=True, verbose_name='描述信息')
recommendation_level = models.ForeignKey(Level, on_delete=models.PROTECT, verbose_name='推荐分级')
class Meta:
db_table = 'tb_film'
verbose_name = '电影'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + self.title
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
recommendation/recommendation/apps/films/models.py
|
WillionLei/recommendation
|
import pytest
from django.db import models
from rest_framework import serializers
from datahub.dbmaintenance.utils import parse_choice
class SampleTextChoice(models.TextChoices):
"""Example text choices."""
ONE = ('one', 'One')
class SampleIntegerChoice(models.IntegerChoices):
"""Example integer choices."""
_2 = (2, 'Two')
class TestParseChoiceValue:
"""Tests for parse_choice()."""
@pytest.mark.parametrize(
'input_value,choices,expected_value',
(
('one', SampleTextChoice.choices, SampleTextChoice.ONE),
('2', SampleIntegerChoice.choices, SampleIntegerChoice._2),
),
)
def test_accepts_and_transforms_valid_values(self, input_value, choices, expected_value):
"""Test that valid values are accepted and transformed to the internal value."""
assert parse_choice(input_value, choices) == expected_value
def test_raises_error_on_invalid_value(self):
"""Test that an error is raised if an invalid value is passed."""
with pytest.raises(serializers.ValidationError) as excinfo:
parse_choice('invalid', SampleTextChoice)
assert excinfo.value.detail == ['"invalid" is not a valid choice.']
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
datahub/dbmaintenance/test/test_utils.py
|
Staberinde/data-hub-api
|
from django.shortcuts import render
from flightApp.models import Flight, Passenger, Reservation
from flightApp.serializers import FlightSerializer, PassengerSerializer, ReservationSerializer
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
@api_view(['POST'])
def find_flights(request):
flights = Flight.objects.filter(
departureCity=request.data['departureCity'],
arrivalCity=request.data['arrivalCity'],
dateOfDeparture=request.data['dateOfDeparture'],
)
serializer = FlightSerializer(flights, many=True)
return Response(serializer.data)
@api_view(['POST'])
def save_reservation(request):
reservation = Reservation.objects.create(
flight=Flight.objects.get(id=request.data['flightId']),
passenger=Passenger.objects.create(
firstName=request.data['firstName'],
lastName=request.data['lastName'],
middleName=request.data['middleName'],
email=request.data['email'],
phone=request.data['phone'],
),
)
return Response(status=status.HTTP_201_CREATED, data=ReservationSerializer(reservation).data)
class FlightViewSet(viewsets.ModelViewSet):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = (IsAuthenticated,)
class PassengerViewSet(viewsets.ModelViewSet):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class ReservationViewSet(viewsets.ModelViewSet):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
flightServices/flightApp/views.py
|
saibottrenham/djangorest
|
import http3
def test_status_code_as_int():
assert http3.codes.NOT_FOUND == 404
assert str(http3.codes.NOT_FOUND) == "404"
def test_lowercase_status_code():
assert http3.codes.not_found == 404
def test_reason_phrase_for_status_code():
assert http3.codes.get_reason_phrase(404) == "Not Found"
def test_reason_phrase_for_unknown_status_code():
assert http3.codes.get_reason_phrase(499) == ""
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
tests/test_status_codes.py
|
ambrozic/http3
|
from .lookup_provider import LookupProvider
class MerriamProvider(LookupProvider):
'''Concrete provider which provides web results from Merriam-Webster
dictionary.
'''
def lookup(self, word, limit=0):
'''Yield str results for `word` up to `limit`. When `limit == 0`,
return all results. These results will be limited to the first
page only. We do not search further back.
'''
soup = self.url_to_soup(
f'http://www.merriam-webster.com/dictionary/{word}'
)
for definition in soup.find_all('span', class_='dt', limit=limit):
yield definition.get_text()[2:].capitalize()
class MerriamBuilder:
'''Builder class which maintains a single instance of `MerriamProvider`,
returning it when called, creating it if necessary.
'''
def __init__(self):
self._instance = None
def __call__(self):
if not self._instance:
self._instance = MerriamProvider()
return self._instance
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
word_tools/merriam_provider.py
|
ncdulo/word_tools
|
from django.contrib import admin
from .models import *
class SaleInvHrdModelAdmin(admin.ModelAdmin):
list_display = ["id", "doc_no", "doc_dt", "party_id_name",
"doctor_id_name", "mode", "net_amount", "session_id"]
list_display_links = ["doc_no"]
list_filter = ["doc_dt"]
search_fields = ["doc_no", "id", "party_id__name", "doctor_id__name"]
def party_id_name(self, obj):
return obj.party_id.name
def doctor_id_name(self, obj):
return obj.doctor_id.name
class Meta:
model = SalesInvHrd
class SaleInvDtlModelAdmin(admin.ModelAdmin):
list_display = ["sequence", "hrd_id", "item_name",
"batch_no", "strip_qty", "nos_qty", "strip_free", "nos_free", "amount"]
# list_display_links = ["doc_no"]
search_fields = ["sequence", "hrd_id__id", "item_id__name"]
def item_name(self, obj):
return obj.item_id.name
class Meta:
model = SalesInvDtl
admin.site.register(Doctor)
admin.site.register(Party)
admin.site.register(SalesInvHrd, SaleInvHrdModelAdmin)
admin.site.register(SalesInvDtl, SaleInvDtlModelAdmin)
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
src/sales/admin.py
|
vishalhjoshi/croma
|
from typing import TypeVar, AsyncIterator, Sequence
from chris.common.types import PluginUrl
from chris.common.client import AuthenticatedClient
from chris.common.search import get_paginated, to_sequence
import chris.common.decorator as http
from chris.cube.types import ComputeResourceName, PfconUrl
from chris.cube.deserialization import CubeCollectionLinks, CubePlugin, ComputeResource
_T = TypeVar("_T")
class CubeClient(AuthenticatedClient[CubeCollectionLinks, CubePlugin, "CubeClient"]):
@http.post("/chris-admin/api/v1/")
async def register_plugin(
self, plugin_store_url: PluginUrl, compute_name: ComputeResourceName
) -> CubePlugin:
...
@http.post("/chris-admin/api/v1/computeresources/")
async def create_compute_resource(
self,
name: ComputeResourceName,
compute_url: PfconUrl,
compute_user: str,
compute_password: str,
description: str = "",
) -> ComputeResource:
...
def get_compute_resources_of(
self, plugin: CubePlugin
) -> AsyncIterator[ComputeResource]:
return get_paginated(
session=self.s, url=plugin.compute_resources, element_type=ComputeResource
)
def search_compute_resources(
self, max_requests=100, **query
) -> AsyncIterator[ComputeResource]:
return self.search(
url=self.collection_links.compute_resources,
query=query,
element_type=ComputeResource,
max_requests=max_requests,
)
async def get_all_compute_resources(self) -> Sequence[ComputeResource]:
return await to_sequence(self.search_compute_resources())
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
chris/cube/client.py
|
FNNDSC/chrisomatic
|
import json
def add_to_map(data_map, key):
if key in data_map:
data_map[key] += 1
elif key not in data_map:
data_map[key] = 1
return data_map
def get_json(line, headers):
sample_data = {}
for i, val in enumerate(line):
sample_data[headers[i]] = val
return sample_data
def read_pop_data(data_file):
data = []
headers = []
with open(data_file) as fi:
first = True
for line in fi:
spl = line.rstrip('\n').split('\t')
if first:
first = False
headers = spl
else:
data.append(get_json(spl, headers))
return data
def read_sample_data(data_file):
allowed_sites = [6, 7, 9, 10, 11, 12, 13, 14]
relevant_fields = ["SMN1_read_support", "SMN2_read_support", "SMN1_fraction", "SMN1_CN_raw", "Confidence"]
with open(data_file, 'r') as fi:
data = json.load(fi)
for sample in data:
for field in relevant_fields:
arr = data[sample][field]
data[sample][field] = [(i + 1, x) for i, x in enumerate(arr) if i in allowed_sites]
return data
def get_pop_column(pop_data, column):
data_map = {}
for line in pop_data:
cn = round(float(line[column]), 2)
data_map = add_to_map(data_map, cn)
return data_map
def get_sample_col_map(sample_data, column):
result = {}
for sample in sample_data:
result[sample] = sample_data[sample][column]
return result
def get_key_map(cols, data):
col_map = {}
for col in cols:
col_map[col] = data[col]
return col_map
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
charts/data_utils.py
|
Illumina/SMNCopyNumberCaller
|
import face_recognition
def compare_faces(original_face, captured_face):
same_person = False
original_face_encoding = get_image_encoding(original_face)
captured_face_encoding = get_image_encoding(captured_face)
if len(captured_face_encoding) > 0 and len(original_face_encoding) > 0:
same_face = face_recognition.compare_faces([original_face_encoding[0]], captured_face_encoding[0])
distance = face_recognition.face_distance(original_face_encoding, captured_face_encoding[0])[0]
accuracy = round((1 - distance) * 100, 2)
if same_face[0]:
same_person = True
print(accuracy);
return same_person, accuracy
def get_image_encoding(image_file):
image = face_recognition.load_image_file(image_file)
return face_recognition.face_encodings(image)
def detect_number_of_faces(face_image):
image = face_recognition.load_image_file(face_image)
return len(face_recognition.face_locations(image))
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
face_recgonition/face_recognition_api.py
|
lab-03/face_recognition
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List the versions within a key."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.kms import flags
class List(base.ListCommand):
r"""List the versions within a key.
Lists all of the versions within the given key.
## EXAMPLES
The following command lists all versions within the
key `frodo`, keyring `fellowship`, and location `global`:
$ {command} --location global \
--keyring fellowship \
--key frodo
"""
@staticmethod
def Args(parser):
flags.AddKeyResourceFlags(parser)
parser.display_info.AddFormat('table(name, state)')
def Run(self, args):
# pylint: disable=line-too-long
client = cloudkms_base.GetClientInstance()
messages = cloudkms_base.GetMessagesModule()
crypto_key_ref = flags.ParseCryptoKeyName(args)
request = messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListRequest(
parent=crypto_key_ref.RelativeName())
return list_pager.YieldFromList(
client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions,
request,
field='cryptoKeyVersions',
limit=args.limit,
batch_size_attribute='pageSize')
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
lib/surface/kms/keys/versions/list.py
|
bshaffer/google-cloud-sdk
|
"""
Aqualink API documentation
The Aqualink public API documentation # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import aqualink_sdk
from aqualink_sdk.model.user_location import UserLocation
class TestUserLocation(unittest.TestCase):
"""UserLocation unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUserLocation(self):
"""Test UserLocation"""
# FIXME: construct object with mandatory attributes with example values
# model = UserLocation() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
sdk/test/test_user_location.py
|
aqualinkorg/aqualink-sdk
|
import cv2
from app.Model.Model_cascades import Cascades
class FaceDetection:
def __init__(self):
self.type_cascade = Cascades.FACECASCADE
def get_type_cascade(self):
return self.type_cascade
def detection_rectangle_dimensions(self):
scaleFactor = 1.3
minNeighbors = 5
minSize = (30, 30)
return [scaleFactor, minNeighbors, minSize]
def format_rectangle(self, image, x, y, w, h):
cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
def detection_rectangle(self, rectangle: list, x, y, w, h):
new_rectangle = rectangle[y:y + h, x:x + w]
return new_rectangle
def detection_color(self, image):
color = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return color
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
app/Model/Model_facedetection.py
|
Renanrbsc/System_Face_Recognition
|
from unittest import TestCase
import elegy
import jax.numpy as jnp
import pytest
class MetricTest(TestCase):
def test_basic(self):
class MAE(elegy.Metric):
def call(self, y_true, y_pred):
return jnp.abs(y_true - y_pred)
y_true = jnp.array([1.0, 2.0, 3.0])
y_pred = jnp.array([2.0, 3.0, 4.0])
mae = MAE()
loss = mae.call_with_defaults()(y_true, y_pred)
assert jnp.alltrue(loss == jnp.array([1.0, 1.0, 1.0]))
def test_slice(self):
class MAE(elegy.Metric):
def call(self, y_true, y_pred):
return jnp.abs(y_true - y_pred)
y_true = dict(a=jnp.array([1.0, 2.0, 3.0]))
y_pred = dict(a=jnp.array([2.0, 3.0, 4.0]))
mae = MAE(on="a")
# raises because it doesn't use kwargs
with pytest.raises(BaseException):
sample_loss = mae.call_with_defaults()(y_true, y_pred)
# raises because it doesn't use __call__ which filters
with pytest.raises(BaseException):
sample_loss = mae.call(y_true=y_true, y_pred=y_pred)
loss = mae.call_with_defaults()(y_true=y_true, y_pred=y_pred)
assert jnp.alltrue(loss == jnp.array([1.0, 1.0, 1.0]))
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
}
] | 3
|
elegy/metrics/metric_test.py
|
sooheon/elegy
|
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2 * torch.rand(x.size())
# plt.scatter(x.numpy(), y.numpy())
# plt.show()
class Net(nn.Module):
def __init__(self, n_features, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = nn.Linear(n_features, n_hidden)
self.predict = nn.Linear(n_hidden, n_output)
def forward(self, x):
x = F.relu(self.hidden(x))
x = self.predict(x)
return x
net = Net(1, 20, 1)
print(net)
plt.ion()
plt.show()
optimizer = optim.SGD(net.parameters(), lr=0.02, momentum=0.9)
loss_function = nn.MSELoss()
for t in range(200):
prediction = net(x)
loss = loss_function(prediction, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (t % 5) == 0:
print("Loss : %f" % loss.data)
# plot and show learning process
plt.cla()
plt.scatter(x.numpy(), y.numpy())
plt.plot(x.numpy(), prediction.data.numpy(), 'r-', lw=5)
plt.text(0.5, 0, 'Loss=%.4f' % loss.data, fontdict={'size': 20, 'color': 'red'})
plt.pause(0.1)
plt.ioff()
plt.show()
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
Basics/Regression.py
|
Fyy10/ML-DL_Practice
|
import argparse
from .greet import greet
def parse_args():
parser = argparse.ArgumentParser(
description='Create automated github reports'
)
parser.add_argument('name', metavar='NAME', type=str,
help='name to greet')
return parser.parse_args()
def main():
args = parse_args()
greet(args.name)
if __name__ == "__main__":
main()
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
tako/cli.py
|
gr0und-s3ct0r/github-reports
|
import numpy as np
class LogisticRegression:
def __init__(self, lr = 0.001, n_iters = 1000):
self.lr = lr
self.n_iters = n_iters
self.weights = None
self.bias = None
def fit(self, X, y):
#init parameters
n_samples, n_features = X.shape
self.weights = np.zeros(n_features)
self.bias = 0
#gradient descent
for _ in range(self.n_iters):
linear_model = np.dot(X, self.weights) + self.bias
y_predicted = self._sigmoid(linear_model)
dw = (1 / n_samples) * np.dot(X.T, (y_predicted - y))
db = (1 / n_samples) * np.sum(y_predicted - y)
self.weights -= self.lr * dw
self.bias -= self.lr * db
def predict(self, X):
linear_model = np.dot(X, self.weights) + self.bias
y_predicted = self._sigmoid(linear_model)
y_predicted_cls = (1 if i > 0.5 else 0 for i in y_predicted)
return y_predicted_cls
def _sigmoid(self, x):
return 1 / (1 + np.exp(-x))
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
ML from sratch/logistic_regression.py
|
nane121/HacktoberFest2020
|
"""posts table
Revision ID: 5c80010c853a
Revises: 6ca7139bbbf2
Create Date: 2018-06-25 17:18:29.165993
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5c80010c853a'
down_revision = '6ca7139bbbf2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.String(length=140), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_post_timestamp'), table_name='post')
op.drop_table('post')
# ### end Alembic commands ###
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
migrations/versions/5c80010c853a_posts_table.py
|
ChanForPres/Social-Blogging-App
|
# coding: utf-8
import numpy as np
import chainer
import chainer.functions as F
import testtools
import numpy as np
class A(chainer.Chain):
def forward(self):
y1 = np.zeros((3, 4), dtype=np.float32)
return y1
# ======================================
def main():
testtools.generate_testcase(A(), [])
if __name__ == '__main__':
main()
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
elichika/tests/node/ndarray/NpZeros.py
|
disktnk/chainer-compiler
|
class GameStats():
"""跟踪游戏的统计信息"""
def __init__(self, ai_settings):
"""初始化统计信息"""
self.ai_settings = ai_settings
self.reset_stats()
# 游戏刚启动时出于活跃状态
self.game_active = True
# 让游戏一开始处于非活动状态
self.game_active = False
# 在任何情况下都不应重置最高得分
self.high_score = 0
def reset_stats(self):
"""初始化在游戏运行期间可能变化的统计信息"""
self.ships_left = self.ai_settings.ship_limit
self.score = 0
# 当前等级
self.level = 1
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
alien_invasion/game_stats.py
|
turoDog/LearningPython
|
import os
import pathlib
import random
import socket
def create_producer_data_file(name):
no_of_integers_in_a_file = 100;
f = open(os.getcwd() + "/Output/Producer_Socket/" + name + ".txt", "w")
for no_of_lines in range(no_of_integers_in_a_file):
f.write(str(random.randint(1, no_of_integers_in_a_file)))
f.close()
def Main():
host = '127.0.0.1'
port = 51564
so = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
so.bind((host, 8080))
so.connect((host, port))
pathlib.Path(os.getcwd() + "/Output/Producer_Socket").mkdir(parents=True, exist_ok=True)
produced_file_num = 0;
while produced_file_num < 100:
# create a file
create_producer_data_file("produceddatafile_" + str(produced_file_num))
# File user send to a Consumer(Server)
file = open(os.getcwd() + "/Output/Producer_Socket/produceddatafile_" + str(produced_file_num) + ".txt",
"rb")
# Send data
SendData = file.read(4096)
so.send(SendData)
# Message received from Consumer(Server)
note = so.recv(2048)
print('Obtained from the server :', str(note.decode('ascii')))
produced_file_num += 1;
so.close()
if __name__ == '__main__':
Main()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
IPC_PythonScripts/IPC3/Client.py
|
pooja-n1424/CEG4350-6350
|
import random
from pygame import Color
from pygame.image import load
from pygame.math import Vector2
from pygame.mixer import Sound
def load_sprite(name, with_alpha=True):
path = f"assets/sprites/{name}.png"
loaded_sprite = load(path)
if with_alpha:
return loaded_sprite.convert_alpha()
else:
return loaded_sprite.convert()
def wrap_position(position, surface):
x, y = position
w, h = surface.get_size()
return Vector2(x % w, y % h)
def get_random_position(surface):
return Vector2(
random.randrange(surface.get_width()),
random.randrange(surface.get_height()),
)
def get_random_velocity(min_speed, max_speed):
speed = random.randint(min_speed, max_speed)
angle = random.randrange(0, 360)
return Vector2(speed, 0).rotate(angle)
def load_sound(name):
path = f"assets/sounds/{name}.wav"
return Sound(path)
def print_text(surface, text, font, color=Color("tomato")):
text_surface = font.render(text, True, color)
rect = text_surface.get_rect()
rect.center = Vector2(surface.get_size()) / 2
surface.blit(text_surface, rect)
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
space_rocks/utils.py
|
frenesoto/spacerocks
|
import os
import pytest
import asyncio
from jina import __default_host__
from daemon.clients import AsyncJinaDClient
cur_dir = os.path.dirname(os.path.abspath(__file__))
CLOUD_HOST = 'localhost:8000' # consider it as the staged version
success = 0
failure = 0
client = AsyncJinaDClient(host=__default_host__, port=8000)
async def get_alive():
global success, failure
while True:
is_alive = await client.alive
if is_alive:
success += 1
else:
failure += 1
@pytest.mark.asyncio
async def test_nonblocking_server():
workspace_id = await client.workspaces.create(
paths=[os.path.join(cur_dir, 'delayed_flow')]
)
alive_task = asyncio.create_task(get_alive())
create_flow_task = asyncio.create_task(
client.flows.create(workspace_id=workspace_id, filename='delayed_flow.yml')
)
done, pending = await asyncio.wait(
{alive_task, create_flow_task}, return_when=asyncio.FIRST_COMPLETED
)
assert create_flow_task in done
flow_id = create_flow_task.result()
assert alive_task in pending
alive_task.cancel()
await client.flows.delete(flow_id)
await client.workspaces.delete(workspace_id)
assert success > 0, f'#success is {success} (expected >0)'
assert failure == 0, f'#failure is {failure} (expected =0)'
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}
] | 3
|
tests/distributed/test_workspaces/test_nonblocking.py
|
vishalbelsare/jina
|
from django.shortcuts import render
from core.forms.ContatoForm import ContatoForm
from core.components.GerenciadorEmail import Email
def enviarEmailAluno(form):
contexto = {
"aluno":form.cleaned_data['nome'],
"email":form.cleaned_data['email'],
"assunto":form.cleaned_data['assunto'],
"mensagem":form.cleaned_data['mensagem']
}
email = Email("contato@handcode.com", "Faculdade Handcode - {}".format(form.cleaned_data['assunto']))
email.html("emails/contatoAluno.html", contexto)
email.enviar(form.cleaned_data['email'])
def enviarEmailFaculdade(form):
contexto = {
"aluno":form.cleaned_data['nome'],
"email":form.cleaned_data['email'],
"assunto":form.cleaned_data['assunto'],
"mensagem":form.cleaned_data['mensagem']
}
email = Email("contato@handcode.com", "Faculdade Handcode - {}".format(form.cleaned_data['assunto']))
email.html("emails/contatoFaculdade.html", contexto)
email.enviar("handcode@amazestudio.com.br")
def contato (request):
form = None
if request.POST:
form = ContatoForm(request.POST)
if form.is_valid():
enviarEmailAluno(form)
enviarEmailFaculdade(form)
else:
form = ContatoForm()
contexto = {
"form" : form
}
return render(request,"contato/contato.html", contexto)
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
core/views/Contato.py
|
roimpacta/exemplos
|
from __future__ import with_statement
import unittest
import flask
from healthcheck import HealthCheck, EnvironmentDump
class BasicHealthCheckTest(unittest.TestCase):
def setUp(self):
self.path = '/h'
self.app = flask.Flask(__name__)
self.hc = self._hc()
self.client = self.app.test_client()
def _hc(self):
return HealthCheck(self.app, self.path)
def test_basic_check(self):
response = self.client.get(self.path)
self.assertEqual(200, response.status_code)
def test_failing_check(self):
def fail_check():
return False, "FAIL"
self.hc.add_check(fail_check)
response = self.client.get(self.path)
self.assertEqual(500, response.status_code)
class BasicEnvironmentDumpTest(unittest.TestCase):
def setUp(self):
self.path = '/e'
self.app = flask.Flask(__name__)
self.hc = self._hc()
self.client = self.app.test_client()
def _hc(self):
return EnvironmentDump(self.app, self.path)
def test_basic_check(self):
def test_ok():
return "OK"
self.hc.add_section("test_func", test_ok)
response = self.client.get(self.path)
self.assertEqual(200, response.status_code)
jr = flask.json.loads(response.data)
self.assertEqual("OK", jr["test_func"])
class LazyHealthCheckTest(BasicHealthCheckTest):
def setUp(self):
super(LazyHealthCheckTest, self).setUp()
self.hc.init_app(self.app, self.path)
def _hc(self):
return HealthCheck()
class LazyEnvironmentDumpTest(unittest.TestCase):
def setUp(self):
super(LazyEnvironmentDumpTest, self).setUp()
self.hc.init_app(self.app, self.path)
def _hc(self):
return EnvironmentDump()
if __name__ == '__main__':
unittest.main()
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 3,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
}
] | 3
|
tests/test_unit/test_healthcheck.py
|
jab/healthcheck
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from ..utils.compat.odict import OrderedDict
from ..utils.misc import isiterable
__all__ = ['FlagCollection']
class FlagCollection(OrderedDict):
"""
The purpose of this class is to provide a dictionary for
containing arrays of flags for the `NDData` class. Flags should be
stored in Numpy arrays that have the same dimensions as the parent
data, so the `FlagCollection` class adds shape checking to an
ordered dictionary class.
The `FlagCollection` should be initialized like an `OrderedDict`,
but with the addition of a ``shape=`` keyword argument used to
pass the NDData shape.
"""
def __init__(self, *args, **kwargs):
if 'shape' in kwargs:
self.shape = kwargs.pop('shape')
if not isiterable(self.shape):
raise ValueError("FlagCollection shape should be an iterable object")
else:
raise Exception("FlagCollection should be initialized with the shape of the data")
OrderedDict.__init__(self, *args, **kwargs)
def __setitem__(self, item, value, **kwargs):
if isinstance(value, np.ndarray):
if value.shape == self.shape:
OrderedDict.__setitem__(self, item, value, **kwargs)
else:
raise ValueError("flags array shape {0:s} does not match data shape {1:s}".format(value.shape, self.shape))
else:
raise TypeError("flags should be given as a Numpy array")
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
astropy/nddata/flag_collection.py
|
xiaomi1122/astropy
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 30.03.2018 16:35
:Licence MIT
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import *
from grammpy.parsers import cyk
from grammpy.transforms import ContextFree, InverseContextFree
class S(Nonterminal): pass
class My:
def __init__(self, prop):
self.prop = prop
def __hash__(self):
return hash(My)
class R(Rule):
rule = (
[S],
[My, My]
)
class CorrectSimpleTerminalsTest(TestCase):
def setUp(self):
self.g = Grammar(terminals=[My],
nonterminals=[S],
rules=[R],
start_symbol=S)
ContextFree.remove_useless_symbols(self.g, inplace=True)
ContextFree.remove_rules_with_epsilon(self.g, True)
ContextFree.remove_unit_rules(self.g, True)
ContextFree.remove_useless_symbols(self.g, inplace=True)
ContextFree.transform_to_chomsky_normal_form(self.g, True)
def test_sameHashes(self):
self.assertEqual(hash(My), hash(My(1)))
def test_shouldParseClass(self):
parsed = cyk(self.g, [My, My])
def test_shouldParseInstances(self):
parsed = cyk(self.g, [My(1), My(2)])
def test_shouldParseAndUseValues(self):
parsed = cyk(self.g, [My(1), My(2)])
parsed = InverseContextFree.transform_from_chomsky_normal_form(parsed)
parsed = InverseContextFree.unit_rules_restore(parsed)
parsed = InverseContextFree.epsilon_rules_restore(parsed)
self.assertIsInstance(parsed, S)
self.assertIsInstance(parsed.to_rule, R)
left = parsed.to_rule.to_symbols[0]
right = parsed.to_rule.to_symbols[1]
self.assertIsInstance(left, Terminal)
self.assertIsInstance(left.s, My)
self.assertEqual(left.s.prop, 1)
self.assertIsInstance(right, Terminal)
self.assertIsInstance(right.s, My)
self.assertEqual(right.s.prop, 2)
if __name__ == '__main__':
main()
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
tests/parsers_test/CYK/CorrectSimpleTerminalsTest.py
|
PatrikValkovic/grammpy
|
import numpy as np
from .attributes_incrementally import StandardDeviation, LengthOfDiagonal, \
FirstHuMoment, Area
from ..utils.data_types import Pixel
def construct_area_matrix(image: np.ndarray) -> np.ndarray:
matrix = np.ones(image.shape, dtype=Area)
image_width = image.shape[1]
for index, _ in enumerate(image.flatten()):
x = index % image_width
y = int(index / image_width)
matrix[y, x] = Area()
return matrix
def construct_std_dev_matrix(image: np.ndarray) -> np.ndarray:
image_width = image.shape[1]
std_dev_matrix = np.zeros(image.shape, dtype=StandardDeviation)
for index, pixel_value in enumerate(image.flatten()):
x = index % image_width
y = int(index / image_width)
std_dev_matrix[y, x] = StandardDeviation(value=pixel_value)
return std_dev_matrix
def construct_length_of_diagonal_matrix(image: np.ndarray) -> np.ndarray:
width = image.shape[1]
image_size = image.size
matrix = np.zeros(image.shape, dtype=LengthOfDiagonal)
for index in range(0, image_size):
x = index % width
y = int(index / width)
matrix[y, x] = LengthOfDiagonal(x, x, y, y)
return matrix
def construct_first_hu_moment_matrix(image: np.ndarray) -> np.ndarray:
width = image.shape[1]
max_ = float(np.amax(image))
min_ = float(np.amin(image))
matrix = np.zeros(image.shape, dtype=FirstHuMoment)
for index, pixel_value in enumerate(image.flatten()):
x = index % width
y = int(index / width)
norm_pixel_value = (float(pixel_value) - min_) / (max_ - min_)
matrix[y, x] = FirstHuMoment(Pixel(x, y, norm_pixel_value))
return matrix
matrix_constructs = {
'area': construct_area_matrix,
'stddev': construct_std_dev_matrix,
'diagonal': construct_length_of_diagonal_matrix,
'moment': construct_first_hu_moment_matrix
}
def construct_matrix(attribute_name: str, image: np.ndarray) -> np.ndarray:
return matrix_constructs[attribute_name](image)
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
python_research/preprocessing/attribute_profiles/max_tree/attribute_matrix_construction.py
|
myychal/hypernet
|
import sys
from string import String
from matrix import *
from math import *
def display_key(key):
lines = 0
cols = 0
while lines != key.lines:
cols = 0
while cols != key.cols:
print (key.matrice[lines][cols], end = '')
if cols + 1 != key.cols:
print (" ", end = '')
cols = cols + 1
print ()
lines = lines + 1
def display_message_encrypted(matrice):
lines = 0
cols = 0
print ("Encrypted message :")
while lines != matrice.lines:
cols = 0
while cols != matrice.cols:
print (matrice.matrice[lines][cols], end = '')
if lines != matrice.lines and cols != matrice.cols:
print (" ", end = '')
cols = cols + 1
lines = lines + 1
print ()
def display_encrypted(key, matrice):
print ("Key matrix :")
display_key(key)
print ()
display_message_encrypted(matrice);
def encrypted():
key = String(sys.argv[2])
key = Key_matrix(key)
message = String(sys.argv[1])
message = Message_matrix(message, key)
matrice = message.multiplication(key)
display_encrypted(key, matrice)
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
encrypted.py
|
FlorianMarcon/103cipher
|
import cv2
import numpy
from pathlib import Path
from scandir import scandir
def get_folder(path):
output_dir = Path(path)
# output_dir.mkdir(parents=True, exist_ok=True)
return output_dir
def get_image_paths(directory):
return [x.path for x in scandir(directory) if x.name.endswith('.jpg') or x.name.endswith('.jpeg') or x.name.endswith('.png')]
def load_images(image_paths, convert=None):
iter_all_images = (cv2.imread(fn) for fn in image_paths)
if convert:
iter_all_images = (convert(img) for img in iter_all_images)
for i, image in enumerate(iter_all_images):
if i == 0:
all_images = numpy.empty((len(image_paths), ) + image.shape, dtype=image.dtype)
all_images[i] = image
return all_images
|
[
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
}
] | 3
|
todo/[Keras]DeepFakes_FaceSwap/faceswap-master/lib/utils.py
|
tonyhuang84/notebook_dnn
|
import sys, signal
from PyQt5.QtWidgets import QApplication, QWidget, QLabel
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtCore import Qt
class Application(QWidget):
def __init__(self):
super().__init__()
self.setWindowFlags(Qt.CustomizeWindowHint | Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint | Qt.Tool)
self.setGeometry(1366, 694, 253, 350)
self.create_image()
def create_image(self):
label = QLabel(self)
pixmap = QPixmap('/home/neko/Pictures/.Mãe de Deus.png').scaled(253, 350, Qt.KeepAspectRatio)
label.setPixmap(pixmap)
label.mousePressEvent = self.on_click
self.show()
self.pos = 1
def on_click(self, event = None):
if self.pos == 1:
self.setGeometry(3033, 694, 253, 350)
self.pos = 2
elif self.pos == 2:
self.setGeometry(0, 1082, 253, 350)
self.pos = 3
else:
self.setGeometry(1366, 694, 253, 350)
self.pos = 1
signal.signal(signal.SIGINT, signal.SIG_DFL)
root = QApplication(sys.argv)
app = Application()
sys.exit(root.exec_())
|
[
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}
] | 3
|
iconQt.py
|
EGobi/icon-corner
|
import os
import sys
import time
import signal
import logging
import argparse
class BaseApp:
def __init__(self, desc, ver_num):
signal.signal(signal.SIGINT, self.sig_handler)
signal.signal(signal.SIGTERM, self.sig_handler)
self.quit_flag = False
sfile = sys.argv[0]
ver = ("Ver %s, " % ver_num) + time.strftime("%Y/%m/%d %H:%M %Z, loblab",
time.localtime(os.path.getmtime(sfile)))
self.argps = argparse.ArgumentParser(description=desc)
self.argps.add_argument('-V', '--version', action='version', version=ver)
self.argps.add_argument('-D', '--debug', action='store_true',
help="output more logs (debug level)")
self.init_args()
self.args = self.argps.parse_args()
self.init_logger()
self.log.info(desc)
self.log.info(ver)
if self.args.debug:
self.log.debug("Debug: on")
def sig_handler(self, signum, frame):
self.log.info("Got signal %d" % signum)
self.quit_flag = True
def init_args(self):
pass
def init_logger(self):
FORMAT = '%(asctime)s.%(msecs)03d - %(levelname)s - %(message)s'
DATEFMT = '%m/%d %H:%M:%S'
logging.basicConfig(format=FORMAT, datefmt=DATEFMT)
self.log = logging.getLogger()
if self.args.debug:
self.log.setLevel(logging.DEBUG)
else:
self.log.setLevel(logging.INFO)
def startup(self):
pass
def cleanup(self):
pass
def run(self):
return 0
def main(self):
try:
self.startup()
rc = self.run()
except Exception as e:
self.log.exception(str(e))
rc = -1
finally:
self.cleanup()
return rc
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
}
] | 3
|
baseapp.py
|
loblab/lightctrl
|
import pytest
import bionic as bn
def test_pyplot_no_parens(builder):
@builder
@bn.pyplot
def plot(pyplot):
ax = pyplot.subplot()
ax.plot([1, 2, 3], [1, 3, 9])
img = builder.build().get("plot")
assert img.width > 0
assert img.height > 0
def test_pyplot_no_args(builder):
@builder
@bn.pyplot()
def plot(pyplot):
ax = pyplot.subplot()
ax.plot([1, 2, 3], [1, 3, 9])
img = builder.build().get("plot")
assert img.width > 0
assert img.height > 0
def test_pyplot_name_arg(builder):
@builder
@bn.pyplot("plt")
def plot(plt):
ax = plt.subplot()
ax.plot([1, 2, 3], [1, 3, 9])
img = builder.build().get("plot")
assert img.width > 0
assert img.height > 0
def test_pyplot_missing_dep(builder):
with pytest.raises(ValueError):
@builder
@bn.pyplot
def plot(some_arg):
pass
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
}
] | 3
|
tests/test_flow/test_plotting.py
|
IDl0T/bionic
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import logicmonitor_sdk
from logicmonitor_sdk.models.aws_ec2_reserved_instance_collector_attribute import AwsEC2ReservedInstanceCollectorAttribute # noqa: E501
from logicmonitor_sdk.rest import ApiException
class TestAwsEC2ReservedInstanceCollectorAttribute(unittest.TestCase):
"""AwsEC2ReservedInstanceCollectorAttribute unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAwsEC2ReservedInstanceCollectorAttribute(self):
"""Test AwsEC2ReservedInstanceCollectorAttribute"""
# FIXME: construct object with mandatory attributes with example values
# model = logicmonitor_sdk.models.aws_ec2_reserved_instance_collector_attribute.AwsEC2ReservedInstanceCollectorAttribute() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
}
] | 3
|
test/test_aws_ec2_reserved_instance_collector_attribute.py
|
JeremyTangCD/lm-sdk-python
|
""" Step Scheduler
Basic step LR schedule with warmup, noise.
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import torch
from .scheduler import Scheduler
class StepLRScheduler(Scheduler):
"""
"""
def __init__(self,
optimizer: torch.optim.Optimizer,
decay_t: float,
decay_rate: float = 1.,
warmup_t=0,
warmup_lr_init=0,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True,
) -> None:
super().__init__(
optimizer, param_group_field="lr",
noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
initialize=initialize)
self.decay_t = decay_t
self.decay_rate = decay_rate
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.t_in_epochs = t_in_epochs
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t):
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values]
return lrs
def get_epoch_values(self, epoch: int):
if self.t_in_epochs:
return self._get_lr(epoch)
else:
return None
def get_update_values(self, num_updates: int):
if not self.t_in_epochs:
return self._get_lr(num_updates)
else:
return None
|
[
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
timm/scheduler/step_lr.py
|
xuritian317/pytorch-image-models
|
from account import Account
from person import Person
class CurrentAccount(Account):
def __init__(self, nombre, apellido, numeroCuenta, cantidad, tipo = 0.0, tarjetaDebito = False, tarjetaCredito = False, cuota = 0.0):
Account.__init__(self, numeroCuenta, cantidad)
self.__tipoInteres = 1 + float(tipo)
self.__tarjetaDebito = tarjetaDebito
self.__tarjetaCredito = tarjetaCredito
self.__cuotaMantenimiento = cuota
self.__persona = Person(nombre, apellido)
def getTipoInteres(self):
return self.__tipoInteres
def setTipoInteres(self, interes):
self.__tipoInteres = interes
def getTarjetaDebito(self):
return self.__tarjetaDebito
def setTarjetaDebito(self, tajeta):
self.__tarjetaDebito = tarjeta
def getTarjetaCredito(self):
return self.__tarjetaCredito
def setTarjetaCredito(self, tajeta):
self.__tarjetaCredito = tarjeta
def getCuotaMantenimiento(self):
return self.__cuotaMantenimiento
def setCuotaMantenimiento(self, cuota):
self.__cuotaMantenimiento = cuota
def getSaldo(self):
return self._saldo*self.__tipoInteres
def getNombre(self):
return self.__persona.getNombre()
def setNombre(self, nombre):
self.__persona.setNombre(nombre)
def getApellido(self):
return self.__persona.getApellido()
def setApellido(self, apellido):
self.__persona.setApellido(apellido)
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
currentAccount.py
|
thebigyovadiaz/poo_account_py
|
from typing import Callable
def joinThread(treadId: str) -> None:
"""Ожидает завершения указанного потока.
Параметры
---------
treadId: :class:`str`
id потока
"""
raise NotImplementedError
def killThread(treadId: str) -> None:
"""Заканчивает исполнение указанного потока.
Параметры
---------
treadId: :class:`str`
id потока
"""
raise NotImplementedError
def receiveMessage(wait: bool) -> str:
"""Запрашивает принятое сообщение.
Параметры
---------
wait: :class:`bool`
Если `True`, то ожидает, пока не придет сообщение.
"""
raise NotImplementedError
def sendMessage(treadId: str, message: str) -> None:
"""Посылает сообщение указанному потоку.
Параметры
---------
treadId: :class:`str`
id потока
message: :class:`str`
Сообщение
"""
raise NotImplementedError
def startThread(newThreadId: str, functionName: Callable):
"""Запускает переданную в качестве параметра функцию в отдельном потоке.
Параметры
---------
newTreadId: :class:`str`
id нового потока
functionName: Callable
Функция
"""
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 3,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
}
] | 3
|
trik/Treading.py
|
m1raynee/trikset.py-typehint
|
class CyclicDependencyError(ValueError):
pass
def topological_sort_as_sets(dependency_graph):
"""
Variation of Kahn's algorithm (1962) that returns sets.
Take a dependency graph as a dictionary of node => dependencies.
Yield sets of items in topological order, where the first set contains
all nodes without dependencies, and each following set contains all
nodes that may depend on the nodes only in the previously yielded sets.
"""
todo = dependency_graph.copy()
while todo:
current = {node for node, deps in todo.items() if not deps}
if not current:
raise CyclicDependencyError('Cyclic dependency in graph: {}'.format(
', '.join(repr(x) for x in todo.items())))
yield current
# remove current from todo's nodes & dependencies
todo = {node: (dependencies - current) for node, dependencies in
todo.items() if node not in current}
def stable_topological_sort(nodes, dependency_graph):
result = []
for layer in topological_sort_as_sets(dependency_graph):
for node in nodes:
if node in layer:
result.append(node)
return result
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 3,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}
] | 3
|
django/utils/topological_sort.py
|
ni-ning/django
|
import sys
import os
import copy
import json
import datetime
opt = dict()
opt['dataset'] = '../data/citeseer'
opt['hidden_dim'] = 16
opt['input_dropout'] = 0.5
opt['dropout'] = 0
opt['optimizer'] = 'adam'
opt['lr'] = 0.01
opt['decay'] = 5e-4
opt['self_link_weight'] = 1.0
opt['pre_epoch'] = 2000
opt['epoch'] = 100
opt['iter'] = 1
opt['use_gold'] = 1
opt['draw'] = 'smp'
opt['tau'] = 0.0
opt['save'] = 'exp_citeseer'
opt['mixup_alpha'] =1.0
opt['partition_num'] = 0
opt['task_ratio'] = 0
### ict hyperparameters ###
opt['ema_decay'] = 0.999
opt['consistency_type'] = "mse"
opt['consistency_rampup_starts'] = 500
opt['consistency_rampup_ends'] = 1000
opt['mixup_consistency'] = 10.0
def generate_command(opt):
cmd = 'python3 train.py'
for opt, val in opt.items():
cmd += ' --' + opt + ' ' + str(val)
return cmd
def run(opt):
opt_ = copy.deepcopy(opt)
os.system(generate_command(opt_))
os.system('rm record.txt')
os.system('echo -n -> record.txt')
os.system('rm record_val.txt')
os.system('echo -n -> record_val.txt')
partition_num_list = [8,9,10,11,12,13,14,15,16]
task_ratio_list = [0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
for p in partition_num_list:
for t in task_ratio_list:
os.system('rm record.txt')
os.system('echo -n -> record.txt')
opt['partition_num'] = p
opt['task_ratio'] = t
for k in range(10):
seed = k + 1
opt['seed'] = seed
run(opt)
os.system('python result_cal.py')
with open('record_val.txt', 'a') as f:
f.write(str(p) + ' ' + str(t) + '\n')
|
[
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
}
] | 3
|
SS-GMNN-GraphMix/GraphMix-par/run_citeseer_ss.py
|
TAMU-VITA/SS-GCNs
|
"""Parse Warren2020 fluxes.
Fluxes from https://zenodo.org/record/3952926 (DOI:10.5281/zenodo.3952926)
See https://arxiv.org/abs/1902.01340 and https://arxiv.org/abs/1912.03328
for description of the models.
"""
import h5py
from sntools.formats import gamma, get_starttime, get_endtime
flux = {}
def parse_input(input, inflv, starttime, endtime):
"""Read simulations data from input file.
Arguments:
input -- prefix of file containing neutrino fluxes
inflv -- neutrino flavor to consider
starttime -- start time set by user via command line option (or None)
endtime -- end time set by user via command line option (or None)
"""
f = h5py.File(input, 'r')
for (t, r) in f['sim_data']['shock_radius']:
if r > 1:
tbounce = t * 1000 # convert to ms
break
starttime = get_starttime(starttime, 1000 * f['sim_data']['shock_radius'][0][0] - tbounce)
endtime = get_endtime(endtime, 1000 * f['sim_data']['shock_radius'][-1][0] - tbounce)
# Save flux data to dictionary to look up in nu_emission() below
global flux
flux = {}
path = {'e': 'nue_data', 'eb': 'nuae_data', 'x': 'nux_data', 'xb': 'nux_data'}[inflv]
for i, (t, lum) in enumerate(f[path]['lum']):
t = 1000 * t - tbounce # convert to time post-bounce in ms
if (t < starttime - 30) or (t > endtime + 30):
# Ignore data outside of the requested time span.
continue
lum *= 1e51 * 624.151 # convert from 10^51 erg/s to MeV/ms
mean_e = f[path]['avg_energy'][i][1]
mean_e_sq = f[path]['rms_energy'][i][1]**2
flux[t] = (mean_e, mean_e_sq, lum)
f.close()
return (starttime, endtime, sorted(flux.keys()))
def prepare_evt_gen(binned_t):
global flux
gamma.flux = flux
gamma.prepare_evt_gen(binned_t)
flux = gamma.flux
def nu_emission(eNu, time):
gamma.flux = flux
return gamma.nu_emission(eNu, time)
|
[
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
sntools/formats/warren2020.py
|
arfon/sntools
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: liaoxingyu2@jd.com
"""
import math
import random
class RandomErasing(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) >= self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 3,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
}
] | 3
|
data/transforms/transforms.py
|
nodiz/reid-strong-baseline
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_custom_resource_column_definition import V1beta1CustomResourceColumnDefinition
class TestV1beta1CustomResourceColumnDefinition(unittest.TestCase):
""" V1beta1CustomResourceColumnDefinition unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CustomResourceColumnDefinition(self):
"""
Test V1beta1CustomResourceColumnDefinition
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta1_custom_resource_column_definition.V1beta1CustomResourceColumnDefinition()
pass
if __name__ == '__main__':
unittest.main()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
kubernetes/test/test_v1beta1_custom_resource_column_definition.py
|
Scalr/kubernetes-client-python
|
def demo():
"""Output:
---------⌝
----------
----?????-
----------
----------
--!!!-----
--!!!-----
----------
----------
⌞---------
"""
n = 10
# Construction is easy:
grid = {}
# Assignment is easy:
grid[(0, 0)] = "⌞"
grid[(n - 1, n - 1)] = "⌝"
# Helper functions that just work on the dictionary:
fill(grid, "!", start=(2, 3), stop=(5, 5))
fill(grid, "?", start=(4, 7), stop=(9, 8))
print(stringify(grid, n))
def fill(grid: dict, value: str, start=(0, 0), stop=(0, 0)):
"""Using product allows for flatter loops."""
from itertools import product
for coord in product(range(start[0], stop[0]), range(start[1], stop[1])):
grid[coord] = value
def stringify(grid: dict, n: int) -> str:
"""Stringify with (0, 0) in the lower-left corner."""
rows = []
for y in reversed(range(n)):
row = []
for x in range(n):
value = grid.get((x, y), "-")
row.append(value)
rows.append(row)
return "\n".join("".join(row) for row in rows)
if __name__ == "__main__":
demo()
|
[
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}
] | 3
|
examples/grids/python/grid.py
|
ssangervasi/examples
|
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from uf_common.msg import PoseTwistStamped
from neural_control.nn_controller import NN_controller
from geometry_msgs.msg import PoseStamped
def odom_callback(odom_msg):
controller.give_new_state(odom_msg.pose.pose, odom_msg.twist.twist, odom_msg.header.stamp.to_sec())
def reference_callback(ref_msg):
pose_ref_pub.publish(PoseStamped(header=ref_msg.header, pose=ref_msg.posetwist.pose))
controller.give_new_reference(ref_msg.posetwist.pose, ref_msg.posetwist.twist)
controller = NN_controller(dof=3, kp=[1000, 1000, 5600], kd=[1200, 1200, 6000],
kv=2, kw=2, N=10, sig='tanh', nn_limit=[10**10]*3,
wrench_topic='/wrench/autonomous', neuralwrench_topic='/adaptation')
rospy.init_node('controller')
rospy.Subscriber('/odom', Odometry, odom_callback)
rospy.Subscriber('/trajectory', PoseTwistStamped, reference_callback)
pose_ref_pub = rospy.Publisher('/pose_ref', PoseStamped, queue_size=100)
rospy.spin()
|
[
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
gnc/navigator_controller/nodes/run_nn_controller.py
|
saltyan007/kill_test
|
from flask import render_template, request
from flask_script import Manager, Server
from app import app
from model import Content, Summary, Article
import app.static.summ as summarizationModel
import os, json, logging
@app.route('/', endpoint='ACCESS')
@app.route('/index.html', endpoint='ACCESSFILE')
def index():
try:
all_pairs = Article.objects.all()
return render_template('index.html', history=all_pairs)
except Exception as e:
logging.error(e)
raise e
@app.route('/run_decode', methods=['POST'])
def run_decode():
logging.debug('decode your input by our pretrained model')
try:
source = request.get_json()['source'] # GET request with String from frontend directly
logging.debug('input: {}'.format(source)) # GET String-type context from the backend
try:
logging.debug('using the pretrained model.')
sentNums, summary = summarizationModel.decode.run_(source)
except Exception as e:
logging.error(e)
else:
logging.debug('The number of sentences is {}'.format(sentNums))
logging.debug('The abstract is that {}'.format(summary))
results = {'sent_no': sentNums, 'final': summary}
try:
article = Content(text=source)
abstract = Summary(text=summary)
pair = Article(article=article.id, abstract=abstract.id)
article.save()
abstract.save()
pair.save()
except Exception as e:
logging.error(e)
return json.dumps(results)
except:
message = {'message' : 'Fail to catch the data from client.'}
return json.dumps(message)
manager = Manager(app)
manager.add_command('runserver', Server(
use_debugger = True,
use_reloader = True,
host = os.getenv('IP', '0.0.0.0'),
port = int(os.getenv('PORT', 5001))
))
if __name__ == "__main__":
manager.run()
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 3,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
}
] | 3
|
WebDemo/flask_app/main.py
|
silenceliang/Cascading-agents-hybridSum
|
import os
from os.path import dirname as _dir
import logging
def get_logger(name):
return logging.getLogger('conftest.%s' % name)
def pytest_sessionstart(session):
BASE_FORMAT = "[%(name)s][%(levelname)-6s] %(message)s"
FILE_FORMAT = "[%(asctime)s]" + BASE_FORMAT
root_logger = logging.getLogger('conftest')
dir_path = os.path.dirname(os.path.realpath(__file__))
top_level = _dir(_dir(dir_path))
log_file = os.path.join(top_level, 'pytest-functional-tests.log')
print(log_file)
root_logger.setLevel(logging.INFO)
# File Logger
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
fh.setFormatter(logging.Formatter(FILE_FORMAT, "%Y-%m-%d %H:%M:%S"))
root_logger.addHandler(fh)
|
[
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 3,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}
] | 3
|
tests/hooks.py
|
j-mechacorta/atoolbox
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.