prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# -*- coding: utf-8 -*-
import unittest


class TestExample(unittest.TestCase):
    """Skeleton test case demonstrating the unittest fixture hooks."""

    @classmethod
    def setUpClass(cls):
        # Runs once, before any test in this class.
        print("**************************************** setUpClass ****************************************")

    @classmethod
    def tearDownClass(cls):
        # Runs once, after every test in this class has finished.
        print("************************************** tearDownClass ***************************************")

    def setUp(self):
        # Runs before each individual test method.
        print("****** setUp *******")

    def tearDown(self):
        # Runs after each individual test method.
        print("***** tearDown *****")

    def _example(self):
        # NOTE(review): the leading underscore means unittest will NOT
        # collect this as a test; rename to test_example to have it run.
        print("This is a test example.")
#!/usr/bin/env python3
"""
Check of automatic algorithm rollover scenario.
"""
import collections
import os
import shutil
import datetime
import random
import subprocess
from subprocess import check_call
from dnstest.utils import *
from dnstest.keys import Keymgr
from dnstest.test import Test
from dnstest.module import ModOnlineSign
def pregenerate_key(server, zone, alg):
    """Pre-generate a KSK with the given algorithm under a dummy zone name."""

    class _Named:
        """Minimal stand-in exposing only the .name attribute gen_key needs."""

        def __init__(self, name):
            self.name = name

    server.gen_key(_Named("nonexistent.zone."), ksk=True, alg=alg,
                   addtopolicy="blahblah")
# check zone if keys are present and used for signing
# check zone if keys are present and used for signing
def check_zone(server, zone, dnskeys, dnskey_rrsigs, cdnskeys, soa_rrsigs, msg):
    """Query the server and verify the expected record counts.

    Checks DNSKEY, RRSIGs over DNSKEY, CDNSKEY and RRSIGs over SOA; logs
    each count and flags an error (tagged with msg) for any mismatch.
    """
    found_dnskeys = server.dig("example.com", "DNSKEY", bufsize=4096).count("DNSKEY")
    found_rrsigs = server.dig("example.com", "DNSKEY", dnssec=True, bufsize=4096).count("RRSIG")
    found_cdnskeys = server.dig("example.com", "CDNSKEY", bufsize=4096).count("CDNSKEY")
    found_soa_rrsigs = server.dig("example.com", "SOA", dnssec=True, bufsize=4096).count("RRSIG")

    check_log("DNSKEYs: %d (expected %d)" % (found_dnskeys, dnskeys))
    check_log("RRSIGs: %d (expected %d)" % (found_soa_rrsigs, soa_rrsigs))
    check_log("DNSKEY-RRSIGs: %d (expected %d)" % (found_rrsigs, dnskey_rrsigs))
    check_log("CDNSKEYs: %d (expected %d)" % (found_cdnskeys, cdnskeys))

    if found_dnskeys != dnskeys:
        set_err("BAD DNSKEY COUNT: " + msg)
        detail_log("!DNSKEYs not published and activated as expected: " + msg)
    if found_soa_rrsigs != soa_rrsigs:
        set_err("BAD RRSIG COUNT: " + msg)
        detail_log("!RRSIGs not published and activated as expected: " + msg)
    if found_rrsigs != dnskey_rrsigs:
        set_err("BAD DNSKEY RRSIG COUNT: " + msg)
        detail_log("!RRSIGs not published and activated as expected: " + msg)
    if found_cdnskeys != cdnskeys:
        set_err("BAD CDNSKEY COUNT: " + msg)
        detail_log("!CDNSKEYs not published and activated as expected: " + msg)
    detail_log(SEP)
def wait_for_rrsig_count(t, server, rrtype, rrsig_count, timeout):
    """Poll once per second until the answer for rrtype carries exactly
    rrsig_count RRSIGs, or until timeout seconds have elapsed."""
    for _ in range(timeout + 1):
        answer = server.dig("example.com", rrtype, dnssec=True, bufsize=4096)
        if answer.count("RRSIG") == rrsig_count:
            return
        t.sleep(1)
def wait_for_dnskey_count(t, server, dnskey_count, timeout):
    """Poll once per second until the DNSKEY answer carries exactly
    dnskey_count DNSKEY records, or until timeout seconds have elapsed."""
    for _ in range(timeout + 1):
        answer = server.dig("example.com", "DNSKEY", dnssec=True, bufsize=4096)
        if answer.count("DNSKEY") == dnskey_count:
            return
        t.sleep(1)
def wait_for_cds_change(t, server, timeout):
    """Poll once per second until the zone's CDS rdataset differs from the
    value observed on entry, or until timeout seconds have elapsed."""
    initial = str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset())
    for _ in range(timeout + 1):
        current = str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset())
        if current != initial:
            return
        t.sleep(1)
def watch_alg_rollover(t, server, zone, before_keys, after_keys, desc, set_alg, key_len, submission_cb):
    """Drive and verify an algorithm rollover on the online-signed zone.

    before_keys/after_keys are the expected DNSKEY counts before and after
    the rollover; set_alg/key_len configure the new algorithm; submission_cb
    submits the new DS to the parent. Each phase is verified via check_zone.
    """
    # Repaired: dataset-corruption tokens (" | ") removed from the original
    # check_zone argument list and "zone" identifier.
    check_zone(server, zone, before_keys, 1, 1, 1, desc + ": initial keys")

    # Switch the online-signing module to the new algorithm and key size.
    z = server.zones[zone[0].name]
    z.get_module("onlinesign").algorithm = set_alg
    z.get_module("onlinesign").key_size = key_len
    server.gen_confile()
    server.reload()

    wait_for_rrsig_count(t, server, "SOA", 2, 20)
    check_zone(server, zone, before_keys, 1, 1, 2, desc + ": pre active")

    wait_for_dnskey_count(t, server, before_keys + after_keys, 20)
    check_zone(server, zone, before_keys + after_keys, 2, 1, 2, desc + ": both algorithms active")

    # wait for any change in CDS records
    CDS1 = str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset())
    t.sleep(3)
    while CDS1 == str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset()):
        t.sleep(1)
    check_zone(server, zone, before_keys + after_keys, 2, 1, 2, desc + ": new KSK ready")

    submission_cb()
    t.sleep(4)
    check_zone(server, zone, before_keys + after_keys, 2, 1, 2, desc + ": both still active")

    wait_for_dnskey_count(t, server, after_keys, 20)
    check_zone(server, zone, after_keys, 1, 1, 2, desc + ": post active")

    wait_for_rrsig_count(t, server, "SOA", 1, 20)
    check_zone(server, zone, after_keys, 1, 1, 1, desc + ": old alg removed")
def watch_ksk_rollover(t, server, zone, before_keys, after_keys, total_keys, desc, set_ksk_lifetime, submission_cb):
    """Drive and verify a KSK (or CSK) rollover triggered via KSK lifetime.

    before_keys/after_keys are the expected DNSKEY counts before and after
    the rollover; total_keys is the expected count while both generations
    are published. submission_cb submits the new DS to the parent zone.
    """
    check_zone(server, zone, before_keys, 1, 1, 1, desc + ": initial keys")
    z = server.zones[zone[0].name];
    # Temporarily shorten the KSK lifetime so the server starts a rollover.
    orig_ksk_lifetime = z.get_module("onlinesign").ksk_life
    z.get_module("onlinesign").ksk_life = set_ksk_lifetime if set_ksk_lifetime > 0 else orig_ksk_lifetime
    server.gen_confile()
    server.reload()
    wait_for_dnskey_count(t, server, total_keys, 20)
    t.sleep(3)
    check_zone(server, zone, total_keys, 2, 1, 1, desc + ": published new")
    # Restore the original lifetime so only this one rollover happens.
    z.get_module("onlinesign").ksk_life = orig_ksk_lifetime
    server.gen_confile()
    server.reload()
    # A CDS change signals the new KSK is ready for DS submission.
    wait_for_cds_change(t, server, 20)
    expect_zone_rrsigs = (2 if before_keys == 1 and after_keys > 1 else 1) # there is an exception for CSK->KZSK rollover that we have double signatures for the zone. Sorry, we don't care...
    check_zone(server, zone, total_keys, 2, 1, expect_zone_rrsigs, desc + ": new KSK ready")
    submission_cb()
    t.sleep(4)
    if before_keys < 2 or after_keys > 1:
        check_zone(server, zone, total_keys, 2, 1, 1, desc + ": both still active")
    # else skip the test as we have no control on KSK and ZSK retiring asynchronously
    wait_for_dnskey_count(t, server, after_keys, 28)
    check_zone(server, zone, after_keys, 1, 1, 1, desc + ": old key removed")
t = Test(stress=False)

ModOnlineSign.check()

# Parent server hosting the delegation (com.) that receives DS submissions.
parent = t.server("knot")
parent_zone = t.zone("com.", storage=".")
t.link(parent_zone, parent)
parent.dnssec(parent_zone).enable = True

# Child server signing example.com. through the online-sign module.
child = t.server("knot")
child_zone = t.zone("example.com.", storage=".")
t.link(child_zone, child)

def cds_submission():
    """Copy the child's current CDS into the parent zone as its DS record."""
    cds = child.dig(ZONE, "CDS")
    cds_rdata = cds.resp.answer[0].to_rdataset()[0].to_text()
    up = parent.update(parent_zone)
    up.delete(ZONE, "DS")
    up.add(ZONE, 7, "DS", cds_rdata)
    up.send("NOERROR")

child.zonefile_sync = 24 * 60 * 60
# Enable parent-side KSK submission checking and the online-sign module
# with a randomized CDS digest type.
child.dnssec(child_zone).ksk_sbm_check = [ parent ]
child.add_module(child_zone, ModOnlineSign("ECDSAP384SHA384", key_size="384", prop_delay=11, ksc = [ parent ],
                                           ksci = 2, ksk_shared=True, cds_publish="always",
                                           cds_digesttype=random.choice(["sha256", "sha384"])))

# parameters
ZONE = "example.com."

t.start()
child.zone_wait(child_zone)
cds_submission() # pass initially generated key to active state
t.sleep(4) # let the server accept the submission before forced reload

# CSK rollover, then algorithm rollover, each with a pre-generated key.
pregenerate_key(child, child_zone, "ECDSAP384SHA384")
watch_ksk_rollover(t, child, child_zone, 1, 1, 2, "CSK rollover", 22, cds_submission)
pregenerate_key(child, child_zone, "ECDSAP256SHA256")
watch_alg_rollover(t, child, child_zone, 1, 1, "CSK to CSK alg", "ECDSAP256SHA256", 256, cds_submission)

t.end()
|
import numpy as np
import pytest
from numpy.testing import assert_allclose
try:
import scipy
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
import astropy.units as u
from astropy.timeseries.periodograms.lombscargle import LombScargle
from astropy.timeseries.periodograms.lombscargle._statistics import (fap_single, inv_fap_single,
METHODS)
from astropy.timeseries.periodograms.lombscargle.utils import convert_normalization, compute_chi2_ref
METHOD_KWDS = dict(bootstrap={'n_bootstraps': 20, 'random_seed': 42})
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']
def make_data(N=100, period=1, theta=(10, 2, 3), dy=1, rseed=0, units=False):
    """Generate some data for testing.

    Fixed the mutable-default-argument anti-pattern: theta is now a tuple
    (read-only here, so behavior is unchanged and lists still work).

    Parameters
    ----------
    N : int
        Number of samples.
    period : float
        Period of the injected sinusoid.
    theta : sequence of 3 floats
        Offset, sine and cosine amplitudes of the signal.
    dy : float
        Base error scale; actual errors are randomized around it.
    rseed : int
        Seed for the random generator (results are deterministic per seed).
    units : bool
        If True, attach astropy units (days/mags) to the returned arrays.

    Returns
    -------
    t, y, dy, fmax : arrays (or Quantities when units=True)
    """
    rng = np.random.RandomState(rseed)
    t = 5 * period * rng.rand(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    # Heteroscedastic errors in [0.5*dy, 1.5*dy).
    dy = dy * (0.5 + rng.rand(N))
    y += dy * rng.randn(N)
    fmax = 5

    if units:
        return t * u.day, y * u.mag, dy * u.mag, fmax / u.day
    return t, y, dy, fmax
def null_data(N=1000, dy=1, rseed=0, units=False):
    """Generate pure-noise data (no periodic signal) for null-hypothesis tests."""
    rng = np.random.RandomState(rseed)
    # Same draw order as before: times, then errors, then noise values.
    times = 100 * rng.rand(N)
    errors = 0.5 * dy * (1 + rng.rand(N))
    values = errors * rng.randn(N)
    fmax = 40

    if not units:
        return times, values, errors, fmax
    return times * u.day, values * u.mag, errors * u.mag, fmax / u.day
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('units', [False, True])
def test_distribution(normalization, with_errors, units):
    """Check ls.distribution(): the pdf must match the numerical derivative
    of the cdf, and the observed null-data power histogram must follow the
    theoretical pdf."""
    t, y, dy, fmax = null_data(units=units)
    if not with_errors:
        dy = None
    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    z = np.linspace(0, power.max(), 1000)

    # Test that pdf and cdf are consistent
    dz = z[1] - z[0]
    z_mid = z[:-1] + 0.5 * dz
    pdf = ls.distribution(z_mid)
    cdf = ls.distribution(z, cumulative=True)
    # Strip units so the finite-difference quotient below is dimensionless.
    if isinstance(dz, u.Quantity):
        dz = dz.value
    assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8)

    # psd normalization without specified errors produces bad results
    if not (normalization == 'psd' and not with_errors):
        # Test that observed power is distributed according to the theoretical pdf
        hist, bins = np.histogram(power, 30, density=True)
        midpoints = 0.5 * (bins[1:] + bins[:-1])
        pdf = ls.distribution(midpoints)
        assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])
@pytest.mark.parametrize('N', [10, 100, 1000])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_inverse_single(N, normalization):
    """inv_fap_single and fap_single must be exact inverses of each other."""
    fap = np.linspace(0, 1, 11)
    z = inv_fap_single(fap, N, normalization)
    fap_out = fap_single(z, N, normalization)
    assert_allclose(fap, fap_out)
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('units', [False, True])
def test_inverse_bootstrap(normalization, use_errs, units):
    """false_alarm_level and false_alarm_probability should invert each
    other under the bootstrap method, to within bootstrap resolution."""
    t, y, dy, fmax = null_data(units=units)
    if not use_errs:
        dy = None

    fap = np.linspace(0, 1, 11)
    method = 'bootstrap'
    method_kwds = METHOD_KWDS['bootstrap']

    ls = LombScargle(t, y, dy, normalization=normalization)
    z = ls.false_alarm_level(fap, maximum_frequency=fmax,
                             method=method, method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=method_kwds)

    # atol = 1 / n_bootstraps
    assert_allclose(fap, fap_out, atol=0.05)
@pytest.mark.parametrize('method', sorted(set(METHODS) - {'bootstrap'}))
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('N', [10, 100, 1000])
@pytest.mark.parametrize('units', [False, True])
def test_inverses(method, normalization, use_errs, N, units, T=5):
    """Analytic FAP methods: false_alarm_level and false_alarm_probability
    must be exact inverses across normalizations, sizes and units."""
    # baluev/davies require scipy.
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    t, y, dy, fmax = make_data(N, rseed=543, units=units)
    if not use_errs:
        dy = None
    method_kwds = METHOD_KWDS.get(method, None)

    fap = np.logspace(-10, 0, 11)

    ls = LombScargle(t, y, dy, normalization=normalization)
    z = ls.false_alarm_level(fap, maximum_frequency=fmax,
                             method=method,
                             method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=method_kwds)
    assert_allclose(fap, fap_out)
@pytest.mark.parametrize('method', sorted(METHODS))
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('units', [False, True])
def test_false_alarm_smoketest(method, normalization, units):
    """Smoke-test every FAP method: output has the right length, is bounded
    by 1 (davies is exempted below), and decreases monotonically with power."""
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)
    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)

    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    assert len(fap) == len(Z)
    # The davies method is not checked against the <=1 bound here.
    if method != 'davies':
        assert np.all(fap <= 1)
        assert np.all(fap[:-1] >= fap[1:])  # monotonically decreasing
@pytest.mark.parametrize('method', sorted(METHODS))
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('normalization', sorted(set(NORMALIZATIONS) - {'psd'}))
@pytest.mark.parametrize('units', [False, True])
def test_false_alarm_equivalence(method, normalization, use_errs, units):
    """FAPs computed in any non-psd normalization must agree with the FAP
    of the same powers after conversion to the 'standard' normalization.

    (Repaired: dataset-corruption tokens inside the convert_normalization
    call were removed.)
    """
    # Note: the PSD normalization is not equivalent to the others, in that it
    # depends on the absolute errors rather than relative errors. Because the
    # scaling contributes to the distribution, it cannot be converted directly
    # from any of the three normalized versions.
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)
    if not use_errs:
        dy = None
    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)
    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    # Compute the equivalent Z values in the standard normalization
    # and check that the FAP is consistent
    Z_std = convert_normalization(Z, len(t),
                                  from_normalization=normalization,
                                  to_normalization='standard',
                                  chi2_ref=compute_chi2_ref(y, dy))
    ls = LombScargle(t, y, dy, normalization='standard')
    fap_std = ls.false_alarm_probability(Z_std, maximum_frequency=fmax,
                                         method=method, method_kwds=kwds)

    assert_allclose(fap, fap_std, rtol=0.1)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-21 19:00
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Alter Book.slug: enforce uniqueness with Polish verbose/help text.

    (Repaired: dataset-corruption tokens in the generated header and the
    django.db import line were removed.)
    """

    dependencies = [
        ('books', '0007_auto_20170821_2052'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='slug',
            field=models.SlugField(help_text='wykorzystywane w adresie strony', max_length=100, unique=True, verbose_name='identyfikator'),
        ),
    ]
|
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.Results.Phirhoz
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
MCXRay phirhoz result file.
"""
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Subversion informations for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"
# Standard library modules.
# Third party modules.
# Local modules.
# Project modules
# Globals and constants variables.
# Keys used in the internal parameters dictionary.
KEY_SYMBOL = "symbol"
KEY_SHELL = "shell"
KEY_INTENSITY = "Intensity"
KEY_DEPTHS_A = "depths_A"
KEY_VALUES = "values"


class Phirhoz(object):
    """One phirhoz (depth-distribution) result for an element/shell pair.

    Fixes: removed dataset-corruption tokens splitting the parsing loop,
    and narrowed the bare ``except:`` to the two exceptions the parse can
    actually raise (so e.g. KeyboardInterrupt is no longer swallowed).
    """

    def __init__(self, symbol, shell):
        self._parameters = {}
        self.symbol = symbol
        self.shell = shell
        # Label prefix that identifies this element/shell in the result file,
        # e.g. "Cu [Shell K],".
        self._label = "%s [Shell %s]," % (symbol, shell)

    def readFromLines(self, lines):
        """Parse the intensity line and the depth/value pairs that follow.

        Returns the index of the first line not consumed.
        """
        indexLine = 0
        # Scan forward for this element/shell's "... = <intensity>" line.
        for line in lines[indexLine:]:
            indexLine += 1
            if line.startswith(self._label):
                items = line.split('=')
                self.intensity = float(items[-1])
                break

        self.depths_A = []
        self.values = []
        # Read "depth value" pairs until a line fails to parse as two floats.
        for _index in range(len(lines[indexLine:])):
            line = lines[indexLine]
            indexLine += 1
            try:
                items = line.split()
                depth_A = float(items[0])
                value = float(items[1])
                self.depths_A.append(depth_A)
                self.values.append(value)
            except (IndexError, ValueError):
                # Not a data pair: end of this phirhoz section.
                break
        return indexLine

    @property
    def symbol(self):
        """Element symbol, e.g. "Cu"."""
        return self._parameters[KEY_SYMBOL]

    @symbol.setter
    def symbol(self, symbol):
        self._parameters[KEY_SYMBOL] = symbol

    @property
    def shell(self):
        """Atomic shell label, e.g. "K"."""
        return self._parameters[KEY_SHELL]

    @shell.setter
    def shell(self, shell):
        self._parameters[KEY_SHELL] = shell

    @property
    def intensity(self):
        """Integrated intensity parsed from the result file."""
        return self._parameters[KEY_INTENSITY]

    @intensity.setter
    def intensity(self, intensity):
        self._parameters[KEY_INTENSITY] = intensity

    @property
    def depths_A(self):
        """Depth grid in angstroms."""
        return self._parameters[KEY_DEPTHS_A]

    @depths_A.setter
    def depths_A(self, depths_A):
        self._parameters[KEY_DEPTHS_A] = depths_A

    @property
    def values(self):
        """Phirhoz values, one per entry of depths_A."""
        return self._parameters[KEY_VALUES]

    @values.setter
    def values(self, values):
        self._parameters[KEY_VALUES] = values
|
class Telefone(AcoesTelefone):
    """Abstract mixin exposing two phone-number fields through properties."""

    def _get_telefone1(self):
        return self._telefone1

    def _set_telefone1(self, telefone):
        # BUG FIX: previously assigned the undefined name 'telefone1',
        # which raised NameError on any assignment to .telefone1.
        self._telefone1 = telefone

    def _get_telefone2(self):
        return self._telefone2

    def _set_telefone2(self, telefone):
        # BUG FIX: previously assigned the undefined name 'telefone2'.
        self._telefone2 = telefone

    telefone1 = property(_get_telefone1, _set_telefone1)
    telefone2 = property(_get_telefone2, _set_telefone2)

    class Meta:
        abstract = True
class PessoaAbs(models.Model):
    """Abstract person: name, e-mail and CPF, exposed through properties."""

    _nome = models.CharField(verbose_name='Nome', max_length=50)
    _email = models.EmailField(verbose_name='E-Mail')
    _cpf = models.CharField(verbose_name='CPF', max_length=11)

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    # Accessors wrapping the underscore-prefixed model fields.
    def _get_nome(self):
        return self._nome

    def _get_email(self):
        return self._email

    def _get_cpf(self):
        return self._cpf

    def _set_nome(self,nome):
        self._nome = nome

    def _set_email(self,email):
        self._email = email

    def _set_cpf(self,cpf):
        self._cpf = cpf

    nome = property(_get_nome,_set_nome)
    email = property(_get_email,_set_email)
    cpf = property(_get_cpf,_set_cpf)

    class Meta:
        abstract = True
class AcoesTutor(PessoaAbs):
    """Abstract behaviour layer for tutors (animal owners)."""

    def __unicode__(self):
        return u'%s' % (self.nome)

    class Meta:
        verbose_name_plural = "Tutores"
        abstract = True

class Tutor(AcoesTutor):
    """Tutor abstraction; still abstract — TutorEndTel is the concrete model."""

    class Meta:
        abstract = True

#mudar o nome para tutor_detalhe ou tutordetalhe ou tutordetalhes
class TutorEndTel(Tutor, Endereco, Telefone):
    """Concrete tutor combining person data with address (Endereco) and
    phone (Telefone) mixins."""

    def get_absolute_url(self):
        return reverse('tutorendtel_detail', kwargs={'pk': self.pk})
class AnimalAbs(models.Model):
    """Abstract base holding the core animal record fields.

    (Repaired: dataset-corruption tokens inside the _obito verbose_name and
    the get_absolute_url kwargs were removed.)
    """

    _nome = models.CharField(verbose_name='Nome', max_length=50)
    _rg = models.PositiveSmallIntegerField(verbose_name='RG', unique=True, blank = True)
    _especie = models.CharField(verbose_name='Espécie', max_length=50)
    _raca = models.CharField(verbose_name='Raça', max_length=50)
    sexo = models.CharField(verbose_name='Sexo', max_length=15, choices=GENERO_CHOICES)
    _nascimento = models.DateField(verbose_name='Data de Nascimento')
    _obito = models.DateField(verbose_name='Data de Óbito', null = True ,blank = True)
    _idade = models.PositiveSmallIntegerField(verbose_name='Idade')
    tutor = models.ForeignKey(TutorEndTel, on_delete = models.CASCADE, related_name='animais')

    class Meta:
        verbose_name_plural = "Animais"
        abstract = True

    def get_absolute_url(self):
        return reverse('animal_detalhes', kwargs={'pk': self.pk})
class AcoesAnimal(AnimalAbs):
    """Abstract behaviour layer for animals (string representations)."""

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    class Meta:
        abstract = True

class Animal(AcoesAnimal):
    """Concrete animal model; exposes the underscore fields via properties."""

    def get_absolute_url(self):
        return reverse('animal_detail', kwargs={'pk': self.pk})

    # Getters for the underscore-prefixed model fields.
    def _get_nome(self):
        return self._nome

    def _get_rg(self):
        return self._rg

    def _get_especie(self):
        return self._especie

    def _get_raca(self):
        return self._raca

    def _get_nascimento(self):
        return self._nascimento

    def _get_obito(self):
        return self._obito

    def _get_idade(self):
        return self._idade

    # Setters writing the backing fields.
    def _set_nome(self,nome):
        self._nome = nome

    def _set_rg(self,rg):
        self._rg = rg

    def _set_especie(self,especie):
        self._especie = especie

    def _set_raca(self,raca):
        self._raca = raca

    def _set_nascimento(self,nascimento):
        self._nascimento = nascimento

    def _set_obito(self,obito):
        self._obito = obito

    def _set_idade(self,idade):
        self._idade = idade

    nome = property(_get_nome,_set_nome)
    rg = property(_get_rg,_set_rg)
    especie = property(_get_especie,_set_especie)
    raca = property(_get_raca,_set_raca)
    nascimento = property(_get_nascimento,_set_nascimento)
    idade = property(_get_idade,_set_idade)
    obito = property(_get_obito,_set_obito)
# referente a veterinario
class AcoesVeterinario(PessoaAbs):
    """Abstract behaviour layer for veterinarians."""

    class Meta:
        verbose_name_plural = "Veterinarios"
        abstract = True

class Veterinario(AcoesVeterinario):
    """Veterinarian: a person with a CRMV registration number."""

    _crmv = models.CharField(verbose_name='CRMV', max_length=10)

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    def _get_crmv(self):
        return self._crmv

    def _set_crmv(self,crmv):
        self._crmv = crmv

    crmv = property(_get_crmv,_set_crmv)

# referente a tecnico
class AcoesTecnico(PessoaAbs):
    """Abstract behaviour layer for lab technicians."""

    class Meta:
        verbose_name_plural = "Tecnicos"
        abstract = True

class Tecnico(AcoesTecnico):
    """Technician: a person with a CRF registration number."""

    _crf = models.CharField(verbose_name='CRF', max_length=10)

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    def _get_crf(self):
        return self._crf

    def _set_crf(self,crf):
        self._crf = crf

    crf = property(_get_crf,_set_crf)
# classes para servico,consulta e exame
class AtendimentoAbs(models.Model):
    """Base for any attendance (consultation/exam): date, diagnosis, client.

    NOTE(review): unlike the sibling bases, this class declares no
    Meta.abstract — confirm whether a concrete table is intended.
    """

    _data = models.DateField(auto_now_add=True)
    _diagnostico = models.TextField(default = 'Pendente', blank = True, verbose_name='Diagnóstico', max_length=200)
    cliente = models.ForeignKey(TutorEndTel,on_delete=models.CASCADE, related_name='cliente_a_ser_atendido', null = True ,blank = True)

    def _get_data(self):
        return self._data

    def _get_diagnostico(self):
        return self._diagnostico

    def _get_cliente(self):
        return self.cliente

    def _set_diagnostico(self,diagnostico):
        self._diagnostico = diagnostico

    def _set_data(self,data):
        self._data = data

    diagnostico = property(_get_diagnostico,_set_diagnostico)
    data = property(_get_data,_set_data)

class ConsultaAbs (AtendimentoAbs):
    """Abstract consultation: animal, veterinarian and scheduled date."""

    _retorno = models.BooleanField()
    animal = models.ForeignKey(Animal, on_delete=models.CASCADE, related_name='a_ser_consultado')
    veterinario = models.ForeignKey(Veterinario, on_delete=models.CASCADE, related_name='realiza_consulta')
    _data_realizacao = models.DateField(verbose_name='Data Agendada')

    class Meta:
        abstract = True
        verbose_name_plural = "Consultas"

class AcoesConsulta(ConsultaAbs):
    """Abstract behaviour layer for consultations."""

    class Meta:
        abstract = True

class Consulta (AcoesConsulta):
    """Concrete consultation; property access to retorno/data_realizacao."""

    def _get_retorno(self):
        return self._retorno

    def _set_retorno(self,retorno):
        self._retorno = retorno

    def _get_data_realizacao(self):
        return self._data_realizacao

    def _set_data_realizacao(self,data_realizacao):
        self._data_realizacao = data_realizacao

    retorno = property(_get_retorno,_set_retorno)
    data_realizacao = property(_get_data_realizacao,_set_data_realizacao)
#classes referentes a laboratório
class Laboratorio (models.Model):
    """Laboratory where exams are performed (name + location)."""

    _nome = models.CharField(verbose_name='Nome', max_length=50)
    _local = models.CharField(verbose_name='local', max_length=50)

    def get_absolute_url(self):
        return reverse('laboratorio_detail', kwargs={'pk': self.pk})

    def _get_nome(self):
        return self._nome

    def _get_local(self):
        return self._local

    def _set_nome(self,nome):
        self._nome = nome

    def _set_local(self,local):
        # BUG FIX: assigning self.local re-entered this setter through the
        # 'local' property below, causing infinite recursion. Write the
        # backing field instead.
        self._local = local

    nome = property(_get_nome,_set_nome)
    local = property(_get_local,_set_local)

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)
class ExameAbs (AtendimentoAbs):
    """Abstract exam: optional animal/technician/lab plus result fields."""

    animal = models.ForeignKey(Animal,null = True, blank = True,on_delete=models.CASCADE, related_name='mostrado_para_exame')
    veterinario = models.ForeignKey(Veterinario, on_delete=models.CASCADE, related_name='realiza_diagnostico')
    tecnico = models.ForeignKey(Tecnico, on_delete=models.CASCADE, related_name='realiza_exame', blank = True, null = True)
    _resultado = models.TextField(default = 'Pendente', blank = True, verbose_name='Resultado', max_length=200)
    observacoes = models.CharField(blank=True, null=True, verbose_name='Observações', max_length=200)
    numero_amostra = models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Número de amostra')
    # NOTE(review): NullBooleanField is deprecated in modern Django; keep
    # behavior here, but BooleanField(null=True) is the replacement.
    estadoexame = models.NullBooleanField(null = True, blank = True, verbose_name='Estado do Exame')
    laboratorio = models.ForeignKey(Laboratorio, on_delete=models.CASCADE, related_name='exames', blank=True, null=True)

    class Meta:
        abstract = True
        verbose_name_plural = "Exames"
class A |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Create QGISServerLayer as a one-to-one extension of layers.Layer.

    (Repaired: dataset-corruption tokens in the django.db import and the
    base_layer_path field definition were removed.)
    """

    dependencies = [
        ('layers', '24_to_26'),
    ]

    operations = [
        migrations.CreateModel(
            name='QGISServerLayer',
            fields=[
                ('layer', models.OneToOneField(primary_key=True, serialize=False, to='layers.Layer')),
                ('base_layer_path', models.CharField(help_text=b'Location of the base layer.', max_length=100, verbose_name=b'Base Layer Path')),
            ],
        ),
    ]
|
"""
A set of built-in default checks for the platform heartbeat endpoint
Other checks should be included in their respective modules/djangoapps
"""
from datetime import datetime, timedelta
from time import sleep, time
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.db.utils import DatabaseError
from xmodule.modulestore.django import modulestore
from xmodule.exceptions import HeartbeatFailure
from .defaults import HEARTBEAT_CELERY_TIMEOUT
from .tasks import sample_task
# DEFAULT SYSTEM CHECKS
# Modulestore
def check_modulestore():
    """ Check the modulestore connection

    Returns:
        (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode
        string of either "OK" or the failure message
    """
    # This refactoring merely delegates to the default modulestore (which if it's mixed modulestore will
    # delegate to all configured modulestores) and a quick test of sql. A later refactoring may allow
    # any service to register itself as participating in the heartbeat. It's important that all implementation
    # do as little as possible but give a sound determination that they are ready.
    try:
        #@TODO Do we want to parse the output for split and mongo detail and return it?
        modulestore().heartbeat()
        return 'modulestore', True, u'OK'
    except HeartbeatFailure as fail:
        # NOTE(review): Python 2 `unicode` builtin — this module predates py3.
        return 'modulestore', False, unicode(fail)
def check_database():
    """ Check the database connection by attempting a no-op query

    Returns:
        (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode
        string of either "OK" or the failure message
    """
    cursor = connection.cursor()
    try:
        # Cheapest possible round-trip that still proves connectivity.
        cursor.execute("SELECT 1")
        cursor.fetchone()
        return 'sql', True, u'OK'
    except DatabaseError as fail:
        # NOTE(review): Python 2 `unicode` builtin — this module predates py3.
        return 'sql', False, unicode(fail)
# Caching
CACHE_KEY = 'heartbeat-test'
CACHE_VALUE = 'abc123'


def check_cache_set():
    """ Check setting a cache value

    Returns:
        (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode
        string of either "OK" or the failure message
    """
    # (Repaired: dataset-corruption token before the try block removed.)
    try:
        cache.set(CACHE_KEY, CACHE_VALUE, 30)
        return 'cache_set', True, u'OK'
    except Exception as fail:
        # Broad catch is deliberate: any backend error means the check failed.
        return 'cache_set', False, unicode(fail)


def check_cache_get():
    """ Check getting a cache value

    Returns:
        (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode
        string of either "OK" or the failure message
    """
    try:
        data = cache.get(CACHE_KEY)
        # Relies on check_cache_set having stored the sentinel recently.
        if data == CACHE_VALUE:
            return 'cache_get', True, u'OK'
        else:
            return 'cache_get', False, u'value check failed'
    except Exception as fail:
        return 'cache_get', False, unicode(fail)
# Celery
def check_celery():
    """ Check running a simple asynchronous celery task

    Returns:
        (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode
        string of either "OK" or the failure message
    """
    now = time()
    datetimenow = datetime.now()
    # The task expires broker-side at the same deadline the poll loop uses.
    expires = datetimenow + timedelta(seconds=getattr(settings, 'HEARTBEAT_CELERY_TIMEOUT', HEARTBEAT_CELERY_TIMEOUT))

    try:
        task = sample_task.apply_async(expires=expires)
        # Poll every 250ms until the task completes or the deadline passes.
        while expires > datetime.now():
            if task.ready() and task.result:
                finished = str(time() - now)
                return 'celery', True, unicode({'time': finished})
            sleep(0.25)
        return 'celery', False, "expired"
    except Exception as fail:
        return 'celery', False, unicode(fail)
|
import argparse
import requests
import logging
import pip._internal

if __name__ == "__main__":
    # (Repaired: dataset-corruption tokens in the argparse import and the
    # --package help string were removed.)
    parser = argparse.ArgumentParser(description='Get the nth version of a given package')
    parser.add_argument('--package', type=str, required=True, help='The PyPI you want to inspect')
    parser.add_argument('--nth_last_version', type=int, default=1, help='The nth last package will be retrieved')
    parser.add_argument('--prerelease', help='Get PreRelease Package Version', action='store_true')
    parser.add_argument('--debug', help='Print debug information', action='store_true')
    args = parser.parse_args()

    logger = logging.getLogger("PyPI_CLI")
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    if args.debug:
        logger.setLevel(logging.DEBUG)
    logger.debug("Package: %s", args.package)
    logger.debug("nth_last_version: %s", args.nth_last_version)
    logger.debug("prerelease: %s", args.prerelease)
    logger.debug("debug: %s", args.debug)

    # Query the PyPI simple index for every candidate version.
    # NOTE(review): pip._internal is not a stable API; pin pip or vendor this.
    finder = pip._internal.index.PackageFinder([], ['https://pypi.python.org/simple'], session=requests.Session())
    results = finder.find_all_candidates(args.package)
    tmp_versions = [str(p.version) for p in results]
    logger.debug("%s", tmp_versions)

    # De-duplicate while preserving index order (oldest -> newest).
    versions = list()
    for el in tmp_versions:
        if el not in versions:
            versions.append(el)

    # Walk backwards from the newest version to the nth acceptable one.
    pos = -1
    nth_version = 1
    while True:
        fetched_version = versions[pos]
        logger.debug("Version: %s", fetched_version)
        if nth_version == args.nth_last_version:
            # Accept unless it's a pre-release and --prerelease wasn't given.
            # NOTE(review): these substring tests also match "a"/"b" inside
            # ordinary version text; a PEP 440 parse would be stricter.
            if args.prerelease or not ("rc" in fetched_version or "a" in fetched_version or "b" in fetched_version):
                break
            else:
                pos -= 1
                continue
        pos -= 1
        nth_version += 1
    print(fetched_version)
|
# pylint: disable=C0103,R0801
import sqlalchemy
import migrate

# (Repaired: dataset-corruption tokens inside sqlalchemy.Integer and a
# sqlalchemy.Column reference were removed.)
meta = sqlalchemy.MetaData()

# define the previous state of tenants
tenant = {}
tenant['id'] = sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                                 autoincrement=True)
tenant['name'] = sqlalchemy.Column('name', sqlalchemy.String(255), unique=True)
tenant['desc'] = sqlalchemy.Column('desc', sqlalchemy.String(255))
tenant['enabled'] = sqlalchemy.Column('enabled', sqlalchemy.Integer)
tenants = sqlalchemy.Table('tenants', meta, *tenant.values())

# this column will become unique/non-nullable after populating it
tenant_uid = sqlalchemy.Column('uid', sqlalchemy.String(255),
                               unique=False, nullable=True)
def upgrade(migrate_engine):
    """Add the (temporarily nullable, non-unique) uid column to tenants."""
    meta.bind = migrate_engine
    migrate.create_column(tenant_uid, tenants)
    assert tenants.c.uid is tenant_uid

def downgrade(migrate_engine):
    """Remove the uid column from tenants."""
    meta.bind = migrate_engine
    migrate.drop_column(tenant_uid, tenants)
    assert not hasattr(tenants.c, 'uid')
|
from pygame import *
from key_dict import *
''' The player class '''
class Cursor:
    """Player-controlled cursor that moves on the tile map and builds.

    (Repaired: the menu_switch key was corrupted to "'Build | '" by dataset
    separators; update() reads menu_switch['Build'], so the corrupted key
    would raise KeyError. Restored to 'Build'.)
    """

    def __init__(self, x, y, size):
        self.x = int(x)
        self.y = int(y)
        self.size = size          # tile size in pixels
        self.speed = 1            # tiles moved per key press
        self.cooldown = 0         # seconds until input is accepted again
        self.block = 0            # index into menu_block for the selection
        self.menu_switch = {'Build': True}
        # Buildings selectable with the 'block' key, in cycle order.
        self.menu_block = {
            0: 'Wall',
            1: 'Heavy tower',
            2: 'Light tower',
            3: 'Torch',
            4: 'Farm'}

    def check_border(self, level, location):
        """Return True when location lies inside the square map of level."""
        if location < 0 or location >= level.map_size:
            return False
        return True

    def update(self, keys, level, dt):
        """Handle movement/build input; dt is the frame time in seconds."""
        self.cooldown -= 1 * dt
        if self.cooldown < 0:
            self.cooldown = 0
        tile = level.terrain_map[self.x + self.y * level.map_size]
        for key in KEY_DICT:
            if keys[key] and self.cooldown == 0:
                if KEY_DICT[key] == 'left' and self.check_border(level, self.x - self.speed):
                    self.x -= self.speed
                if KEY_DICT[key] == 'right' and self.check_border(level, self.x + self.speed):
                    self.x += self.speed
                if KEY_DICT[key] == 'up' and self.check_border(level, self.y - self.speed):
                    self.y -= self.speed
                if KEY_DICT[key] == 'down' and self.check_border(level, self.y + self.speed):
                    self.y += self.speed
                # Toggles between building / building removal
                #if KEY_DICT[key] == 'switch':
                #    self.menu_switch['Build'] = not self.menu_switch['Build']
                if KEY_DICT[key] == 'block':
                    self.block += 1
                    if self.block >= len(self.menu_block):
                        self.block = 0
                if KEY_DICT[key] == 'action':
                    if self.menu_switch['Build'] and level.gold > 0:
                        if tile.passable:
                            level.create_tile(self.x, self.y, self.menu_block[self.block])
                    elif not self.menu_switch['Build']:
                        if not tile.passable:
                            level.break_tile(self.x, self.y)
                            level.gold += tile.tile_price
                # NOTE(review): source indentation was stripped; the cooldown
                # is applied after any handled key press — confirm it was not
                # meant to apply only to the 'action' branch.
                self.cooldown = 0.2

    def draw(self, screen, xoff, yoff):
        """Draw the cursor outline; xoff/yoff are offsets in tile units."""
        draw.rect(screen, (255, 255, 255),
                  ((self.x + xoff) * self.size, (self.y + yoff) * self.size,
                   self.size, self.size),
                  int(self.size / (self.size / 3)))
|
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Runs tests for the current model and adapter """
from diplomacy_research.models.policy.tests.policy_adapter_test_setup import PolicyAdapterTestSetup
from diplomacy_research.models.policy.token_based import PolicyAdapter, BaseDatasetBuilder
from diplomacy_research.models.policy.token_based.v004_language_model import PolicyModel, load_args
from diplomacy_research.models.value.v001_val_relu_7 import ValueModel, load_args as load_value_args
from diplomacy_research.models.self_play.algorithms.a2c import Algorithm as A2CAlgo, load_args as a2c_args
from diplomacy_research.models.self_play.algorithms.ppo import Algorithm as PPOAlgo, load_args as ppo_args
from diplomacy_research.models.self_play.algorithms.reinforce import Algorithm as ReinforceAlgo,\
load_args as reinforce_args
from diplomacy_research.models.self_play.algorithms.tests.algorithm_test_setup import AlgorithmSetup
from diplomacy_research.utils.process import run_in_separate_process
# ----------- Testable Class --------------
class BaseTestClass(AlgorithmSetup):
    """ Wires the token_based policy into the shared algorithm test harness """
    def __init__(self, algorithm_ctor, algo_load_args):
        """ Constructor """
        AlgorithmSetup.__init__(self, algorithm_ctor, algo_load_args, 'token_based')
    def get_policy_load_args(self):
        """ Returns the policy args """
        return load_args()
    def get_policy_model(self):
        """ Returns the PolicyModel """
        return PolicyModel
    def get_policy_builder(self):
        """ Returns the Policy's BaseDatasetBuilder """
        return BaseDatasetBuilder
    def get_policy_adapter(self):
        """ Returns the PolicyAdapter """
        return PolicyAdapter
# ----------- Launch Scripts --------------
def launch_a2c():
    """ Runs the full algorithm test suite against A2C """
    BaseTestClass(A2CAlgo, a2c_args).run_tests()
def launch_ppo():
    """ Runs the full algorithm test suite against PPO """
    BaseTestClass(PPOAlgo, ppo_args).run_tests()
def launch_reinforce():
    """ Runs the full algorithm test suite against REINFORCE """
    BaseTestClass(ReinforceAlgo, reinforce_args).run_tests()
def launch_adapter():
    """ Runs the policy adapter test suite for the token_based model """
    setup_kwargs = {'policy_model_ctor': PolicyModel,
                    'value_model_ctor': ValueModel,
                    'draw_model_ctor': None,
                    'dataset_builder': BaseDatasetBuilder(),
                    'policy_adapter_ctor': PolicyAdapter,
                    'load_policy_args': load_args,
                    'load_value_args': load_value_args,
                    'load_draw_args': None,
                    'strict': False}
    PolicyAdapterTestSetup(**setup_kwargs).run_tests()
# ----------- Tests --------------
def test_run_a2c():
    """ Runs the a2c test """
    # Separate process with a 4-minute cap; presumably isolates TF state -- TODO confirm
    run_in_separate_process(target=launch_a2c, timeout=240)
def test_run_ppo():
    """ Runs the ppo test """
    # Separate process with a 4-minute cap; presumably isolates TF state -- TODO confirm
    run_in_separate_process(target=launch_ppo, timeout=240)
def test_run_reinforce():
    """ Runs the reinforce test """
    # Separate process with a 4-minute cap; presumably isolates TF state -- TODO confirm
    run_in_separate_process(target=launch_reinforce, timeout=240)
def test_run_adapter():
    """ Runs the adapter test """
    # Separate process with a 4-minute cap; presumably isolates TF state -- TODO confirm
    run_in_separate_process(target=launch_adapter, timeout=240)
|
ment for Certbot.
When this module is run as a script, it takes the arguments that should
be passed to pip to install the Certbot packages as command line
arguments. If no arguments are provided, all Certbot packages and their
development dependencies are installed. The virtual environment will be
created with the name "venv" in the current working directory. You can
change the name of the virtual environment by setting the environment
variable VENV_NAME.
"""
from __future__ import print_function
import glob
import os
import re
import shutil
import subprocess
import sys
import time
# Editable installs of every first-party Certbot package (plus test extras);
# this is the default set installed when no pip args are given on the CLI.
REQUIREMENTS = [
    '-e acme[test]',
    '-e certbot[all]',
    '-e certbot-apache',
    '-e certbot-dns-cloudflare',
    '-e certbot-dns-cloudxns',
    '-e certbot-dns-digitalocean',
    '-e certbot-dns-dnsimple',
    '-e certbot-dns-dnsmadeeasy',
    '-e certbot-dns-gehirn',
    '-e certbot-dns-google',
    '-e certbot-dns-linode',
    '-e certbot-dns-luadns',
    '-e certbot-dns-nsone',
    '-e certbot-dns-ovh',
    '-e certbot-dns-rfc2136',
    '-e certbot-dns-route53',
    '-e certbot-dns-sakuracloud',
    '-e certbot-nginx',
    '-e certbot-compatibility-test',
    '-e certbot-ci',
    '-e letstest',
]
# Windows needs the installer project but cannot use the Apache plugin or its
# compatibility tests, so the requirement set is adjusted there.
if sys.platform == 'win32':
    REQUIREMENTS.append('-e windows-installer')
    REQUIREMENTS.remove('-e certbot-apache')
    REQUIREMENTS.remove('-e certbot-compatibility-test')
VERSION_PATTERN = re.compile(r'^(\d+)\.(\d+).*$')
class PythonExecutableNotFoundError(Exception):
pass
def find_python_executable() -> str:
"""
Find the relevant python executable that is of the given python major version.
Will test, in decreasing priority order:
* the current Python interpreter
* 'pythonX' executable in PATH (with X the given major version) if available
* 'python' executable in PATH if available
* Windows Python launcher 'py' executable in PATH if available
Incompatible python versions for Certbot will be evicted (e.g. Python 3
versions less than 3.6).
:rtype: str
:return: the relevant python executable path
:raise RuntimeError: if no relevant python executable path could be found
"""
python_executable_path = None
# First try, current python executable
if _check_version('{0}.{1}.{2}'.format(
sys.version_info[0], sys.version_info[1], sys.version_info[2])):
return sys.executable
# Second try, with python executables in path
for one_version in ('3', '',):
try:
one_python = 'python{0}'.format(one_version)
output = subprocess.check_output([one_python, '--version'],
universal_newlines=True, stderr=subprocess.STDOUT)
if _check_version(output.strip().split()[1]):
return subprocess.check_output([one_python, '-c',
'import sys; sys.stdout.write(sys.executable);'],
universal_newlines=True)
except (subprocess.CalledProcessError, OSError):
pass
# Last try, with Windows Python launcher
try:
output_version = subprocess.check_output(['py', '-3', '--version'],
universal_newlines=True, stderr=subprocess.STDOUT)
if _check_version(output_version.strip().split()[1]):
return subprocess.check_output(['py', env_arg, '-c',
'import sys; sys.stdout.write(sys.executable);'],
universal_newlines=True)
except (subprocess.CalledProcessError, OSError):
pass
if not python_executable_path:
raise RuntimeError('Error, no compatible Python executable for Certbot could be found.')
def _check_version(version_str):
search = VERSION_PATTERN.search(version_str)
if not search:
return False
version = (int(search.group(1)), int(search.group(2)))
if version >= (3, 6):
return True
print('Incompatible python version for Certbot found: {0}'.format(version_str))
return False
def subprocess_with_print(cmd, env=None, shell=False):
    """Echo *cmd* (list form prefixed with '+') then run it, raising on failure."""
    if env is None:
        env = os.environ
    display = '+ {0}'.format(subprocess.list2cmdline(cmd)) if isinstance(cmd, list) else cmd
    print(display)
    subprocess.check_call(cmd, env=env, shell=shell)
def subprocess_output_with_print(cmd, env=None, shell=False):
    """Echo *cmd* (list form prefixed with '+') then run it, returning stdout."""
    env = os.environ if env is None else env
    print('+ {0}'.format(subprocess.list2cmdline(cmd)) if isinstance(cmd, list) else cmd)
    return subprocess.check_output(cmd, env=env, shell=shell)
def get_venv_python_path(venv_path):
    """Return the absolute path of the python executable inside *venv_path*.

    Checks the POSIX layout first (bin/python), then the Windows layout
    (Scripts\\python.exe); raises ValueError when neither file exists.
    """
    for candidate in (os.path.join(venv_path, 'bin/python'),
                      os.path.join(venv_path, 'Scripts\\python.exe')):
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)
    raise ValueError((
        'Error, could not find python executable in venv path {0}: is it a valid venv ?'
        .format(venv_path)))
def prepare_venv_path(venv_name):
    """Determines the venv path and prepares it for use.

    Cleans up Python egg metadata in the current working directory, honours
    the VENV_NAME environment variable as an override of *venv_name*, and
    moves any existing directory at the target path out of the way by
    renaming it with a timestamp suffix.

    :param str venv_name: The name or path at where the virtual
        environment should be created if VENV_NAME isn't set.
    :returns: path where the virtual environment should be created
    :rtype: str
    """
    # Drop stale setuptools metadata so editable installs start clean.
    for egg_info in glob.glob('*.egg-info'):
        if os.path.isdir(egg_info):
            shutil.rmtree(egg_info)
        else:
            os.remove(egg_info)
    override = os.environ.get('VENV_NAME')
    if override:
        print('Creating venv at {0}'
              ' as specified in VENV_NAME'.format(override))
        venv_name = override
    # Preserve a pre-existing venv instead of deleting it.
    if os.path.isdir(venv_name):
        os.rename(venv_name, '{0}.{1}.bak'.format(venv_name, int(time.time())))
    return venv_name
def install_packages(venv_name, pip_args):
    """Installs packages in the given venv.

    :param str venv_name: The name or path at where the virtual
        environment should be created.
    :param pip_args: Command line arguments that should be given to
        pip to install packages
    :type pip_args: `list` of `str`
    """
    # Run everything through the venv's own interpreter so installs land in
    # the venv rather than the system Python.
    py_venv = get_venv_python_path(venv_name)
    subprocess_with_print([py_venv, os.path.abspath('tools/pipstrap.py')])
    subprocess_with_print([py_venv, os.path.abspath('tools/pip_install.py')] + list(pip_args))
    if os.path.isdir(os.path.join(venv_name, 'bin')):
        # Linux/OSX specific
        print('-------------------------------------------------------------------')
        print('Please run the following command to activate developer environment:')
        print('source {0}/bin/activate'.format(venv_name))
        print('-------------------------------------------------------------------')
    elif os.path.isdir(os.path.join(venv_name, 'Scripts')):
        # Windows specific
        print('---------------------------------------------------------------------------')
        print('Please run one of the following commands to activate developer environment:')
        print('{0}\\Scripts\\activate.bat (for Batch)'.format(venv_name))
        print('.\\{0}\\Scripts\\Activate.ps1 (for Powershell)'.format(venv_name))
        print('---------------------------------------------------------------------------')
    else:
        raise ValueError('Error, directory {0} is not a valid venv.'.format(venv_name))
def create_venv(venv_path):
"""Create a Python virtual environment at venv_path.
:param str venv_path: path where the venv should be created
"""
python = find_pytho |
# The name of the dashboard to be added to HORIZON['dashboards']. Required.
DASHBOARD = 'help_about'
DISABLED = False
# A list of | applications to be added to INSTALLED_APPS.
ADD_INSTALLED_APPS = [
'openstack_dashboard.dashb | oards.help_about',
]
|
from django.conf.urls import url
from core.views.generic import get_dashboard, delete
from users.views.individuals import RegisterView
from users.views.base import LoginView, logout_user
from core.views.dis | play import IndexView
# Route table: auth/registration views come from the users app, the dashboard
# and the generic delete endpoint from core. Commented entries are kept as a
# record of earlier routing (LoginView used to be the index page).
urlpatterns = [#url(r'^$', LoginView.as_view(), name='index'),
               url(r'^$', IndexView.as_view(), name='index'),
               url(r'^login$', LoginView.as_view(), name='login'),
               url(r'^logout$', logout_user, name='logout'),
               url(r'^register$', RegisterView.as_view(), name='register'),
               #url(r'^delete/(?P<content_type_id>\d+)/(?P<object_id>\d+)$', delete, name='delete'),
               url(r'^delete$', delete, name='delete'),
               url(r'^dashboard$', get_dashboard, name='dashboard')]
from requests import HTTPError
from database import Database
import simplejson as json
db = Database.getDatabaseConnection()["cras"]
from log_session import LogSession
import datetime
class DB:
    """Static helpers around the shared Cloudant 'cras' database (Python 2).

    Each document is a user record keyed by user id, holding the FCM token,
    supervision links and per-supervisor log sessions.
    """
    def __init__(self):
        pass
    @staticmethod
    def add_user(user_id, user_name, mail,picture,fcm_token):
        """Create and return the new user document; duplicate creation is
        caught (HTTPError) and only logged, the dict is returned regardless."""
        print ("DEBUG: adding user with data: " + user_id + " "+ user_name + " " + mail + " " + fcm_token)
        data = {
            "_id": user_id,
            "fcm_token" : fcm_token,
            "name": user_name,
            "mail": mail,
            "picture": picture,
            "supervise": [],
            "supervised_by" : [],
            "currently_monitoring" : [],
            "currently_monitored_by": "",
            "log_sessions": {}
        }
        try:
            db.create_document(data)
        except HTTPError:
            print "CloudantException: user already exists"
        return data
    @staticmethod
    def get_user_by_ID(user_ID):
        """Return the user document, or None when it does not exist.

        NOTE(review): name clashes with the module-level get_user_by_id(),
        which raises on a missing user instead of returning None.
        """
        try:
            return db[user_ID]
        except Exception:
            print "DB exception : User does not exists"
            return None
    @staticmethod
    def add_supervisor(user_id, other_id):
        """Link *other_id* as a supervisor of *user_id* (both sides saved)."""
        # Uses the module-level get_user_by_id() defined further down the file.
        user = get_user_by_id(user_id)
        other_user = get_user_by_id(other_id)
        user["supervised_by"].append(other_id)
        other_user["supervise"].append(user_id)
        user.save()
        other_user.save()
    @staticmethod
    def get_user_supervise(user_id):
        """Return (JSON string) the users supervised by *user_id*, each copy
        annotated with 'status': True when currently being monitored."""
        currently_monitoring = db[user_id]["currently_monitoring"]
        user_arr = []
        for id in db[user_id]["supervise"]:
            current = False
            if id in currently_monitoring:
                current = True
            # Copy so the 'status' annotation never touches the stored document.
            user = get_user_by_id(id).copy()
            user.update({"status":current})
            user_arr.append(user)
        return json.dumps(user_arr)
    @staticmethod
    def get_user_supervised_by(user_id):
        """Return (JSON string) the documents of *user_id*'s supervisors."""
        user_arr = []
        for id in db[user_id]["supervised_by"]:
            user_arr.append(get_user_by_id(id))
        return json.dumps(user_arr)
    @staticmethod
    def get_user_name(id):
        """Return the stored display name for *id*."""
        return db[id]["name"]
    @staticmethod
    def update_monitoring_status(user_id, sup_id, monitoring,is_sup):
        """Start (monitoring=True) or stop a monitoring relationship and
        stamp an 'end_time' on the open log session when stopping."""
        user = db[user_id]
        sup = db[sup_id]
        if monitoring:
            user["currently_monitored_by"] = sup_id
            sup["currently_monitoring"].append(user_id)
        else:
            if is_sup:
                if sup_id in user["log_sessions"]:
                    num_of_logs = len(user["log_sessions"][sup_id])
                    user["log_sessions"][sup_id][num_of_logs-1].update({"end_time": str(datetime.datetime.now())})
                    user.save()
                # NOTE(review): this branch removes from user["currently_monitoring"]
                # while the start branch appended to sup's list -- looks like the
                # roles may be swapped here; confirm against callers.
                user["currently_monitoring"].remove(sup_id)
                sup["currently_monitored_by"] = ""
            else:
                if user_id in sup["log_sessions"]:
                    num_of_logs = len(sup["log_sessions"][user_id])
                    sup["log_sessions"][user_id][num_of_logs - 1].update({"end_time": str(datetime.datetime.now())})
                    sup.save()
                sup["currently_monitoring"].remove(user_id)
                user["currently_monitored_by"] = ""
        # Both documents are saved again unconditionally at the end.
        user.save()
        sup.save()
    @staticmethod
    def add_log_session(user_id,to_monitor_id):
        """Append a fresh LogSession for *to_monitor_id* under *user_id*."""
        user = db[user_id]
        if to_monitor_id not in user["log_sessions"]:
            user["log_sessions"].update({to_monitor_id: []})
        user["log_sessions"][to_monitor_id].append(json.loads(LogSession(datetime.datetime.now(), to_monitor_id).toJSON()))
        try:
            user.save()
        except Exception,e:
            print e
    @staticmethod
    def get_currently_monitored_by(user_id):
        """Return the id of the supervisor currently monitoring *user_id* ('' if none)."""
        return db[user_id]["currently_monitored_by"]
    @staticmethod
    def get_logs(user_id,sup_id):
        """Return the list of log sessions *user_id* holds for *sup_id*, or None."""
        log_sessions = db[user_id]["log_sessions"]
        if sup_id in log_sessions:
            return log_sessions[sup_id]
        else:
            print "There are no logs available for: " + sup_id
            return None
    @staticmethod
    def add_log_event(user_id, sup_id,event):
        """Append *event* to the most recent log session for *sup_id*."""
        user = get_user_by_id(user_id)
        log_sessions = db[user_id]["log_sessions"]
        if sup_id in log_sessions:
            try:
                # Read-modify-write of the last session in the list.
                last_session = db[user_id]["log_sessions"][sup_id][len(db[user_id]["log_sessions"][sup_id]) - 1]
                last_session["events"].append(event)
                db[user_id]["log_sessions"][sup_id][len(db[user_id]["log_sessions"][sup_id]) - 1] = last_session
                user.save()
            except Exception,e:
                print e
def get_user_by_id(user_id):
    """Fetch the raw user document for *user_id* from the cras database."""
    return db[user_id]
def db_exists(user_id):
    """Return the user's JSON document when *user_id* exists, else False.

    NOTE(review): despite the boolean-sounding name, the success path returns
    the document via .json(), not True -- callers appear to rely on truthiness.
    """
    try:
        user = db[user_id]
    except Exception,e:
        return False
    print "DEBUG: the name is : " + user["name"]
    return user.json()
def get_fcm_token(user_id):
    """Return the stored Firebase Cloud Messaging token for *user_id*."""
    return db[user_id]["fcm_token"]
|
. However,
# where sphinx-quickstart hardcodes values in this file that you input, this
# file has been changed to pull from your module's metadata module.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# Import project metadata
from ecs import metadata
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# show todos
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = metadata.project
copyright = metadata.copyright
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = metadata.version
# The full version, including alpha/beta/rc tags.
release = metadata.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = metadata.project_no_spaces + 'doc'
# -- Options for LaTeX output -------------------------------------------------
# Overrides for Sphinx's LaTeX builder; every key is optional and the
# commented entries below show the available knobs with their defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', metadata.project_no_spaces + '.tex',
metadata.project + ' Documentation', metadata.authors_string,
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are par | ts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = | []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', metadata.package, metadata.project + ' Documentation',
metadata.authors_string, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', metadata.project_no_spaces,
metadata.project + ' Documentation', metadata.authors_string,
metadata.project_no_spaces, metadata.description, 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.version import version
class GoogleCloudStorageCreateBucketOperator(BaseOperator):
    """
    Creates a new bucket in Google Cloud Storage. Bucket names live in a
    single flat namespace, so creation fails when the name is already taken.

    .. seealso::
        For more information, see Bucket Naming Guidelines:
        https://cloud.google.com/storage/docs/bucketnaming.html#requirements

    :param bucket_name: The name of the bucket. (templated)
    :type bucket_name: str
    :param resource: An optional dict with parameters for creating the bucket.
        For information on available parameters, see Cloud Storage API doc:
        https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
    :type resource: dict
    :param storage_class: This defines how objects in the bucket are stored
        and determines the SLA and the cost of storage (templated). Values
        include ``MULTI_REGIONAL``, ``REGIONAL``, ``STANDARD``, ``NEARLINE``
        and ``COLDLINE``. If this value is not specified when the bucket is
        created, it will default to STANDARD.
    :type storage_class: str
    :param location: The location of the bucket. (templated)
        Object data for objects in the bucket resides in physical storage
        within this region. Defaults to US.
        .. seealso:: https://developers.google.com/storage/docs/bucket-locations
    :type location: str
    :param project_id: The ID of the GCP Project. (templated)
    :type project_id: str
    :param labels: User-provided labels, in key/value pairs.
    :type labels: dict
    :param google_cloud_storage_conn_id: The connection ID to use when
        connecting to Google cloud storage.
    :type google_cloud_storage_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work,
        the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    """
    template_fields = ('bucket_name', 'storage_class',
                       'location', 'project_id')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 bucket_name,
                 resource=None,
                 storage_class='MULTI_REGIONAL',
                 location='US',
                 project_id=None,
                 labels=None,
                 google_cloud_storage_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(GoogleCloudStorageCreateBucketOperator, self).__init__(*args, **kwargs)
        self.bucket_name = bucket_name
        self.resource = resource
        self.storage_class = storage_class
        self.location = location
        self.project_id = project_id
        self.labels = labels
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to

    def execute(self, context):
        # Tag the bucket with the airflow version; '.'/'+' are replaced since
        # they are not valid GCS label characters.
        if self.labels is not None:
            sanitized = 'v' + version.replace('.', '-').replace('+', '-')
            self.labels.update({'airflow-version': sanitized})
        gcs_hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
            delegate_to=self.delegate_to
        )
        gcs_hook.create_bucket(bucket_name=self.bucket_name,
                               resource=self.resource,
                               storage_class=self.storage_class,
                               location=self.location,
                               project_id=self.project_id,
                               labels=self.labels)
|
from rest_framework import serializers
from emotion_annotator.models import F | rameEmotions
class FrameEmotionsSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes FrameEmotions rows (video, frame time, emotion type) for the REST API."""
    class Meta:
        model = FrameEmotions
        fields = ('video', 'frameTime', 'emotionType')
|
import unittest
import copy
import gc
import rpy2.rinterface as rinterface
rinterface.initr()
class SexpTestCase(unittest.TestCase):
    """Tests for the basic rpy2 Sexp wrapper: construction, typeof, slots,
    R-level identity, the low-level __sexp__ capsule and deep copies.

    Modernized: the deprecated ``assertEquals`` aliases (removed in
    Python 3.12) are replaced by ``assertEqual``; behavior is unchanged.
    """
    def testNew_invalid(self):
        # Sexp can only wrap an existing R object, not arbitrary Python ones.
        x = "a"
        self.assertRaises(ValueError, rinterface.Sexp, x)
    def testNew(self):
        sexp = rinterface.baseenv.get("letters")
        sexp_new = rinterface.Sexp(sexp)
        idem = rinterface.baseenv.get("identical")
        self.assertTrue(idem(sexp, sexp_new)[0])
        sexp_new2 = rinterface.Sexp(sexp)
        self.assertTrue(idem(sexp, sexp_new2)[0])
        del sexp
        # The wrappers must keep the underlying R object alive.
        self.assertTrue(idem(sexp_new, sexp_new2)[0])
    def testTypeof_get(self):
        sexp = rinterface.baseenv.get("letters")
        self.assertEqual(sexp.typeof, rinterface.STRSXP)
        sexp = rinterface.baseenv.get("pi")
        self.assertEqual(sexp.typeof, rinterface.REALSXP)
        sexp = rinterface.baseenv.get("plot")
        self.assertEqual(sexp.typeof, rinterface.CLOSXP)
    def testDo_slot(self):
        data_func = rinterface.baseenv.get("data")
        data_func(rinterface.SexpVector(["iris", ], rinterface.STRSXP))
        sexp = rinterface.globalenv.get("iris")
        names = sexp.do_slot("names")
        iris_names = ("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species")
        self.assertEqual(len(iris_names), len(names))
        for i, n in enumerate(iris_names):
            self.assertEqual(iris_names[i], names[i])
        # Unknown slot names must raise.
        self.assertRaises(LookupError, sexp.do_slot, "foo")
    def testDo_slot_assign(self):
        data_func = rinterface.baseenv.get("data")
        data_func(rinterface.SexpVector(["iris", ], rinterface.STRSXP))
        sexp = rinterface.globalenv.get("iris")
        iris_names = rinterface.StrSexpVector(['a', 'b', 'c', 'd', 'e'])
        sexp.do_slot_assign("names", iris_names)
        names = [x for x in sexp.do_slot("names")]
        self.assertEqual(['a', 'b', 'c', 'd', 'e'], names)
    def testDo_slot_assign_create(self):
        # test that assigning slots is also creating the slot
        x = rinterface.IntSexpVector([1, 2, 3])
        x.do_slot_assign("foo", rinterface.StrSexpVector(["bar", ]))
        slot = x.do_slot("foo")
        self.assertEqual(1, len(slot))
        self.assertEqual("bar", slot[0])
    def testSexp_rsame_true(self):
        sexp_a = rinterface.baseenv.get("letters")
        sexp_b = rinterface.baseenv.get("letters")
        self.assertTrue(sexp_a.rsame(sexp_b))
    def testSexp_rsame_false(self):
        sexp_a = rinterface.baseenv.get("letters")
        sexp_b = rinterface.baseenv.get("pi")
        self.assertFalse(sexp_a.rsame(sexp_b))
    def testSexp_rsame_wrongType(self):
        sexp_a = rinterface.baseenv.get("letters")
        self.assertRaises(ValueError, sexp_a.rsame, 'foo')
    def testSexp_sexp(self):
        # __sexp__ exposes the underlying R object as an opaque capsule that
        # can be re-assigned to another vector of the same R type.
        sexp = rinterface.IntSexpVector([1, 2, 3])
        cobj = sexp.__sexp__
        sexp = rinterface.IntSexpVector([4, 5, 6, 7])
        self.assertEqual(4, len(sexp))
        sexp.__sexp__ = cobj
        self.assertEqual(3, len(sexp))
    def testSexp_sexp_wrongtypeof(self):
        sexp = rinterface.IntSexpVector([1, 2, 3])
        cobj = sexp.__sexp__
        sexp = rinterface.StrSexpVector(['a', 'b'])
        self.assertEqual(2, len(sexp))
        # Assigning a capsule of a different R type must be rejected.
        self.assertRaises(ValueError, sexp.__setattr__, '__sexp__', cobj)
    def testSexp_sexp_destroyCobj(self):
        sexp = rinterface.IntSexpVector([1, 2, 3])
        cobj = sexp.__sexp__
        del cobj
        gc.collect()
        # no real test, just make sure that it does
        # not cause a segfault
    def testSexp_deepcopy(self):
        sexp = rinterface.IntSexpVector([1, 2, 3])
        self.assertEqual(0, sexp.named)
        rinterface.baseenv.get("identity")(sexp)
        self.assertEqual(2, sexp.named)
        sexp2 = sexp.__deepcopy__()
        self.assertEqual(sexp.typeof, sexp2.typeof)
        self.assertEqual(list(sexp), list(sexp2))
        self.assertFalse(sexp.rsame(sexp2))
        self.assertEqual(0, sexp2.named)
        # should be the same as above, but just in case:
        sexp3 = copy.deepcopy(sexp)
        self.assertEqual(sexp.typeof, sexp3.typeof)
        self.assertEqual(list(sexp), list(sexp3))
        self.assertFalse(sexp.rsame(sexp3))
        self.assertEqual(0, sexp3.named)
def suite():
    """Build and return the test suite containing all SexpTestCase tests."""
    return unittest.TestLoader().loadTestsFromTestCase(SexpTestCase)
if __name__ == '__main__':
    # verbosity=2 lists each test name as it runs.
    tr = unittest.TextTestRunner(verbosity = 2)
    tr.run(suite())
|
from chainer import function
class Flatten(function.Function):
    """Function flattening its input array into one dimension."""

    def forward(self, inputs):
        # nothing is needed for backward, so retain no inputs
        self.retain_inputs(())
        x, = inputs
        self._in_shape = x.shape
        return x.ravel(),

    def backward(self, inputs, grads):
        gy, = grads
        # restore the recorded input shape to undo the flattening
        return gy.reshape(self._in_shape),
def flatten(x):
    """Flatten a given array into one dimension.

    Args:
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable.

    Returns:
        ~chainer.Variable: Output variable flattened to one dimension.

    .. note::
        When you input a scalar array (i.e. the shape is ``()``),
        you can also get the one dimension array whose shape is ``(1,)``.

    .. admonition:: Example

        >>> x = np.array([[1, 2], [3, 4]])
        >>> x.shape
        (2, 2)
        >>> y = F.flatten(x)
        >>> y.shape
        (4,)
        >>> y.data
        array([1, 2, 3, 4])
        >>> x = np.arange(8).reshape(2, 2, 2)
        >>> x.shape
        (2, 2, 2)
        >>> y = F.flatten(x)
        >>> y.shape
        (8,)
        >>> y.data
        array([0, 1, 2, 3, 4, 5, 6, 7])

    """
    return Flatten()(x)
|
ERPOLATION_MODEL = "SpectraInterpolation"
KEY_VOXEL_SIMPLIFICATION = "VoxelSimplification"
KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR = "ElasticCrossSectionScalingFactor"
KEY_ENERGY_LOSS_SCALING_FACTOR = "EnergyLossScalingFactor"
class SimulationParameters(object):
    def __init__(self):
        """Initialize with the current file-format version and default parameter values."""
        # deep copy so later mutations of self.version don't touch the module constant
        self.version = copy.deepcopy(Version.CURRENT_VERSION)
        self._keys = self._createKeys()
        self._parameters = {}
        self.defaultValues()
def _createKeys(self):
keys = []
keys.append(KEY_BASE_FILENAME)
keys.append(KEY_NUMBER_ELECTRONS)
keys.append(KEY_NUMBER_PHOTONS)
keys.append(KEY_NUMBER_WINDOWS)
keys.append(KEY_NUMBER_FILMS_X)
keys.append(KEY_NUMBER_FILMS_Y)
keys.append(KEY_NUMBER_FILMS_Z)
if self.version == Version.BEFORE_VERSION:
keys.append(KEY_NUMBER_CHANNELS)
else:
keys.append(KEY_ENERGY_CHANNEL_WIDTH)
keys.append(KEY_SPECTRA_INTERPOLATION_MODEL)
keys.append(KEY_VOXEL_SIMPLIFICATION)
if self.version >= Version.VERSION_1_4_4:
keys.append(KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR)
keys.append(KEY_ENERGY_LOSS_SCALING_FACTOR)
return keys
def defaultValues(self):
baseFilenameRef = r"Results\McXRay"
self.baseFilename = baseFilenameRef
self.numberElectrons = 1000
self.numberPhotons = 10000
self.numberWindows = 64
self.numberFilmsX = 128
self.numberFilmsY = 128
self.numberFilmsZ = 128
self.numberChannels = 1024
self.energyChannelWidth_eV = 5.0
self.spectrumInterpolationModel = MCXRayModel.SpectrumInterpolationModel.TYPE_LINEAR_DOUBLE
self.voxelSimplification = None
self.elasticCrossSectionScalingFactor = 1.0
self.energyLossScalingFactor = 1.0
def _createExtractMethod(self):
extractMethods = {}
extractMethods[KEY_BASE_FILENAME] = str
extractMethods[KEY_NUMBER_ELECTRONS] = int
extractMethods[KEY_NUMBER_PHOTONS] = int
extractMethods[KEY_NUMBER_WINDOWS] = int
extractMethods[KEY_NUMBER_FILMS_X] = int
extractMethods[KEY_NUMBER_FILMS_Y] = int
extractMethods[KEY_NUMBER_FILMS_Z] = int
extractMethods[KEY_NUMBER_CHANNELS] = int
extractMethods[KEY_ENERGY_CHANNEL_WIDTH] = float
extractMethods[KEY_SPECTRA_INTERPOLATION_MODEL] = self._extractSpectrumInterpolationModel
extractMethods[KEY_VOXEL_SIMPLIFICATION] = bool
extractMethods[KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR] = float
extractMethods[KEY_ENERGY_LOSS_SCALING_FACTOR] = float
return extractMethods
def _createFormatMethod(self):
fromatMethods = {}
fromatMethods[KEY_BASE_FILENAME] = "%s"
fromatMethods[KEY_NUMBER_ELECTRONS] = "%i"
fromatMethods[KEY_NUMBER_PHOTONS] = "%i"
fromatMethods[KEY_NUMBER_WINDOWS] = "%i"
fromatMethods[KEY_NUMBER_FILMS_X] = "%i"
fromatMethods[KEY_NUMBER_FILMS_Y] = "%i"
fromatMethods[KEY_NUMBER_FILMS_Z] = "%i"
fromatMethods[KEY_NUMBER_CHANNELS] = "%i"
fromatMethods[KEY_ENERGY_CHANNEL_WIDTH] = "%s"
fromatMethods[KEY_SPECTRA_INTERPOLATION_MODEL] = "%s"
fromatMethods[KEY_VOXEL_SIMPLIFICATION] = "%s"
fromatMethods[KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR] = "%.5f"
fromatMethods[KEY_ENERGY_LOSS_SCALING_FACTOR] = "%.5f"
return fromatMethods
    def _extractSpectrumInterpolationModel(self, text):
        """Parse *text* as an integer model id and wrap it in a SpectrumInterpolationModel."""
        model = MCXRayModel.SpectrumInterpolationModel(int(text))
        return model
def read(self, filepath):
self.version.readFromFile(filepath)
lines = open(filepath, 'r').re | adlines()
extractMethods = self._createExtractMethod()
for line in lines:
line = line.strip()
for key in self._keys:
if line.startswith(key):
items = line.split('=')
self._parameters[key] = extractMethods[key](items[-1])
def write(self, filepath):
outputFile = open(filepath, 'w')
self._writeHeader(outputFile)
self.version.writeLine(outputFile)
| formatMethods = self._createFormatMethod()
keys = self._createKeys()
for key in keys:
if key == KEY_SPECTRA_INTERPOLATION_MODEL:
value = formatMethods[key] % (self._parameters[key].getModel())
else:
value = formatMethods[key] % (self._parameters[key])
if value is not None and value != "None":
line = "%s=%s\n" % (key, value)
outputFile.write(line)
def _writeHeader(self, outputFile):
if self._parameters[KEY_VOXEL_SIMPLIFICATION] is not None:
headerLines = [ "********************************************************************************",
"*** SIMULATION PARAMETERS",
"***",
"*** BaseFileName = All output files will be named using this term",
"*** ElectronNbr = Total number of electrons to simulate",
"*** PhotonNbr = Total number of photons to simulate in EDS",
"*** WindowNbr = Number of energy windows in PhiRo computations",
"*** FilmNbrX = Number of X layers in PhiRo computations",
"*** FilmNbrY = Number of Y layers in PhiRo computations",
"*** FilmNbrZ = Number of Z layers in PhiRo computations",
"*** SpectraChannel = Number of channels in spectraa",
"*** SpectraInterpolation = Interpolation type for spectras",
"*** VoxelSimplification = Use only middle voxel of trajectories to store energy",
"***",
"********************************************************************************"]
elif self.version == Version.BEFORE_VERSION:
headerLines = [ "********************************************************************************",
"*** SIMULATION PARAMETERS",
"***",
"*** BaseFileName = All output files will be named using this term",
"*** ElectronNbr = Total number of electrons to simulate",
"*** PhotonNbr = Total number of photons to simulate in EDS",
"*** WindowNbr = Number of energy windows in PhiRo computations",
"*** FilmNbrX = Number of X layers in PhiRo computations",
"*** FilmNbrY = Number of Y layers in PhiRo computations",
"*** FilmNbrZ = Number of Z layers in PhiRo computations",
"*** SpectraChannel = Number of channels in spectraa",
"*** SpectraInterpolation = Interpolation type for spectras",
"***",
"********************************************************************************"]
elif self.version >= Version.VERSION_1_4_4:
headerLines = [ "********************************************************************************",
"*** SIMULATION PARAMETERS",
"***",
"*** BaseFileName = All output files will be named using this term",
"*** ElectronNbr = Total number |
"""
Unit tests for email feature flag in new instructor dashboard.
Additionally tests that bulk email is always disabled for
non-Mongo backed courses, regardless of email feature flag, and
that the view is conditionally available when Course Auth is turned on.
"""
from __future__ import absolute_import
from django.urls import reverse
from opaque_keys.edx.keys import CourseKey
from six import text_type
from bulk_email.models import BulkEmailFlag, CourseAuthorization
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestNewInstructorDashboardEmailViewMongoBacked(SharedModuleStoreTestCase):
    """
    Check for email view on the new instructor dashboard
    for Mongo-backed courses
    """
    @classmethod
    def setUpClass(cls):
        # Shared course plus precomputed dashboard URL and email-tab markup.
        super(TestNewInstructorDashboardEmailViewMongoBacked, cls).setUpClass()
        cls.course = CourseFactory.create()
        # URL for instructor dash
        cls.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(cls.course.id)})
        # URL for email view
        cls.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>'

    def setUp(self):
        """Log in as a freshly-created staff user."""
        super(TestNewInstructorDashboardEmailViewMongoBacked, self).setUp()
        # Create instructor account
        instructor = AdminFactory.create()
        self.client.login(username=instructor.username, password="test")

    def tearDown(self):
        """Drop all flag rows so tests stay independent."""
        super(TestNewInstructorDashboardEmailViewMongoBacked, self).tearDown()
        BulkEmailFlag.objects.all().delete()

    # In order for bulk email to work, we must have both the BulkEmailFlag.is_enabled()
    # set to True and for the course to be Mongo-backed.
    # The flag is enabled and the course is Mongo-backed (should work)
    def test_email_flag_true_mongo_true(self):
        """Email tab appears when the flag is on and no per-course auth is required."""
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
        # Assert that instructor email is enabled for this course - since REQUIRE_COURSE_EMAIL_AUTH is False,
        # all courses should be authorized to use email.
        self.assertTrue(BulkEmailFlag.feature_enabled(self.course.id))
        # Assert that the URL for the email view is in the response
        response = self.client.get(self.url)
        self.assertIn(self.email_link, response.content)
        send_to_label = '<div class="send_to_list">Send to:</div>'
        self.assertIn(send_to_label, response.content)
        self.assertEqual(response.status_code, 200)

    # The course is Mongo-backed but the flag is disabled (should not work)
    def test_email_flag_false_mongo_true(self):
        """Email tab is hidden when the feature flag is off."""
        BulkEmailFlag.objects.create(enabled=False)
        # Assert that the URL for the email view is not in the response
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)

    # Flag is enabled, but we require course auth and haven't turned it on for this course
    def test_course_not_authorized(self):
        """Email tab is hidden when course auth is required but not granted."""
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=True)
        # Assert that instructor email is not enabled for this course
        self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id))
        # Assert that the URL for the email view is not in the response
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)

    # Flag is enabled, we require course auth and turn it on for this course
    def test_course_authorized(self):
        """Email tab appears once the course is granted email authorization."""
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=True)
        # Assert that instructor email is not enabled for this course
        self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id))
        # Assert that the URL for the email view is not in the response
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)
        # Authorize the course to use email
        cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True)
        cauth.save()
        # Assert that instructor email is enabled for this course
        self.assertTrue(BulkEmailFlag.feature_enabled(self.course.id))
        # Assert that the URL for the email view is in the response
        response = self.client.get(self.url)
        self.assertIn(self.email_link, response.content)

    # Flag is disabled, but course is authorized
    def test_course_authorized_feature_off(self):
        """Course-level authorization alone is not enough when the flag is off."""
        BulkEmailFlag.objects.create(enabled=False, require_course_email_auth=True)
        # Authorize the course to use email
        cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True)
        cauth.save()
        # Assert that this course is authorized for instructor email, but the feature is not enabled
        self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id))
        self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
        # Assert that the URL for the email view IS NOT in the response
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)
class TestNewInstructorDashboardEmailViewXMLBacked(SharedModuleStoreTestCase):
    """
    Check for email view on the new instructor dashboard
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE

    @classmethod
    def setUpClass(cls):
        # Key of the XML (non-Mongo) toy course plus the dashboard URL / email markup.
        super(TestNewInstructorDashboardEmailViewXMLBacked, cls).setUpClass()
        cls.course_key = CourseKey.from_string('edX/toy/2012_Fall')
        # URL for instructor dash
        cls.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(cls.course_key)})
        # URL for email view
        cls.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>'

    def setUp(self):
        """Log in as a staff user and rebuild per-test URL attributes."""
        super(TestNewInstructorDashboardEmailViewXMLBacked, self).setUp()
        # Create instructor account
        instructor = AdminFactory.create()
        self.client.login(username=instructor.username, password="test")
        # URL for instructor dash
        # NOTE(review): duplicates the class attributes already set in setUpClass.
        self.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(self.course_key)})
        # URL for email view
        self.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>'

    def tearDown(self):
        """Drop all flag rows so tests stay independent."""
        super(TestNewInstructorDashboardEmailViewXMLBacked, self).tearDown()
        BulkEmailFlag.objects.all().delete()

    # The flag is enabled, and since REQUIRE_COURSE_EMAIL_AUTH is False, all courses should
    # be authorized to use email. But the course is not Mongo-backed (should not work)
    def test_email_flag_true_mongo_false(self):
        """Email tab is hidden for non-Mongo courses even with the flag on."""
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)

    # The flag is disabled and the course is not Mongo-backed (should not work)
    def test_email_flag_false_mongo_false(self):
        """Email tab is hidden with the flag off and a non-Mongo course."""
        BulkEmailFlag.objects.create(enabled=False, require_course_email_auth=False)
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)
|
from SimpleLexicon import SimpleLexicon
from LOTlib.Evaluation.EvaluationException import RecursionDepthException
class RecursiveLexicon(SimpleLexicon):
    """
    A lexicon whose word meanings may invoke one another, analogous to a
    RecursiveLOTHypothesis for a LOTHypothesis.

    Each hypothesis stored in self.value must accept a "recurse" callable as
    its first argument; __call__ always supplies self.recursive_call there.

    Raises RecursionDepthException once the call depth exceeds
    recursive_depth_bound. See Examples.EvenOdd.
    """

    def __init__(self, recursive_depth_bound=10, *args, **kwargs):
        self.recursive_depth_bound = recursive_depth_bound
        SimpleLexicon.__init__(self, *args, **kwargs)

    def __call__(self, word, *args):
        """Evaluate *word* on *args*, resetting the recursion counter first."""
        self.recursive_call_depth = 0
        hypothesis = self.value[word]
        # hand the depth-tracking wrapper to the hypothesis as its "recurse" argument
        return hypothesis(self.recursive_call, *args)

    def recursive_call(self, word, *args):
        """Internal re-entry point; tracks depth so runaway recursion is cut off."""
        self.recursive_call_depth += 1
        if self.recursive_call_depth > self.recursive_depth_bound:
            raise RecursionDepthException
        return self.value[word](self.recursive_call, *args)
# | flake8: noqa
"""
Public testing utility functions.
"""
from pandas.util.testing import (
assert_frame_equal, assert_index_equal, asse | rt_series_equal)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-21 12:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
c | lass Migration(migrations.Migration):
dependencies = [("clusters", "0020_emr_release_model")]
operations = [
migrations.RenameField(
model_name="cluster", old_name="emr_release", new_name="emr_release_version"
),
migrations.AddField(
model_name="cluster",
name="emr_release",
| field=models.ForeignKey(
blank=True,
help_text='Different AWS EMR versions have different versions of software like Hadoop, Spark, etc. See <a href="http://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-whatsnew.html">what\'s new</a> in each.',
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="created_clusters",
to="clusters.EMRRelease",
verbose_name="EMR release",
),
),
]
|
from gettext import gettext as _
SECTION_ROOT = 'puppet'
DESC_ROOT = _('manage Puppet bindings')
def ensure_puppet_root(cli):
    """
    Verifies that the root of puppet-related commands exists in the CLI,
    creating it using constants from this module if it does not.

    :param cli: CLI instance being configured
    :type cli: pulp.client.extensions.core.PulpCli
    """
    section = cli.find_section(SECTION_ROOT)
    if section is not None:
        return section
    return cli.create_section(SECTION_ROOT, DESC_ROOT)
def root_section(cli):
    """Return the puppet root section previously created on the CLI."""
    return cli.root_section.find_subsection(SECTION_ROOT)
|
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy | import SQLAlchemy
from flask.ext.login import LoginManager
from config import config
# Shared extension instances; each is bound to the app inside create_app().
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
login_manager = LoginManager()
# 'strong' session protection invalidates sessions on client identity changes
login_manager.session_protection = 'strong'
# endpoint users are redirected to when login is required
login_manager.login_view = 'auth.login'
def create_app(config_name):
    """Application factory: build a Flask app for the named configuration.

    Initializes the shared extensions and registers the main and auth
    blueprints.

    :param config_name: key into the config dict from config.py
    :returns: the configured Flask application
    """
    app = Flask(__name__)
    # NOTE(review): Python 2 debug print left in place; consider removing.
    print 'in app __init__.py', config_name, config[config_name]
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    bootstrap.init_app(app)
    mail.init_app(app)
    moment.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    # blueprints are imported inside the factory (commonly to avoid circular imports)
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    return app
|
import subprocess
def convert_chinese(text):
    """Convert Traditional (HK) Chinese *text* to Simplified via OpenCC.

    Runs the ``opencc`` binary with the ``hk2s.json`` config. The text is
    passed on stdin instead of being interpolated into a shell command,
    which removes the shell-injection and quoting problems of the previous
    ``echo '%s' | opencc`` pipeline.
    """
    result = subprocess.run(
        ["opencc", "-c", "hk2s.json"],
        input=text,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # getoutput() merged stderr into the result
        universal_newlines=True,
    )
    # getoutput() stripped the trailing newline; preserve that behaviour
    return result.stdout.rstrip("\n")
import sqlite3
from config import appConfig
def createTBLS(path=None):
    """Create the bookmark schema (links, tags, assc) in the SQLite DB at *path*.

    :param path: filesystem path of the database file; created if missing.
    :raises sqlite3.Error: if a table already exists or the DB is unusable.
    """
    conn = sqlite3.connect(path)
    try:
        cursor = conn.cursor()
        cursor.execute("""CREATE TABLE links(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                       name TEXT NOT NULL
                       );""")
        cursor.execute("""CREATE TABLE tags(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                       tag TEXT NOT NULL
                       );""")
        # assc is the many-to-many join table between links and tags
        cursor.execute("""CREATE TABLE assc(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                       links_id INTEGER NOT NULL,
                       tags_id INTEGER NOT NULL,
                       FOREIGN KEY (links_id) REFERENCES links(id),
                       FOREIGN KEY (tags_id) REFERENCES tags(id)
                       );""")
        conn.commit()
    finally:
        # release the connection even if a CREATE fails (was leaked before)
        conn.close()
# Script entry point: create the schema at the path configured in appConfig.
if __name__ == '__main__':
    try:
        path = appConfig.db_path
        # parenthesized print works under both Python 2 and 3
        # (the bare `print path` statement was Python-2 only)
        print(path)
        createTBLS(str(path))
    except IOError as e:
        print(str(e))
|
"""This module is part of Swampy, a suite of programs available from
allendowney.com/swampy.
Copyright 2011 Allen B. Downey
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
import math
from World import World
class CellWorld(World):
    """Contains cells and animals that move between cells."""

    def __init__(self, canvas_size=500, cell_size=5, interactive=False):
        """Set up the world; build the GUI only when *interactive* is True."""
        World.__init__(self)
        self.title('CellWorld')
        self.canvas_size = canvas_size
        self.cell_size = cell_size
        # cells is a map from index tuples to Cell objects
        self.cells = {}
        if interactive:
            self.make_canvas()
            self.make_control()

    def make_canvas(self):
        """Creates the GUI."""
        self.canvas = self.ca(width=self.canvas_size,
                              height=self.canvas_size,
                              bg='white',
                              scale = [self.cell_size, self.cell_size])

    def make_control(self):
        """Adds GUI elements that allow the user to change the scale."""
        self.la(text='Click or drag on the canvas to create cells.')
        self.row([0,1,0])
        self.la(text='Cell size: ')
        self.cell_size_en = self.en(width=10, text=str(self.cell_size))
        self.bu(text='resize', command=self.rescale)
        self.endrow()

    def bind(self):
        """Creates bindings for the canvas."""
        self.canvas.bind('<ButtonPress-1>', self.click)
        self.canvas.bind('<B1-Motion>', self.click)

    def click(self, event):
        """Event handler for clicks and drags.

        It creates a new cell or toggles an existing cell.
        """
        # convert the button click coordinates to an index tuple
        x, y = self.canvas.invert([event.x, event.y])
        i, j = int(math.floor(x)), int(math.floor(y))
        # toggle the cell if it exists; create it otherwise
        cell = self.get_cell(i,j)
        if cell:
            cell.toggle()
        else:
            # NOTE(review): passes the raw coordinates rather than the floored
            # indices i, j computed above — possibly intended make_cell(i, j); confirm.
            self.make_cell(x, y)

    def make_cell(self, i, j):
        """Creates and returns a new cell at i,j."""
        cell = Cell(self, i, j)
        self.cells[i,j] = cell
        return cell

    def cell_bounds(self, i, j):
        """Return the bounds of the cell with indices i, j."""
        # four corners of the unit square at (i, j), counter-clockwise
        p1 = [i, j]
        p2 = [i+1, j]
        p3 = [i+1, j+1]
        p4 = [i, j+1]
        bounds = [p1, p2, p3, p4]
        return bounds

    def get_cell(self, i, j, default=None):
        """Gets the cell at i, j or returns the default value."""
        cell = self.cells.get((i,j), default)
        return cell

    # offsets of the Von Neumann / Moore neighborhoods
    four_neighbors = [(1,0), (-1,0), (0,1), (0,-1)]
    eight_neighbors = four_neighbors + [(1,1), (1,-1), (-1,1), (-1,-1)]

    def get_four_neighbors(self, cell, default=None):
        """Return the four Von Neumann neighbors of a cell."""
        return self.get_neighbors(cell, default, CellWorld.four_neighbors)

    def get_eight_neighbors(self, cell, default=None):
        """Returns the eight Moore neighbors of a cell."""
        return self.get_neighbors(cell, default, CellWorld.eight_neighbors)

    def get_neighbors(self, cell, default=None, deltas=[(0,0)]):
        """Return the neighbors of a cell.

        Args:
            cell: Cell
            deltas: a list of tuple offsets.

        Note: the mutable default for *deltas* is never mutated here, so the
        shared-default pitfall does not bite in practice.
        """
        i, j = cell.indices
        cells = [self.get_cell(i+di, j+dj, default) for di, dj in deltas]
        return cells

    def rescale(self):
        """Event handler that rescales the world.

        Reads the new scale from the GUI,
        changes the canvas transform, and redraws the world.
        """
        cell_size = self.cell_size_en.get()
        cell_size = int(cell_size)
        self.canvas.transforms[0].scale = [cell_size, cell_size]
        self.redraw()

    def redraw(self):
        """Clears the canvas and redraws all cells and animals."""
        self.canvas.clear()
        # NOTE(review): dict.itervalues() is Python 2 only.
        for cell in self.cells.itervalues():
            cell.draw()
        for animal in self.animals:
            animal.draw()
class Cell(object):
    """A rectangular region in CellWorld"""

    def __init__(self, world, i, j):
        """Create an (initially unmarked) cell at indices i, j and draw it."""
        self.world = world
        self.indices = i, j
        self.bounds = self.world.cell_bounds(i, j)
        # options used for a marked cell
        self.marked_options = dict(fill='black', outline='gray80')
        # options used for an unmarked cell
        self.unmarked_options = dict(fill='yellow', outline='gray80')
        self.marked = False
        self.draw()

    def draw(self):
        """Draw the cell."""
        if self.marked:
            options = self.marked_options
        else:
            options = self.unmarked_options
        # bounds returns all four corners, so slicing every other
        # element yields two opposing corners, which is what we
        # pass to Canvas.rectangle
        coords = self.bounds[::2]
        self.item = self.world.canvas.rectangle(coords, **options)

    def undraw(self):
        """Delete any items with this cell's tag."""
        self.item.delete()
        self.item = None

    def get_config(self, option):
        """Gets the configuration of this cell."""
        return self.item.cget(option)

    def config(self, **options):
        """Configure this cell with the given options."""
        self.item.config(**options)

    def mark(self):
        """Marks this cell."""
        self.marked = True
        self.config(**self.marked_options)

    def unmark(self):
        """Unmarks this cell."""
        self.marked = False
        self.config(**self.unmarked_options)

    def is_marked(self):
        """Checks whether this cell is marked."""
        return self.marked

    def toggle(self):
        """Toggles the state of this cell."""
        if self.is_marked():
            self.unmark()
        else:
            self.mark()
# Script entry point: open an interactive CellWorld window.
if __name__ == '__main__':
    world = CellWorld(interactive=True)
    world.bind()
    world.mainloop()
|
onfig()
cache = config.get("general", "package_cache")
cache_limit = config.get("general", "package_cache_limit")
cache_limit = int(cache_limit) if cache_limit else 0
cache_dir | = config.get("directories", "cached_packages_dir")
cache_dir = str(cache_dir) if cache_dir else '/var/cache/pisi/packages'
# If pisi.conf does not have it yet, default is use package cache
if not cache or cache == "True":
| enableCache = True
else:
enableCache = False
self.cacheEnabled = enableCache
self.cacheSize = cache_limit
self.settings.cacheGroup.setEnabled(self.cacheEnabled)
self.settings.useCacheCheck.setChecked(enableCache)
self.settings.useCacheSpin.setValue(cache_limit)
self.settings.cacheDirPath.setText(cache_dir)
bandwidth_limit = config.get("general", "bandwidth_limit")
bandwidth_limit = int(bandwidth_limit) if bandwidth_limit else 0
self.settings.useBandwidthLimit.setChecked(not bandwidth_limit == 0)
self.settings.bandwidthSpin.setValue(bandwidth_limit)
    def connectSignals(self):
        """Wire the cache-settings widgets to their handlers and change tracking."""
        self.settings.clearCacheButton.clicked.connect(self.clearCache)
        self.settings.selectCacheDir.clicked.connect(self.selectCacheDir)
        self.settings.useCacheCheck.toggled.connect(self.markChanged)
        self.settings.useCacheSpin.valueChanged.connect(self.markChanged)
        self.settings.useBandwidthLimit.toggled.connect(self.markChanged)
        self.settings.bandwidthSpin.valueChanged.connect(self.markChanged)
        self.settings.openCacheDir.clicked.connect(self.openCacheDir)
    def openCacheDir(self):
        """Open the package cache directory in the desktop file manager."""
        cache_dir = unicode(self.settings.cacheDirPath.text())
        if path.exists(cache_dir):
            QDesktopServices.openUrl(QUrl("file://%s" % cache_dir, QUrl.TolerantMode))
    def selectCacheDir(self):
        """Let the user pick a cache directory; record the change if it differs."""
        selected_dir = QFileDialog.getExistingDirectory(self.settings, self.tr("Open Directory"), "/",
                                                        QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
        if not selected_dir == '':
            if not selected_dir == self.settings.cacheDirPath.text():
                self.settings.cacheDirPath.setText(selected_dir)
                self.markChanged()
    def clearCache(self):
        """Delete all cached packages after user confirmation."""
        if QMessageBox.Yes == QMessageBox.warning(self.settings,
                                                  self.tr("Warning"),
                                                  self.tr("All the cached packages will be deleted. Are you sure? "),
                                                  QMessageBox.Yes | QMessageBox.No):
            try:
                self.iface.clearCache(0)
            except Exception, e:
                # surface backend failures in the UI instead of crashing
                self.settings.parent.cw.exceptionCaught(str(e))
    def save(self):
        """Push the cache and bandwidth settings to the backend configuration."""
        self.iface.setCacheLimit(self.settings.useCacheCheck.isChecked(), self.settings.useCacheSpin.value())
        self.iface.setConfig("directories", "cached_packages_dir", unicode(self.settings.cacheDirPath.text()))
        if self.settings.useBandwidthLimit.isChecked():
            self.iface.setConfig("general", "bandwidth_limit", str(self.settings.bandwidthSpin.value()))
        else:
            # 0 means "no bandwidth limit"
            self.iface.setConfig("general", "bandwidth_limit", "0")
class RepositorySettings(SettingsTab):
    def setupUi(self):
        """Prepare the repository table view and load the current repository list."""
        self.settings.repoListView.horizontalHeader().setStretchLastSection(True)
        self.settings.repoListView.verticalHeader().hide()
        # column 0 holds the enabled checkbox
        self.settings.repoListView.setColumnWidth(0, 32)
        self.initialize(firstRun = True)
    def connectSignals(self):
        """Wire repository-tab buttons and table edits to their handlers."""
        self.settings.addRepoButton.clicked.connect(self.addRepository)
        self.settings.removeRepoButton.clicked.connect(self.removeRepository)
        self.settings.moveUpButton.clicked.connect(self.moveUp)
        self.settings.moveDownButton.clicked.connect(self.moveDown)
        self.settings.repoListView.itemChanged.connect(self.markChanged)
    def get_repo_names(self):
        """Return the repository names currently listed (column 1), top to bottom."""
        repos = []
        for row in range(self.settings.repoListView.rowCount()):
            repos.append(unicode(self.settings.repoListView.item(row, 1).text()))
        return repos
    def initialize(self, firstRun = False):
        """Reload repositories from the backend and repopulate the table.

        On the first run all repositories are fetched; afterwards only the
        ones still present in the table are queried.
        """
        self.repositories = self.iface.getRepositories(
            repos = None if firstRun else self.get_repo_names())
        self.__clear()
        for name, address in self.repositories:
            self.__insertRow(unicode(name), address)
    def __clear(self):
        """Remove every row from the repository table."""
        while self.settings.repoListView.rowCount():
            self.settings.repoListView.removeRow(0)
    def __insertRow(self, repoName, repoAddress):
        """Append a table row: [enabled checkbox | name | address]."""
        currentRow = self.settings.repoListView.rowCount()
        self.settings.repoListView.insertRow(currentRow)
        checkbox = QCheckBox(self.settings.repoListView)
        checkbox.toggled.connect(self.markChanged)
        self.settings.repoListView.setCellWidget(currentRow, 0, checkbox)
        # reflect the backend's active/inactive state in the checkbox
        self.settings.repoListView.cellWidget(currentRow, 0).setChecked(self.iface.isRepoActive(repoName))
        repoNameItem = QTableWidgetItem()
        repoNameItem.setText(repoName)
        repoNameItem.setTextAlignment(Qt.AlignLeft|Qt.AlignVCenter)
        self.settings.repoListView.setItem(currentRow, 1, repoNameItem)
        repoAddressItem = QTableWidgetItem()
        repoAddressItem.setText(repoAddress)
        repoAddressItem.setTextAlignment(Qt.AlignLeft|Qt.AlignVCenter)
        self.settings.repoListView.setItem(currentRow, 2, repoAddressItem)
    def addRepository(self):
        """Show the add-repository dialog; __addRepository runs on accept."""
        self.repoDialog = repodialog.RepoDialog()
        self.repoDialog.buttonBox.accepted.connect(self.__addRepository)
        self.repoDialog.show()
    def __addRepository(self):
        """Validate the dialog's name/address and append the new repository row."""
        repoName = self.repoDialog.repoName.text()
        repoAddress = self.repoDialog.repoAddress.currentText()
        # name may only contain digits, letters, -, _, . and whitespace
        if not re.match("^[0-9%s\-\\_\\.\s]*$" % str(pmutils.letters()), str(repoName)) or str(repoName) == '':
            QMessageBox.warning(self.settings,
                                self.tr("Pisi Error"),
                                self.tr("Not a valid repository name"))
            return
        # address must point at an index file (QString.endsWith, Python 2 / PyQt)
        if not repoAddress.endsWith("xml") and not repoAddress.endsWith("xml.bz2") and not repoAddress.endsWith('xz'):
            QMessageBox.warning(self.settings,
                                self.tr("Pisi Error"),
                                self.tr('<qt>Repository address should end with xml or xml.bz2 or xz suffix.<p>Please try again.</qt>'))
            return
        self.__insertRow(repoName, repoAddress)
        self.markChanged()
    def removeRepository(self):
        """Delete the currently selected repository row."""
        self.settings.repoListView.removeRow(self.settings.repoListView.currentRow())
        self.markChanged()
    def __setRow(self, row, rowItems):
        """Place *rowItems* into *row*, one item per column."""
        for col in range(self.settings.repoListView.columnCount()):
            self.settings.repoListView.setItem(row, col, rowItems[col])
    def __takeRow(self, row):
        """Detach and return all items of *row*, one per column."""
        rowItems = []
        for col in range(self.settings.repoListView.columnCount()):
            rowItems.append(self.settings.repoListView.takeItem(row, col))
        return rowItems
    def __move(self, up):
        """Swap the selected row with its neighbor above (*up*) or below.

        The checkbox cell widgets stay in place, so only their checked
        states are exchanged, while the name/address items are swapped.
        """
        srcRow = self.settings.repoListView.currentRow()
        dstRow = srcRow - 1 if up else srcRow + 1
        # nothing to do at the table edges
        if dstRow < 0 or dstRow >= self.settings.repoListView.rowCount():
            return
        srcRowChecked = self.settings.repoListView.cellWidget(srcRow, 0).checkState()
        dstRowChecked = self.settings.repoListView.cellWidget(dstRow, 0).checkState()
        srcItems = self.__takeRow(srcRow)
        destItems = self.__takeRow(dstRow)
        self.__setRow(srcRow, destItems)
        self.__setRow(dstRow, srcItems)
        self.settings.repoListView.cellWidget(srcRow, 0).setCheckState(dstRowChecked)
        self.settings.repoListView.cellWidget(dstRow, 0).setCheckState(srcRowChecked)
        # keep the selection on the row that was moved
        self.settings.repoListView.setCurrentItem(srcItems[1])
        self.markChanged()
    def moveUp(self):
        """Move the selected repository one row up."""
        self.__move(True)
    def moveDown(self):
        """Move the selected repository one row down."""
        self.__move(False)
def getRepo(self, row) |
#!/usr/bin/python3
import os, sys, random
import argparse
# this script processes all the log simulations in one dir, and writes the values of one particular attribute into one single file.
def prepareProcess(inputDir, simulationFile, separator, output, attribute):
    """Initialize *output* with a 'var' header and one 'stepN' row per data line.

    :param inputDir: directory containing the simulation CSV files
    :param simulationFile: name of the reference simulation file
    :param separator: column separator token
    :param output: path of the aggregate output file to create
    :param attribute: header name of the column to aggregate later
    :returns: index of *attribute* in the header row
    :raises ValueError: if *attribute* is not present in the header
    """
    indexAttribute = None
    # context managers close both files (the simulation handle was leaked before)
    with open(inputDir + '/' + simulationFile, 'r') as simulation, \
            open(output, 'w') as out:
        # headers
        out.write('var' + '\n')
        # strip the newline so an attribute in the LAST column also matches
        splittedHeader = simulation.readline().strip('\n').split(separator)
        for i, name in enumerate(splittedHeader):
            if name == attribute:
                indexAttribute = i
        for line in simulation:
            splittedLine = line.split(separator)
            out.write('step' + splittedLine[0] + '\n')
    if indexAttribute is None:
        raise ValueError("attribute %r not found in header of %s" % (attribute, simulationFile))
    return indexAttribute
def processSimulation(inputDir, simulationFile, separator, outputName, attributeIndex):
    """Append the *attributeIndex* column of a simulation file to *outputName*.

    Rewrites *outputName* with one extra column (headed by the simulation
    file name) holding that file's values of the chosen attribute.
    """
    # temp file lives next to the output (the old cwd-relative 'tmp' could
    # collide or land on a different filesystem)
    tmpName = outputName + '.tmp'
    # context managers close all three handles even on error
    with open(outputName, 'r') as output, \
            open(tmpName, 'w') as outputTmp, \
            open(inputDir + '/' + simulationFile, 'r') as simulation:
        # header: previous header plus the new column's name
        outputTmp.write(output.readline().strip('\n') + separator + simulationFile + '\n')
        simulation.readline()  # skip the simulation file's header
        for simulationLine in simulation:
            previousLine = output.readline().strip('\n') + separator
            # strip the newline so a LAST-column attribute doesn't drag '\n' along
            splittedLine = simulationLine.strip('\n').split(separator)
            value = splittedLine[attributeIndex]
            outputTmp.write(previousLine + value + '\n')
    os.rename(tmpName, outputName)
def main():
    """Collect one attribute column from every simulation CSV in a directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', default='input', help='directory where simulated files are stored')
    parser.add_argument('-o', '--output', default='results.csv', help='output file')
    parser.add_argument('-s', '--separator', default=';', help='separator token between values')
    parser.add_argument('-a', '--attribute', default='Number of agents', help='name of the attribute column to process')
    args = parser.parse_args()
    # truncate any previous results
    open(args.output, 'w').close()
    attributeIndex = None
    for root, dirs, simulationFiles in os.walk(args.input):
        for simulationFile in simulationFiles:
            if not simulationFile.endswith('.csv'):
                continue
            # the first CSV seeds the output file and locates the attribute column
            if attributeIndex is None:
                attributeIndex = prepareProcess(args.input, simulationFile, args.separator, args.output, args.attribute)
            # print() call: the bare `print '...'` statement was a SyntaxError
            # under this script's own #!/usr/bin/python3 shebang
            print('processing simulation results in file: ' + simulationFile)
            processSimulation(args.input, simulationFile, args.separator, args.output, attributeIndex)
if __name__ == "__main__":
main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
|
"""
PyCOMPSs Testbench Arguments Warnings
=====================================
"""
# Imports
import unittest
from modules.testArgumentError import testArgum | entError
def main():
    """Run the testArgumentError suite with a verbose text runner."""
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(testArgumentError))

if __name__ == "__main__":
    main()
|
import os
from configurations import values
from boto.s3.connection import OrdinaryCallingFormat
from {{cookiecutter.app_name}}.config.common import Common
try:
# Python 2.x
import urlparse
except ImportError:
# Python 3.x
from urllib import parse as urlparse
class Production(Common):
    """Production settings: proxy-aware HTTPS, django-secure hardening,
    S3 media storage, whitenoise static files, Redis cache and RQ queues.

    This is a cookiecutter template; '{{cookiecutter.app_name}}' is
    substituted at project generation time.
    """
    # Honor the 'X-Forwarded-Proto' header for request.is_secure()
    # https://devcenter.heroku.com/articles/getting-started-with-django
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    INSTALLED_APPS = Common.INSTALLED_APPS
    # SecretValue has no default: deployment must provide it via the env.
    SECRET_KEY = values.SecretValue()
    # Postgres
    DATABASES = values.DatabaseURLValue('postgres://localhost/{{cookiecutter.app_name}}')
    # django-secure
    # http://django-secure.readthedocs.org/en/v0.1.2/settings.html
    INSTALLED_APPS += ("djangosecure", )
    SECURE_HSTS_SECONDS = 60
    SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    SECURE_FRAME_DENY = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    # NOTE(review): session cookies default to non-secure even though
    # SECURE_SSL_REDIRECT below defaults to True -- confirm intentional.
    SESSION_COOKIE_SECURE = values.BooleanValue(False)
    SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    SECURE_SSL_REDIRECT = values.BooleanValue(True)
    # Site
    # https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
    ALLOWED_HOSTS = ["*"]
    INSTALLED_APPS += ("gunicorn", )
    # Template
    # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )
    # Media files
    # http://django-storages.readthedocs.org/en/latest/index.html
    INSTALLED_APPS += ('storages',)
    DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
    # NOTE(review): the first positional argument of values.Value() is the
    # *default value*, not the environment variable name -- verify these
    # resolve from DJANGO_AWS_* env vars as apparently intended.
    AWS_ACCESS_KEY_ID = values.Value('DJANGO_AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = values.Value('DJANGO_AWS_SECRET_ACCESS_KEY')
    AWS_STORAGE_BUCKET_NAME = values.Value('DJANGO_AWS_STORAGE_BUCKET_NAME')
    AWS_AUTO_CREATE_BUCKET = True
    AWS_QUERYSTRING_AUTH = False
    MEDIA_URL = 'https://s3.amazonaws.com/{}/'.format(AWS_STORAGE_BUCKET_NAME)
    AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
    # https://developers.google.com/web/fundamentals/performance/optimizing-content-efficiency/http-caching#cache-control
    # Response can be cached by browser and any intermediary caches (i.e. it is "public") for up to 1 day
    # 86400 = (60 seconds x 60 minutes x 24 hours)
    AWS_HEADERS = {
        'Cache-Control': 'max-age=86400, s-maxage=86400, must-revalidate',
    }
    # Static files
    STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
    # Caching
    redis_url = urlparse.urlparse(os.environ.get('REDISTOGO_URL', 'redis://localhost:6379'))
    CACHES = {
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': '{}:{}'.format(redis_url.hostname, redis_url.port),
            'OPTIONS': {
                'DB': 0,
                'PASSWORD': redis_url.password,
                'PARSER_CLASS': 'redis.connection.HiredisParser',
                'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 50,
                    'timeout': 20,
                }
            }
        }
    }
    # Django RQ production settings
    RQ_QUEUES = {
        'default': {
            'URL': os.getenv('REDISTOGO_URL', 'redis://localhost:6379'),
            'DB': 0,
            'DEFAULT_TIMEOUT': 500,
        },
    }
    # Renditions are created ahead of time in production, not on demand.
    Common.VERSATILEIMAGEFIELD_SETTINGS['create_images_on_demand'] = False
|
basis in
repo_info. Defaults to "". Not required.
default_depth:
description:
Default clone depth (int) in case not specified
on an individual repo basis. Defaults to 10.
Not required.
retries:
description:
Integer number of retries allowed in case of git
clone failure. Defaults to 1. Not required.
delay:
description:
Integer time delay (seconds) between git clone
retries in case of failure. Defaults to 0. Not
required.
force:
description:
Boolean. Apply --force flags to git clones wherever
possible. Defaults to False. Not required.
core_multiplier:
description:
Integer multiplier on the number of cores
present on the machine to use for
multithreading. For example, on a 2 core
machine, a multiplier of 4 would use 8
threads. Defaults to 4. Not required.
"""
# Usage example for the module documentation.  The original example was
# missing the comma after the 'name' entry, which made the illustrated
# repo_info value unparseable.
EXAMPLES = r"""
- name: Clone repos
  git_requirements:
    repo_info: "[{'src':'https://github.com/ansible/',
                  'name': 'ansible',
                  'dest': '/etc/opt/ansible'}]"
"""
def init_signal():
    # Ignore SIGINT so Ctrl-C does not interrupt this process mid-operation.
    # NOTE(review): looks like a multiprocessing pool initializer (keeps the
    # parent in charge of handling the interrupt) -- confirm against the
    # pool setup code, which is not visible here.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def check_out_version(repo, version, pull=False, force=False,
                      refspec=None, tag=False, depth=10):
    """Fetch and check out *version* in *repo*, optionally cleaning/pulling.

    :param repo: git.Repo object to operate on
    :param version: branch, tag or commit hash to check out
    :param pull: when True, pull after a successful checkout
    :param force: apply --force to fetch/checkout/clean/pull
    :param refspec: optional refspec passed to fetch/pull
    :param tag: when True, fetch tags as well
    :param depth: shallow fetch/pull depth
    :return: [] on success, or a single-element list containing a
             human-readable failure message (callers treat [] as success).
    """
    try:
        repo.git.fetch(tags=tag, force=force, refspec=refspec, depth=depth)
    except Exception as e:
        return ["Failed to fetch %s\n%s" % (repo.working_dir, str(e))]
    try:
        repo.git.checkout(version, force=force)
    except Exception as e:
        return [
            "Failed to check out version %s for %s\n%s" %
            (version, repo.working_dir, str(e))]
    # Only clean when forcing: a dirty tree is otherwise left untouched.
    if repo.is_dirty(untracked_files=True) and force:
        try:
            repo.git.clean(force=force)
        except Exception as e:
            # Fixed format string: the original "repository% s" rendered
            # as "repository/path" with no separating space.
            return [
                "Failed to clean up repository %s\n%s" %
                (repo.working_dir, str(e))]
    if pull:
        try:
            repo.git.pull(force=force, refspec=refspec, depth=depth)
        except Exception as e:
            return ["Failed to pull repo %s\n%s" % (repo.working_dir, str(e))]
    return []
def pull_wrapper(info):
    """Retry pull_role up to the configured number of times.

    *info* is a (role, config, failures) tuple; config supplies the retry
    count and the delay (seconds) between attempts.  Returns True on the
    first successful attempt, otherwise records a failure message in the
    shared failures list and returns False.
    """
    config = info[1]
    attempts = config["retries"]
    pause = config["delay"]
    for _ in range(attempts):
        if pull_role(info):
            return True
        time.sleep(pause)
    message = "Role {0} failed after {1} retries\n".format(info[0], attempts)
    info[2].append([message])
    return False
def pull_role(info):
    """Clone or update a single git repository described by *info*.

    :param info: (role, config, failures) tuple; `role` carries src/dest/
        version/refspec/depth, `config` carries the global force flag, and
        `failures` is a shared list collecting error messages.
    :return: True on success, False on failure (with a message appended to
        `failures`).
    """
    role, config, failures = info
    # NOTE(review): role["version"] is read *before* the "'version' in role"
    # check below, so a role lacking the key would raise KeyError here; in
    # practice the caller fills in defaults first via set_default().
    required_version = role["version"]
    version_hash = False
    if 'version' in role:
        # If the version is the length of a full SHA-1 hash (40 hex chars)
        # then treat it as one.
        if len(required_version) == 40:
            version_hash = True

    def get_repo(dest):
        # Open an existing working copy; report (not raise) if it is broken.
        try:
            return git.Repo(dest)
        except Exception:
            failtxt = "Role in {0} is broken/not a git repo.".format(
                role["dest"])
            failtxt += "Please delete or fix it manually"
            failures.append(failtxt)
            return False
    # if repo exists
    if os.path.exists(role["dest"]):
        repo = get_repo(role["dest"])
        if not repo:
            return False  # go to next role
        # Re-point the remote when the configured source URL has changed.
        repo_url = list(repo.remote().urls)[0]
        if repo_url != role["src"]:
            repo.remote().set_url(role["src"])
        # if they want master then fetch, checkout and pull to stay at latest
        # master
        if required_version == "master":
            fail = check_out_version(repo, required_version, pull=True,
                                     force=config["force"],
                                     refspec=role["refspec"],
                                     depth=role["depth"])
        # If we have a hash then check it out directly (no pull)
        elif version_hash:
            fail = check_out_version(repo, required_version,
                                     force=config["force"],
                                     refspec=role["refspec"],
                                     depth=role["depth"])
        else:
            # describe can fail in some cases so be careful:
            try:
                current_version = repo.git.describe(tags=True)
            except Exception:
                current_version = ""
            # Already at the requested tag and not forcing: nothing to do.
            if current_version == required_version and not config["force"]:
                fail = []
                pass
            else:
                fail = check_out_version(repo, required_version,
                                         force=config["force"],
                                         refspec=role["refspec"],
                                         depth=role["depth"],
                                         tag=True)
    else:
        try:
            # If we have a hash id then treat this a little differently:
            # clone master first (a hash cannot be cloned as a branch),
            # then check the hash out.
            if version_hash:
                git.Repo.clone_from(role["src"], role["dest"],
                                    branch='master',
                                    no_single_branch=True,
                                    depth=role["depth"])
                repo = get_repo(role["dest"])
                if not repo:
                    return False  # go to next role
                fail = check_out_version(repo, required_version,
                                         force=config["force"],
                                         refspec=role["refspec"],
                                         depth=role["depth"])
            else:
                git.Repo.clone_from(role["src"], role["dest"],
                                    branch=required_version,
                                    depth=role["depth"],
                                    no_single_branch=True)
                fail = []
        except Exception as e:
            # NOTE(review): this failure is a string, not a list like the
            # check_out_version failures -- `fail == []` below still treats
            # it as a failure, but failures ends up with mixed entry types.
            fail = ('Failed cloning repo %s\n%s' % (role["dest"], str(e)))
    if fail == []:
        return True
    else:
        failures.append(fail)
        return False
def set_default(dictionary, key, defaults):
    """Copy defaults[key] into *dictionary* unless the key is already set."""
    # 'in dictionary' tests membership directly; the original built a
    # redundant .keys() view for the same check.
    if key not in dictionary:
        dictionary[key] = defaults[key]
def main():
# Define variables
failures = multiprocessing.Manager().list()
# Data we can pass in to the module
fields = {
"repo_info": {"required": True, "type": "list"},
"default_path": {"required": True,
"type": "str"},
"default_version": {"required": False,
"type": "str",
"default": "master"},
"default_refspec": {"required": False,
"type": "str",
"default": None},
"default_depth": {"required": False,
"type": "int",
"default": 10},
"retries": {"required": False,
"type": "int",
"default": 1},
"delay": {"require | d": False,
"type": "int",
"default": 0},
"force": {"required": False,
"type": "bool",
"default": False},
"core_multiplier": {"required": False,
| "type": "int",
"default": 4},
}
# Pull in module fields and pass into variables
module = AnsibleModule(argument_spec=fields)
git_repos = module.params['repo_info']
defaults = {
"path": module.params["default_path"],
"depth": module.params["default_depth"],
"version": module.params["default_version"],
"refspec": module.params["default_refspec"]
}
config = {
"retries": module.params["retries"],
"delay": module.params["delay"],
"force": module.params["force"],
"core_multiplier": module.params["core_multiplier"]
}
# Set up defaults
for repo in git_repos:
for key in ["path", "refspec", "version", "depth"]:
set_default(repo, key, defaults)
if "name" not in repo.keys():
repo["name"] = os.path.b |
import os

# Write a marker file signalling that dependency verification succeeded.
# A context manager replaces the semicolon-chained open/write/close of the
# original and guarantees the handle is closed even if the write fails.
# NOTE(review): 'os' appears unused here; kept in case another part of the
# file relies on it.
with open('depsVerified', 'w') as marker:
    marker.write('ok')
from django.db import models
from django.utils import timezone
# Create your models here.
def formatDateTime(dateTime):
    """Render *dateTime* in the server's local timezone as 'YYYY-MM-DD HH:MM:SS'."""
    localized = timezone.localtime(dateTime)
    return localized.strftime("%Y-%m-%d %H:%M:%S")
class Beacon(models.Model):
    """A physical BLE beacon, identified by its MAC address and its
    (uuid, major, minor) advertising triple."""
    macAddr = models.CharField(max_length=20, unique=True)
    uuid = models.UUIDField(editable=False)
    major = models.CharField(max_length=10, null=False)
    minor = models.CharField(max_length=10, null=False)

    def getDict(self):
        """Return a JSON-serializable dict of the beacon's identity fields."""
        # Dict literal avoids shadowing the builtin `dict`, which the
        # original local variable did.
        return {
            'macAddr': self.macAddr,
            'uuid': str(self.uuid),
            'major': self.major,
            'minor': self.minor,
        }

    class Meta:
        # Two beacons may never advertise the same identity triple.
        unique_together = ('uuid', 'major', 'minor')
class DetectorDevice(models.Model):
    """ device which detects beacons, now only cellphones """
    externalId = models.CharField(max_length=32, unique=True)

    def getDict(self):
        """Return a JSON-serializable dict identifying this device."""
        # Dict literal avoids shadowing the builtin `dict`, which the
        # original local variable did.
        return {'deviceId': self.externalId}

    def __str__(self):
        return self.externalId
class Be | aconLog(models.Model):
time = models.DateTimeField(null=False)
rssi = models.IntegerField(null=False)
measurePower = models.IntegerField(null=False)
beacon = models.ForeignKey(Beacon, on_delete=models.CASCADE)
device = models.ForeignKey(DetectorDevice, on_delete=models.CASCADE)
def __str__(self):
return "time: {} | rssi: {} | measurePower: {}".format(
self.time, self.rssi, self.measurePower)
def getDict(self):
dict = {}
dict['time'] = formatDateTime(self.time)
dict['rssi'] = self.rssi
dict['measurePower'] = self.measurePower
return dict
class Event(models.Model):
    """A free-form event string reported by a DetectorDevice."""
    time = models.DateTimeField(null=False)
    event = models.TextField(null=False)
    device = models.ForeignKey(DetectorDevice, on_delete=models.CASCADE)

    def getDict(self):
        """Return a JSON-serializable dict of this event."""
        # Dict literal avoids shadowing the builtin `dict`, which the
        # original local variable did.
        return {
            'time': formatDateTime(self.time),
            'event': self.event,
        }
|
import json
import os
from processes.postgres import Postgres
from processes.gather_exception import GatherException
# Database connection parameters come from the environment in production;
# fall back to processes.GLOBALS for local development, and abort the
# import entirely if neither source is available.
try:
    DB_SERVER = os.environ['DB_SERVER']
    DB_PORT = os.environ['DB_PORT']
    DB_DATABASE = os.environ['DB_DATABASE']
    DB_USER = os.environ['DB_USER']
    DB_PASSWORD = os.environ['DB_PASSWORD']
except KeyError:
    try:
        from processes.GLOBALS import DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD
    except ImportError:
        # No env vars and no GLOBALS module: nothing can connect, so exit.
        print("No parameters provided")
        exit()
class Main(object):
    """Loads film JSON from the 'youtube' topic into the kino.movies schema
    (languages, movies, kino_ratings)."""

    def __init__(self):
        # Postgres wrapper exposing pg_cur (cursor) and pg_conn (connection).
        self.pg = Postgres(DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD)
        self.source_topic = 'youtube'
        self.destination_topic = 'movies'

    def run(self, data):
        """
        This inserts the relevant json information
        into the table kino.movies.
        :param data: json data holding information on films.
        :return: the same data, unchanged, for downstream consumers.
        """
        imdb_id = data['imdb_id']
        omdb_movie_data = data['omdb_main']
        tmdb_movie_data = data['tmdb_main']
        # Register any original languages we have not seen before.
        sql = """insert into kino.languages(language)
                 select y.language
                   from json_to_recordset(%s) x (original_language varchar(1000))
                   join kino.iso2language y
                     on x.original_language = y.iso3166
                  where language not in (select language
                                           from kino.languages)"""
        self.pg.pg_cur.execute(sql, (json.dumps(tmdb_movie_data),))
        self.pg.pg_conn.commit()
        # We delete our record from kino.movies first.
        # Due to foreign keys with 'on delete cascade', this clears all records from
        # the database associated with that imdb_id.
        # Parameterized: the original interpolated imdb_id with str.format,
        # which breaks on quotes and is an SQL-injection vector.
        sql = """delete from kino.movies
                  where imdb_id = %s"""
        self.pg.pg_cur.execute(sql, (imdb_id,))
        self.pg.pg_conn.commit()
        # We also delete any records in errored attached to this imdb_id, as
        # we have successfully gathered information for the film.
        sql = """delete from kino.errored
                  where imdb_id = %s"""
        self.pg.pg_cur.execute(sql, (imdb_id,))
        self.pg.pg_conn.commit()
        # Join OMDB and TMDB payloads on imdb_id and insert the movie row.
        sql = """insert into kino.movies (imdb_id, title, runtime, rated, released, orig_language, plot, tstamp)
                 select x.imdb_id
                      , y.title
                      , y.runtime
                      , x.rated
                      , y.release_date::date
                      , z.language
                      , y.plot
                      , CURRENT_DATE
                   from json_to_recordset(%s) x ( imdb_id varchar(15), rated varchar(10) )
                   join json_to_recordset(%s) y ( imdb_id varchar(15), title varchar(1000), runtime integer
                                                , release_date date, plot varchar(4000), original_language varchar(1000))
                     on x.imdb_id = y.imdb_id
                   join kino.iso2language z
                     on y.original_language = z.iso3166
               """
        self.pg.pg_cur.execute(sql, (json.dumps(omdb_movie_data), json.dumps(tmdb_movie_data)))
        # Exactly one row must land; zero usually means an unmapped language.
        if self.pg.pg_cur.rowcount != 1:
            raise GatherException(omdb_movie_data[0]['imdb_id'], 'No insert into movies, most likely due to a new language')
        self.pg.pg_conn.commit()
        # Seed a default rating of 3; ignore if one already exists.
        sql = """insert into kino.kino_ratings (imdb_id, rating) values (%s, 3) on conflict do nothing"""
        self.pg.pg_cur.execute(sql, (imdb_id,))
        self.pg.pg_conn.commit()
        return data
|
"""A modest set of tools to work with Django models."""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
# with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
# long_description = f.read()
setup(
name='sculpt.model_tools',
version='0.1',
description='A modest set of tools to work with Django models.',
long_description='',
url='ht | tps://github.com/damienjones/sculpt-model-tools',
author='Damien M. Jones',
author_email='damien@codesculpture.com',
license='LGPLv2',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU Lesser General Public License v2 ( | LGPLv2)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='',
packages=find_packages(),
install_requires=[
'sculpt-common>=0.2',
],
# package_data={},
# data_files=[],
# entry_points={},
# console_scripts={},
)
|
from JumpScale import j
descr = """
This jumpscript returns network info
"""

# Metadata fields consumed by the JumpScale agent/scheduler.
category = "monitoring"
organization = "jumpscale"
author = "kristof@incubaid.com"
license = "bsd"
version = "1.0"
roles = []

def action():
    """Return network information from the system abstraction layer."""
    return j.sal.nettools.getNetworkInfo()

if __name__ == "__main__":
    print(action())
|
kedObjects with the following members set:
offset
obj_type_num
obj_chunks (for non-delta types)
delta_base (for delta types)
decomp_chunks
decomp_len
crc32 (if compute_crc32 is True)
:raise ChecksumMismatch: if the checksum of the pack contents does not
match the checksum in the pack trailer.
:raise zlib.error: if an error occurred during zlib decompression.
:raise IOError: if an error occurred writing to the output file.
"""
pack_version, self._num_objects = read_pack_header(self.read)
if pack_version is None:
return
for i in range(self._num_objects):
offset = self.offset
unpacked, unused = unpack_object(
self.read, read_some=self.recv, compute_crc32=compute_crc32,
zlib_bufsize=self._zlib_bufsize)
unpacked.offset = offset
# prepend any unused data to current read buffer
buf = BytesIO()
buf.write(unused)
buf.write(self._rbuf.read())
buf.seek(0)
self._rbuf = buf
yield unpacked
if self._buf_len() < 20:
# If the read buffer is full, then the last read() got the whole
# trailer off the wire. If not, it means there is still some of the
# trailer to read. We need to read() all 20 bytes; N come from the
# read buffer and (20 - N) come from the wire.
self.read(20)
pack_sha = bytearray(self._trailer)
if pack_sha != self.sha.digest():
raise ChecksumMismatch(sha_to_hex(pack_sha), self.sha.hexdigest())
class PackStreamCopier(PackStreamReader):
    """Verify a pack stream while copying it to an output file.

    Data is pulled from a ReceivableProtocol via read()/recv(), and every
    byte that is read is mirrored to the supplied file-like object.
    """

    def __init__(self, read_all, read_some, outfile, delta_iter=None):
        """Set up the copier.

        :param read_all: blocking read function that returns exactly the
            number of bytes requested.
        :param read_some: read function that returns at least one byte but
            possibly fewer than requested.
        :param outfile: file-like object receiving a copy of the stream.
        :param delta_iter: optional DeltaChainIterator used to record deltas
            as they are encountered.
        """
        super(PackStreamCopier, self).__init__(read_all, read_some=read_some)
        self.outfile = outfile
        self._delta_iter = delta_iter

    def _read(self, read, size):
        """Read via the base class and mirror the bytes to the output file."""
        chunk = super(PackStreamCopier, self)._read(read, size)
        self.outfile.write(chunk)
        return chunk

    def verify(self):
        """Consume and verify the whole stream, copying it as a side effect.

        Raises the same exceptions as PackStreamReader.iterobjects.
        """
        recorder = self._delta_iter
        if recorder:
            for unpacked in self.read_objects():
                recorder.record(unpacked)
        else:
            for _ in self.read_objects():
                pass
def obj_sha(type, chunks):
    """Compute the SHA-1 digest for an object of numeric *type* over *chunks*.

    *chunks* may be a single bytes object or an iterable of byte chunks.
    """
    digest = sha1()
    digest.update(object_header(type, chunks_length(chunks)))
    if isinstance(chunks, bytes):
        digest.update(chunks)
    else:
        for piece in chunks:
            digest.update(piece)
    return digest.digest()
def compute_file_sha(f, start_ofs=0, end_ofs=0, buffer_size=1 << 16):
    """Hash a portion of a file into a new SHA.

    :param f: A file-like object to read from that supports seek().
    :param start_ofs: The offset in the file to start reading at.
    :param end_ofs: The offset in the file to end reading at, relative to the
        end of the file.
    :param buffer_size: A buffer size for reading.
    :return: A new SHA object updated with data read from the file.
    """
    sha = sha1()
    f.seek(0, SEEK_END)
    length = f.tell()
    if (end_ofs < 0 and length + end_ofs < start_ofs) or end_ofs > length:
        raise AssertionError(
            "Attempt to read beyond file length. "
            "start_ofs: %d, end_ofs: %d, file length: %d" % (
                start_ofs, end_ofs, length))
    todo = length + end_ofs - start_ofs
    f.seek(start_ofs)
    while todo:
        data = f.read(min(todo, buffer_size))
        if not data:
            # Guard against a truncated/shrinking file: the original looped
            # forever when read() returned b'' before `todo` reached zero.
            raise AssertionError("Short read: %d bytes left to hash" % todo)
        sha.update(data)
        todo -= len(data)
    return sha
class PackData(object):
"""The data contained in a packfile.
Pack files can be accessed both sequentially for exploding a pack, and
directly with the help of an index to retrieve a specific object.
The objects within are either complete or a delta against another.
| The header is variable length. If the MSB of each byte is set then it
indicates that the subsequent byte is still part of the header.
For the first byte the next MS bits are the type, which tells you the type
of object, and whether it is a delta. The LS byte is the lowest bits of the
size | . For each subsequent byte the LS 7 bits are the next MS bits of the
size, i.e. the last byte of the header contains the MS bits of the size.
For the complete objects the data is stored as zlib deflated data.
The size in the header is the uncompressed object size, so to uncompress
you need to just keep feeding data to zlib until you get an object back,
or it errors on bad data. This is done here by just giving the complete
buffer from the start of the deflated object on. This is bad, but until I
get mmap sorted out it will have to do.
Currently there are no integrity checks done. Also no attempt is made to
try and detect the delta case, or a request for an object at the wrong
position. It will all just throw a zlib or KeyError.
"""
def __init__(self, filename, file=None, size=None):
    """Create a PackData object representing the pack in the given filename.

    The file must exist and stay readable until the object is disposed of.
    It must also stay the same size. It will be mapped whenever needed.

    Currently there is a restriction on the size of the pack as the python
    mmap implementation is flawed.
    """
    self._filename = filename
    self._size = size  # lazily computed by _get_size() when None
    self._header_size = 12  # fixed pack header length in bytes
    if file is None:
        self._file = GitFile(self._filename, 'rb')
    else:
        self._file = file
    # Reading the header also yields the object count used by __len__.
    (version, self._num_objects) = read_pack_header(self._file.read)
    # Cache of unpacked objects keyed by offset, bounded to ~20 MB.
    self._offset_cache = LRUSizeCache(
        1024*1024*20, compute_size=_compute_object_size)
    self.pack = None
@property
def filename(self):
    """Base name of the pack file on disk (no directory component)."""
    return os.path.basename(self._filename)
@property
def path(self):
    """Full path of the pack file as given to the constructor."""
    return self._filename
@classmethod
def from_file(cls, file, size):
    """Alternate constructor wrapping an already-open file object.

    NOTE(review): str(file) is stored as the filename attribute, which
    yields the object's repr rather than a path -- confirm callers only
    use this for display.
    """
    return cls(str(file), file=file, size=size)
@classmethod
def from_path(cls, path):
    """Alternate constructor: open the pack file at *path*."""
    return cls(filename=path)
def close(self):
    """Close the underlying file; the object is unusable afterwards."""
    self._file.close()
def __enter__(self):
    # Context-manager support: `with PackData(...) as pack_data:`
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    # Always close the file when leaving the `with` block, even on error.
    self.close()
def _get_size(self):
    """Return the pack file size in bytes, computing and caching it lazily."""
    if self._size is not None:
        return self._size
    self._size = os.path.getsize(self._filename)
    # A pack smaller than its fixed 12-byte header cannot be valid.
    if self._size < self._header_size:
        errmsg = ('%s is too small for a packfile (%d < %d)' %
                  (self._filename, self._size, self._header_size))
        raise AssertionError(errmsg)
    return self._size
def __len__(self):
    """Returns the number of objects in this pack (read from the header)."""
    return self._num_objects
def calculate_checksum(self):
    """Calculate the checksum for this pack.

    :return: 20-byte binary SHA1 digest
    """
    # Hash everything except the last 20 bytes, which hold the stored
    # checksum itself (end_ofs is relative to the end of the file).
    return compute_file_sha(self._file, end_ofs=-20).digest()
def get_ref(self, sha):
"""Get the object for a ref SHA, only looking in this pack."""
# TODO: cache these results
|
#!/usr/bin/env python
"""
Copyright 2010-2018 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This module defines the configuration parameters for the BBToolbox script
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
# Import Broadband modules
import cc
import bband_utils
class BBToolboxCfg(object):
    """
    Define the configuration parameters for the SDSU BBToolbox program
    """
    # NOTE(review): class-level mutable dict. parse_src() rebinds it on the
    # instance, but an instance that never parses a source file shares this
    # empty class dict with every other such instance.
    cfgdict = {}

    def getval(self, attr):
        """Return cfgdict[attr]; print an error and exit(1) when missing."""
        try:
            val = self.cfgdict[attr]
        except KeyError:
            # Missing mandatory attribute terminates the whole process.
            print("Invalid Source File - Missing attribute: %s" % (attr))
            print("Exiting")
            sys.exit(1)
        return val

    def parse_src(self, a_srcfile):
        """
        This function calls bband_utils's parse property file function
        to get a dictionary of key, value pairs and then looks for
        the parameters needed by bbtoolbox
        """
        self.cfgdict = bband_utils.parse_properties(a_srcfile)
        # Mandatory source parameters; a missing key exits via getval().
        val = self.getval("depth_to_top")
        self.DEPTH_TO_TOP = float(val)
        val = self.getval("fault_length")
        self.LENGTH = float(val)
        val = self.getval("dip")
        self.DIP = float(val)
        val = self.getval("rake")
        self.RAKE = float(val)
        val = self.getval("hypo_along_stk")
        self.HYPO_ALONG_STK = float(val)
        val = self.getval("hypo_down_dip")
        self.HYPO_DOWN_DIP = float(val)
        val = self.getval("magnitude")
        self.MAG = float(val)
        # float() first so values written like "123.0" still parse.
        val = self.getval("seed")
        self.SEED = int(float(val))
        # Now look for the optional grid parameters
        if 'grid_x' in self.cfgdict:
            self.grid_x = float(self.getval("grid_x"))
        if 'grid_y' in self.cfgdict:
            self.grid_y = float(self.getval("grid_y"))
        if 'grid_z' in self.cfgdict:
            self.grid_z = float(self.getval("grid_z"))
        #
        # Read parameters out of the source file to obtain parameters
        # needed by the BBcoda codes
        #
        fcodes = cc.find_fx_fy_fz(self.HYPO_ALONG_STK,
                                  self.LENGTH,
                                  self.DIP,
                                  self.HYPO_DOWN_DIP,
                                  self.DEPTH_TO_TOP)
        self.fsx = fcodes[0]
        self.fsy = fcodes[1]
        self.fsz = fcodes[2]
        #print ("ETH conversion from hypalongstk: "
        #       "%f flength: %f dip: %f hypdowndip: %f depthtotop: %f\n" %
        #       (self.HYPO_ALONG_STK,
        #        self.LENGTH,
        #        self.DIP,
        #        self.HYPO_DOWN_DIP,
        #        self.DEPTH_TO_TOP))
        #print ("resulting fsx: %f fxy: %f fsz: %s\n" % (self.fsx,
        #                                                self.fsy,
        #                                                self.fsz))

    def calculate_stress(self):
        """
        This function calculates the stress parameters for SDSU based
        on the depth of the fault. These values are calibrated for use
        in Eastern North America
        """
        # Linear in depth-to-top, then scaled by 10**6.
        stress = 16.0 * self.DEPTH_TO_TOP + 225
        stress = stress * 10**6
        return stress

    def __init__(self, a_srcfile=None):
        """
        Set up some parameters for BBToolbox
        """
        self.MAG = None
        self.grid_x = None
        self.grid_y = None
        self.grid_z = 125.0
        self.copy_lf_seismograms = True
        # Parse src file, if given
        if a_srcfile:
            self.parse_src(a_srcfile)
        self.MODALITY = 1
        # GS_FLAG: Don't change it here, override it in the velocity
        # model config file using a 'CODEBASE_SDSU_GS_FLAG = XXX' line
        # 1: Western US (active region),
        # 2: Eastern NA (stable region),
        # 3: Japan
        self.GS_FLAG = 1
        # NGAW_FLAG: Don't change it here, override it in the velocity
        # model config file using a 'CODEBASE_SDSU_NGAW_FLAG = XXX' line
        # 1: NGA-WEST1
        # 2: NGA-WEST2
        self.NGAW_FLAG = 2
        self.KAPPA = 0.04
        self.Q_CODA = 150.0
        self.FDEC = 0.8
        self.AFAC = 41.0
        self.BFAC = 34.0
        self.SOURCE_MECH = "rs"
        self.SOURCE_FUNC = "dreg"
        self.VERBOSE = "on"
        self.TR_SCA = 0.075
        self.STR_FAC = 50.e6
        # 06/10/11: Sandarsh MK
        # Note: Setting FMAX = 20.00 Hz will
        # cause BBtoolbox to produce NaNs in 000 and 090 seismograms.
        self.FMAX = 100.00
if __name__ == "__main__":
    # Smoke test: build a default config (no source file) and report success.
    BBCODA2 = BBToolboxCfg()
    print("Created Test Config Class: %s" % (os.path.basename(sys.argv[0])))
|
#! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002 Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
###############################################################################
#
# $Id: test_LD_X_decr.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $
#
"""Test the LD_X_decr opcode.
"""
import base_test
from registers import Reg, SREG
class LD_X_decr_TestFail(base_test.TestFail): pass
class base_LD_X_decr(base_test.opcode_test):
    """Generic test case for testing LD_X_decr opcode.

    LD_X_decr - Load Indirect from data space to Register using index X and
    pre decrement X.

    Operation: X <- X - 1 then Rd <- (X)

    opcode is '1001 000d dddd 1110' where 0 <= d <= 31 and d != {26,27}

    Only registers PC, R26, R27 and Rd should be changed.
    """
    def setup(self):
        # Set the register values
        self.setup_regs[self.Rd] = 0
        self.setup_regs[Reg.R26] = (self.X & 0xff)  # X low byte
        self.setup_regs[Reg.R27] = ((self.X >> 8) & 0xff)  # X high byte
        # set up the val in memory (memory is read after X is decremented,
        # thus we need to write to memory _at_ X - 1)
        self.mem_byte_write( self.X - 1, self.Vd )
        # Return the raw opcode
        return 0x900E | (self.Rd << 4)

    def analyze_results(self):
        self.reg_changed.extend( [self.Rd, Reg.R26, Reg.R27] )
        # check that result is correct
        expect = self.Vd
        got = self.anal_regs[self.Rd]
        if expect != got:
            self.fail('LD_X_decr: expect=%02x, got=%02x' % (expect, got))
        # check that X was decremented (reassemble X from the two bytes)
        expect = self.X - 1
        got = (self.anal_regs[Reg.R26] & 0xff) | ((self.anal_regs[Reg.R27] << 8) & 0xff00)
        if expect != got:
            self.fail('LD_X_decr X not decr: expect=%04x, got=%04x' % (expect, got))
#
# Template code for test case.
# The fail method will raise a test specific exception.
#
# NOTE: the template and the generation loop below rely on Python 2 only
# constructs ('raise Exc, msg', range() concatenation, the 'exec' statement).
template = """
class LD_X_decr_r%02d_X%04x_v%02x_TestFail(LD_X_decr_TestFail): pass

class test_LD_X_decr_r%02d_X%04x_v%02x(base_LD_X_decr):
	Rd = %d
	X = 0x%x
	Vd = 0x%x
	def fail(self,s):
		raise LD_X_decr_r%02d_X%04x_v%02x_TestFail, s
"""

#
# automagically generate the test_LD_X_decr_rNN_vXX class definitions.
#
# Operation is undefined for d = 26 and d = 27.
#
code = ''
for d in range(0,26)+range(28,32):
	for x in (0x10f, 0x1ff):
		for v in (0xaa, 0x55):
			# the same (d, x, v) triple appears four times in the template
			args = (d,x,v)*4
			code += template % args
exec code
|
import sys
import lofarpipe.support.lofaringredient as ingredient
from lofarpipe.support.baserecipe import BaseRecipe
from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
from lofarpipe.support.remotecommand import ComputeJob
from lofarpipe.support.data_map import DataMap, validate_data_maps, \
align_data_maps
class imager_finalize(BaseRecipe, RemoteCommandRecipeMixIn):
"""
The Imager_finalizer performs a number of steps needed for integrating the
msss_imager_pipeline in the LOFAR framework: It places the image on the
output location in the correcy image type (hdf5).
It also adds some meta data collected from the individual measurement sets
and the found data.
This recipe does not have positional commandline arguments
"""
inputs = {
'awimager_output_map': ingredient.FileField(
'--awimager-output-mapfile',
help = """Mapfile containing (host, path) pairs of created sky
images """
),
'ms_per_image_map': ingredient.FileField(
'--ms-per-image-map',
help = '''Mapfile containing (host, path) pairs of mapfiles used
to create image on that node'''
),
'sourcelist_map': ingredient.FileField(
'--sourcelist-map',
help = '''mapfile containing (host, path) pairs to a list of sources
found in the image'''
),
'sourcedb_map': ingredient.FileField(
'--sourcedb_map',
help = '''mapfile containing (host, path) pairs to a db of sources
found in the image'''
),
'target_mapfile': ingredient.FileField(
'--target-mapfile',
help = "Mapfile containing (host, path) pairs to the concatenated and"
"combined measurement set, the source for the actual sky image"
),
'minbaseline': ingredient.FloatField(
'--minbaseline',
help = '''Minimum length of the baseline used for the images'''
),
'maxbaseline': ingredient.FloatField(
'--maxbaseline',
help = '''Maximum length of the baseline used for the images'''
),
'output_image_mapfile': ingredient.FileField(
'--output-image-mapfile',
help = '''mapfile containing (host, path) pairs with the final
output image (hdf5) location'''
),
'processed_ms_dir': ingredient.StringField(
'--processed-ms-dir',
help = '''Path to directory for processed measurment sets'''
),
'fillrootimagegroup_exec': ingredient.ExecField(
'--fillrootimagegroup_exec',
help = '''Full path to the fillRootImageGroup executable'''
),
'placed_image_mapfile': ingredient.FileField(
'--placed-image-mapfile',
help = "location of mapfile with proced and correctly placed,"
" hdf5 images"
)
}
outputs = {
'placed_image_mapfile': ingredient.StringField()
}
def go(self):
"""
Steps:
1. Load and validate the input datamaps
2. Run the node parts of the recipe
3. Validate node output and format the recipe output
"""
super(imager_finalize, self).go()
# *********************************************************************
# 1. Load the datamaps
awimager_output_map = DataMap.load(
self.inputs["awimager_output_map"])
ms_per_image_map = DataMap.load(
self.inputs["ms_per_image_map"])
sourcelist_map = DataMap.load(self.inputs["sourcelist_map"])
sourcedb_map = DataMap.load(self.inputs["sourcedb_map"])
target_mapfile = DataMap.load(self.inputs["target_mapfile"])
output_image_mapfile = DataMap.load(
self.inputs["output_image_mapfile"])
processed_ms_dir = self.inputs["processed_ms_dir"]
fillrootimagegroup_exec = self.inputs["fillrootimagegroup_exec"]
# Align the skip fields
align_data_maps(awimager_output_map, ms_per_image_map,
sourcelist_map, target_mapfile, output_image_mapfile,
sourcedb_map)
# Set the correct iterator
sourcelist_map.iterator = awimager_output_map.iterator = \
ms_per_image_map.iterator = target_mapfile.iterator = \
output_image_mapfile.iterator = sourcedb_map.iterator = \
DataMap.SkipIterator
# *********************************************************************
# 2. Run the node side of the recupe
command = " python3 %s" % (self.__file__.replace("master", "nodes"))
jobs = []
for (awimager_output_item, ms_per_image_item, sourcelist_item,
target_item, output_image_item, sourcedb_item) in zip(
awimager_output_map, ms_per_image_map, sourcelist_map,
target_mapfile, output_image_mapfile, sourcedb_map):
# collect the files as argument
arguments = [awimager_output_item.file,
ms_per_image_item.file,
sourcelist_item.file,
target_item.file,
output_image_item.file,
self.input | s["minbaseline"],
self.inputs["maxbaseline"],
processed_ms_dir,
| fillrootimagegroup_exec,
self.environment,
sourcedb_item.file]
self.logger.info(
"Starting finalize with the folowing args: {0}".format(
arguments))
jobs.append(ComputeJob(target_item.host, command, arguments))
self._schedule_jobs(jobs)
# *********************************************************************
# 3. Validate the performance of the node script and assign output
succesful_run = False
for (job, output_image_item) in zip(jobs, output_image_mapfile):
if not "hdf5" in job.results:
# If the output failed set the skip to True
output_image_item.skip = True
else:
succesful_run = True
# signal that we have at least a single run finished ok.
# No need to set skip in this case
if not succesful_run:
self.logger.warn("Failed finalizer node run detected")
return 1
output_image_mapfile.save(self.inputs['placed_image_mapfile'])
self.logger.debug(
"Wrote mapfile containing placed hdf5 images: {0}".format(
self.inputs['placed_image_mapfile']))
self.outputs["placed_image_mapfile"] = self.inputs[
'placed_image_mapfile']
return 0
# Standalone execution: run the recipe and propagate its exit status.
if __name__ == '__main__':
    sys.exit(imager_finalize().main())
|
"""
Functions and decorators for making sure the parameters they work on are of
iterable types.
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
import functools
import numbers
def is_integral(obj):
    """Tell whether *obj* is a number of integral type (int, bool, ...)."""
    return isinstance(obj, numbers.Integral)
def is_string(obj):
    """Tell whether *obj* is a string (``basestring`` on py2, ``str`` on py3)."""
    try:
        string_type = basestring
    except NameError:
        # Python 3: ``basestring`` does not exist
        string_type = str
    return isinstance(obj, string_type)
def is_iterable(obj):
    """Tell whether *obj* is an iterable, excluding strings and dicts."""
    if isinstance(obj, dict) or is_string(obj):
        return False
    return hasattr(obj, '__iter__')
def as_iterable(params=None):
    """
    Decorator factory: make sure the marked parameters are iterable.

    In case a single unwrapped value (e.g. an int, a string, ...) is found
    among the marked parameters, it is wrapped in a list before being
    forwarded to the wrapped function.  ``params`` lists positional indexes
    and/or keyword names to check; it defaults to ``[1]`` (``args[1]``).
    """
    # set up default converter and separate positional from keyword arguments
    params = params or [1]
    indexes = [i for i in params if is_integral(i)]
    keys = [k for k in params if is_string(k)]
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            # patch positional arguments, if needed
            if indexes:
                # copy `args` into a new list, wrapping in a list those
                # elements at marked indexes that are not iterables already
                args = [[x] if i in indexes and not is_iterable(x) else x
                        for (i, x) in enumerate(args)]
            # patch keyword arguments, if needed; a marked keyword may be
            # absent from this particular call, in which case there is
            # nothing to wrap (the original raised KeyError here)
            for key in keys:
                if key in kwargs and not is_iterable(kwargs[key]):
                    kwargs[key] = [kwargs[key]]
            # invoke ``fn`` with patched parameters
            return fn(*args, **kwargs)
        return wrapper
    return decorator
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright 2011 Yaşar Arabacı
This file is part of packagequiz.
packagequiz is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRAN | TY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import pyalpm
from pycman import config
import qu | estion as q
from random import choice, randint
from sys import modules
config.init_with_config("/etc/pacman.conf")
localdb = pyalpm.get_localdb()
# Collect every concrete Question subclass defined in the question module;
# the abstract base class itself is excluded.
types = [getattr(q, t) for t in dir(q) if isinstance(getattr(q, t), type)]
questionTypes = [qtype for qtype in types
                 if issubclass(qtype, q.Question) and qtype is not q.Question]
del types
def getRandomQuestion(package=None, numWrongAnswers=3):
    """Return a quiz tuple: (question text, correct answer,
    list of wrong answers, points).

    @param package: A pyalpm.package type; a random one is chosen when None
    @param numWrongAnswers: integer, number of wrong answers to generate
    @return: tuple of size 4
    """
    if package is None:
        package = getRandomPackage()
    qToReturn = None
    while not qToReturn:
        # pick a random question type and delegate to its _<type> generator;
        # a generator may return None (unusable question), so retry
        qtype = choice(questionTypes)
        question = qtype(package)
        func = getattr(modules[__name__], "_" + question.type)
        qToReturn = func(question, numWrongAnswers)
    return qToReturn
def getRandomPackage(exception=None):
    """
    Return a random locally-installed package.

    @param exception: iterable of package *names* that must not be returned
                      (avoids the mutable-default-argument pitfall)
    @return: a pyalpm package
    """
    excluded = exception or ()
    package = choice(localdb.pkgcache)
    while package.name in excluded:
        package = choice(localdb.pkgcache)
    return package
def qgenerator(function):
    """Decorator: turn a per-question wrong-answer generator into a full
    quiz-tuple builder.

    The wrapped *function* returns one candidate wrong answer per call.
    ``generate`` picks the correct answer from the question, then collects
    ``numWrongAnswers`` distinct wrong answers, skipping None values,
    duplicates, and — unlike the original — the correct answer itself.

    Returns (text, correct_answer, wrong_answers, points), or None when the
    question has no usable correct answer.
    """
    def generate(question, numWrongAnswers=3):
        if question.correctAnswer is None:
            return None
        if isinstance(question.correctAnswer, list):
            if not question.correctAnswer:
                return None
            correct_answer = choice(question.correctAnswer)
        else:
            correct_answer = question.correctAnswer
        wrong_answers = []
        while len(wrong_answers) < numWrongAnswers:
            answer = function(question, numWrongAnswers)
            # a wrong answer must be real, unique, and actually wrong
            if (answer is not None and answer != correct_answer
                    and answer not in wrong_answers):
                wrong_answers.append(answer)
        return (question.text, correct_answer, wrong_answers, question.points)
    return generate
@qgenerator
def _definition(question, numWrongAnswers=3):
    """Candidate wrong answer: the description of some other package."""
    other = getRandomPackage([question.package.name])
    return other.desc
@qgenerator
def _depends(question, numWrongAnswers=3):
    """Candidate wrong answer: 'name(description)' of some other package."""
    candidate = getRandomPackage([question.correctAnswer])
    return candidate.name + "(" + candidate.desc + ")"
def _requiredBy(question, numWrongAnswers=3):
    """Build the quiz tuple for a 'required by' question.

    Unlike the @qgenerator-wrapped generators, the correct answer here is
    rendered as 'name(description)', so the tuple is assembled by hand.

    Returns (text, correct_answer, wrong_answers, points), or None when no
    package requires the questioned one.
    """
    if not question.correctAnswer:
        return None
    correct_answer_name = choice(question.correctAnswer)
    correct_answer_package = localdb.get_pkg(correct_answer_name)
    correct_answer = correct_answer_name + "(" + correct_answer_package.desc + ")"
    # never offer a package that really requires the target as a wrong answer
    excluded = list(question.correctAnswer)
    wrong_answers = []
    while len(wrong_answers) < numWrongAnswers:
        pkg = getRandomPackage(excluded)
        answer = pkg.name + "(" + pkg.desc + ")"
        if answer not in wrong_answers:
            wrong_answers.append(answer)
    return (question.text, correct_answer, wrong_answers, question.points)
#@qgenerator
#def _installedSize(question, numWrongAnswers=3):
# (type(question.correctAnswer))
# while True:
# rand = randint(int(question.correctAnswer * 0.1), int(question.correctAnswer * 1.9))
# (rand)
# (type(rand))
# if rand != question.correctAnswer:
# return rand
#
#@qgenerator
#def _maintainer(question, numWrongAnswers=3):
# while True:
# rand_pack = getRandomPackage()
# if rand_pack.packager != question.correctAnswer:
# return rand_pack.packager
#
#@qgenerator
#def _fileOwner(question, numWrongAnswers=3):
#
# return getRandomPackage([question.correctAnswer]).name
if __name__ == "__main__":
    # Smoke test: generate one random question and actually show it; the
    # original merely evaluated the call and discarded the result.
    print(getRandomQuestion())
|
else:
| return self._set_closed()
def _get(self):
if self._is_opened():
return "OPEN"
else:
return "CLOSED"
class Shutter(object):
    """
    Generic shutter object.

    This interface should be used for all types of shutter (motor, fast...).
    You may want to link this shutter with an external control,
    i.e: wago, musst...; in that case put **external-control** in the
    configuration with the object reference.  This external control should
    be compatible with the Switch object and have OPEN/CLOSED states.
    """
    # NOTE: this string was originally placed *after* the class attributes,
    # where it is a no-op expression; at the top it becomes the real __doc__.

    # operating modes
    MANUAL,EXTERNAL,CONFIGURATION = range(3) # modes
    MODE2STR = {MANUAL: ("MANUAL", "Manual mode"),
                EXTERNAL: ("EXTERNAL", "External trigger mode"),
                CONFIGURATION: ("CONFIGURATION", "Configuration mode"),
                }
    # shutter states
    OPEN,CLOSED,UNKNOWN = range(3) # state
    STATE2STR = { OPEN: ("OPEN", "Shutter is open"),
                  CLOSED: ("CLOSED", "Shutter is closed"),
                  UNKNOWN: ("UNKNOWN", "Unknown shutter state"),
                  }
    def lazy_init(func):
        # Decorator: guarantee init() has run, then serialize the wrapped
        # call against other clients via the distributed Lock.
        @functools.wraps(func)
        def func_wrapper(self,*args,**kwargs):
            self.init()
            with Lock(self):
                return func(self,*args,**kwargs)
        return func_wrapper
    def __init__(self,name,config):
        """
        Shutter base constructor.

        name   -- unique shutter name (also used as the settings key)
        config -- configuration mapping; may contain 'external-control'
                  with a Switch-compatible object reference
        """
        self.__name = name
        self.__config = config
        self._external_ctrl = config.get('external-control')
        # persistent per-shutter settings (mode, opening/closing time, ...)
        self.__settings = HashObjSetting('shutter:%s' % name)
        # shared caches so hardware init happens only once system-wide
        self.__initialized_hw = Cache(self,"initialized",
                                      default_value = False)
        self.__state = Cache(self,"state",
                             default_value = Shutter.UNKNOWN)
        self._init_flag = False
        self.__lock = lock.Semaphore()
    def init(self):
        """
        Initialize the shutter in the current mode.
        This method is called by lazy_init.
        """
        if self._external_ctrl is not None:
            # Check if the external control is compatible
            # with a switch object and if it has open/close state
            ext_ctrl = self._external_ctrl
            name = ext_ctrl.name if hasattr(ext_ctrl,'name') else "unknown"
            try:
                states = ext_ctrl.states_list()
                # attribute probes only: set/get must exist on the controller
                ext_ctrl.set
                ext_ctrl.get
            except AttributeError:
                raise ValueError('external-ctrl : {0} is not compatible '
                                 'with a switch object'.format(name))
            else:
                if(not 'OPEN' in states or
                   not 'CLOSED' in states):
                    raise ValueError("external-ctrl : {0} doesn't"
                                     " have 'OPEN' and 'CLOSED' states".format(name))
        if not self._init_flag:
            # software init once per process; hardware init once per system
            self._init_flag = True
            try:
                self._init()
                with Lock(self):
                    with self.__lock:
                        if not self.__initialized_hw.value:
                            self._initialize_hardware()
                            self.__initialized_hw.value = True
            except:
                # roll back so a later call retries the initialization
                self._init_flag = False
                raise
    def _init(self):
        """
        Software initialization hook (communication, internal state...).
        Must be implemented by subclasses.
        """
        raise NotImplementedError
    def _initialize_hardware(self):
        """
        Hardware initialization hook.
        Called only once, by the first client that initializes the shutter.
        """
        pass
    @property
    def name(self):
        """Shutter name (read-only)."""
        return self.__name
    @property
    def config(self):
        """Configuration mapping given at construction (read-only)."""
        return self.__config
    @property
    def settings(self):
        """Persistent settings store for this shutter (read-only)."""
        return self.__settings
    @property
    def mode(self):
        """
        Shutter mode: MANUAL, EXTERNAL or CONFIGURATION.

        In CONFIGURATION mode, shutter can't be opened/closed.
        **CONFIGURATION** could mean that the shutter is in tuning mode
        i.e: changing open/close position in case of a motor.
        In EXTERNAL mode, the shutter will be controlled
        through the external-control handler.
        If no external control is configured open/close
        won't be authorized.
        """
        # defaults to MANUAL when nothing has been persisted yet
        return self.__settings.get('mode',Shutter.MANUAL)
    @mode.setter
    def mode(self,value):
        """Switch operating mode; *value* must be a key of MODE2STR."""
        if value not in self.MODE2STR:
            raise ValueError("Mode can only be: %s" %\
                             ','.join((x[0] for x in self.MODE2STR.values())))
        self.init()
        self._set_mode(value)
        if value in (self.CONFIGURATION,self.EXTERNAL):
            # Can't cache the state if external or configuration
            self.__state.value = self.UNKNOWN
        self.__settings['mode'] = value
    @property
    def state(self):
        """Current shutter state: OPEN, CLOSED or UNKNOWN."""
        self.init()
        mode = self.mode
        if mode == self.MANUAL and self.__state.value == self.UNKNOWN:
            # cache miss in manual mode: query the hardware once and cache
            return_state = self._state()
            self.__state.value = return_state
            return return_state
        else:
            if mode == self.EXTERNAL:
                if self.external_control is not None:
                    # delegate to the external switch; anything other than
                    # "OPEN" is reported as CLOSED
                    switch_state = self.external_control.get()
                    return self.OPEN if switch_state == "OPEN" else self.CLOSED
                else:
                    return self.UNKNOWN
            elif mode == self.CONFIGURATION:
                return self.UNKNOWN
            return self.__state.value
    def _state(self):
        # Subclass hook: read the hardware and return OPEN/CLOSED/UNKNOWN.
        raise NotImplementedError
    @property
    def state_string(self):
        """(short, long) description tuple for the current state."""
        return self.STATE2STR.get(self.state,self.STATE2STR[self.UNKNOWN])
    @property
    def external_control(self):
        """External Switch-compatible controller, or None."""
        return self._external_ctrl
    @lazy_init
    def opening_time(self):
        """
        Return the measured opening time in seconds, or None when it has
        never been measured (see measure_open_close_time).
        """
        return self._opening_time()
    def _opening_time(self):
        # Subclasses may override; default reads the persisted measurement.
        return self.__settings.get('opening_time')
    @lazy_init
    def closing_time(self):
        """
        Return the measured closing time in seconds, or None when it has
        never been measured (see measure_open_close_time).
        """
        return self._closing_time()
    def _closing_time(self):
        # Subclasses may override; default reads the persisted measurement.
        return self.__settings.get('closing_time')
def measure_open_close_time(self):
"""
This small procedure will in basic usage do an open and close
of the shutter to measure the opening and closing time.
Those timing will be register into the settings.
returns (opening,closing) time
"""
previous_mode = self.mode()
try:
if previous_mode != self.MANUAL:
self.mode(self.MANUAL)
opening_time,closing_time = self._measure_open_close_time()
self.__settings['opening_time'] = opening_time
self.__settings['closing_time'] = closing_time
return open_time,close_time
finally:
if previous_mode != self.MANUAL:
self.mode(previous_mode)
    def _measure_open_close_time(self):
        """
        Default open/close timing measurement; may be overloaded.

        Closes the shutter first to reach a known state, then times one
        full open and one full close.  Returns (opening_time, closing_time).
        """
        self.close() # ensure it's closed
        start_time = time.time()
        self.open()
        opening_time = time.time() - start_time
        start_time = time.time()
        self.close()
        closing_time = time.time() - start_time
        return opening_time,closing_time
    @lazy_init
    def open(self):
        """
        Open the shutter.

        MANUAL mode drives the hardware via _open(); EXTERNAL mode drives
        the external control.  Raises RuntimeError in any other mode or
        when EXTERNAL mode has no external control configured.
        """
        mode = self.mode
        if mode == self.EXTERNAL:
            if self._external_ctrl is None:
                raise RuntimeError("Can't open the shutter because no "
                                   "external-control is configured")
            else:
                return self._external_ctrl.set("OPEN")
        elif mode != self.MANUAL:
            raise RuntimeError("Can't open the shutter, in %s" %\
                               self.MODE2STR.get(mode,"Unknown"))
        return self._open()
def _open(sel |
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
import argparse
import json
import os
import re
import urllib.request
_REPO_URL = 'https://repo.maven.apache.org/maven2'  # Maven Central root
_GROUP_NAME = 'io/github/java-diff-utils'
_MODULE_NAME = 'java-diff-utils'
_FILE_EXT = 'jar'
# When set, pins the reported "latest" version instead of querying Maven.
_OVERRIDE_LATEST = None
# Chromium-local patch suffix appended to the upstream version string.
_PATCH_VERSION = 'cr1'
def do_latest():
    """Print the newest available version suffixed with the local patch
    version (e.g. '4.12.0.cr1').

    Honors _OVERRIDE_LATEST when set; otherwise queries the module's
    maven-metadata.xml.
    """
    if _OVERRIDE_LATEST is not None:
        print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
        return
    maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME)
    # Close the HTTP response deterministically instead of leaking it.
    with urllib.request.urlopen(maven_metadata_url) as response:
        metadata = response.read().decode('utf-8')
    # Do not parse xml with the python included parser since it is susceptible
    # to maliciously crafted xmls. Only use regular expression parsing to be
    # safe. RE should be enough to handle what we need to extract.
    match = re.search('<latest>([^<]+)</latest>', metadata)
    if match:
        latest = match.group(1)
    else:
        # if no latest info was found just hope the versions are sorted and the
        # last one is the latest (as is commonly the case).
        latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
    print(latest + f'.{_PATCH_VERSION}')
def get_download_url(version):
    """Print a JSON partial manifest (url/name/ext) for *version*."""
    # The checked-in version carries a '.crN' patch suffix that does not
    # exist upstream; drop it before building the download URL.
    version_no_patch, patch = version.rsplit('.', 1)
    if patch.startswith('cr'):
        version = version_no_patch
    file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME, version, _FILE_EXT)
    file_name = file_url.rsplit('/', 1)[-1]
    partial_manifest = {
        'url': [file_url],
        'name': [file_name],
        'ext': '.' + _FILE_EXT,
    }
    print(json.dumps(partial_manifest))
def main():
    """Command-line entry point with 'latest' and 'get_url' subcommands."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    latest_cmd = subparsers.add_parser('latest')
    latest_cmd.set_defaults(func=lambda _opts: do_latest())
    get_url_cmd = subparsers.add_parser('get_url')
    get_url_cmd.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
    opts = parser.parse_args()
    opts.func(opts)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.utils.translation import ugettext as _
class UserProfile(models.Model):
    '''
    Extra per-user profile data (1:1 with django auth User).

    username: unique, may be NULL (see pre_save_userprofile_handler)
    nickname: mutable, duplicates allowed
    '''
    user = models.OneToOneField(User, unique=True, related_name='profile', verbose_name=_('用户'))
    username = models.CharField(blank=True, null=True, unique=True, max_length=255, verbose_name=_('用户名(唯一)'))
    nickname = models.CharField(blank=True, max_length=255, db_index=True, default='', verbose_name=_('昵称(可变)'))
    created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('创建日期'))
    updated_at = models.DateTimeField(auto_now=True, verbose_name=_('修改日期'))
    class Meta:
        db_table = "auth_userprofile"
        verbose_name = _('用户附加信息')
        verbose_name_plural = _('用户附加信息')
@receiver(pre_save, sender=User)
def pre_save_user_handler(sender, instance, **kwargs):
    '''
    Before saving a User: when the EMAIL_AS_USERNAME feature is enabled,
    mirror the username into the email field.
    '''
    if settings.FEATURES.get('EMAIL_AS_USERNAME'):
        if not instance.email or instance.email.strip() != instance.username.strip():
            instance.email = instance.username
@receiver(post_save, sender=User)
def post_save_user_handler(sender, instance, created, **kwargs):
    # Ensure every User has a UserProfile: create one on first save,
    # then (re)save it so updated_at is refreshed.
    try:
        profile = instance.profile
    except UserProfile.DoesNotExist:
        profile = UserProfile(user=instance)
    profile.save()
@receiver(pre_save, sender=UserProfile)
def pre_save_userprofile_handler(sender, instance, **kwargs):
    '''
    Before saving a profile: store an empty username as None so that
    multiple empty values do not violate the unique constraint.
    '''
    if not instance.username:
        instance.username = None
|
from __future__ import print_function
import sys
sys.path.append('..') # help python find cyton.py relative to scripts folder
from openbci import cyton as bci
import logging
import time
de | f printData(sample):
# os.system('clear')
print("----------------")
print("%f" % (sample.id))
print(sample.channel_data)
print(sample.aux_dat | a)
print("----------------")
if __name__ == '__main__':
    # port = '/dev/tty.OpenBCI-DN008VTF'
    port = '/dev/tty.usbserial-DB00JAM0'
    # port = '/dev/tty.OpenBCI-DN0096XA'
    baud = 115200  # NOTE(review): unused here; presumably the board default — confirm
    logging.basicConfig(filename="test.log", format='%(asctime)s - %(levelname)s : %(message)s', level=logging.DEBUG)
    logging.info('---------LOG START-------------')
    board = bci.OpenBCICyton(port=port, scaled_output=False, log=True)
    print("Board Instantiated")
    # NOTE(review): 'v' appears to reset the board; the 10s sleep lets it
    # settle before streaming — confirm against the OpenBCI Cyton docs.
    board.ser.write('v')
    time.sleep(10)
    board.start_streaming(printData)
    board.print_bytes_in()
|
from src import model as mdl
class LaTeXPrinter(object):
    """Base class that writes generated LaTeX text to a target file."""

    def __init__(self, target_file_path):
        self._target_file_path = target_file_path

    def run(self):
        """Generate the LaTeX source and write it to the target file."""
        with open(self._target_file_path, 'w') as output:
            output.write(self._generate_text())

    def _generate_text(self):
        """Produce the LaTeX source; subclasses must override."""
        raise NotImplementedError('Override me!')
class TablePrinter(LaTeXPrinter):
    """Base class for longtable-style LaTeX tables built from hooks."""

    def __init__(self, target_file_path):
        super(TablePrinter, self).__init__(target_file_path)

    def _generate_text(self):
        """Assemble the complete longtable environment."""
        parts = ['\\rowcolors{3}{aubergine}{white}\n',
                 self._get_table_definition(),
                 '\\toprule\n',
                 self._get_headers(),
                 '\\midrule\n\\endhead\n']
        parts.extend(' & '.join(row) + '\\\\\n'
                     for row in self._get_content())
        parts.append('\\bottomrule\n')
        caption, label = self._get_caption_and_label()
        parts.append('\\rowcolor{white}' + '\\caption{' + caption +
                     '}\\label{' + label + '}\n')
        parts.append('\\end{longtable}\n')
        return ''.join(parts)

    def _get_table_definition(self):
        """Return the \\begin{longtable}{...} line."""
        raise NotImplementedError('Override me!')

    def _get_headers(self):
        """Return the header row of the table."""
        raise NotImplementedError('Override me!')

    def _get_content(self):
        """Returns an iterable of 3-tuples with the ID, the description and the
        parent of the item that needs to be printed.
        """
        raise NotImplementedError('Override me!')

    def _get_caption_and_label(self):
        """Returns the caption and label of the table to print.
        """
        raise NotImplementedError('Override me!')
class UseCaseTablePrinter(TablePrinter):
    """Prints the summary table of all use cases."""

    def __init__(self, target_file_path):
        super(UseCaseTablePrinter, self).__init__(target_file_path)
        self._uc_id_list = mdl.dal.get_all_use_case_ids()

    def _get_table_definition(self):
        return '\\begin{longtable}{lp{.5\\textwidth}l}\n'

    def _get_headers(self):
        # The row must end with a LaTeX line break '\\', i.e. FOUR
        # backslashes in a non-raw Python string; the original ended with
        # '\\\n' (a single backslash), which is invalid LaTeX here.
        return ('\\sffamily\\bfseries ID & \\sffamily\\bfseries Descrizione '
                '& \\sffamily\\bfseries Padre\\\\\n')

    def _get_content(self):
        """Returns an iterable (generator) containing a 3-tuple with the
        ID, description and parent of every use case.
        """
        for uc_id in self._uc_id_list:
            uc = mdl.dal.get_use_case(uc_id)
            yield (uc.uc_id, uc.description, uc.parent_id or '--')

    def _get_caption_and_label(self):
        return ('Prospetto riepilogativo dei casi d\'uso', 'tab:uclist')
class RequirementTablePrinter(TablePrinter):
    # Prints the table of requirements filtered by type (F/D/P/Q) and
    # priority (O/F/D).
    def __init__(self, req_type, priority, target_file_path):
        super(RequirementTablePrinter, self).__init__(target_file_path)
        self._req_type = req_type
        self._priority = priority
        self._req_id_list = mdl.dal.get_all_requirement_ids_spec(
            req_type, priority)
    def _get_table_definition(self):
        return '\\begin{longtable}{lp{.5\\textwidth}ll}\n'
    def _get_headers(self):
        return ('\\sffamily\\bfseries ID & \\sffamily\\bfseries Descrizione & '
                '\\sffamily\\bfseries Fonte & '
                '\\sffamily\\bfseries Padre\\\\\n')
    def _get_content(self):
        # Yields (ID, description, source name, parent) for each requirement.
        for req_id in self._req_id_list:
            req = mdl.dal.get_requirement(req_id)
            source = mdl.dal.get_source(req.source_id)
            yield (req.req_id, req.description, source.name,
                   req.parent_id or '--')
    def _get_caption_and_label(self):
        # Caption spells out the Italian words for the type/priority codes.
        return ('Elenco dei requisiti {0} {1}.'.format(
            ('funzionali' if self._req_type == 'F' else
             'dichiarativi' if self._req_type == 'D' else
             'prestazionali' if self._req_type == 'P' else 'qualitativi'),
            ('obbligatori' if self._priority == 'O' else
             'facoltativi' if self._priority == 'F' else 'desiderabili')),
            'tab:reqlist{0}{1}'.format(self._req_type, self._priority))
class UseCaseRequirementTrackPrinter(TablePrinter):
    """Prints the use case -> requirements traceability table."""

    def __init__(self, target_file_path):
        super(UseCaseRequirementTrackPrinter, self).__init__(target_file_path)
        self._uc_id_list = mdl.dal.get_all_use_case_ids()

    def _get_table_definition(self):
        # '\\textwidth' needs the doubled backslash: the original used
        # '\textwidth' in a non-raw string, silently embedding a TAB
        # character ('\t') in the emitted LaTeX.
        return '\\begin{longtable}{lp{.8\\textwidth}}\n'

    def _get_headers(self):
        return ('\\sffamily\\bfseries Caso d\'uso & '
                '\\sffamily\\bfseries Requisiti associati\\\\\n')

    def _get_content(self):
        """Yields (use case ID, comma-separated associated requirement IDs)."""
        for uc_id in self._uc_id_list:
            req_ids = mdl.dal.get_use_case_associated_requirements(uc_id)
            yield (uc_id, ', '.join(req_ids))

    def _get_caption_and_label(self):
        return ('Tracciamento requisiti -- casi d\'uso.', 'tab:ucreqtrack')
|
#import logging
#logging.basicConfig(level=logging.INFO, datefmt='%H:%M:%S',
# format='%(asctime)s %(levelname)s: %(message)s')
import unittest
import SocketServer, socket
import random, time
import threading
import cStringIO
from datetime import datetime
from shapy import register_settings
register_settings('tests.emulation.settings')
from shapy.emulation.shaper import Shaper
from tests.mixins import ShaperMixin, ServerMixin
from tests.utils import total_seconds
class TestCWCShaping(unittest.TestCase, ShaperMixin, ServerMixin):
    # End-to-end emulation test: shape traffic between two loopback
    # addresses, transfer a random 0.5MB payload in both directions and
    # compare the observed delays (and qdisc counters) with the shaper's
    # estimates.
    filesize = 2**19 # 0.5MB
    def setUp(self):
        self.server_addr = ('127.0.0.2', 55000)
        self.client_addr = ('127.0.0.3', 55001)
        # shaping init
        ShaperMixin.setUp(self)
        ServerMixin.run_server(self)
        # random payload so compression cannot skew the transfer timing
        with open('/dev/urandom', 'rb') as f:
            self.randomfile = bytearray(f.read(self.filesize))
    def test_transfer(self):
        self.sock_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR: http://stackoverflow.com/questions/3229860/what-is-the-meaning-of-so-reuseaddr-setsockopt-option-linux
        s = self.sock_client
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(self.client_addr)
        s.connect(self.server_addr)
        start = datetime.now()
        # client -> server
        sent = 0
        while sent < self.filesize:
            sent += s.send(self.randomfile[sent:sent+4096])
        # We have to wait until the server finishes reading data from its socket
        # and closes the connection.
        rcvd = s.recv(1024)
        delay = total_seconds(datetime.now() - start)
        #delay = delta.seconds + delta.microseconds/float(10**6)
        tt = self.estimate_transfer_time(self.filesize, self.client_addr[0],
                                         self.server_addr[0])
        self.assertAlmostEqual(delay, tt, delta=0.4)
        # server -> client
        start = datetime.now()
        while len(rcvd) < self.filesize:
            rcvd += s.recv(1024)
        delay = total_seconds(datetime.now() - start)
        tt = self.estimate_transfer_time(self.filesize, self.server_addr[0],
                                         self.client_addr[0])
        self.assertAlmostEqual(delay, tt, delta=0.4)
        # statistics of qdiscs on IFB must correctly reflect the transmitted data
        self._test_traffic()
        s.close()
    def _test_traffic(self):
        # per-host byte counters recorded by the shaper
        c = self.sh.get_traffic(self.client_addr[0])
        s = self.sh.get_traffic(self.server_addr[0])
        # qdisc statistics reflect all traffic, including header of each layer,
        # not only filesize
        delta = self.filesize/100
        self.assertAlmostEqual(c[0], self.filesize, delta=delta)
        self.assertAlmostEqual(c[1], self.filesize, delta=delta)
        self.assertAlmostEqual(s[0], self.filesize, delta=delta)
        self.assertAlmostEqual(s[1], self.filesize, delta=delta)
    def tearDown(self):
        if hasattr(self, 'sock_client'):
            self.sock_client.close()
        ShaperMixin.tearDown(self)
|
import os
impor | t jug.backends.redis_store
import jug.backends.file_store
import jug.backends.dict_store
from jug.backends.redis_store import redis
import pytest
# Redis-backed tests are opt-in via the TEST_REDIS environment variable.
if not os.getenv('TEST_REDIS'):
    redis = None
try:
    redisConnectionError = redis.ConnectionError
except AttributeError:
    # redis support disabled or unavailable: substitute an exception type
    # that the store fixture can still catch but will never match.
    redisConnectionError = SystemError
@pytest.fixture(scope='function', params=['file', 'dict', 'redis'])
def store(tmpdir, request):
    """Yield a jug store backend of the parametrized flavour.

    The 'redis' flavour is skipped when redis support is disabled or the
    server cannot be reached.
    """
    if request.param == 'file':
        tmpdir = str(tmpdir)
        yield jug.backends.file_store.file_store(tmpdir)
        jug.backends.file_store.file_store.remove_store(tmpdir)
    elif request.param == 'dict':
        yield jug.backends.dict_store.dict_store()
    elif request.param == 'redis':
        if redis is None:
            pytest.skip()
        try:
            # was `jug.redis_store.redis_store`, which raises
            # AttributeError: the module lives under jug.backends.
            st = jug.backends.redis_store.redis_store('redis:')
            yield st
            st.close()
        except redisConnectionError:
            pytest.skip()
def test_load_get(store):
assert len(list(store.list())) == 0
key = b'jugisbestthingever'
assert not store.can_load(key)
object = list(range(232))
store.dump(object, key)
assert store.can_load(key)
assert store.load(key) == object
flist = list(store.list())
assert len(flist) == 1
assert flist[0] == key
store.remove(key)
assert not store.can_load(key)
def test_lock(store):
    """Only one handle may own a lock at a time; release hands it over."""
    assert len(list(store.listlocks())) == 0
    key = b'jugisbestthingever'
    first = store.getlock(key)
    assert not first.is_locked()
    assert first.get()
    assert not first.get()
    second = store.getlock(key)
    assert not second.get()
    assert len(list(store.listlocks())) == 1
    first.release()
    assert second.get()
    second.release()
def test_lock_remove(store):
    """remove_locks() clears every outstanding lock."""
    assert len(list(store.listlocks())) == 0
    key = b'jugisbestthingever'
    handle = store.getlock(key)
    assert not handle.is_locked()
    assert handle.get()
    assert not handle.get()
    assert len(list(store.listlocks())) == 1
    store.remove_locks()
    assert len(list(store.listlocks())) == 0
def test_lock_fail(store):
    """A failed lock stays listed until remove_locks() clears it."""
    assert len(list(store.listlocks())) == 0
    key = b'jugisbestthingever'
    handle = store.getlock(key)
    assert not handle.is_locked()
    assert handle.get()
    assert not handle.get()
    handle.fail()
    assert handle.is_failed()
    assert len(list(store.listlocks())) == 1
    store.remove_locks()
    assert not handle.is_failed()
    assert len(list(store.listlocks())) == 0
def test_lock_fail_other(store):
    # is_failed should return True even if we can't acquire the lock
    assert len(list(store.listlocks())) == 0
    key = b'jugisbestthingever'
    owner = store.getlock(key)
    other = store.getlock(key)
    assert not owner.is_locked()
    assert not other.is_locked()
    assert owner.get()
    assert not other.get()
    assert not owner.is_failed()
    assert not other.is_failed()
    owner.fail()
    assert other.is_failed()
    assert len(list(store.listlocks())) == 1
    store.remove_locks()
    assert not owner.is_failed()
    assert not other.is_failed()
    assert len(list(store.listlocks())) == 0
def test_numpy_array(tmpdir):
    """A file_store round-trips a numpy array unchanged."""
    try:
        import numpy as np
    except ImportError:
        pytest.skip()
    store = jug.backends.file_store.file_store(str(tmpdir))
    data = (np.arange(100) % 17).reshape((10, 10))
    key = 'mykey'
    store.dump(data, key)
    loaded = store.load(key)
    assert np.all(loaded == data)
    store.remove(key)
    store.close()
def test_numpy_array_no_compress(tmpdir):
    """Round-trip also works with numpy compression turned off."""
    try:
        import numpy as np
    except ImportError:
        pytest.skip()
    store = jug.backends.file_store.file_store(str(tmpdir), compress_numpy=False)
    data = (np.arange(100) % 17).reshape((10, 10))
    key = 'mykey'
    store.dump(data, key)
    loaded = store.load(key)
    assert np.all(loaded == data)
    store.remove(key)
    store.close()
|
#!/usr/bin/python3
import sys
def process_import(filename, statement):
    """Print a graphviz edge from *filename* to every imported module."""
    tokens = statement.replace(",", " ").split()
    for module in tokens[1:]:
        print('"%s" -> "%s"' % (filename, module))
def process_from(fi | lename, statement):
statement = statement.replace(",", " ")
modules = statement.split()
main_module = modules[1]
for module in modules[3:]:
print('"%s" -> "%s" -> "%s"' % (filename, main_module, module))
def print_header():
    """Emit the opening of the graphviz digraph."""
    print("digraph WeCase {")
    print("ratio=2")
def print_footer():
    """Emit the closing brace of the graphviz digraph."""
    print("}")
# Read lines from stdin ("<file>.py" headers followed by its import
# statements) and emit a graphviz dependency graph.
print_header()
filename = None
for line in sys.stdin:
    line = line.replace("\n", "")
    if line.endswith(".py"):
        filename = line
    elif filename is not None:
        # ignore import lines seen before the first filename header;
        # the original raised NameError on such input
        if line.startswith("import"):
            process_import(filename, line)
        elif line.startswith("from"):
            process_from(filename, line)
print_footer()
|
"""
A pretty-printing dump function for the ast module. The code was copied from
the ast.dump function and modified slightly to pretty-prin | t.
Alex Leone (acleone ~AT~ gmail.com), 2010-01-30
"""
from ast import *
def dump(node, annotate_fields=True, include_attri | butes=False, indent=' '):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node, level=0):
if isinstance(node, AST):
fields = [(a, _format(b, level)) for a, b in iter_fields(node)]
if include_attributes and node._attributes:
fields.extend([(a, _format(getattr(node, a), level))
for a in node._attributes])
return ''.join([
node.__class__.__name__,
'(',
', '.join(('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)),
')'])
elif isinstance(node, list):
lines = ['[']
lines.extend((indent * (level + 2) + _format(x, level + 2) + ','
for x in node))
if len(lines) > 1:
lines.append(indent * (level + 1) + ']')
else:
lines[-1] += ']'
return '\n'.join(lines)
return repr(node)
if isinstance(node, list):
return '\n'.join(_format(n) for n in node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
if __name__ == '__main__':
    import sys
    for filename in sys.argv[1:]:
        print('=' * 50)
        print('AST tree for', filename)
        print('=' * 50)
        # context manager guarantees the file is closed even if parsing fails
        with open(filename, 'r') as f:
            fstr = f.read()
        print(dump(parse(fstr, filename=filename), include_attributes=True))
        print()
|
# flake8: noqa: F401
from pandas.core.arrays | .sparse.accessor import SparseAccessor, SparseFrameAccessor
from pandas.core.arrays.sparse.array import (
BlockIndex,
IntIndex,
SparseArray,
_make_index,
)
from pandas.core.arrays.sparse.dtype import Spars | eDtype
|
from stard.services import B | aseService
class Service(BaseService):
    # Composite unit: declares the 'child' service as its only child so that
    # starting this service brings the child up as well.
    def init_service(self):
        # NOTE(review): self.service() is inherited from BaseService —
        # presumably it resolves a service definition by name; confirm
        # against stard's API before relying on this.
        self.children = {self.service('child')}
|
if force_unicode(initial_value) != force_unicode(data_value):
return True
return False
    def render(self, name, value, attrs=None):
        """Render three cooperating <select>s (site, visible page, hidden
        page cache) plus the inline script that keeps them in sync."""
        # THIS IS A COPY OF django.forms.widgets.MultiWidget.render()
        # (except for the last line)
        # value is a list of values, each corresponding to a widget
        # in self.widgets.
        site_choices = get_site_choices()
        page_choices = get_page_choices()
        self.site_choices = site_choices
        self.choices = page_choices
        # Widget 2 holds all pages grouped per site and stays hidden; the
        # script below copies the matching optgroup into widget 1 whenever
        # the site selection changes.
        self.widgets = (Select(choices=site_choices ),
                   Select(choices=[('', '----')]),
                   Select(choices=self.choices, attrs={'style': "display:none;"} ),
        )
        if not isinstance(value, list):
            value = self.decompress(value)
        output = []
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id', None)
        for i, widget in enumerate(self.widgets):
            try:
                widget_value = value[i]
            except IndexError:
                # Fewer values than widgets: render the rest empty.
                widget_value = None
            if id_:
                # Give each sub-widget a derived id: <id>_0, <id>_1, <id>_2.
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
            output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
        output.append(r'''<script type="text/javascript">
            (function($) {
                var handleSiteChange = function(site_name, selected_id) {
                    $("#id_%(name)s_1 optgroup").remove();
                    var myOptions = $("#id_%(name)s_2 optgroup[label='" + site_name + "']").clone();
                    $("#id_%(name)s_1").append(myOptions);
                    $("#id_%(name)s_1").change();
                };
                var handlePageChange = function(page_id) {
                    if (page_id) {
                        $("#id_%(name)s_2 option").removeAttr('selected');
                        $("#id_%(name)s_2 option[value=" + page_id + "]").attr('selected','selected');
                    } else {
                        $("#id_%(name)s_2 option[value=]").attr('selected','selected');
                    };
                };
                $("#id_%(name)s_0").change(function(){
                    var site_label = $("#id_%(name)s_0").children(":selected").text();
                    handleSiteChange( site_label );
                });
                $("#id_%(name)s_1").change(function(){
                    var page_id = $(this).find('option:selected').val();
                    handlePageChange( page_id );
                });
                $(function(){
                    handleSiteChange( $("#id_%(name)s_0").children(":selected").text() );
                    $("#add_id_%(name)s").hide();
                });
            })(django.jQuery);
            </script>''' % {'name': name})
        return mark_safe(self.format_output(output))
def format_output(self, rendered_widgets):
return u' '.join(rendered_widgets)
class PageSmartLinkWidget(TextInput):
    """Text input enhanced with a select2 autocomplete that looks up pages
    through an AJAX view and lets the user pick (or type) a link target.

    NOTE(review): render() reads ``self.language`` which is never assigned
    here — presumably set by the form using this widget; confirm before
    reusing the widget standalone.
    """
    def __init__(self, attrs=None, ajax_view=None):
        super(PageSmartLinkWidget, self).__init__(attrs)
        # Resolve the AJAX endpoint once, at construction time.
        self.ajax_url = self.get_ajax_url(ajax_view=ajax_view)
    def get_ajax_url(self, ajax_view):
        # Reverse the view name to a URL; fail loudly when it cannot be
        # reversed so a misconfiguration is caught at form creation.
        try:
            return reverse_lazy(ajax_view)
        except NoReverseMatch:
            raise Exception(
                'You should provide an ajax_view argument that can be reversed to the PageSmartLinkWidget'
            )
    def render(self, name=None, value=None, attrs=None):
        """Emit the select2 bootstrap script, then the underlying <input>."""
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id', None)
        output = [r'''<script type="text/javascript">
            (function($){
                $(function(){
                    $("#%(element_id)s").select2({
                        placeholder: "%(placeholder_text)s",
                        allowClear: true,
                        minimumInputLength: 3,
                        ajax: {
                            url: "%(ajax_url)s",
                            dataType: 'json',
                            data: function (term, page) {
                                return {
                                    q: term, // search term
                                    language_code: '%(language_code)s'
                                };
                            },
                            results: function (data, page) {
                                return {
                                    more: false,
                                    results: $.map(data, function(item, i){
                                        return {
                                            'id':item.redirect_url,
                                            'text': item.title + ' (/' + item.path + ')'}
                                        }
                                    )
                                };
                            }
                        },
                        // Allow creation of new entries
                        createSearchChoice:function(term, data) { if ($(data).filter(function() { return this.text.localeCompare(term)===0; }).length===0) {return {id:term, text:term};} },
                        multiple: false,
                        initSelection : function (element, callback) {
                            var initialValue = element.val()
                            callback({id:initialValue, text: initialValue});
                        }
                    });
                })
            })(django.jQuery);
        </script>''' % {
            'element_id': id_,
            'placeholder_text': final_attrs.get('placeholder_text', ''),
            'language_code': self.language,
            'ajax_url': force_unicode(self.ajax_url)
        }]
        output.append(super(PageSmartLinkWidget, self).render(name, value, attrs))
        return mark_safe(u''.join(output))
    class Media:
        # select2 assets; jQuery is expected to be provided by the admin
        # (the bundled copy is deliberately commented out below).
        css = {
            'all': ('cms/js/select2/select2.css',
                    'cms/js/select2/select2-bootstrap.css',)
        }
        js = (#'cms/js/libs/jquery.min.js',
              'cms/js/select2/select2.js',)
class UserSelectAdminWidget(Select):
    """Special widget used in page permission inlines, because we have to render
    an add user (plus) icon, but point it somewhere else - to special user creation
    view, which is accessible only if user haves "add user" permissions.
    Current user should be assigned to widget in form constructor as an user
    attribute.
    """
    def render(self, name, value, attrs=None, choices=()):
        # Start with the plain <select> markup from the parent class.
        output = [super(UserSelectAdminWidget, self).render(name, value, attrs, choices)]
        # Only superusers, or users holding the PageUser "add" permission,
        # get the "+" (add another) icon next to the select.
        if hasattr(self, 'user') and (self.user.is_superuser or \
            self.user.has_perm(PageUser._meta.app_label + '.' + PageUser._meta.get_add_permission())):
            # append + icon
            add_url = '../../../cms/pageuser/add/'
            output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
                    (add_url, name))
            output.append(u'<img src="%sicon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (CMS_ADMIN_ICON_BASE, _('Add Another')))
        return mark_safe(u''.join(output))
class AppHookSelect(Select):
"""Special widget used for the App Hook selector in the Advanced Settings
of the Page Admin. It adds support for a data attribute per option and
includes supporting JS into the page.
"""
    class Media:
        # JS that reads the data-namespace attributes rendered by
        # render_option below and wires up the admin behaviour.
        js = ('cms/js/modules/cms.base.js', 'cms/js/modules/cms.app_hook_select.js', )
def __init__(self, attrs=None, choices=(), app_namespaces={}):
self.app_namespaces = app_namespaces
super(AppHookSelect, self).__init__(attrs, choices)
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
if option_value in self.app_namespaces:
data_html = mark_safe(' data-namespace="%s"' % self.app_namespaces[option_value])
else:
data_html = ''
return '<option value="%s"%s%s>%s</option>' % (
option_value,
selected_html,
data_html,
force_text(option_label),
)
def render_options(self, choices, selected_choices):
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
output.append(self.render_option(selected_choices, option_value, option_label))
|
from django.forms import Media
from wagtail.admin.staticfiles import versioned_static
# Feature objects: these are mapped to feature identifiers within the rich text
# feature registry (wagtail.core.rich_text.features). Each one implements
# a `construct_options` method which modifies an options dict | as appropriate to
# enable that feature.
# Additionally, a Feature object defines a media property
# (https://docs.djangoproject.com/en/stable/topics/forms/media/) to specify css/js
# files to import when the feature is active.
class Feature:
    """Base class for rich text features.

    Holds the JS/CSS assets the feature needs; subclasses add a
    ``construct_options`` method that enables the feature in an options dict.
    """
    def __init__(self, js=None, css=None):
        # Falsy arguments (None, empty) normalize to fresh empty containers.
        self.js = js if js else []
        self.css = css if css else {}
    @property
    def media(self):
        """Django Media object referencing this feature's versioned assets."""
        scripts = [versioned_static(path) for path in self.js]
        styles = {
            media_type: [versioned_static(path) for path in paths]
            for media_type, paths in self.css.items()
        }
        return Media(js=scripts, css=styles)
class BooleanFeature(Feature):
    """
    A feature which is enabled by a boolean flag at the top level of
    the options dict
    """
    def __init__(self, option_name, **kwargs):
        super().__init__(**kwargs)
        # Name of the top-level options key this feature switches on.
        self.option_name = option_name
    def construct_options(self, options):
        options.update({self.option_name: True})
class ListFeature(Feature):
    """
    Abstract class for features that are defined in a list within the options dict.
    Subclasses must define option_name
    """
    def __init__(self, data, **kwargs):
        super().__init__(**kwargs)
        # The entry appended to options[option_name] when enabled.
        self.data = data
    def construct_options(self, options):
        # Create the list on first use, then append this feature's entry.
        options.setdefault(self.option_name, []).append(self.data)
class EntityFeature(ListFeature):
    """A feature which is listed in the entityTypes list of the options"""
    # Key inside the editor options dict that this feature appends to.
    option_name = 'entityTypes'
class BlockFeature(ListFeature):
    """A feature which is listed in the blockTypes list of the options"""
    # Key inside the editor options dict that this feature appends to.
    option_name = 'blockTypes'
class InlineStyleFeature(ListFeature):
    """A feature which is listed in the inlineStyles list of the options"""
    # Key inside the editor options dict that this feature appends to.
    option_name = 'inlineStyles'
|
from contextlib import closing
from flask import current_app
from summer.app import create_app
from summer.db.connect import connect_db
def init_db():
    """(Re)create the database schema for the 'product' app.

    Runs ./summer/schema.sql against the configured database inside an
    application context.
    """
    app = create_app('product')
    # Fix: the original pushed an app context and never popped it; the
    # context-manager form guarantees cleanup even on error.
    with app.app_context():
        with closing(connect_db()) as db:
            with open('./summer/schema.sql', mode='r') as f:
                # executescript() is sqlite-specific — NOTE(review): confirm
                # connect_db() always returns an sqlite connection.
                db.cursor().executescript(f.read())
            db.commit()
# Allow running this module directly as a one-shot schema bootstrap.
if __name__ == '__main__':
    init_db()
|
fr | om Model import *
| |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
import json
import logging
from flask import make_response, request, Response, current_app
from werkzeug.routing import BaseConverter
from netman.api import NETMAN_API_VERSION
from netman.core.objects.exceptions import UnknownResource, Conflict, InvalidValue
def to_response(fn):
    """Decorator for API controller methods.

    Converts the wrapped method's (code, data) return value into a Flask
    response, maps known domain exceptions to HTTP status codes, logs the
    outcome and negotiates the Netman API version header.
    """
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        try:
            result = fn(self, *args, **kwargs)
            if isinstance(result, Response):
                # NOTE(review): a ready-made Response is returned as-is and
                # bypasses the logging and version-header code below —
                # presumably intentional; confirm.
                return result
            else:
                code, data = result
                if data is not None:
                    response = json_response(data, code)
                else:
                    # No payload: empty body with the given status code.
                    response = make_response("", code)
        # Order matters only in that Exception must come last; the domain
        # exceptions map 1:1 onto HTTP status codes.
        except InvalidValue as e:
            response = exception_to_response(e, 400)
        except UnknownResource as e:
            response = exception_to_response(e, 404)
        except Conflict as e:
            response = exception_to_response(e, 409)
        except NotImplementedError as e:
            response = exception_to_response(e, 501)
        except Exception as e:
            # Unexpected failure: log the full traceback, answer 500.
            logging.exception(e)
            response = exception_to_response(e, 500)
        self.logger.info("Responding {} : {}".format(response.status_code, response.data))
        if 'Netman-Max-Version' in request.headers:
            # Serve the highest API version both sides support.
            response.headers['Netman-Version'] = min(
                float(request.headers['Netman-Max-Version']),
                NETMAN_API_VERSION)
        return response
    return wrapper
def exception_to_response(exception, code):
    """Serialize *exception* into a JSON error response with status *code*.

    With the Netman-Verbose-Errors header the exception's module/class are
    exposed; otherwise an empty message is replaced by a generic one.
    """
    data = {'error': str(exception)}
    if "Netman-Verbose-Errors" in request.headers:
        if hasattr(exception, "__module__"):
            data["error-module"] = exception.__module__
        data["error-class"] = exception.__class__.__name__
    elif data['error'] == "":
        # Terse mode with an empty message: synthesize one from the class.
        if hasattr(exception, "__module__"):
            data['error'] = "Unexpected error: {}.{}".format(exception.__module__, exception.__class__.__name__)
        else:
            data['error'] = "Unexpected error: {}".format(exception.__class__.__name__)
    response = json_response(data, code)
    response.status_code = code
    return response
def json_response(data, code):
    """Build a JSON response with status *code* from a JSON-serializable *data*."""
    body = json.dumps(data, indent=None)
    response = current_app.response_class(
        body, mimetype='application/json; charset=UTF-8')
    response.status_code = code
    return response
class RegexConverter(BaseConverter):
    # URL converter whose matching pattern is supplied verbatim by the route
    # declaration, e.g. <regex("[a-z]+"):name>.
    def __init__(self, url_map, *items):
        super(RegexConverter, self).__init__(url_map)
        # The first (and only expected) converter argument is the regex.
        self.regex = items[0]
class BadRequest(InvalidValue):
    # Semantic alias: malformed request payloads are reported as HTTP 400
    # through the InvalidValue handler in to_response.
    pass
class MultiContext(object):
    """Aggregate several context-manager factories into a single one.

    Each entry of *contexts* is instantiated with *switch_api*, handed
    *parameters* through its process() method, and entered/exited together.
    """
    def __init__(self, switch_api, parameters, *contexts):
        self.context_instances = []
        for factory in contexts:
            instance = factory(switch_api)
            instance.process(parameters)
            self.context_instances.append(instance)
        self.parameters = parameters
    def __enter__(self):
        # The with-statement target receives the list of child enter values.
        return [instance.__enter__() for instance in self.context_instances]
    def __exit__(self, type_, value, traceback):
        for instance in self.context_instances:
            instance.__exit__(type_, value, traceback)
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This | program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for mor | e details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bwtool(AutotoolsPackage):
    """bwtool is a command-line utility for bigWig files."""
    homepage = "https://github.com/CRG-Barcelona/bwtool"
    url = "https://github.com/CRG-Barcelona/bwtool/archive/1.0.tar.gz"
    # md5 checksum of the 1.0 release tarball
    version('1.0', 'cdd7a34ae457b587edfe7dc8a0bdbedd')
    # libbeato supplies the bigWig I/O routines bwtool links against
    depends_on('libbeato')
|
import bleach
from pyramid.config import Configurator
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from sqlalchemy import engine_from_config
from .models import (
DBSession,
Base,
)
from .utils import load_local_settings
from sqlalchemy_i18n.manager import translation_manager
from .security import (
RootFactory,
group_membership,
)
from .views.task import check_task_expiration
from apscheduler.schedulers.background import BackgroundScheduler
# NOTE(review): importing this module starts a background scheduler thread
# as a side effect — every process that imports it gets its own scheduler.
scheduler = BackgroundScheduler()
scheduler.start()
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.

    Wires the DB engine, auth/session policies, all routes, i18n and the
    periodic task-expiration job, then builds the WSGI app.
    """
    settings['mako.directories'] = 'osmtm:templates'
    load_local_settings(settings)
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine
    # NOTE(review): hard-coded auth/session secrets ('super_secret',
    # 'itsasecret') should come from settings in a real deployment.
    authn_policy = AuthTktAuthenticationPolicy(
        secret='super_secret',
        callback=group_membership)
    authz_policy = ACLAuthorizationPolicy()
    config = Configurator(settings=settings,
                          root_factory=RootFactory,
                          authentication_policy=authn_policy,
                          authorization_policy=authz_policy)
    # fixes backwards incompatibilities when running Pyramid 1.5a
    # https://pypi.python.org/pypi/pyramid#features
    config.include('pyramid_mako')
    # pyramid_tm uses the transaction module to begin/commit/rollback
    # transaction when requests begin/end.
    config.include('pyramid_tm')
    # enable exception logger
    config.include('pyramid_exclog')
    session_factory = UnencryptedCookieSessionFactoryConfig('itsasecret')
    config.set_session_factory(session_factory)
    config.add_static_view('static', 'static', cachebust=True)
    # Fix: route patterns containing regexes use raw strings so '\d' is not
    # treated as an (invalid) string escape on newer Python versions.
    config.add_route('home', '/')
    config.add_route('home_json', '/projects.json')
    config.add_route('about', '/about')
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.add_route('oauth_callback', '/oauth_callback')
    config.add_route('project_new', '/project/new')
    config.add_route('project_new_grid', '/project/new/grid')
    config.add_route('project_new_arbitrary', '/project/new/arbitrary')
    config.add_route('project_grid_simulate', '/project/grid_simulate')
    config.add_route('project_json', r'/project/{project:\d+}.json')
    config.add_route('project', r'/project/{project:\d+}')
    config.add_route('project_edit', r'/project/{project:\d+}/edit')
    config.add_route('project_publish', r'/project/{project:\d+}/publish')
    config.add_route('project_check_for_update',
                     r'/project/{project:\d+}/check_for_updates')
    config.add_route('project_contributors',
                     r'/project/{project:\d+}/contributors', xhr=True)
    config.add_route('project_stats', r'/project/{project:\d+}/stats')
    config.add_route('project_tasks_json', r'/project/{project:\d+}/tasks.json')
    config.add_route('project_user_add', r'/project/{project:\d+}/user/{user}',
                     request_method="PUT")
    config.add_route('project_user_delete',
                     r'/project/{project:\d+}/user/{user}',
                     request_method="DELETE")
    config.add_route('project_preset', r'/project/{project:\d+}/preset')
    config.add_route('project_users', r'/project/{project:\d+}/users')
    config.add_route('project_invalidate_all',
                     r'/project/{project:\d+}/invalidate_all')
    config.add_route('project_message_all',
                     r'/project/{project:\d+}/message_all')
    config.add_route('task_random', r'/project/{project:\d+}/random', xhr=True)
    config.add_route('task_empty', r'/project/{project:\d+}/task/empty',
                     xhr=True)
    config.add_route('task_xhr', r'/project/{project:\d+}/task/{task:\d+}',
                     xhr=True)
    config.add_route('task_done',
                     r'/project/{project:\d+}/task/{task:\d+}/done', xhr=True)
    config.add_route('task_lock',
                     r'/project/{project:\d+}/task/{task:\d+}/lock', xhr=True)
    config.add_route('task_unlock',
                     r'/project/{project:\d+}/task/{task:\d+}/unlock', xhr=True)
    config.add_route('task_split',
                     r'/project/{project:\d+}/task/{task:\d+}/split', xhr=True)
    config.add_route('task_validate',
                     r'/project/{project:\d+}/task/{task:\d+}/validate',
                     xhr=True)
    config.add_route('task_cancel_done',
                     r'/project/{project:\d+}/task/{task:\d+}/cancel_done',
                     xhr=True)
    config.add_route('task_comment',
                     r'/project/{project:\d+}/task/{task:\d+}/comment',
                     xhr=True)
    config.add_route('task_gpx', r'/project/{project:\d+}/task/{task:\d+}.gpx')
    config.add_route('task_osm', r'/project/{project:\d+}/task/{task:\d+}.osm')
    config.add_route('task_assign',
                     r'/project/{project:\d+}/task/{task:\d+}/user/{user}',
                     xhr=True)
    config.add_route('task_assign_delete',
                     r'/project/{project:\d+}/task/{task:\d+}/user', xhr=True,
                     request_method="DELETE")
    config.add_route('task_difficulty',
                     r'/project/{project:\d+}/task/{task:\d+}/difficulty/' +
                     r'{difficulty:\d+}', xhr=True)
    config.add_route('task_difficulty_delete',
                     r'/project/{project:\d+}/task/{task:\d+}/difficulty',
                     xhr=True, request_method='DELETE')
    config.add_route('task_users',
                     r'/project/{project:\d+}/task/{task:\d+}/users')
    config.add_route('labels', '/labels')
    config.add_route('label_new', '/label/new')
    config.add_route('label_edit', r'/label/{label:\d+}/edit')
    config.add_route('label_delete', r'/label/{label:\d+}/delete')
    config.add_route('users', '/users')
    config.add_route('users_json', '/users.json')
    config.add_route('user_messages', '/user/messages')
    config.add_route('user_messages_check', '/user/messages/check')
    config.add_route('user', '/user/{username}')
    config.add_route('user_admin', r'/user/{id:\d+}/admin')
    config.add_route('user_project_manager', r'/user/{id:\d+}/project_manager')
    config.add_route('user_validator', r'/user/{id:\d+}/validator')
    config.add_route('user_experienced_mapper',
                     r'/user/{id:\d+}/experienced_mapper')
    config.add_route('user_prefered_editor',
                     '/user/prefered_editor/{editor}', xhr=True)
    config.add_route('user_prefered_language',
                     '/user/prefered_language/{language}', xhr=True)
    config.add_route('licenses', '/licenses')
    config.add_route('license_new', '/license/new')
    config.add_route('license', r'/license/{license:\d+}')
    config.add_route('license_edit', r'/license/{license:\d+}/edit')
    config.add_route('license_delete', r'/license/{license:\d+}/delete')
    config.add_route('message_read', r'/message/read/{message:\d+}')
    config.add_route('markdown_ref', '/markdown_ref')
    config.add_translation_dirs('osmtm:locale')
    config.set_locale_negotiator('osmtm.i18n.custom_locale_negotiator')
    translation_manager.options.update({
        'locales': settings['available_languages'].split(),
        'get_locale_fallback': True
    })
    config.scan(ignore=['osmtm.tests', 'osmtm.scripts'])
    # NOTE(review): mutates bleach's module-global whitelist; calling main()
    # twice appends duplicate tags (harmless but sloppy).
    bleach.ALLOWED_TAGS.append(u'p')
    bleach.ALLOWED_TAGS.append(u'pre')
    check_expiration_interval = int(
        settings.get('check_expiration_interval', 5)
    )
    # Periodically expire stale task locks via the module-level scheduler.
    scheduler.add_job(check_task_expiration, 'interval',
                      seconds=check_expiration_interval,
                      replace_existing=True)
    return config.make_wsgi_app()
|
# DJANGO 1.10.5 LOCAL SETTINGS
# https://docs.djangopr | oject.com/en/1.10/topics/settings/
# ==================================================================================================
from .base import *
# Local development overrides: never run production with DEBUG enabled.
DEBUG = True
# APP CONFIGURATION
# https://docs.djangoproject.com/en/1.10/ref/applications
# ==================================================================================================
# Add your local apps here
INSTALLED_APPS += []
|
(job_ids)", nl=True, tab=1)
self._print(message="finally:")
self._print(message="pl.log_pipeline_footer(l)", tab=1)
self._print(message="pl.log_info(l, '{0} pipeline finished with exit code {1}. Please check the logs.'.format(pipeline_name, rc))", tab=1)
self._print(message="sys.exit(rc)", nl=True, tab=1)
logging.info('successfully pasted pipelines.')
def _get_parent_tasks(self):
for t in self.tags:
ptasks = ["{0}_{1}_function".format(self.wf.nodes[p].component_name, p)
for p in self.wf.nodes[t].dependencies]
ptasks = ", ".join(ptasks)
self.parent_tasks[t] = ptasks
    def _get_io_connections(self):
        """Build, for every tag, the source-code string that wires each node's
        input parameters to its parents' component output arguments in the
        generated pipeline script."""
        ## all connections of node n that feed the stop parameter p
        group_ioc = lambda n, p: [ioc for ioc in n.io_connections if ioc.stop_param == p]
        for t in self.tags:
            node = self.wf.nodes[t]
            # map: stop parameter -> every connection feeding it
            iocs = dict((ioc.stop_param, group_ioc(node, ioc.stop_param))
                        for ioc in node.io_connections)
            iostr = []
            for k, v_list in iocs.items():
                ## prepend key k (parameter name) with '__pipeline__' to avoid
                ## accidental collision with python reserved keywords
                k = '__pipeline__' + k
                ## v_list is a list of tuples (tag, param)
                ## change it to list of [tag_component.args.param]
                v = [i.start_node + "_component.args." + i.start_param for i in v_list]
                if len(v) == 1:
                    ## make sure merge node always has a list as its input.
                    if '_MERGER_' in t:
                        v = '[' + v[0] + ']'
                    else:
                        v = v[0]
                ## change the list to a string like '[1,2,...]'
                else:
                    v = '[' + ', '.join(v) + ']'
                ## paste [k, v] pair together to generate the string
                ## '__pipeline__k=[__tagname__.component.args.v]'
                iostr.append("=".join([k, v]))
            self.io_connections[t] = ", ".join(iostr)
def _get_input_arguments(self):
for t in self.tags:
d = self.wf.nodes[t].input_arguments
c = self.wf.nodes[t].component_name
astr = ["=".join(['__pipeline__' + k, repr(validate_argument(v,k,c))])
for k,v in d.iteritems() if v != '__OPTIONAL__']
self.input_arguments[t] = ", ".join(astr)
    def _get_decorators(self):
        """Compose, per tag, the stacked ruffus/logging decorator block that
        precedes the generated task function."""
        for t in self.tags:
            c = self.wf.nodes[t].component_name
            dp = self.wf.nodes[t].dependencies
            # Run only after all parent tasks have completed.
            decor = "@ruffus.follows(*[{0}])\n".format(self.parent_tasks[t])
            if c == 'breakpoint':
                decor += "@ruffus.parallel('{0}', '{1}', {2})\n".format(c, t, dp)
            else:
                decor += "@ruffus.parallel({0}_component.component_name, '{0}', {1})\n".format(t, dp)
            # Skip the task when its sentinel file already exists.
            decor += "@ruffus.check_if_uptodate(rm.sentinel_file_exists)\n"
            decor += "@LogWarnErr(l)\n"
            decor += "@LogInfo(l)"
            self.decors[t] = decor
    def _get_exception_handler(self, tag):
        """Return, as source text, the try/except block that runs *tag*'s job
        and records its return code in the generated script.

        Breakpoint components just drop a sentinel and raise; otherwise the
        job runs locally (no mode) or via the cluster job manager.
        """
        mode = self.modes[tag]
        mem = self.mems[tag]
        ncpu = self.num_cpus[tag]
        newline = '\n'
        indent = ' ' * 4
        if self.wf.nodes[tag].component_name == 'breakpoint':
            expt_str = ("{ind}rm.generate_sentinel_file(task_name){nl}"
                        "{ind}raise KeyboardInterrupt('breakpoint')"
                        ).format(ind=indent, nl=newline)
            return expt_str
        if not mode:
            # Local execution through the local job manager.
            expt_str = ("{ind}try:{nl}{ind}{ind}rc = ljm.run_job"
                        "(cmd=run_script, job_name=job_name){nl}"
                        ).format(ind=indent, nl=newline)
        else:
            # Cluster execution with memory/CPU requirements.
            expt_str = ("{ind}try:{nl}{ind}{ind}rc = cjm.run_job"
                        "(cmd=run_script, mem='{mem}', ncpus={ncpu}, job_name=job_name)"
                        "{nl}").format(mem=mem, ncpu=ncpu, ind=indent, nl=newline)
        # Record the return code; a zero rc drops the sentinel file.
        expt_str += ("{ind}{ind}job_rcs.put(rc){nl}"
                     "{ind}{ind}if rc == 0:{nl}"
                     "{ind}{ind}{ind}rm.generate_sentinel_file(task_name){nl}"
                     ).format(ind=indent, nl=newline)
        # 98 marks a job that died with an unexpected exception.
        expt_str += ("{ind}except KeyboardInterrupt:{nl}"
                     "{ind}{ind}raise{nl}"
                     "{ind}except:{nl}"
                     "{ind}{ind}job_rcs.put(98){nl}"
                     "{ind}{ind}traceback.print_exc()").format(ind=indent, nl=newline)
        return expt_str
    def _get_function_signatures(self):
        """Generate, per tag, the source text of the task function the
        pipeline script will define (note: the emitted code uses Python 2
        print statements — the generated script targets Python 2)."""
        newline = '\n'
        indent = ' ' * 4
        for t in self.tags:
            c = self.wf.nodes[t].component_name
            chunk = self.wf.nodes[t].chunk
            bp = self.wf.nodes[t].boilerplate
            ## print it with quotations if string
            if isinstance(chunk, str):
                chunk = repr(chunk)
            if isinstance(bp, str):
                bp = repr(bp)
            func_str = ("def {0}_{1}_function(*inargs):{nl}"
                        "{ind}component_name, task_name, _ = inargs{nl}"
                        "{ind}print '%s for %s started in %s pipeline' % "
                        "(task_name, component_name, args.pipeline_name)"
                        "{nl}").format(c, t, ind=indent, nl=newline)
            if c == 'breakpoint':
                func_str += ("{ind}print 'breakpoint happened in %s' % (task_name)"
                             "{nl}").format(ind=indent, nl=newline)
            else:
                # Real components build their run script and a job name.
                func_str += ("{ind}run_script = rm.generate_script({0}_task, {1}, {2}){nl}"
                             "{ind}job_name = rm.get_filename_prefix(task_name){nl}"
                             "{ind}time.sleep(1)"
                             "{nl}").format(t, chunk, bp, ind=indent, nl=newline)
            # Append the try/except runner produced above.
            func_str += self._get_exception_handler(t)
            self.func_strs[t] = func_str
def _print(self, message=None, comment=None, nl=False, tab=None):
try:
if tab:
self.pipeline_script.write(" " * 4 * tab)
if message:
self.pipeline_script.write(message + '\n')
if comment:
comment_str = "#" + "=" * 80 + "\n#" + comment + "\n#" + "-" * 80
self.pipeline_script.write(comment_str + '\n')
if nl:
self.pipeline_script.write('\n')
except:
raise Exception("failed to write to %s" % self.pipeline_script)
def _write_importing(self, import_dict, comment='import modules'):
self._print(comment=comment)
for k, v in import_dict.iteritems():
if len(v) == 0:
self._print(message = "import {0}".format(k))
else:
v = ", ".join(v)
self._print(message = "from {0} import {1}".format(k,v))
self._print(nl=True)
def _write_initilization(self):
self._print(comment="initialization")
self._print(message="args = kronos.pipelineui.args")
# self._print(message="sample_id = args.sample_id")
self._print(message="rm = RunManager(args.run_id, args.pipeline_name, args.working_dir)")
if not all(self.modes.values()):
self._print(message="ljm = LocalJobManager(rm.logs_dir, rm.outputs_dir)")
if any(self.modes.values()):
self._print(message="if args.job_scheduler.upper() == 'SGE':")
self._print(message="cjm = SgeJobManager(rm.logs_dir, rm.outputs_dir, args.qsub_options)", tab=1)
self._print(message="elif args.job_scheduler.upper() == 'DRMAA':")
self._print(message="try:", tab=1)
self._print(message="cjm = DrmaaJobManager(args.drmaa_library_path, rm.logs_dir, rm.outputs_dir, args.qsub_options)", tab=2)
self._print(message="except:", tab=1)
self._print(message="print >> sys.stderr, 'failed to load DrmaaJobManager' |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import urlparse
def uc2utf8(input):
    ## argh! this feels wrong, but seems to be needed.
    ## (Python 2: encode unicode objects to UTF-8 bytes, pass everything
    ## else through untouched.)
    return input.encode('utf-8') if type(input) == unicode else input
class URL:
    """
    This class is for wrapping URLs into objects. It's used
    internally in the library, end users should not need to know
    anything about this class. All methods that accept URLs can be
    fed either with an URL object, a string or an urlparse.ParsedURL
    object.
    Addresses may be one out of three:
    1) a path relative to the DAV-root, i.e. "someuser/calendar" may
    refer to
    "http://my.davical-server.example.com/pycaldav.php/someuser/calendar".
    2) an absolute path, i.e. "/pycaldav.php/someuser/calendar"
    3) a fully qualified URL,
    i.e. "http://someuser:somepass@my.davical-server.example.com/pycaldav.php/someuser/calendar".
    Remark that hostname, port, user, pass is typically given when
    instantiating the DAVClient object and cannot be overridden later.
    As of 2013-11, some methods in the pycaldav library expected strings
    and some expected urlparse.ParseResult objects, some expected
    fully qualified URLs and most expected absolute paths. The purpose
    of this class is to ensure consistency and at the same time
    maintaining backward compatibility. Basically, all methods should
    accept any kind of URL.
    """
    def __init__(self, url):
        # Keep whichever representation was given; the other one is derived
        # lazily (see __getattr__ / __unicode__).
        if isinstance(url, urlparse.ParseResult) or isinstance(url, urlparse.SplitResult):
            self.url_parsed = url
            self.url_raw = None
        else:
            self.url_raw = url
            self.url_parsed = None
    def __nonzero__(self):
        # Py2 truthiness: an URL is truthy when it wraps anything at all.
        if self.url_raw or self.url_parsed:
            return True
        else:
            return False
    def __ne__(self, other):
        return not self == other
    def __eq__(self, other):
        # Fast path: identical textual form.
        if str(self) == str(other):
            return True
        ## The URLs could have insignificant differences
        me = self.canonical()
        if hasattr(other, 'canonical'):
            other = other.canonical()
        return str(me) == str(other)
    ## TODO: better naming? Will return url if url is already an URL
    ## object, else will instantiate a new URL object
    @classmethod
    def objectify(self, url):
        if url is None:
            return None
        if isinstance(url, URL):
            return url
        else:
            return URL(url)
    ## To deal with all kind of methods/properties in the ParseResult
    ## class
    def __getattr__(self, attr):
        # Parse lazily on first attribute access, then delegate to the
        # ParseResult; unknown attributes fall back to the string form.
        if self.url_parsed is None:
            self.url_parsed = urlparse.urlparse(self.url_raw)
        if hasattr(self.url_parsed, attr):
            return getattr(self.url_parsed, attr)
        else:
            return getattr(self.__unicode__(), attr)
    ## returns the url in text format
    def __str__(self):
        return self.__unicode__().encode('utf-8')
    ## returns the url in text format
    def __unicode__(self):
        # Derive the raw string from the parsed form on demand (Python 2
        # unicode semantics).
        if self.url_raw is None:
            self.url_raw = self.url_parsed.geturl()
        if isinstance(self.url_raw, unicode):
            return self.url_raw
        else:
            return unicode(self.url_raw, 'utf-8')
    def __repr__(self):
        return "URL(%s)" % str(self)
    def is_auth(self):
        # True when the URL embeds a username (user[:pass]@host form).
        return self.username is not None
    def unauth(self):
        """Return a copy of this URL with authentication details stripped
        (and double slashes in the path collapsed)."""
        if not self.is_auth():
            return self
        return URL.objectify(urlparse.ParseResult(
            self.scheme, '%s:%s' % (self.hostname, self.port),
            self.path.replace('//', '/'), self.params, self.query, self.fragment))
    def canonical(self):
        """
        a canonical URL ... remove authentication details, make sure there
        are no double slashes, and to make sure the URL is always the same,
        run it through the urlparser
        """
        url = self.unauth()
        ## this is actually already done in the unauth method ...
        if '//' in url.path:
            raise NotImplementedError("remove the double slashes")
        ## TODO: optimize - we're going to burn some CPU cycles here
        if url.endswith('/'):
            # Strip a single trailing slash so equal URLs compare equal.
            url = URL.objectify(str(url)[:-1])
        ## This looks like a noop - but it may have the side effect
        ## that urlparser be run (actually not - unauth ensures we
        ## have an urlparse.ParseResult object)
        url.scheme
        ## make sure to delete the string version
        url.url_raw = None
        return url
    def join(self, path):
        """
        assumes this object is the base URL or base path. If the path
        is relative, it should be appended to the base. If the path
        is absolute, it should be added to the connection details of
        self. If the path already contains connection details and the
        connection details differ from self, raise an error.
        """
        if not path:
            return self
        path = URL.objectify(path)
        # Conflicting scheme/host/port between base and path is an error.
        if (
            (path.scheme and self.scheme and path.scheme != self.scheme)
            or
            (path.hostname and self.hostname and path.hostname != self.hostname)
            or
            (path.port and self.port and path.port != self.port)
        ):
            raise ValueError("%s can't be joined with %s" % (self, path))
        if path.path[0] == '/':
            # Absolute path replaces the base path entirely.
            ret_path = uc2utf8(path.path)
        else:
            # Relative path is appended, inserting a slash only when needed.
            sep = "/"
            if self.path.endswith("/"):
                sep = ""
            ret_path = "%s%s%s" % (self.path, sep, uc2utf8(path.path))
        return URL(urlparse.ParseResult(
            self.scheme or path.scheme, self.netloc or path.netloc, ret_path, path.params, path.query, path.fragment))
def make(url):
    """Backward compatibility"""
    # Thin alias kept for callers of the old module-level API.
    return URL.objectify(url)
|
import notorm
import momoko
from tornado import gen
import psycopg2.extras
class AsyncRecord(notorm.record):
    """notorm record with asynchronous (Tornado + momoko) persistence."""
    @gen.coroutine
    def update(self, **args):
        """Set the given keyword attributes on self and run update_qry."""
        for k, v in args.items():
            setattr(self, k, v)
        yield notorm.db.execute(
            self.update_qry,
            self._asdict(),
            cursor_factory=psycopg2.extras.NamedTupleCursor)
    @gen.coroutine
    def save(self):
        """Insert the record, or update it when it already has an id."""
        if self.id:
            # BUG FIX: update() is a coroutine; calling it without `yield`
            # only created the future — the UPDATE never executed.
            yield self.update()
        else:
            cursor = yield notorm.db.execute(
                self.insert_qry,
                self.__dict__,
                cursor_factory=psycopg2.extras.NamedTupleCursor)
            results = cursor.fetchone()
            if results:
                # First column of the INSERT ... RETURNING row is the new id.
                self.id = results[0]
|
from pylons import tmpl_context as c
from adhocracy.lib.auth import can
from util import render_tile, BaseTile
class VariantRow(object):
    """View-model for one variant row inside a selection tile."""
    def __init__(self, tile, variant, poll):
        self.tile = tile
        self.variant = variant
        self.poll = poll
        page = tile.selection.page
        if tile.frozen:
            # Frozen tiles show the variant text as of the adoption poll start.
            frozen_at = tile.selection.proposal.adopt_poll.begin_time
            self.text = page.variant_at(variant, frozen_at)
        else:
            self.text = page.variant_head(variant)
    @property
    def selected(self):
        """True when this row's variant is the tile's selected variant."""
        return self.variant == self.tile.selected
    @property
    def show(self):
        """Selected rows are always shown; others only while not frozen."""
        if self.selected:
            return True
        return not self.tile.frozen
    @property
    def can_edit(self):
        """Editable only while not frozen and the user may edit the variant."""
        if self.tile.frozen:
            return False
        return can.variant.edit(self.tile.selection.page, self.variant)
    @property
    def num_comments(self):
        """Number of comments attached to this variant."""
        comments = self.tile.selection.page.variant_comments(self.variant)
        return len(comments)
class SelectionTile(BaseTile):
    """Tile presenting a proposal's selection of a page and its variants."""
    def __init__(self, selection):
        self.selection = selection
        self.selected = selection.selected
        self.variant_polls = self.selection.variant_polls
    @property
    def has_variants(self):
        # BUG FIX: was `< 2`, which answered "has NO alternative variants".
        # A page always carries the HEAD variant, so it has (alternative)
        # variants exactly when there is more than one entry — consistent
        # with num_variants below.
        return len(self.selection.page.variants) > 1
    @property
    def num_variants(self):
        """Number of variants besides HEAD."""
        return len(self.selection.page.variants) - 1
    @property
    def selected_text(self):
        """Text of the selected variant (frozen at poll start if adopting)."""
        variant = self.selected
        if self.frozen:
            freeze_time = self.selection.proposal.adopt_poll.begin_time
            return self.selection.page.variant_at(variant, freeze_time)
        else:
            return self.selection.page.variant_head(variant)
    @property
    def selected_num_comments(self):
        """Number of comments on the selected variant."""
        return len(self.selection.page.variant_comments(self.selected))
    @property
    def frozen(self):
        """True while the proposal's adoption poll is running."""
        return self.selection.proposal.is_adopt_polling()
    def variant_rows(self):
        """Yield a VariantRow for each (variant, poll) pair of the tile."""
        for (variant, poll) in self.variant_polls:
            row = VariantRow(self, variant, poll)
            yield row
    @property
    def show_new_variant_link(self):
        """Offer a "new variant" link only when editing is still possible."""
        if self.frozen:
            return False
        return can.norm.edit(self.selection.page, 'any')
def row(selection):
    """Render the cached 'row' tile for *selection*; "" for missing/deleted."""
    if (not selection) or selection.is_deleted():
        return ""
    return render_tile(
        '/selection/tiles.html', 'row', SelectionTile(selection),
        selection=selection, user=c.user, cached=True)
def variants(selection, tile=None):
    """Render the cached 'variants' tile, creating the tile on demand."""
    if tile is None:
        tile = SelectionTile(selection)
    return render_tile(
        '/selection/tiles.html', 'variants', tile,
        selection=selection, user=c.user, cached=True)
|
from _ | _future__ import absolute_import
# Start a Celery worker by executing:
# celery -A proj worker -l info
# Import available tasks
from proj.tasks import add, mul, xsum, fib
# Test short-running tasks
add.delay(2, 2)
mul.delay(10, 12)
xsum.delay(range(100))
fib.delay(10)
# Test medium-running tasks
fib. | delay(35)
fib.delay(35)
fib.delay(35)
|
import cherrypy
from cherrypy._cpcompat import ntou
from cherrypy.test import helper
class ETagTest(helper.CPWebCase):
    """Functional tests for CherryPy's ETag tool with autotags enabled."""
    @staticmethod
    def setup_server():
        """Mount a tiny app with tools.etags.on/autotags for every handler."""
        class Root:
            @cherrypy.expose
            def resource(self):
                # Static body, so the auto-generated ETag is stable.
                return "Oh wah ta goo Siam."
            @cherrypy.expose
            def fail(self, code):
                # Raise a redirect for 3xx codes, an HTTP error otherwise.
                code = int(code)
                if 300 <= code <= 399:
                    raise cherrypy.HTTPRedirect([], code)
                else:
                    raise cherrypy.HTTPError(code)
            @cherrypy.expose
            # In Python 3, tools.encode is on by default
            @cherrypy.config(**{'tools.encode.on': True})
            def unicoded(self):
                return ntou('I am a \u1ee4nicode string.', 'escape')
        conf = {'/': {'tools.etags.on': True,
                      'tools.etags.autotags': True,
                      }}
        cherrypy.tree.mount(Root(), config=conf)
    def test_etags(self):
        """If-Match / If-None-Match handling on GET and POST."""
        self.getPage("/resource")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html;charset=utf-8')
        self.assertBody('Oh wah ta goo Siam.')
        # Capture the auto-generated tag for the conditional requests below.
        etag = self.assertHeader('ETag')
        # Test If-Match (both valid and invalid)
        self.getPage("/resource", headers=[('If-Match', etag)])
        self.assertStatus("200 OK")
        self.getPage("/resource", headers=[('If-Match', "*")])
        self.assertStatus("200 OK")
        self.getPage("/resource", headers=[('If-Match', "*")], method="POST")
        self.assertStatus("200 OK")
        self.getPage("/resource", headers=[('If-Match', "a bogus tag")])
        self.assertStatus("412 Precondition Failed")
        # Test If-None-Match (both valid and invalid)
        self.getPage("/resource", headers=[('If-None-Match', etag)])
        self.assertStatus(304)
        self.getPage("/resource", method='POST',
                     headers=[('If-None-Match', etag)])
        self.assertStatus("412 Precondition Failed")
        self.getPage("/resource", headers=[('If-None-Match', "*")])
        self.assertStatus(304)
        self.getPage("/resource", headers=[('If-None-Match', "a bogus tag")])
        self.assertStatus("200 OK")
    def test_errors(self):
        """Conditional headers must still be honoured on error responses."""
        self.getPage("/resource")
        self.assertStatus(200)
        etag = self.assertHeader('ETag')
        # Test raising errors in page handler
        self.getPage("/fail/412", headers=[('If-Match', etag)])
        self.assertStatus(412)
        self.getPage("/fail/304", headers=[('If-Match', etag)])
        self.assertStatus(304)
        self.getPage("/fail/412", headers=[('If-None-Match', "*")])
        self.assertStatus(412)
        self.getPage("/fail/304", headers=[('If-None-Match', "*")])
        self.assertStatus(304)
    def test_unicode_body(self):
        """An encoded unicode body must get a stable, matchable ETag."""
        self.getPage("/unicoded")
        self.assertStatus(200)
        etag1 = self.assertHeader('ETag')
        self.getPage("/unicoded", headers=[('If-Match', etag1)])
        self.assertStatus(200)
        self.assertHeader('ETag', etag1)
|
import unittest
import matmath
import numpy as np
import math
class TestMatrix(unittest.TestCase):
    """Tests for matmath rotation matrices and quaternion conversions.

    Convention (as exercised throughout): points are homogeneous row
    vectors and transforms are applied as pt.dot(mat).
    """
    def testRotX(self):
        """90-degree rotation about X: y-axis maps to z, z maps to -y."""
        mat = matmath.xRotationMatrix(math.radians(90))
        pt = np.array([1, 0, 0, 1])
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [1, 0, 0, 1])
        pt = np.array([0, 1, 0, 1])
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [0, 0, 1, 1])
        pt = np.array([0, 0, 1, 1])
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [0, -1, 0, 1])
    def testRotY(self):
        """90-degree rotation about Y maps the z-axis onto the x-axis."""
        pt = np.array([0, 0, 1, 1])
        mat = matmath.yRotationMatrix(math.radians(90))
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [1, 0, 0, 1])
    def testRotZ(self):
        """90-degree rotation about Z maps the x-axis onto the y-axis."""
        pt = np.array([1, 0, 0, 1])
        mat = matmath.zRotationMatrix(math.radians(90))
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [0, 1, 0, 1])
    def testQuaternionMatrix(self):
        """Axis-angle -> quaternion -> matrix must match the direct matrices."""
        q = matmath.axisAngleToQuaternion([1, 0, 0], np.radians(90))
        qmat = matmath.quaternionToRotationMatrix(q)
        rmat = matmath.xRotationMatrix(math.radians(90))
        np.testing.assert_almost_equal(qmat, rmat)
        q = matmath.axisAngleToQuaternion([0, 1, 0], np.radians(90))
        qmat = matmath.quaternionToRotationMatrix(q)
        rmat = matmath.yRotationMatrix(math.radians(90))
        np.testing.assert_almost_equal(qmat, rmat)
        q = matmath.axisAngleToQuaternion([0, 0, 1], np.radians(90))
        qmat = matmath.quaternionToRotationMatrix(q)
        rmat = matmath.zRotationMatrix(math.radians(90))
        np.testing.assert_almost_equal(qmat, rmat)
    def testMultipleRotates(self):
        """Composed X-then-Z rotation applied as a single product matrix."""
        r1 = matmath.xRotationMatrix(np.radians(90))
        r2 = matmath.zRotationMatrix(np.radians(90))
        mat = r1.dot(r2)
        pt = np.array([0, 0, 1, 1])
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [1, 0, 0, 1])
    def test2M(self):
        """Walk a depth-scan point through PC -> SoS -> device transforms.

        Prints intermediate values only; no assertions (exploratory test).
        """
        # 2 Meters away depth scan
        pt = np.array([0, 0, 2, 1])
        print "PC", pt
        mat = matmath.pcToSoSMatrix()
        npt = pt.dot(mat)
        print "SoS ", npt
        trans = np.array([0, 0, 0])
        quaternion = matmath.axisAngleToQuaternion([1, 0, 0], np.radians(90))
        mat = matmath.getPC2WorldMatrix(trans, quaternion)
        npt = pt.dot(mat)
        print "Device", npt
        pt = np.array([0, 1, 2, 1])
        print "PC", pt
        mat = matmath.pcToSoSMatrix()
        npt = pt.dot(mat)
        print "SoS ", npt
        mat = matmath.getPC2WorldMatrix(trans, quaternion)
        npt = pt.dot(mat)
        print "Device", npt
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
clas | s Voropp(MakefilePackage):
"""Voro++ is a open source software library for the c | omputation of the
Voronoi diagram, a widely-used tessellation that has applications in many
scientific fields."""
homepage = "http://math.lbl.gov/voro++/about.html"
url = "http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz"
variant('pic', default=True,
description='Position independent code')
version('0.4.6', sha256='ef7970071ee2ce3800daa8723649ca069dc4c71cc25f0f7d22552387f3ea437e')
def edit(self, spec, prefix):
filter_file(r'CC=g\+\+',
'CC={0}'.format(self.compiler.cxx),
'config.mk')
filter_file(r'PREFIX=/usr/local',
'PREFIX={0}'.format(self.prefix),
'config.mk')
# We can safely replace the default CFLAGS which are:
# CFLAGS=-Wall -ansi -pedantic -O3
cflags = ''
if '+pic' in spec:
cflags += self.compiler.cc_pic_flag
filter_file(r'CFLAGS=.*',
'CFLAGS={0}'.format(cflags),
'config.mk')
|
# Copyright (c) 2012 Roberto Alsina y otros.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from docutils import nodes
from docutils.parsers.rst import Directive, directives
# HTML template for the embedded player; filled in by Youtube.run() via
# str.format with yid (video id), width and height.
CODE = """\
<iframe width="{width}"
height="{height}"
src="http://www.youtube.com/embed/{yid}?rel=0&hd=1&wmode=transparent"
></iframe>"""
class Youtube(Directive):
    """reST directive embedding a YouTube video as an <iframe>.

    Usage::

        .. youtube:: lyViVmaBQDg
           :height: 400
           :width: 600
    """
    has_content = True
    required_arguments = 1
    option_spec = {
        "width": directives.positive_int,
        "height": directives.positive_int,
    }
    def run(self):
        self.check_content()
        # Defaults match the classic 425x344 embed size; user options win.
        settings = dict(yid=self.arguments[0], width=425, height=344)
        settings.update(self.options)
        html = CODE.format(**settings)
        return [nodes.raw('', html, format='html')]
    def check_content(self):
        # Content after the directive is rejected (legacy option syntax).
        if not self.content:
            return
        raise self.warning("This directive does not accept content. The "
                           "'key=value' format for options is deprecated, "
                           "use ':key: value' instead")
directives.register_directive('youtube', Youtube)
|
def process(self):
    """Derive Gujarati glyph names from their Unicode character names."""
    #GUJARATI VOWEL SIGN CANDRA E
    #GUJARATI VOWEL CANDRA E
    self.edit("GUJARATI")
    self.edit("LETTER")
    self.edit("DIGIT")
    self.processAs("Helper Indic")
    # NOTE: order matters — "VOWEL SIGN" must be rewritten before the bare
    # "VOWEL" / "SIGN" edits below would consume its words.
    self.edit("VOWEL SIGN", "sign")
    self.edit("VOWEL")
    self.edit("SIGN")
    self.edit("THREE-DOT NUKTA ABOVE", "threedotnuktaabove")
    self.edit("TWO-CIRCLE NUKTA ABOVE", "twocirclenuktaabove")
    self.processAs("Helper Numbers")
    self.lower()
    self.compress()
    self.scriptPrefix()
if __name__ == "__main__":
    # When run directly: print the whole Gujarati range and dump debug
    # information for the single codepoint U+0AFA.
    from glyphNameFormatter.exporters import printRange
    from glyphNameFormatter.tools import debug
    printRange("Gujarati")
    debug(0x0AFA)
# -*- mode: python; indent-tabs-mode: nil; tab-width: 2 -*-
"""
aria_api.py - implements handlers which are for the Aria to talk to helvetic.
"""
from __future__ import absolute_import
from base64 import b16encode
from crc16 import crc16xmodem
from datetime import timedelta
from decimal import Decimal
from django.contrib.auth.models import User
from django.db import transaction
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from string import hexdigits
import struct
from time import time
from ..models import AuthorisationToken, Measurement, Scale, utcnow
class ScaleValidateView(View):
    """Answers the Aria's token-verification probe.

    Context: https://github.com/micolous/helvetic/issues/1
    """
    def get(self, request):
        # The scale sometimes re-checks that it authenticated with the
        # correct token.  Registration itself is handled by /scale/register
        # (ScaleRegisterView), so this endpoint can unconditionally answer
        # "T" (OK).  The real service returns "F" on error.
        return HttpResponse('T')
class ScaleRegisterView(View):
    """Registers a new Aria scale against a one-shot authorisation token."""
    def get(self, request):
        # All three query parameters are mandatory.
        for param in ('serialNumber', 'token', 'ssid'):
            if param not in request.GET:
                return HttpResponseBadRequest('%s missing' % param)
        serial = request.GET['serialNumber'].upper()
        token = request.GET['token']
        ssid = request.GET['ssid']
        if len(serial) != 12:
            return HttpResponseBadRequest('serialNumber must be 12 bytes')
        if not all(ch in hexdigits for ch in serial):
            return HttpResponseBadRequest('serial must only contain hex')
        # Lookup the authorisation token
        auth_token = AuthorisationToken.lookup_token(token)
        if auth_token is None:
            return HttpResponseForbidden('Bad auth token')
        owner = auth_token.user
        # Tokens are single-use: consume it now.
        auth_token.delete()
        # Register the Aria
        Scale.objects.create(
            hw_address=serial,
            ssid=ssid,
            owner=owner,
        )
        # Only return 200 OK
        return HttpResponse('')
class ScaleUploadView(View):
    """Accepts the Aria's binary measurement upload (protocol version 3)
    and replies with the binary configuration blob the scale expects."""
    @method_decorator(csrf_exempt)
    @method_decorator(transaction.atomic)
    def dispatch(self, *args, **kwargs):
        # csrf_exempt: the scale cannot send a CSRF token.
        # atomic: a truncated upload must not persist partial measurements.
        return super(ScaleUploadView, self).dispatch(*args, **kwargs)
    def post(self, request):
        now = utcnow()
        body = request.body
        # Version 3 protocol header: version, battery %, MAC, auth code.
        proto_ver, battery_pc, mac, auth_code = struct.unpack('<LL6s16s', body[:30])
        body = body[30:]
        if proto_ver != 3:
            return HttpResponseBadRequest('Unknown protocol version: %d' % proto_ver)
        # NOTE: battery_pc is unpacked unsigned, so the < 0 arm is defensive.
        if battery_pc > 100 or battery_pc < 0:
            return HttpResponseBadRequest('Battery percentage must be 0..100 (got %d)' % battery_pc)
        # Hex-encode for storage/lookup against Scale.hw_address.
        mac, auth_code = [b16encode(x) for x in (mac, auth_code)]
        scale = None
        try:
            scale = Scale.objects.get(hw_address=mac)
        except Scale.DoesNotExist:
            return HttpResponseBadRequest('Unknown scale: %s' % mac)
        # Check authcode: adopt it on first contact, reject mismatches later.
        if scale.auth_code is None or scale.auth_code == '':
            scale.auth_code = auth_code
        elif scale.auth_code != auth_code:
            return HttpResponseForbidden('Invalid auth code')
        scale.battery_percent = battery_pc
        fw_ver, unknown2, scale_now, measurement_count = struct.unpack('<LLLL', body[:16])
        body = body[16:]
        scale.fw_version = fw_ver
        scale.save()
        for x in range(measurement_count):
            if len(body) < 32:
                return HttpResponseBadRequest('Measurement truncated.')
            id2, imp, weight, ts, uid, fat1, covar, fat2 = \
                struct.unpack('<LLLLLLLL', body[:32])
            # Record the measurement
            # Look up the owner of this measurement
            if uid == 0:
                measured_user = None
            else:
                try:
                    measured_user = User.objects.get(id=uid)
                except User.DoesNotExist:
                    # BUG FIX: was `except User.NotFound:` — Django models
                    # raise <Model>.DoesNotExist; User.NotFound does not
                    # exist and raised AttributeError for any unknown uid.
                    measured_user = None
            # `when` back-dates the reading by the scale's own clock delta.
            Measurement.objects.create(
                user=measured_user,
                scale=scale,
                when=now - timedelta(seconds=scale_now - ts),
                weight=weight,
                body_fat=Decimal(fat1) / Decimal(1000),
            )
            body = body[32:]
        # Formulate a response
        scale_users = scale.users.all()
        response = struct.pack('<LBBBL',
            int(time()),  # Fill with current time, to account for processing delay
            scale.unit,
            0x32,  # status = configured
            0x01,  # unknown
            len(scale_users)
        )
        # Insert user info
        for profile in scale_users:
            last_weight = min_var = max_var = 0
            last_measurement = profile.latest_measurement()
            if last_measurement is not None:
                # Round down to whole kilograms-equivalent and allow +/-4000
                # (clamped at zero) as the recognition window.
                last_weight = ((last_measurement.weight) // 1000) * 1000
                min_var = last_weight - 4000
                if min_var < 0:
                    min_var = 0
                max_var = last_weight + 4000
            response += struct.pack('<L16x20sLLLBLLLLLL',
                profile.user.id,
                profile.short_name_formatted(),
                min_var,
                max_var,
                profile.age(),
                profile.gender,
                profile.height,
                0,  # some weight
                0,  # body fat
                0,  # covariance
                0,  # another weight
                0,  # timestamp
            )
        response = response + struct.pack('<LLL',
            0,  # always 0
            3,  # update status: no
            0,  # unknown
        )
        trailer = 0x19 + (len(scale_users) * 0x4d)
        response = response + struct.pack('<HH',
            crc16xmodem(response),  # checksum
            trailer,
        )
        hr = HttpResponse(response)
        # Content-Length is a required element
        hr['Content-Length'] = str(len(response))
        return hr
|
# Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import sys, os, TestUtil, shlex
from threading import Thread
#
# Set nreplicas to a number N to test replication with N replicas.
#
#nreplicas=0
nreplicas=1
# Base client port of the master registry; replica i listens on port + i.
iceGridPort = 12010;
# Common command-line properties for every icegridnode process.
nodeOptions = r' --Ice.Warn.Connections=0' + \
              r' --IceGrid.Node.Endpoints=default' + \
              r' --IceGrid.Node.WaitTime=240' + \
              r' --Ice.ProgramName=icegridnode' + \
              r' --IceGrid.Node.Trace.Replica=0' + \
              r' --IceGrid.Node.Trace.Activator=0' + \
              r' --IceGrid.Node.Trace.Adapter=0' + \
              r' --IceGrid.Node.Trace.Server=0' + \
              r' --IceGrid.Node.ThreadPool.SizeWarn=0' + \
              r' --IceGrid.Node.PrintServersReady=node1' + \
              r' --Ice.NullHandleAbort' + \
              r' --Ice.ThreadPool.Server.Size=0' + \
              r' --Ice.ServerIdleTime=0'
# Common command-line properties for every icegridregistry process
# (null permission verifiers: tests authenticate with dummy credentials).
registryOptions = r' --Ice.Warn.Connections=0' + \
                  r' --IceGrid.Registry.PermissionsVerifier=IceGrid/NullPermissionsVerifier' + \
                  r' --IceGrid.Registry.AdminPermissionsVerifier=IceGrid/NullPermissionsVerifier' + \
                  r' --IceGrid.Registry.SSLPermissionsVerifier=IceGrid/NullSSLPermissionsVerifier' + \
                  r' --IceGrid.Registry.AdminSSLPermissionsVerifier=IceGrid/NullSSLPermissionsVerifier' + \
                  r' --IceGrid.Registry.Server.Endpoints=default' + \
                  r' --IceGrid.Registry.Internal.Endpoints=default' + \
                  r' --IceGrid.Registry.SessionManager.Endpoints=default' + \
                  r' --IceGrid.Registry.AdminSessionManager.Endpoints=default' + \
                  r' --IceGrid.Registry.Trace.Session=0' + \
                  r' --IceGrid.Registry.Trace.Application=0' + \
                  r' --IceGrid.Registry.Trace.Node=0' + \
                  r' --IceGrid.Registry.Trace.Replica=0' + \
                  r' --IceGrid.Registry.Trace.Adapter=0' + \
                  r' --IceGrid.Registry.Trace.Object=0' + \
                  r' --IceGrid.Registry.Trace.Server=0' + \
                  r' --IceGrid.Registry.Trace.Locator=0' + \
                  r' --Ice.ThreadPool.Server.Size=0 ' + \
                  r' --Ice.ThreadPool.Client.SizeWarn=0' + \
                  r' --IceGrid.Registry.Client.ThreadPool.SizeWarn=0' + \
                  r' --Ice.ServerIdleTime=0' + \
                  r' --IceGrid.Registry.DefaultTemplates="' + \
                  os.path.abspath(os.path.join(TestUtil.toplevel, "cpp", "config", "templates.xml") + '"')
# NOTE(review): the closing quote above is appended *inside* abspath();
# it works in practice but looks accidental — confirm intent.
def getDefaultLocatorProperty():
    """Build the --Ice.Default.Locator property string.

    The locator proxy lists one `default -p <port>` endpoint for the master
    registry and one per replica (ports iceGridPort .. iceGridPort+nreplicas).
    """
    # Idiomatic rewrite of the original while-loop string concatenation;
    # also stops shadowing the builtin `property`.  Output is unchanged.
    endpoints = "".join(':default -p %d' % (iceGridPort + i)
                        for i in range(nreplicas + 1))
    return ' --Ice.Default.Locator="IceGrid/Locator%s"' % endpoints
def startIceGridRegistry(testdir, dynamicRegistration = False):
    """Start the master registry plus `nreplicas` replicas.

    Each instance gets its own db directory under testdir/db and listens on
    iceGridPort + i.  Returns the list of started server processes.
    """
    iceGrid = TestUtil.getIceGridRegistry()
    command = ' --nowarn ' + registryOptions
    if dynamicRegistration:
        command += r' --IceGrid.Registry.DynamicRegistration'
    procs = []
    i = 0
    while i < (nreplicas + 1):
        # Instance 0 is the master; the rest are named replicas.
        if i == 0:
            name = "registry"
        else:
            name = "replica-" + str(i)
        dataDir = os.path.join(testdir, "db", name)
        if not os.path.exists(dataDir):
            os.mkdir(dataDir)
        else:
            cleanDbDir(dataDir)
        sys.stdout.write("starting icegrid " + name + "... ")
        sys.stdout.flush()
        cmd = command + ' ' + TestUtil.getQtSqlOptions('IceGrid') + \
              r' --Ice.ProgramName=' + name + \
              r' --IceGrid.Registry.Client.Endpoints="default -p ' + str(iceGridPort + i) + '" ' + \
              r' --IceGrid.Registry.Data="' + dataDir + '" '
        if i > 0:
            # Replicas must know their name and how to reach the master.
            cmd += r' --IceGrid.Registry.ReplicaName=' + name + ' ' + getDefaultLocatorProperty()
        driverConfig = TestUtil.DriverConfig("server")
        driverConfig.lang = "cpp"
        proc = TestUtil.startServer(iceGrid, cmd, driverConfig, count = 5)
        procs.append(proc)
        print("ok")
        i = i + 1
    return procs
def shutdownIceGridRegistry(procs):
    """Shut down the replicas (highest-numbered first), then the master
    registry, and wait for every process to exit successfully."""
    for replica in range(nreplicas, 0, -1):
        sys.stdout.write("shutting down icegrid replica-" + str(replica) + "... ")
        sys.stdout.flush()
        iceGridAdmin("registry shutdown replica-" + str(replica))
        print("ok")
    sys.stdout.write("shutting down icegrid registry... ")
    sys.stdout.flush()
    iceGridAdmin("registry shutdown")
    print("ok")
    for p in procs:
        p.waitTestSuccess()
def iceGridNodePropertiesOverride():
    """Convert the test driver's command-line properties into the
    `key=value key=value ...` form expected by
    IceGrid.Node.PropertiesOverride."""
    #
    # Create property overrides from command line options.
    #
    overrideOptions = ''
    for opt in shlex.split(TestUtil.getCommandLineProperties("", TestUtil.DriverConfig("server"))):
        opt = opt.strip().replace("--", "")
        index = opt.find("=")
        if index == -1:
            # Bare flag: treat it as key=1.
            overrideOptions += ("%s=1 ") % opt
        else:
            key = opt[0:index]
            value = opt[index + 1:]
            if(value.find(' ') == -1):
                overrideOptions += ("%s=%s ") % (key, value)
            else:
                #
                # NOTE: We need 2 backslash before the quote to run the
                # C# test/IceGrid/simple test with SSL.
                #
                overrideOptions += ("%s=\\\"%s\\\" ") % (key, value.replace('"', '\\\\\\"'))
    return overrideOptions
def startIceGridNode(testdir):
    """Start a single icegridnode named 'localnode' with a clean db dir
    and the driver's property overrides; returns the server process."""
    iceGrid = TestUtil.getIceGridNode()
    dataDir = os.path.join(testdir, "db", "node")
    if not os.path.exists(dataDir):
        os.mkdir(dataDir)
    else:
        cleanDbDir(dataDir)
    # Quote the whole override list so it survives as one property value.
    overrideOptions = '" ' + iceGridNodePropertiesOverride()
    overrideOptions += ' Ice.ServerIdleTime=0 Ice.PrintProcessId=0 Ice.PrintAdapterReady=0"'
    sys.stdout.write("starting icegrid node... ")
    sys.stdout.flush()
    command = r' --nowarn ' + nodeOptions + getDefaultLocatorProperty() + \
              r' --IceGrid.Node.Data="' + dataDir + '"' \
              r' --IceGrid.Node.Name=localnode' + \
              r' --IceGrid.Node.PropertiesOverride=' + overrideOptions
    driverConfig = TestUtil.DriverConfig("server")
    driverConfig.lang = "cpp"
    # Wait for the 'node1' adapter so callers know the node is ready.
    proc = TestUtil.startServer(iceGrid, command, driverConfig, adapter='node1')
    print("ok")
    return proc
def iceGridAdmin(cmd, ignoreFailure = False):
    """Run an icegridadmin command and return its output buffer.

    Exits the test on a non-zero status unless ignoreFailure is set.
    """
    iceGridAdmin = TestUtil.getIceGridAdmin()
    user = r"admin1"
    if cmd == "registry shutdown":
        # The shutdown command authenticates as a dedicated user.
        user = r"shutdown"
    command = getDefaultLocatorProperty() + r" --IceGridAdmin.Username=" + user + " --IceGridAdmin.Password=test1 " + \
              r' -e "' + cmd + '"'
    if TestUtil.appverifier:
        TestUtil.setAppVerifierSettings([TestUtil.getIceGridAdmin()])
    driverConfig = TestUtil.DriverConfig("client")
    driverConfig.lang = "cpp"
    proc = TestUtil.startClient(iceGridAdmin, command, driverConfig)
    status = proc.wait()
    if TestUtil.appverifier:
        TestUtil.appVerifierAfterTestEnd([TestUtil.getIceGridAdmin()])
    if not ignoreFailure and status:
        print(proc.buf)
        sys.exit(1)
    return proc.buf
def killNodeServers():
    """Disable and SIGKILL every server known to the registry.

    Failures are ignored (the server may already be down).
    """
    for entry in iceGridAdmin("server list"):
        server_id = entry.strip()
        iceGridAdmin("server disable " + server_id, True)
        iceGridAdmin("server signal " + server_id + " SIGKILL", True)
def iceGridTest(application, additionalOptions = "", applicationOptions = ""):
testdir = os.getcwd()
if not TestUtil.isWin32() and os.getuid() == 0:
print
print("*** can't run test as root ***")
print
return
if TestUtil.getDefaultMapping() == "java":
os.environ['CLASSPATH'] = os.path.join(os.getcwd(), "classes") + os.pathsep + os.environ.get("CLASSPATH", "")
client = TestUtil.getDefaultClientFile()
if TestUtil.getDefaultMapping() != "java":
client = os.path.join(testdir, client)
clientOptions = ' ' + getDefaultLocatorProperty() + ' ' + addit |
# Placeholder script: prints a banner line.
# NOTE(review): reconstructed — the original line contained extraction
# markers inside the string literal; confirm the exact banner text.
print("My script")
# $Id$
#
import inc_const as const
# Run one pjsua instance with no audio device and a single call slot,
# dialing the SIPp target; expect instance 0 to reach CONFIRMED state.
PJSUA = ["--null-audio --max-calls=1 $SIPP_URI"]
PJSUA_EXPECTS = [[0, const.STATE_CONFIRMED, "v"]]
|
import configparser
import json
import socket
import time
from datetime import datetime

import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
'''
Author: GYzheng, guanggyz@gmail.com
###Server side
We have two topic, one is from client to server, the other one is from client to server
1. Server->Client : sc_topic
2. Client->Server : cs_topic
'''
class command_handler:
    """MQTT command server.

    Publishes commands to clients on 'sc_<topic>' and prints the results
    clients report back on 'cs_<topic>'.
    """
    def __init__(self, host, port, topic):
        self.host = host
        self.port = int(port)
        self.sc_topic = 'sc_' + topic  # server -> client
        self.cs_topic = 'cs_' + topic  # client -> server
        self.get_host_info()
        self.subscribe_msg()
    def send_command(self, cmd):
        """Publish *cmd* to all clients with status 'run'."""
        msg = self.json_generator(cmd, 'run')  # cmd, status
        self.send_msg(msg)
    def get_host_info(self):
        """Cache the local hostname and IP used in outgoing messages."""
        self.host_name = socket.gethostname()
        self.host_ip = socket.gethostbyname(socket.gethostname())
    def subscribe_msg(self):
        """Connect to the broker and subscribe; blocks until connected."""
        self.subscriber = mqtt.Client()
        self.subscriber.on_connect = self.on_connect
        self.subscriber.on_message = self.on_message
        self.is_connect = False  # flipped to True by on_connect
        self.subscriber.connect(self.host, self.port)  # keep_alive=60
        self.subscriber.loop_start()
        while not self.is_connect:
            # BUG FIX: was a busy-wait `pass` loop that pinned a CPU core;
            # sleep briefly while the network thread finishes connecting.
            time.sleep(0.01)
    def send_msg(self, msg):
        """Publish a single message on the server->client topic."""
        publish.single(self.sc_topic, msg, hostname=self.host, port=self.port)
    def on_connect(self, client, userdata, flags, rc):
        self.is_connect = True
        # subscribe to data coming back from clients
        client.subscribe(self.cs_topic)
    def on_message(self, client, user_data, msg):
        """Print name/ip/status and result from a client's JSON payload."""
        try:
            tmp = json.loads(msg.payload.decode('utf-8', 'ignore'))
            client_name = tmp['name']
            client_ip = tmp['ip']
            client_status = tmp['status']
            client_result = tmp['result']
            print(client_name + "," + client_ip + "," + client_status)
            print(client_result)
        except (ValueError, KeyError):
            # BUG FIX: was a bare `except:` that also swallowed SystemExit /
            # KeyboardInterrupt.  ValueError covers malformed JSON
            # (JSONDecodeError subclasses it); KeyError covers missing fields.
            print("Not Json format!")
    def json_generator(self, cmd, status):
        """Serialize a command message (name/ip/timestamp/status/cmd)."""
        msg = json.dumps({'name': self.host_name,
                          'ip': self.host_ip,
                          'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                          'status': status,
                          'cmd': cmd})
        return msg
#main function
if __name__ == "__main__":
    # Read broker connection details from server.conf, connect, then loop
    # forever forwarding typed commands to the clients.
    config = configparser.ConfigParser()
    config.read('server.conf')
    broker_ip = config['server.conf']['broker_ip']
    broker_port = config['server.conf']['broker_port']
    topic = config['server.conf']['topic']
    ch = command_handler(broker_ip,broker_port,topic);
    print("Server start! Broker IP = "+broker_ip+", Broker PORT = "+broker_port+", topic = "+topic)
    while True:
        cmd = input("Please input command:\n")
        ch.send_command(cmd)
        pass
|
from bravado_core.spec import Spec
import mock
from pyramid.config import Configurator
from pyramid.registry import Registry
import pytest
from swagger_spec_validator.common import SwaggerValidationError
import pyramid_swagger
from pyramid_swagger.model import SwaggerSchema
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
@mock.patch('pyramid_swagger.get_swagger_schema')
@mock.patch('pyramid_swagger.get_swagger_spec')
def test_disable_api_doc_views(_1, _2, mock_register):
    """API-doc endpoints are not registered when the setting disables them."""
    config_settings = {
        'pyramid_swagger.enable_api_doc_views': False,
        'pyramid_swagger.enable_swagger_spec_validation': False,
    }
    registry = mock.Mock(spec=Registry, settings=config_settings)
    configurator = mock.Mock(spec=Configurator, registry=registry)
    pyramid_swagger.includeme(configurator)
    assert mock_register.called is False
def test_bad_schema_validated_on_include():
    """Including pyramid_swagger with a broken schema raises on validation."""
    registry = mock.Mock(settings={
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/bad_app/',
        'pyramid_swagger.enable_swagger_spec_validation': True,
    })
    with pytest.raises(SwaggerValidationError):
        pyramid_swagger.includeme(mock.Mock(registry=registry))
    # TODO: Figure out why this assertion fails on travis
    # assert "'info' is a required property" in str(excinfo.value)
@mock.patch('pyramid_swagger.get_swagger_spec')
def test_bad_schema_not_validated_if_spec_validation_is_disabled(_):
    """A broken schema is tolerated when spec validation is switched off."""
    disabled_settings = {
        'pyramid_swagger.enable_swagger_spec_validation': False,
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/bad_app/',
    }
    registry = mock.Mock(settings=disabled_settings)
    pyramid_swagger.includeme(mock.Mock(spec=Configurator, registry=registry))
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_12_only(mock_register):
    """Only the 1.2 schema is built when swagger_versions is ['1.2']."""
    app_settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.swagger_versions': ['1.2'],
    }
    pyramid_swagger.includeme(
        mock.Mock(registry=mock.Mock(settings=app_settings)))
    assert isinstance(app_settings['pyramid_swagger.schema12'], SwaggerSchema)
    assert mock_register.call_count == 1
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_20_only(mock_register):
    """Only the 2.0 spec is built when swagger_versions is ['2.0']."""
    app_settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.swagger_versions': ['2.0'],
    }
    pyramid_swagger.includeme(
        mock.Mock(registry=mock.Mock(settings=app_settings)))
    assert isinstance(app_settings['pyramid_swagger.schema20'], Spec)
    assert not app_settings['pyramid_swagger.schema12']
    assert mock_register.call_count == 1
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_12_and_20(mock_register):
    """Both schemas are built (and both endpoint sets registered) for
    swagger_versions ['1.2', '2.0']."""
    app_settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.swagger_versions': ['1.2', '2.0'],
    }
    pyramid_swagger.includeme(
        mock.Mock(registry=mock.Mock(settings=app_settings)))
    assert isinstance(app_settings['pyramid_swagger.schema20'], Spec)
    assert isinstance(app_settings['pyramid_swagger.schema12'], SwaggerSchema)
    assert mock_register.call_count == 2
|
view"].customize_template_get(
request.cr, request.uid, xml_id, full=full, context=request.context)
@http.route('/website/get_view_translations', type='json', auth='public', website=True)
def get_view_translations(self, xml_id, lang=None):
    """Return the translations of the given view, defaulting the language
    to the one in the request context."""
    lang = lang or request.context.get('lang')
    return request.registry["ir.ui.view"].get_view_translations(
        request.cr, request.uid, xml_id, lang=lang, context=request.context)
@http.route('/website/set_translations', type='json', auth='public', website=True)
def set_translations(self, data, lang):
    """Store edited view translations.

    *data* maps view ids to lists of {initial_content, new_content,
    translation_id} dicts.  Existing ir.translation rows are updated;
    otherwise a new 'view'-type translation is created.
    """
    irt = request.registry.get('ir.translation')
    for view_id, trans in data.items():
        view_id = int(view_id)
        for t in trans:
            initial_content = t['initial_content'].strip()
            new_content = t['new_content'].strip()
            tid = t['translation_id']
            if not tid:
                # No id supplied: try to find an existing translation row
                # matching this view/language/source text.
                old_trans = irt.search_read(
                    request.cr, request.uid,
                    [
                        ('type', '=', 'view'),
                        ('res_id', '=', view_id),
                        ('lang', '=', lang),
                        ('src', '=', initial_content),
                    ])
                if old_trans:
                    tid = old_trans[0]['id']
            if tid:
                vals = {'value': new_content}
                irt.write(request.cr, request.uid, [tid], vals)
            else:
                new_trans = {
                    'name': 'website',
                    'res_id': view_id,
                    'lang': lang,
                    'type': 'view',
                    'source': initial_content,
                    'value': new_content,
                }
                # Carry over optional Gengo metadata when present.
                if t.get('gengo_translation'):
                    new_trans['gengo_translation'] = t.get('gengo_translation')
                    new_trans['gengo_comment'] = t.get('gengo_comment')
                irt.create(request.cr, request.uid, new_trans)
    return True
@http.route('/website/translations', type='json', auth="public", website=True)
def get_website_translations(self, lang):
    """Return web-client translations for every installed website* module."""
    module_obj = request.registry['ir.module.module']
    module_ids = module_obj.search(request.cr, request.uid, [('name', 'ilike', 'website'), ('state', '=', 'installed')], context=request.context)
    modules = [x['name'] for x in module_obj.read(request.cr, request.uid, module_ids, ['name'], context=request.context)]
    return WebClient().translations(mods=modules, lang=lang)
@http.route('/website/attach', type='http', auth='user', methods=['POST'], website=True)
def attach(self, func, upload=None, url=None, disable_optimization=None):
    """Create an ir.attachment from an uploaded image or an external URL.

    Replies with a <script> snippet that invokes the caller-supplied JS
    callback *func* with (website_url, error_message).
    """
    Attachments = request.registry['ir.attachment']
    website_url = message = None
    if not upload:
        # URL-only attachment: no data stored, just a link record.
        website_url = url
        name = url.split("/").pop()
        attachment_id = Attachments.create(request.cr, request.uid, {
            'name': name,
            'type': 'url',
            'url': url,
            'res_model': 'ir.ui.view',
        }, request.context)
    else:
        try:
            image_data = upload.read()
            image = Image.open(cStringIO.StringIO(image_data))
            w, h = image.size
            if w*h > 42e6:  # Nokia Lumia 1020 photo resolution
                raise ValueError(
                    u"Image size excessive, uploaded images must be smaller "
                    u"than 42 million pixel")
            if not disable_optimization and image.format in ('PNG', 'JPEG'):
                # Re-encode for the web unless the caller opted out.
                image_data = image_save_for_web(image)
            attachment_id = Attachments.create(request.cr, request.uid, {
                'name': upload.filename,
                'datas': image_data.encode('base64'),
                'datas_fname': upload.filename,
                'res_model': 'ir.ui.view',
            }, request.context)
            [attachment] = Attachments.read(
                request.cr, request.uid, [attachment_id], ['website_url'],
                context=request.context)
            website_url = attachment['website_url']
        except Exception, e:
            # Report the failure back through the JS callback.
            logger.exception("Failed to upload image to attachment")
            message = unicode(e)
    return """<script type='text/javascript'>
        window.parent['%s'](%s, %s);
    </script>""" % (func, json.dumps(website_url), json.dumps(message))
@http.route(['/website/publish'], type='json', auth="public", website=True)
def publish(self, id, object):
_id = int(id)
_object = request.registry[object]
obj = _object.browse(request.cr, request.uid, _id)
values = {}
if 'website_published' in _object._fields:
values['website_published'] = not obj.website_published
_object.write(request.cr, request.uid, [_id],
values, context=request.context)
obj = _object.browse(request.cr, request.uid, _id)
return bool(obj.website_published)
@http.route(['/website/seo_suggest/<keywords>'], type='http', auth="public", websit | e=True)
def seo_suggest(self, keywords):
url = "http://google.com/complete/search"
| try:
req = urllib2.Request("%s?%s" % (url, werkzeug.url_encode({
'ie': 'utf8', 'oe': 'utf8', 'output': 'toolbar', 'q': keywords})))
request = urllib2.urlopen(req)
except (urllib2.HTTPError, urllib2.URLError):
return []
xmlroot = ET.fromstring(request.read())
return json.dumps([sugg[0].attrib['data'] for sugg in xmlroot if len(sugg) and sugg[0].attrib['data']])
    #------------------------------------------------------
    # Helpers
    #------------------------------------------------------
    @http.route(['/website/kanban'], type='http', auth="public", methods=['POST'], website=True)
    def kanban(self, **post):
        # Thin proxy: render a kanban column through the website model.
        return request.website.kanban_col(**post)
    def placeholder(self, response):
        # Delegate to the website model's generic placeholder image (used
        # when an image field is missing or unreadable).
        return request.registry['website']._image_placeholder(response)
    @http.route([
        '/website/image',
        '/website/image/<model>/<id>/<field>',
        '/website/image/<model>/<id>/<field>/<int:max_width>x<int:max_height>'
        ], auth="public", website=True)
    def website_image(self, model, id, field, max_width=None, max_height=None):
        """ Fetches the requested field and ensures it does not go above
        (max_width, max_height), resizing it if necessary.
        If the record is not found or does not have the requested field,
        returns a placeholder image via :meth:`~.placeholder`.
        Sets and checks conditional response parameters:
        * :mailheader:`ETag` is always set (and checked)
        * :mailheader:`Last-Modified is set iif (if and only if) the record
          has a concurrency field (``__last_update``)
        The requested field is assumed to be base64-encoded image data in
        all cases.
        """
        try:
            # `id` may carry a cache-buster suffix: "<numeric id>_<sha>".
            # A suffixed id enables long-lived static caching below.
            idsha = id.split('_')
            id = idsha[0]
            response = werkzeug.wrappers.Response()
            return request.registry['website']._image(
                request.cr, request.uid, model, id, field, response, max_width, max_height,
                cache=STATIC_CACHE if len(idsha) > 1 else None)
        except Exception:
            # Any failure (bad id, missing record/field, decode error)
            # degrades to the placeholder image rather than a 500.
            logger.exception("Cannot render image field %r of record %s[%s] at size(%s,%s)",
                             field, model, id, max_width, max_height)
            response = werkzeug.wrappers.Response()
            return self.placeholder(response)
#------------------------------------------------------
# Server actions
#------------------------------------------------------
@http.route([
'/website/action/<path_or_xml_id_or_id>',
'/website/action/<path_or_xml_id_or_id>/<path:path>',
], type='http', auth="public", we |
n Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have r | eceived a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
from copy import deepcopy
from weboob.tools.log import getLogger, DEBUG_FILTERS
from weboob.tools.ordereddict import OrderedDict
from weboob.browser.pages import NextPage
from .filters.standard import _Filter, CleanText
from . | filters.html import AttributeNotFound, XPathNotFound
__all__ = ['DataError', 'AbstractElement', 'ListElement', 'ItemElement', 'TableElement', 'SkipItem']
class DataError(Exception):
    """
    Raised when data returned from pages is incoherent
    (e.g. two parsed objects share the same ID).
    """
def method(klass):
    """Wrap an element class so it can be used as an instance method.

    Calling the resulting method instantiates *klass* with the owner
    instance, then invokes that element with the remaining arguments.
    """
    def wrapper(owner, *call_args, **call_kwargs):
        element = klass(owner)
        return element(*call_args, **call_kwargs)
    return wrapper
class AbstractElement(object):
    """Base class for page elements.

    An element is bound to a page, optionally to a parent element, and to a
    document node (``el``); it carries an ``env`` dict deep-copied from its
    parent (or from the page params) so mutations stay local.
    """
    # Monotonic counter giving each instance a stable id for debug logging.
    _creation_counter = 0
    def __init__(self, page, parent=None, el=None):
        self.page = page
        self.parent = parent
        # Working node resolution order: explicit el, then the parent's
        # node, then the page document root.
        if el is not None:
            self.el = el
        elif parent is not None:
            self.el = parent.el
        else:
            self.el = page.doc
        # Deep copies: the element may freely mutate its environment.
        if parent is not None:
            self.env = deepcopy(parent.env)
        else:
            self.env = deepcopy(page.params)
        # Used by debug
        self._random_id = AbstractElement._creation_counter
        AbstractElement._creation_counter += 1
        self.loaders = {}
    def use_selector(self, func, key=None):
        """Evaluate a selector: a filter, a nested ItemElement class, a
        plain callable, or a constant (deep-copied for safety)."""
        if isinstance(func, _Filter):
            # Give the filter a backref to its owner and attribute name.
            func._obj = self
            func._key = key
            value = func(self)
        elif isinstance(func, type) and issubclass(func, ItemElement):
            value = func(self.page, self, self.el)()
        elif callable(func):
            value = func()
        else:
            value = deepcopy(func)
        return value
    def parse(self, obj):
        # Subclass hook, called before attributes are evaluated.
        pass
    def cssselect(self, *args, **kwargs):
        # Convenience proxy to the underlying document node.
        return self.el.cssselect(*args, **kwargs)
    def xpath(self, *args, **kwargs):
        # Convenience proxy to the underlying document node.
        return self.el.xpath(*args, **kwargs)
    def handle_loaders(self):
        """Evaluate every ``load_*`` attribute once and cache the result in
        ``self.loaders`` under the unprefixed name (already-loaded names are
        skipped)."""
        for attrname in dir(self):
            m = re.match('load_(.*)', attrname)
            if not m:
                continue
            name = m.group(1)
            if name in self.loaders:
                continue
            loader = getattr(self, attrname)
            self.loaders[name] = self.use_selector(loader, key=attrname)
class ListElement(AbstractElement):
    """Element yielding a sequence of objects.

    Subclasses set ``item_xpath`` to select the nodes to process; every
    nested :class:`AbstractElement` subclass defined on the class is
    instantiated once per node.
    """
    # XPath selecting nodes to iterate over (None = use self.el directly).
    item_xpath = None
    # When True, objects are only yielded at the end, after de-duplication.
    flush_at_end = False
    # When True, duplicate IDs are skipped with a warning instead of raising.
    ignore_duplicate = False
    def __init__(self, *args, **kwargs):
        super(ListElement, self).__init__(*args, **kwargs)
        self.logger = getLogger(self.__class__.__name__.lower())
        # Insertion-ordered so flush() yields objects as discovered.
        self.objects = OrderedDict()
    def __call__(self, *args, **kwargs):
        # Keyword arguments extend the environment before iteration.
        for key, value in kwargs.iteritems():
            self.env[key] = value
        return self.__iter__()
    def find_elements(self):
        """
        Get the nodes that will have to be processed.
        This method can be overridden if xpath filters are not
        sufficient.
        """
        if self.item_xpath is not None:
            for el in self.el.xpath(self.item_xpath):
                yield el
        else:
            yield self.el
    def __iter__(self):
        self.parse(self.el)
        items = []
        for el in self.find_elements():
            for attrname in dir(self):
                attr = getattr(self, attrname)
                # Instantiate every nested element class; skip our own class
                # to avoid infinite recursion.
                if isinstance(attr, type) and issubclass(attr, AbstractElement) and attr != type(self):
                    item = attr(self.page, self, el)
                    item.handle_loaders()
                    items.append(item)
        # Two-phase: all loaders run above before any object is produced.
        for item in items:
            for obj in item:
                obj = self.store(obj)
                if obj and not self.flush_at_end:
                    yield obj
        if self.flush_at_end:
            for obj in self.flush():
                yield obj
        self.check_next_page()
    def flush(self):
        # Yield de-duplicated objects in discovery order.
        for obj in self.objects.itervalues():
            yield obj
    def check_next_page(self):
        """Evaluate the optional ``next_page`` selector and raise
        :class:`NextPage` so the browser follows pagination."""
        if not hasattr(self, 'next_page'):
            return
        next_page = getattr(self, 'next_page')
        try:
            value = self.use_selector(next_page)
        except (AttributeNotFound, XPathNotFound):
            # Missing pagination markup simply means "last page".
            return
        if value is None:
            return
        raise NextPage(value)
    def store(self, obj):
        """Register *obj* by ID; duplicates are skipped or fatal depending on
        ``ignore_duplicate``. Objects without an ID pass through unstored."""
        if obj.id:
            if obj.id in self.objects:
                if self.ignore_duplicate:
                    self.logger.warning('There are two objects with the same ID! %s' % obj.id)
                    return
                else:
                    raise DataError('There are two objects with the same ID! %s' % obj.id)
            self.objects[obj.id] = obj
        return obj
class SkipItem(Exception):
    """
    Raise this exception in an :class:`ItemElement` subclass to skip an item.

    The element's iterator catches it and yields nothing for that item.
    """
class _ItemElementMeta(type):
"""
Private meta-class used to keep order of obj_* attributes in :class:`ItemElement`.
"""
def __new__(mcs, name, bases, attrs):
_attrs = []
for base in bases:
if hasattr(base, '_attrs'):
_attrs += base._attrs
filters = [(re.sub('^obj_', '', attr_name), attrs[attr_name]) for attr_name, obj in attrs.items() if attr_name.startswith('obj_')]
# constants first, then filters, then methods
filters.sort(key=lambda x: x[1]._creation_counter if hasattr(x[1], '_creation_counter') else (sys.maxsize if callable(x[1]) else 0))
new_class = super(_ItemElementMeta, mcs).__new__(mcs, name, bases, attrs)
new_class._attrs = _attrs + [f[0] for f in filters]
return new_class
class ItemElement(AbstractElement):
    """Element building a single object: each ``obj_*`` class attribute is
    evaluated (in the order preserved by the metaclass) and assigned onto
    the object built from ``klass``."""
    __metaclass__ = _ItemElementMeta
    # Ordered attribute names (without obj_ prefix); filled by the metaclass.
    _attrs = None
    _loaders = None
    # Class of the object to build (None = build nothing).
    klass = None
    # Optional callable: a false result makes the item yield nothing.
    condition = None
    # Optional callable(obj): a false result drops the built object.
    validate = None
    class Index(object):
        pass
    def __init__(self, *args, **kwargs):
        super(ItemElement, self).__init__(*args, **kwargs)
        self.logger = getLogger(self.__class__.__name__.lower())
        self.obj = None
    def build_object(self):
        # Instantiate the target object, if a class was configured.
        if self.klass is None:
            return
        return self.klass()
    def __call__(self, obj=None):
        # Allow filling an existing object instead of building a new one;
        # returns the single produced object (or None).
        if obj is not None:
            self.obj = obj
        for obj in self:
            return obj
    def __iter__(self):
        if self.condition is not None and not self.condition():
            return
        try:
            if self.obj is None:
                self.obj = self.build_object()
            self.parse(self.el)
            self.handle_loaders()
            for attr in self._attrs:
                self.handle_attr(attr, getattr(self, 'obj_%s' % attr))
        except SkipItem:
            # Raised by parse() or a selector to discard this item.
            return
        if self.validate is not None and not self.validate(self.obj):
            return
        yield self.obj
    def handle_attr(self, key, func):
        """Evaluate one selector and set the result on the object."""
        try:
            value = self.use_selector(func, key=key)
        except Exception as e:
            # Help debugging as tracebacks do not give us the key
            self.logger.warning('Attribute %s raises %s' % (key, repr(e)))
            raise
        logger = getLogger('b2filters')
        logger.log(DEBUG_FILTERS, "%s.%s = %r" % (self._random_id, key, value))
        setattr(self.obj, key, value)
class TableElement(ListElement):
head_xpath = None
cleaner = CleanText
def __init__(self, *args, **kwargs):
super(TableElement, self).__init__(*args, **kwargs)
self._cols = {} |
from P_14 import *
# Print the dimensions of the iris data matrix; assumes P_14 exposes an
# ``iris_dataset`` mapping with a numpy-style "data" array — TODO confirm.
print("Shape of data: {}".format(iris_dataset["data"].shape))
# Pause so the console window stays open until Enter is pressed.
input()
|
# encoding: utf-8
"""Gherkin step implementations for chart data features."""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import datetime
from behave import given, then, when
from pptx.chart.data import (
BubbleChartData, Category, CategoryChartData, XyChartData
)
from pptx.enum.chart import XL_CHART_TYPE
from pptx.util import Inches
# given ===================================================
# Note: in the number-format steps below, the literal string 'None' in the
# feature file means "leave the default"; any other value is int-parseable.
@given('a BubbleChartData object with number format {strval}')
def given_a_BubbleChartData_object_with_number_format(context, strval):
    params = {}
    if strval != 'None':
        params['number_format'] = int(strval)
    context.chart_data = BubbleChartData(**params)
@given('a Categories object with number format {init_nf}')
def given_a_Categories_object_with_number_format_init_nf(context, init_nf):
    categories = CategoryChartData().categories
    if init_nf != 'left as default':
        categories.number_format = init_nf
    context.categories = categories
@given('a Category object')
def given_a_Category_object(context):
    # Category(label, idx) — both left unset for a bare object.
    context.category = Category(None, None)
@given('a CategoryChartData object')
def given_a_CategoryChartData_object(context):
    context.chart_data = CategoryChartData()
@given('a CategoryChartData object having date categories')
def given_a_CategoryChartData_object_having_date_categories(context):
    chart_data = CategoryChartData()
    chart_data.categories = [
        datetime.date(2016, 12, 27),
        datetime.date(2016, 12, 28),
        datetime.date(2016, 12, 29),
    ]
    context.chart_data = chart_data
@given('a CategoryChartData object with number format {strval}')
def given_a_CategoryChartData_object_with_number_format(context, strval):
    params = {}
    if strval != 'None':
        params['number_format'] = int(strval)
    context.chart_data = CategoryChartData(**params)
@given('a XyChartData object with number format {strval}')
def given_a_XyChartData_object_with_number_format(context, strval):
    params = {}
    if strval != 'None':
        params['number_format'] = int(strval)
    context.chart_data = XyChartData(**params)
@given('the categories are of type {type_}')
def given_the_categories_are_of_type(context, type_):
    # Map the feature-file type name to a representative label value.
    label = {
        'date': datetime.date(2016, 12, 22),
        'float': 42.24,
        'int': 42,
        'str': 'foobar',
    }[type_]
    context.categories.add_category(label)
# when ====================================================
# 'None' in the feature file again means "use the default number format".
@when('I add a bubble data point with number format {strval}')
def when_I_add_a_bubble_data_point_with_number_format(context, strval):
    series_data = context.series_data
    params = {'x': 1, 'y': 2, 'size': 10}
    if strval != 'None':
        params['number_format'] = int(strval)
    context.data_point = series_data.add_data_point(**params)
@when('I add a data point with number format {strval}')
def when_I_add_a_data_point_with_number_format(context, strval):
    series_data = context.series_data
    params = {'value': 42}
    if strval != 'None':
        params['number_format'] = int(strval)
    context.data_point = series_data.add_data_point(**params)
@when('I add an XY data point with number format {strval}')
def when_I_add_an_XY_data_point_with_number_format(context, strval):
    series_data = context.series_data
    params = {'x': 1, 'y': 2}
    if strval != 'None':
        params['number_format'] = int(strval)
    context.data_point = series_data.add_data_point(**params)
@when('I add an {xy_type} chart having 2 series of 3 points each')
def when_I_add_an_xy_chart_having_2_series_of_3_points(context, xy_type):
    # xy_type is an XL_CHART_TYPE member name taken from the feature file.
    chart_type = getattr(XL_CHART_TYPE, xy_type)
    data = (
        ('Series 1', ((-0.1, 0.5), (16.2, 0.0), (8.0, 0.2))),
        ('Series 2', ((12.4, 0.8), (-7.5, -0.5), (-5.1, -0.2)))
    )
    chart_data = XyChartData()
    for series_data in data:
        series_label, points = series_data
        series = chart_data.add_series(series_label)
        for point in points:
            x, y = point
            series.add_data_point(x, y)
    context.chart = context.slide.shapes.add_chart(
        chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
    ).chart
@when("I assign ['a', 'b', 'c'] to chart_data.categories")
def when_I_assign_a_b_c_to_chart_data_categories(context):
    chart_data = context.chart_data
    chart_data.categories = ['a', 'b', 'c']
# then ====================================================
@then("[c.label for c in chart_data.categories] is ['a', 'b', 'c']")
def then_c_label_for_c_in_chart_data_categories_is_a_b_c(context):
    chart_data = context.chart_data
    assert [c.label for c in chart_data.categories] == ['a', 'b', 'c']
@then('categories.number_format is {value}')
def then_categories_number_format_is_value(context, value):
    # Compared as a string, spelled exactly as number_format renders it.
    expected_value = value
    number_format = context.categories.number_format
    assert number_format == expected_value, 'got %s' % number_format
@then('category.add_sub_category(name) is a Category object')
def then_category_add_sub_category_is_a_Category_object(context):
    category = context.category
    context.sub_category = sub_category = category.add_sub_category('foobar')
    assert type(sub_category).__name__ == 'Category'
@then('category.sub_categories[-1] is the new category')
def then_category_sub_categories_minus_1_is_the_new_category(context):
    category, sub_category = context.category, context.sub_category
    assert category.sub_categories[-1] is sub_category
@then('chart_data.add_category(name) is a Category object')
def then_chart_data_add_category_name_is_a_Category_object(context):
    chart_data = context.chart_data
    context.category = category = chart_data.add_category('foobar')
    assert type(category).__name__ == 'Category'
@then('chart_data.add_series(name, values) is a CategorySeriesData object')
def then_chart_data_add_series_is_a_CategorySeriesData_object(context):
    chart_data = context.chart_data
    context.series = series = chart_data.add_series('Series X', (1, 2, 3))
    assert type(series).__name__ == 'CategorySeriesData'
@then('chart_data.categories is a Categories object')
def then_chart_data_categories_is_a_Categories_object(context):
    chart_data = context.chart_data
    assert type(chart_data.categories).__name__ == 'Categories'
@then('chart_data.categories[-1] is the category')
def then_chart_data_categories_minus_1_is_the_category(context):
    chart_data, category = context.chart_data, context.category
    assert chart_data.categories[-1] is category
@then('chart_data.number_format is {value_str}')
def then_chart_data_number_format_is(context, value_str):
    chart_data = context.chart_data
    # 'General' stays a string; any other expected value is an int.
    number_format = value_str if value_str == 'General' else int(value_str)
    assert chart_data.number_format == number_format
@then('chart_data[-1] is the new series')
def then_chart_data_minus_1_is_the_new_series(context):
    chart_data, series = context.chart_data, context.series
    assert chart_data[-1] is series
@then('series_data.number_format is {value_str}')
def then_series_data_number_format_is(context, value_str):
    series_data = context.series_data
    number_format = value_str if value_str == 'General' else int(value_str)
    assert series_data.number_format == number_format
|
# -*- coding: utf-8 -*-
import pytest
from irc3.plugins import slack
pytestmark = pytest.mark.asyncio
async def test_simple_matches(irc3_bot_factory):
    """Slack markup decoding that needs no API lookups."""
    bot = irc3_bot_factory(includes=['irc3.plugins.slack'])
    plugin = bot.get_plugin(slack.Slack)
    setattr(plugin, 'config', {'token': 'xoxp-faketoken'})
    assert '' == await plugin.parse_text('\n')
    assert '' == await plugin.parse_text('\r\n')
    assert '' == await plugin.parse_text('\r')
    assert '@channel' == await plugin.parse_text('<!channel>')
    assert '@group' == await plugin.parse_text('<!group>')
    assert '@everyone' == await plugin.parse_text('<!everyone>')
    assert '<' == await plugin.parse_text('&lt;')
    assert '>' == await plugin.parse_text('&gt;')
    assert '&' == await plugin.parse_text('&amp;')
    assert 'daniel' == await plugin.parse_text('<WHATEVER|daniel>')
async def test_channel_matches(irc3_bot_factory):
    """Channel references resolve through the (stubbed) Slack API."""
    bot = irc3_bot_factory(includes=['irc3.plugins.slack'])
    plugin = bot.get_plugin(slack.Slack)
    setattr(plugin, 'config', {'token': 'xoxp-faketoken'})
    # NOTE(review): assigning a plain function to the instance does not bind
    # it, so if the plugin calls self.api_call('channels.info', ...) the
    # first positional argument lands in `self` here; the stub works only
    # because it ignores its arguments — confirm against the plugin.
    async def api_call(self, method, date=None):
        return ({'channel': {'name': 'testchannel'}})
    plugin.api_call = api_call
    assert '#testchannel' == await plugin.parse_text('<#C12345>')
    assert 'channel' == await plugin.parse_text('<#C12345|channel>')
async def test_user_matches(irc3_bot_factory):
    """User references resolve through the (stubbed) Slack API."""
    bot = irc3_bot_factory(includes=['irc3.plugins.slack'])
    plugin = bot.get_plugin(slack.Slack)
    setattr(plugin, 'config', {'token': 'xoxp-faketoken'})
    # Same unbound-stub caveat as in test_channel_matches above.
    async def api_call(self, method, date=None):
        return ({'user': {'name': 'daniel'}})
    plugin.api_call = api_call
    assert '@daniel' == await plugin.parse_text('<@U12345>')
    assert 'user' == await plugin.parse_text('<@U12345|user>')
async def test_emoji_matches(irc3_bot_factory):
    """Known emoji shortcodes are converted to ASCII smileys."""
    bot = irc3_bot_factory(includes=['irc3.plugins.slack'])
    plugin = bot.get_plugin(slack.Slack)
    setattr(plugin, 'config', {'token': 'xoxp-faketoken'})
    assert ':-)' == await plugin.parse_text(':smiley:')
    assert ':@' == await plugin.parse_text(':rage:')
|
import six
# Py2/Py3 compatibility shims: prefer the Python 2 location, fall back to
# the Python 3 one.
try:
    from logging import NullHandler
except ImportError: # Python 2.6
    from logging import Handler
    class NullHandler(Handler):
        # Minimal stand-in for pre-2.7 logging: discard every record.
        def emit(self, record):
            pass
try:
    from urllib import urlencode as format_query
except ImportError:
    from urllib.parse import urlencode as format_query
try:
    from urlparse import urlparse as parse_url
except ImportError:
    from urllib.parse import urlparse as parse_url
# memoryview exists on Py2.7+/Py3; fall back to Py2's buffer otherwise.
try:
    memoryview = memoryview
except NameError:
    memoryview = buffer
def get_int(*args):
    """Return the integer value of the byte/character at the given position.

    Digit characters convert via ``int()``; anything else falls back to its
    ordinal value.
    """
    ch = get_character(*args)
    try:
        return int(ch)
    except ValueError:
        return ord(ch)
def get_character(x, index):
    """Return the character at *index* of *x* as a one-character string."""
    byte_value = get_byte(x, index)
    return chr(byte_value)
def get_byte(x, index):
    # Integer byte value at *index*; six.indexbytes papers over the
    # Python 2 (str) vs Python 3 (bytes) indexing difference.
    return six.indexbytes(x, index)
def encode_string(x):
    """Encode text *x* into its UTF-8 byte representation."""
    encoding = 'utf-8'
    return x.encode(encoding)
def decode_string(x):
    """Decode UTF-8 bytes *x* back into a text string."""
    encoding = 'utf-8'
    return x.decode(encoding)
|
from random import randint
from position import Position, Size
from block import Room, Block
class Room(object):
    # NOTE(review): this class shadows the ``Room`` imported from ``block``.
    # The defaults below (``Room.left`` etc.) are evaluated at class-creation
    # time, while the name still refers to the *imported* block.Room, so they
    # resolve to that module's wall blocks — confirm before renaming either.
    def __init__(self, pos_row=0, pos_col=0, rows=1, cols=1, fill=Block.empty,
                 left=Room.left, right=Room.right,
                 top=Room.top, bottom=Room.bottom,
                 top_left=Room.top_left, top_right=Room.top_right,
                 bottom_left=Room.bottom_left, bottom_right=Room.bottom_right):
        self.pos = Position(pos_row, pos_col)
        # Integer midpoint of the room (useful for corridor routing).
        self.center = Position(pos_row + (rows // 2), pos_col + (cols // 2))
        self.size = Size(rows, cols)
        self.fill = fill
        # Specific the block of walls
        self.left = left
        self.right = right
        self.top = top
        self.bottom = bottom
        self.top_left = top_left
        self.top_right = top_right
        self.bottom_left = bottom_left
        self.bottom_right = bottom_right
    @classmethod
    def from_objects(cls, pos, size, **kwargs):
        # Alternate constructor taking Position/Size objects directly.
        return cls(pos.row, pos.col, size.rows, size.cols, **kwargs)
    def collision(self, other_room):
        """
        Checks if two rooms intersect each other
        The logic is clearer as a one dimension line
        """
        # Bottom-right corners of both rooms, then a standard axis-aligned
        # bounding-box overlap test, one axis at a time.
        pos_2 = Position(self.pos.row + self.size.rows,
                         self.pos.col + self.size.cols)
        other_room_pos_2 = Position(other_room.pos.row + other_room.size.rows,
                                    other_room.pos.col + other_room.size.cols)
        return (self.pos.col <= other_room_pos_2.col and
                pos_2.col >= other_room.pos.col and
                self.pos.row <= other_room_pos_2.row and
                pos_2.row >= other_room.pos.row)
    @classmethod
    def generate(cls, min_pos, max_pos, min_size, max_size):
        """
        Create room from min_size to max_size between min_pos and max_pos
        """
        # Pick the size first so the position range can be clamped to keep
        # the whole room inside [min_pos, max_pos].
        size = Size(randint(min_size.rows, max_size.rows),
                    randint(min_size.cols, max_size.cols))
        pos = Position(randint(min_pos.row, max_pos.row - size.rows),
                       randint(min_pos.col, max_pos.col - size.cols))
        return cls.from_objects(pos, size)
class RoomList():
    """Sequence-like collection of rooms with collision-aware generation."""
    def __init__(self):
        self._rooms = []
    def __iter__(self):
        return iter(self._rooms)
    def __getitem__(self, key):
        return self._rooms[key]
    def __len__(self):
        return len(self._rooms)
    def append(self, room):
        self._rooms.append(room)
    def generate(self, num, min_pos, max_pos, min_size, max_size):
        """
        Given a number of rooms, generate rooms that don't intersect
        """
        for _ in range(num):
            # Re-roll candidates until one fits without overlapping.
            candidate = Room.generate(min_pos, max_pos, min_size, max_size)
            while self.is_collision(candidate):
                candidate = Room.generate(min_pos, max_pos, min_size, max_size)
            self.append(candidate)
    def is_collision(self, room):
        """
        Iterate through the list of rooms to test for collisions
        """
        return any(existing.collision(room) for existing in self)
|
#!/usr/bin/python3
## @package domomaster
# Master daemon for D3 boxes.
#
# Developed by GreenLeaf.
import sys;
import os;
import random;
import string;
from hashlib import sha1
from subprocess import *
import socket;
sys.path.append("/usr/lib/domoleaf");
from DaemonConfigParser import *;
MASTER_CONF_FILE_BKP = '/etc/domoleaf/master.conf.save';
MASTER_CONF_FILE_TO = '/etc/domoleaf/master.conf';
SLAVE_CONF_FILE = '/etc/domoleaf/slave.conf';
## Copies the conf data from a backup file to a new one.
#  Every (section, key) pair listed below is read from the backup
#  master.conf and written into the freshly installed master.conf so that
#  user settings survive a package upgrade.
def master_conf_copy():
    file_from = DaemonConfigParser(MASTER_CONF_FILE_BKP);
    file_to = DaemonConfigParser(MASTER_CONF_FILE_TO);
    # Data-driven copy replaces seven identical get/write pairs.
    settings = [
        ('listen', 'port_slave'),
        ('listen', 'port_cmd'),
        ('connect', 'port'),
        ('mysql', 'user'),
        ('mysql', 'database_name'),
        ('greenleaf', 'commercial'),
        ('greenleaf', 'admin_addr'),
    ];
    for section, key in settings:
        var = file_from.getValueFromSection(section, key);
        file_to.writeValueFromSection(section, key, var);
## Initializes the conf in database.
#  Generates a random MySQL password for the 'domoleaf' user, stores its
#  SHA-1 hex digest in master.conf and the PHP web config, then (re)creates
#  the MySQL user and grants through the Debian maintenance account.
def master_conf_initdb():
    file = DaemonConfigParser(MASTER_CONF_FILE_TO);
    #mysql password
    # NOTE(review): `random` is not a CSPRNG; for a security credential the
    # `secrets` module (or random.SystemRandom) would be preferable.
    password = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(128))
    password = sha1(password.encode('utf-8'))
    file.writeValueFromSection('mysql', 'password', password.hexdigest());
    os.system('sed -i "s/define(\'DB_PASSWORD\', \'domoleaf\')/define(\'DB_PASSWORD\', \''+password.hexdigest()+'\')/g" /etc/domoleaf/www/config.php')
    #mysql user
    # The digest is hex-only, so interpolating it into the SQL strings below
    # cannot break out of the quoted literals.
    query1 = 'DELETE FROM user WHERE User="domoleaf"';
    query2 = 'DELETE FROM db WHERE User="domoleaf"';
    query3 = 'INSERT INTO user (Host, User, Password) VALUES (\'%\', \'domoleaf\', PASSWORD(\''+password.hexdigest()+'\'));';
    query4 = 'INSERT INTO db (Host, Db, User, Select_priv, Insert_priv, Update_priv, Delete_priv, Create_priv, Drop_priv, Grant_priv, References_priv, Index_priv, Alter_priv, Create_tmp_table_priv, Lock_tables_priv, Create_view_priv, Show_view_priv, Create_routine_priv, Alter_routine_priv, Execute_priv, Event_priv, Trigger_priv) VALUES ("%","domoleaf","domoleaf","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y");';
    query5 = 'FLUSH PRIVILEGES';
    call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query1]);
    call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query2]);
    call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query3]);
    call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query4]);
    call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query5]);
## Initializes the conf in file.
#  Fresh-install path: reads the slave daemon's AES key, detects the KNX
#  interface, and registers this box in the domoleaf database.
def master_conf_init():
    file = DaemonConfigParser(SLAVE_CONF_FILE);
    personnal_key = file.getValueFromSection('personnal_key', 'aes');
    hostname = socket.gethostname();
    #KNX Interface
    # Prefer a local serial TPUART adapter when one is present; otherwise
    # fall back to an IP tunnel on localhost.
    if os.path.exists('/dev/ttyAMA0'):
        knx = "tpuarts"
        knx_interface = 'ttyAMA0';
    elif os.path.exists('/dev/ttyS0'):
        knx = "tpuarts"
        knx_interface = 'ttyS0';
    else:
        knx = "ipt"
        knx_interface = '127.0.0.1';
    # Installed domoslave package version (empty string when absent).
    domoslave = os.popen("dpkg-query -W -f='${Version}\n' domoslave").read().split('\n')[0];
    # NOTE(review): hostname/personnal_key are concatenated into SQL without
    # escaping; values are machine-generated here, but parameterized queries
    # would be safer.
    query1 = "INSERT INTO daemon (name, serial, secretkey, validation, version) VALUES ('"+hostname+"','"+hostname+"','"+personnal_key+"',1,'"+domoslave+"')"
    query2 = "INSERT INTO daemon_protocol (daemon_id, protocol_id, interface, interface_arg) VALUES (1,1,'"+knx+"','"+knx_interface+"')"
    call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'domoleaf',
          '-e', query1]);
    call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'domoleaf',
          '-e', query2]);
if __name__ == "__main__":
    #Upgrade
    # A backup master.conf means we are upgrading: restore the saved values
    # and drop the backup; otherwise it is a fresh install seeded from the
    # slave configuration. The database init runs in both cases.
    if os.path.exists(MASTER_CONF_FILE_BKP):
        master_conf_copy()
        os.remove(MASTER_CONF_FILE_BKP);
    else:
        master_conf_init()
    master_conf_initdb()
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Create missing snapshot revisions.
Create Date: 2017-01-05 23:10:37.257161
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from ggrc.migrations.utils.snapshot_revisions import handle_objects
# revision identifiers, used by Alembic.
revision = '579239d161e1'
down_revision = '353e5f281799'
def upgrade():
  """Create missing revisions for snapshottable objects."""
  # Copy of ggrc.snapshoter.rules.Types.all, inlined so this migration stays
  # stable even if the live rules change later.
  snapshottable_types = (
      "AccessGroup",
      "Clause",
      "Control",
      "DataAsset",
      "Facility",
      "Market",
      "Objective",
      "OrgGroup",
      "Product",
      "Section",
      "Vendor",
      "Policy",
      "Regulation",
      "Standard",
      "Contract",
      "System",
      "Process",
      "Risk",
      "Threat",
  )
  handle_objects(sorted(snapshottable_types))
def downgrade():
  """No-op by design: data correction migrations can not be downgraded."""
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Matthias Luescher
#
# Authors:
# Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi. If not, see <http://www.gnu.org/licenses/>.
from edi.lib.edicommand import EdiCommand
from edi.lib.versionhelpers import get_edi_version
class Version(EdiCommand):
    """Sub-command that prints the edi program version."""

    @classmethod
    def advertise(cls, subparsers):
        """Register this sub-command on the given argparse subparsers."""
        subparsers.add_parser(cls._get_short_command_name(),
                              help="print the program version",
                              description="Print the program version.")

    def run_cli(self, _):
        """CLI entry point: print the version string to stdout."""
        print(self.run())

    @staticmethod
    def run():
        """Return the edi version string."""
        return get_edi_version()
|
#!/bin/python
import sys
import vlc
import os
import re
from tempfile import *
from gtts import gTTS
from remote2text import | RGBRemote2Text
# Main loop: read raw IR codes from stdin, translate them to Portuguese
# text, and speak the result through VLC using Google TTS.
parser = RGBRemote2Text(verbose=True)
while True:
    ir_out = input()
    response = parser.process(ir_out)
    if response:
        tts = gTTS(text=response, lang='pt')
        tmp = NamedTemporaryFile(delete=False)
        tts.write_to_fp(tmp)
        # BUG FIX: close (and therefore flush) the file *before* handing it
        # to VLC; previously playback started on a possibly truncated file
        # that was only closed afterwards.
        tmp.close()
        # tmp.name is already an absolute path, so the old
        # os.path.join(gettempdir(), tmp.name) was a no-op; use it directly.
        vlc.MediaPlayer(tmp.name).play()
|
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db.models import Max, F
from django.shortcuts import render, get_object_or_404, redirect
from django.utils.decorators import method_decorator
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from ponyFiction import signals
from ponyFiction.forms.chapter import ChapterForm
from ponyFiction.models import Story, Chapter, Author
from django.views.decorators.csrf import csrf_protect
from cacheops import invalidate_obj
from .story import get_story
def chapter_view(request, story_id=False, chapter_order=False):
    """Display one chapter of a story, or all chapters at once.

    :param story_id: primary key of the story.
    :param chapter_order: 1-based position of the chapter within the story;
        when falsy, the "all chapters" page is rendered instead.
    """
    story = get_story(request, pk=story_id)
    if chapter_order:
        chapter = get_object_or_404(story.chapter_set, order=chapter_order)
        # Title is truncated to keep the page title bounded.
        page_title = "{} — {}".format(chapter.title[:80], story.title)
        prev_chapter = chapter.get_prev_chapter()
        next_chapter = chapter.get_next_chapter()
        if request.user.is_authenticated():
            # Record the view only for authenticated readers.
            signals.story_viewed.send(sender=Author, instance=request.user, story=story, chapter=chapter)
        data = {
            'story': story,
            'chapter': chapter,
            'prev_chapter': prev_chapter,
            'next_chapter': next_chapter,
            'page_title': page_title,
            'allchapters': False
        }
    else:
        # All-chapters mode; .cache() is the cacheops queryset cache.
        chapters = story.chapter_set.order_by('order').cache()
        page_title = "{} — все главы".format(story.title)
        if request.user.is_authenticated():
            signals.story_viewed.send(sender=Author, instance=request.user, story=story, chapter=None)
        data = {
            'story': story,
            'chapters': chapters,
            'page_title': page_title,
            'allchapters': True
        }
    return render(request, 'chapter_view.html', data)
class ChapterAdd(CreateView):
    """Add a new chapter to a story (story authors only)."""
    model = Chapter
    form_class = ChapterForm
    template_name = 'chapter_work.html'
    initial = {'button_submit': 'Добавить'}
    story = None
    @method_decorator(login_required)
    @method_decorator(csrf_protect)
    def dispatch(self, request, *args, **kwargs):
        # Resolve the parent story and gate on edit permission up front.
        self.story = get_object_or_404(Story, pk=kwargs['story_id'])
        if self.story.editable_by(request.user):
            return CreateView.dispatch(self, request, *args, **kwargs)
        else:
            raise PermissionDenied
    def form_valid(self, form):
        chapter = form.save(commit=False)
        chapter.story = self.story
        # Append at the end: next free position after the current maximum
        # (or 1 when the story has no chapters yet).
        chapter.order = (self.story.chapter_set.aggregate(o=Max('order'))['o'] or 0) + 1
        chapter.save()
        return redirect('chapter_edit', chapter.id)
    def get_context_data(self, **kwargs):
        context = super(ChapterAdd, self).get_context_data(**kwargs)
        extra_context = {'page_title': 'Добавить новую главу', 'story': self.story}
        context.update(extra_context)
        return context
class ChapterEdit(UpdateView):
    """Edit an existing chapter (story authors only)."""
    model = Chapter
    form_class = ChapterForm
    template_name = 'chapter_work.html'
    initial = {'button_submit': 'Сохранить изменения'}
    chapter = None
    @method_decorator(login_required)
    @method_decorator(csrf_protect)
    def dispatch(self, request, *args, **kwargs):
        return UpdateView.dispatch(self, request, *args, **kwargs)
    def get_object(self, queryset=None):
        # Permission check lives here so both GET and POST paths hit it.
        self.chapter = UpdateView.get_object(self, queryset=queryset)
        if self.chapter.story.editable_by(self.request.user):
            return self.chapter
        else:
            raise PermissionDenied
    def form_valid(self, form):
        self.chapter = form.save()
        return redirect('chapter_edit', self.chapter.id)
    def get_context_data(self, **kwargs):
        context = super(ChapterEdit, self).get_context_data(**kwargs)
        extra_context = {'page_title': 'Редактирование «%s»' % self.chapter.title, 'chapter': self.chapter}
        context.update(extra_context)
        return context
class ChapterDelete(DeleteView):
    """Delete a chapter, closing the gap in the remaining chapters' order."""
    model = Chapter
    chapter = None
    story = None
    chapter_id = None
    template_name = 'chapter_confirm_delete.html'
    @method_decorator(login_required)
    @method_decorator(csrf_protect)
    def dispatch(self, request, *args, **kwargs):
        return DeleteView.dispatch(self, request, *args, **kwargs)
    def get_object(self, queryset=None):
        # Permission check lives here so both GET and POST paths hit it.
        self.chapter = DeleteView.get_object(self, queryset=queryset)
        self.story = self.chapter.story
        self.chapter_id = self.chapter.id
        if self.story.editable_by(self.request.user):
            return self.chapter
        else:
            raise PermissionDenied
    def delete(self, request, *args, **kwargs):
        self.chapter = self.get_object()
        # Shift every later chapter down by one to close the numbering gap.
        self.story.chapter_set.filter(order__gt=self.chapter.order).update(order=F('order')-1)
        # BUG FIX: after the update the shifted chapters have order >= the
        # deleted chapter's order (its immediate successor now has the *same*
        # order), so the old `order__gt` filter skipped that chapter's cache
        # invalidation. `order__gte` invalidates every shifted row (it also
        # matches the chapter being deleted, which is harmless).
        for chapter in self.story.chapter_set.filter(order__gte=self.chapter.order):
            invalidate_obj(chapter)
        self.chapter.delete()
        return redirect('story_edit', self.story.id)
    def get_context_data(self, **kwargs):
        context = super(ChapterDelete, self).get_context_data(**kwargs)
        extra_context = {'page_title': 'Подтверждение удаления главы', 'story': self.story, 'chapter': self.chapter}
        context.update(extra_context)
        return context
|
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_patro_surname_only(raw_surn_data_list):
    """method for the '1y' symbol: patronymic surname only"""
    # Return the surname of the first pa/matronymic entry, if any.
    for surn in raw_surn_data_list:
        if surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO):
            return ' '.join(("%s" % surn[_SURNAME_IN_LIST]).split())
    return ''
def _raw_patro_prefix_only(raw_surn_data_list):
    """method for the '0y' symbol: patronymic prefix only"""
    # Return the prefix of the first pa/matronymic entry, if any.
    for surn in raw_surn_data_list:
        if surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO):
            return ' '.join(("%s" % surn[_PREFIX_IN_LIST]).split())
    return ''
def _raw_patro_conn_only(raw_surn_data_list):
    """method for the '2y' symbol: patronymic conn only"""
    # Return the connector of the first pa/matronymic entry, if any.
    for surn in raw_surn_data_list:
        if surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO):
            return ' '.join(("%s" % surn[_CONNECTOR_IN_LIST]).split())
    return ''
def _raw_nonpatro_surname(raw_surn_data_list):
    """method for the 'o' symbol: full surnames without pa/matronymic or
    primary
    """
    # Collect "prefix surname connector" for every non-primary,
    # non-pa/matronymic entry, then normalize whitespace.
    pieces = []
    for surn in raw_surn_data_list:
        if (not surn[_PRIMARY_IN_LIST]
                and surn[_TYPE_IN_LIST][0] not in (_ORIGINPATRO, _ORIGINMATRO)):
            pieces.append("%s %s %s " % (surn[_PREFIX_IN_LIST],
                                         surn[_SURNAME_IN_LIST],
                                         surn[_CONNECTOR_IN_LIST]))
    return ' '.join(''.join(pieces).split()).strip()
def _raw_nonprimary_surname(raw_surn_data_list):
    """method for the 'r' symbol: nonprimary surnames"""
    result = ''
    for surn in raw_surn_data_list:
        if surn[_PRIMARY_IN_LIST]:
            continue
        result = "%s %s %s %s" % (result,
                                  surn[_PREFIX_IN_LIST],
                                  surn[_SURNAME_IN_LIST],
                                  surn[_CONNECTOR_IN_LIST])
    # Collapse runs of whitespace left by empty fields.
    return ' '.join(result.split())
def _raw_prefix_surname(raw_surn_data_list):
    """method for the 'p' symbol: all prefixes"""
    joined = ' '.join("%s" % surn[_PREFIX_IN_LIST]
                      for surn in raw_surn_data_list)
    # Normalize whitespace left by empty prefixes.
    return ' '.join(joined.split()).strip()
def _raw_single_surname(raw_surn_data_list):
    """method for the 'q' symbol: surnames without prefix and connectors"""
    joined = ' '.join("%s" % surn[_SURNAME_IN_LIST]
                      for surn in raw_surn_data_list)
    # Normalize whitespace left by empty surnames.
    return ' '.join(joined.split()).strip()
def cleanup_name(namestring):
    """Remove too long white space due to missing name parts,
    so "a  b" becomes "a b" and "a , b" becomes "a, b"
    """
    tokens = namestring.split()
    if not tokens:
        return ""
    # Single-character punctuation attaches to the previous token;
    # everything else is separated by exactly one space.
    punctuation = (',', ';', ':', ARABIC_COMMA, ARABIC_SEMICOLON)
    pieces = [tokens[0]]
    for token in tokens[1:]:
        if len(token) == 1 and token in punctuation:
            pieces.append(token)
        else:
            pieces.append(' ' + token)
    return ''.join(pieces)
#-------------------------------------------------------------------------
#
# NameDisplay class
#
#-------------------------------------------------------------------------
class NameDisplay:
"""
Base class for displaying of Name instances.
Property:
*default_format*
the default name format to use
*pas_as_surn*
if only one surname, see if pa/ma should be considered as 'the' surname.
"""
format_funcs = {}
raw_format_funcs = {}
    def __init__(self, xlocale=glocale):
        """
        Initialize the NameDisplay class.

        If xlocale is passed in (a GrampsLocale), then
        the translated script will be returned instead.

        :param xlocale: allow selection of the displayer script
        :type xlocale: a GrampsLocale instance
        """
        global WITH_GRAMPS_CONFIG
        global PAT_AS_SURN

        # translators: needed for Arabic, ignore otherwise
        COMMAGLYPH = xlocale.translation.gettext(',')

        # Built-in formats: (format id, localized description,
        # format string, active flag).
        self.STANDARD_FORMATS = [
            (Name.DEF, _("Default format (defined by Gramps preferences)"),
             '', _ACT),
            (Name.LNFN, _("Surname, Given Suffix"),
             '%l' + COMMAGLYPH + ' %f %s', _ACT),
            (Name.FN, _("Given"),
             '%f', _ACT),
            (Name.FNLN, _("Given Surname Suffix"),
             '%f %l %s', _ACT),
            # primary name primconnector other, given pa/matronynic suffix, primprefix
            # translators: long string, have a look at Preferences dialog
            (Name.LNFNP, _("Main Surnames, Given Patronymic Suffix Prefix"),
             '%1m %2m %o' + COMMAGLYPH + ' %f %1y %s %0m', _ACT),
            # DEPRECATED FORMATS
            (Name.PTFN, _("Patronymic, Given"),
             '%y' + COMMAGLYPH + ' %s %f', _INA),
        ]

        # Template used by the hand-optimized _raw_lnfn fast path.
        self.LNFN_STR = "%s" + COMMAGLYPH + " %s %s"

        self.name_formats = {}

        if WITH_GRAMPS_CONFIG:
            # Pull the preferred format from Gramps config; 0 means "unset",
            # in which case LNFN is persisted as the default.
            self.default_format = config.get('preferences.name-format')
            if self.default_format == 0:
                self.default_format = Name.LNFN
                config.set('preferences.name-format', self.default_format)

            #if only one surname, see if pa/ma should be considered as
            # 'the' surname.
            PAT_AS_SURN = config.get('preferences.patronimic-surname')
            config.connect('preferences.patronimic-surname', self.change_pa_sur)
        else:
            # No config backend available: fall back to fixed defaults.
            self.default_format = Name.LNFN
            PAT_AS_SURN = False

        #preinit the name formats, this should be updated with the data
        #in the database once a database is loaded
        self.set_name_format(self.STANDARD_FORMATS)
    def change_pa_sur(self, *args):
        """ How to handle single patronymic as surname is changed"""
        # Config-change callback: refresh the module-level cached preference.
        global PAT_AS_SURN
        PAT_AS_SURN = config.get('preferences.patronimic-surname')
def get_pat_as_surn(self):
global PAT_AS_SURN
return PAT_AS_SURN
def _format_fn(self, fmt_str):
return lambda x: self.format_str(x, fmt_str)
def _format_raw_fn(self, fmt_str):
return lambda x: self.format_str_raw(x, fmt_str)
def _raw_lnfn(self, raw_data):
result = self.LNFN_STR % (_raw_full_surname(raw_data[_SURNAME_LIST]),
raw_data[_FIRSTNAME],
| raw_data[_SUFFIX])
return ' '.join(result.split())
def _raw_fnln(self, raw_data):
result = "%s %s %s" % (raw_data[_FIRSTNAME],
_raw_full_surname(raw_data[_SURNAME_LIST]),
| raw_data[_SUFFIX])
return ' '.join(result.split())
def _raw_fn(self, raw_data):
result = raw_data[_FIRSTNAME]
return ' '.join(result.split())
def set_name_format(self, formats):
raw_func_dict = {
Name.LNFN : self._raw_lnfn,
Name.FNLN : self._raw_fnln,
Name.FN : self._raw_fn,
}
for (num, name, fmt_str, act) in formats:
func = self._format_fn(fmt_str)
func_raw = raw_func_dict.get(num, self._format_raw_fn(fmt_str))
self.name_formats[num] = (name, fmt_str, act, func, func_raw)
self.set_default_format(self.get_default_format())
def add_name_format(self, name, fmt_str):
for num in self.name_formats:
if fmt_str in self.name_formats.get(num):
return num
num = -1
while num in self.name_formats:
num -= 1
self.set_name_format([(num, n |
from .enums import IncrementalSearchDirection
from .filters import SimpleFilter, Never
# Public API of this module.
__all__ = (
    'SearchState',
)
class SearchState(object):
    """
    A search 'query'.
    """
    __slots__ = ('text', 'direction', 'ignore_case')

    def __init__(self, text='', direction=IncrementalSearchDirection.FORWARD, ignore_case=Never()):
        assert isinstance(ignore_case, SimpleFilter)

        self.text = text
        self.direction = direction
        self.ignore_case = ignore_case

    def __repr__(self):
        return '%s(%r, direction=%r, ignore_case=%r)' % (
            self.__class__.__name__, self.text, self.direction, self.ignore_case)

    def __invert__(self):
        """
        Create a new SearchState where backwards becomes forwards and the other
        way around.
        """
        flipped = (IncrementalSearchDirection.FORWARD
                   if self.direction == IncrementalSearchDirection.BACKWARD
                   else IncrementalSearchDirection.BACKWARD)

        return SearchState(text=self.text, direction=flipped,
                           ignore_case=self.ignore_case)
|
oto import connect_s3
s3_avail = True
except ImportError: #pragma: no cover
s3_avail = False
#=================================================================
def is_http(filename):
    """Return True if *filename* is an http or https url."""
    return filename.startswith('http://') or filename.startswith('https://')
#=================================================================
def is_s3(filename):
    """Return True if *filename* is an s3:// url."""
    prefix = 's3://'
    return filename[:len(prefix)] == prefix
#=================================================================
def to_file_url(filename):
    """ Convert a filename to a file:// url
    """
    # Absolutize first, then percent-encode the path portion.
    abs_path = os.path.abspath(filename)
    return urlparse.urljoin('file:', urllib.pathname2url(abs_path))
#=================================================================
def load_yaml_config(config_file):
    """Load *config_file* via BlockLoader and parse it as YAML.

    :param config_file: path or url understood by BlockLoader.load
    :return: the parsed config object (typically a dict)

    NOTE(review): yaml.load with the implicit default Loader constructs
    arbitrary Python objects and is deprecated/warned in newer PyYAML.
    An explicit Loader=yaml.Loader keeps the historical behavior stable
    across PyYAML versions; config files are assumed trusted here --
    switch to yaml.safe_load if that assumption is wrong.
    """
    import yaml
    configdata = BlockLoader().load(config_file)
    config = yaml.load(configdata, Loader=yaml.Loader)
    return config
#=================================================================
def extract_post_query(method, mime, length, stream, buffered_stream=None):
    """
    Extract a url-encoded form POST from stream.

    Returns the decoded query string, or None when the request is not a
    POST, is not application/x-www-form-urlencoded, or has no positive
    integer content length.
    """
    if method.upper() != 'POST':
        return None

    if not mime or not mime.lower().startswith('application/x-www-form-urlencoded'):
        return None

    try:
        length = int(length)
    except (ValueError, TypeError):
        return None

    if length <= 0:
        return None

    #todo: encoding issues?
    # Read up to `length` bytes, tolerating short reads; stop early on EOF.
    chunks = []
    remaining = length
    while remaining > 0:
        buff = stream.read(remaining)
        if not buff:
            break
        remaining -= len(buff)
        chunks.append(buff)
    post_query = ''.join(chunks)

    if buffered_stream:
        # Preserve the raw (still-encoded) body for later re-reading.
        buffered_stream.write(post_query)
        buffered_stream.seek(0)

    return urllib.unquote_plus(post_query)
#=================================================================
def append_post_query(url, post_query):
    """Append *post_query* to *url*'s query string; no-op if it is empty."""
    if not post_query:
        return url
    separator = '&' if '?' in url else '?'
    return url + separator + post_query
#=================================================================
def extract_client_cookie(env, cookie_name):
    """Return the value of cookie *cookie_name* from a WSGI environ.

    Parses the Cookie header by splitting on ';' and comparing cookie
    names exactly. Fixes two defects of the previous substring search:
    looking up 'id' could match the tail of a longer name such as 'sid',
    and values containing '=' were truncated at the second '='.

    :param env: WSGI environ dict
    :param cookie_name: exact cookie name to look up
    :return: stripped cookie value, or None if absent or malformed
    """
    cookie_header = env.get('HTTP_COOKIE')
    if not cookie_header:
        return None

    for pair in cookie_header.split(';'):
        name, sep, value = pair.partition('=')
        # Skip malformed pairs with no '='; match the name exactly.
        if sep and name.strip() == cookie_name:
            return value.strip()

    return None
#=================================================================
def read_last_line(fh, offset=256):
    """ Read last line from a seekable file. Start reading
    from buff before end of file, and double backwards seek
    until line break is found. If reached beginning of file
    (no lines), just return whole file
    """
    fh.seek(0, 2)
    size = fh.tell()

    # Grow the tail window until it contains at least one line break.
    window = offset
    while window < size:
        fh.seek(-window, 2)
        tail_lines = fh.readlines()
        if len(tail_lines) > 1:
            return tail_lines[-1]
        window *= 2

    # Window covers the whole file: return its final line.
    fh.seek(0, 0)
    return fh.readlines()[-1]
#=================================================================
class BlockLoader(object):
    """
    a loader which can stream blocks of content
    given a uri, offset and optional length.
    Currently supports: http/https and file/local file system
    """
    def __init__(self, cookie_maker=None):
        # Cookie source for http requests: either a plain header string or
        # an object exposing make() (e.g. HMACCookieMaker).
        self.cookie_maker = cookie_maker
        # requests.Session created lazily and reused across load_http calls.
        self.session = None
        # boto s3 connection created lazily and reused across load_s3 calls.
        self.s3conn = None

    def load(self, url, offset=0, length=-1):
        """
        Determine loading method based on uri
        """
        # Dispatch on scheme: http(s) -> range request, s3 -> boto,
        # anything else -> local file or package resource.
        if is_http(url):
            return self.load_http(url, offset, length)
        elif is_s3(url):
            return self.load_s3(url, offset, length)
        else:
            return self.load_file_or_resource(url, offset, length)

    def load_file_or_resource(self, url, offset=0, length=-1):
        """
        Load a file-like reader from the local file system
        """
        # if starting with . or /, can only be a file path..
        file_only = url.startswith(('/', '.'))

        # convert to filename
        if url.startswith('file://'):
            file_only = True
            url = urllib.url2pathname(url[len('file://'):])

        try:
            # first, try as file
            afile = open(url, 'rb')
        except IOError:
            # An explicit path must exist; only bare names may fall back.
            if file_only:
                raise

            # then, try as package.path/file
            pkg_split = url.split('/', 1)
            if len(pkg_split) == 1:
                raise

            afile = pkg_resources.resource_stream(pkg_split[0],
                                                  pkg_split[1])

        if offset > 0:
            afile.seek(offset)

        # Wrap in a LimitReader when a finite length was requested.
        if length >= 0:
            return LimitReader(afile, length)
        else:
            return afile

    @staticmethod
    def _make_range_header(offset, length):
        # HTTP Range end positions are inclusive, hence length - 1;
        # non-positive length means "to end of resource".
        if length > 0:
            range_header = 'bytes={0}-{1}'.format(offset, offset + length - 1)
        else:
            range_header = 'bytes={0}-'.format(offset)

        return range_header

    def load_http(self, url, offset, length):
        """
        Load a file-like reader over http using range requests
        and an optional cookie created via a cookie_maker
        """
        headers = {}
        # Only request a byte range when the caller asked for a sub-span.
        if offset != 0 or length != -1:
            headers['Range'] = self._make_range_header(offset, length)

        if self.cookie_maker:
            # A raw string is used verbatim; otherwise defer to make().
            if isinstance(self.cookie_maker, basestring):
                headers['Cookie'] = self.cookie_maker
            else:
                headers['Cookie'] = self.cookie_maker.make()

        if not self.session:
            self.session = requests.Session()

        # stream=True so the caller can read the raw body incrementally.
        r = self.session.get(url, headers=headers, stream=True)
        return r.raw

    def load_s3(self, url, offset, length):
        # Fetch the requested byte range of an s3 key as a BytesIO.
        if not s3_avail: #pragma: no cover
            raise IOError('To load from s3 paths, ' +
                          'you must install boto: pip install boto')

        if not self.s3conn:
            try:
                self.s3conn = connect_s3()
            except Exception: #pragma: no cover
                # NOTE(review): falls back to an anonymous connection when a
                # credentialed connect fails -- presumably for public buckets.
                self.s3conn = connect_s3(anon=True)

        parts = urlparse.urlsplit(url)

        bucket = self.s3conn.get_bucket(parts.netloc)

        headers = {'Range': self._make_range_header(offset, length)}

        key = bucket.get_key(parts.path)

        result = key.get_contents_as_string(headers=headers)
        key.close()

        return BytesIO(result)
#=================================================================
# Signed Cookie-Maker
#=================================================================
class HMACCookieMaker(object):
    """
    Utility class to produce signed HMAC digest cookies
    to be used with each http request
    """
    def __init__(self, key, name, duration=10):
        self.key = key
        self.name = name
        # duration in seconds
        self.duration = duration

    def make(self, extra_id=''):
        """Build a '<name>[-extra_id]=<expire>-<hexdigest>' cookie string."""
        # Expiry timestamp, `duration` seconds from now (py2 `long`).
        expire = str(long(time.time() + self.duration))

        msg = extra_id + '-' + expire if extra_id else expire

        hexdigest = hmac.new(self.key, msg).hexdigest()

        if extra_id:
            return '{0}-{1}={2}-{3}'.format(self.name, extra_id,
                                            expire, hexdigest)

        return '{0}={1}-{2}'.format(self.name, expire, hexdigest)
#=================================================================
# Limit Reader
#=================================================================
class LimitReader(object):
"""
A reader which will not read more than specified limit
"""
def __init__(self, stream, limit):
self.stream = stream
self.limit = limit
def read(self, length=None):
if length is not None:
|
"""
logitech-m720-config - A config script for Logitech M720 button mappings
Copyright (C) 2017 Fin Christensen <christensen.fin@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR | PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup script.
here = path.abspath(path.dirname(__file__))

# Get the long description from the README.rst file
with open(path.join(here, "README.md"), encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="m720-config",
    version="0.0.1",
    description="A config script for Logitech M720 button mappings.",
    long_description=long_description,
    url="",
    author="Fin Christensen",
    author_email="christensen.fin@gmail.com",
    license="GPLv3+",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Environment :: Console",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3 :: Only",
    ],
    keywords="config logitech m720 hid++",
    packages=find_packages(),
    install_requires=["solaar"],
    extras_require={},
    package_data={
        "m720_config": [],
    },
    data_files=[],
    entry_points={
        "console_scripts": [
            "m720-config=m720_config:main"
        ],
    },
)
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the font ``family`` property of
    ``layout.ternary.aaxis.title.font``."""

    def __init__(
        self,
        plotly_name="family",
        parent_name="layout.ternary.aaxis.title.font",
        **kwargs
    ):
        # Fill in validator defaults unless the caller overrode them.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("no_blank", True)
        kwargs.setdefault("strict", True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
"""TailorDev Biblio
Bib | liography management with Django.
"""
__version__ = "2.0. | 0"
default_app_config = "td_biblio.apps.TDBiblioConfig"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.