| blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success |
|---|---|---|---|---|---|---|---|---|---|---|---|
f77c0810c638b53eda1fd2dbf1c74c4ddc571f7b
|
Shell
|
selajdinbilali/kubernetes-mini-cluster
|
/master.sh
|
UTF-8
| 4,900
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# stop and disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# install ntp for time synchronization and enable it at boot
yum -y install ntp
systemctl start ntpd
systemctl enable ntpd
# install the etcd, kubernetes and flannel packages
yum -y install etcd kubernetes flannel
# configure flannel
echo "
# Flanneld configuration options
# etcd url location. Point this to the server where etcd runs
# address of the master node, which hosts etcd
FLANNEL_ETCD=\"http://192.168.50.130:2379\"
# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_KEY=\"/atomic.io/network\"
# Any additional options that you want to pass
# add the --iface=eth1 option to point at the correct ethernet interface
FLANNEL_OPTIONS=\"--iface=eth1\"
" > /etc/sysconfig/flanneld
# disable selinux
echo "
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=permissive
# SELINUXTYPE= can take one of three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
" > /etc/sysconfig/selinux
# configure etcd
echo "
ETCD_NAME=default
ETCD_DATA_DIR=\"/var/lib/etcd/default.etcd\"
ETCD_LISTEN_CLIENT_URLS=\"http://0.0.0.0:2379\"
ETCD_ADVERTISE_CLIENT_URLS=\"http://localhost:2379\"
" > /etc/etcd/etcd.conf
# configure the apiserver
echo "
# listen on all addresses, unsecured
KUBE_API_ADDRESS=\"--insecure-bind-address=0.0.0.0\"
KUBE_API_PORT=\"--port=8080\"
KUBELET_PORT=\"--kubelet_port=10250\"
# points at the etcd server, which runs on the master, hence localhost
KUBE_ETCD_SERVERS=\"--etcd_servers=http://127.0.0.1:2379\"
# address range for services
KUBE_SERVICE_ADDRESSES=\"--service-cluster-ip-range=10.254.0.0/16\"
# admission control for services
KUBE_ADMISSION_CONTROL=\"--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota\"
KUBE_API_ARGS=\"\"
" > /etc/kubernetes/apiserver
# configure docker for the private registry
echo "
# /etc/sysconfig/docker
# Modify these options if you want to change the way the docker daemon runs
OPTIONS='--selinux-enabled --log-driver=journald'
DOCKER_CERT_PATH=/etc/docker
# If you want to add your own registry to be used for docker search and docker
# pull use the ADD_REGISTRY option to list a set of registries, each prepended
# with --add-registry flag. The first registry added will be the first registry
# searched.
#ADD_REGISTRY='--add-registry registry.access.redhat.com'
# If you want to block registries from being used, uncomment the BLOCK_REGISTRY
# option and give it a set of registries, each prepended with --block-registry
# flag. For example adding docker.io will stop users from downloading images
# from docker.io
# BLOCK_REGISTRY='--block-registry'
# If you have a registry secured with https but do not have proper certs
# distributed, you can tell docker to not look for full authorization by
# adding the registry to the INSECURE_REGISTRY line and uncommenting it.
INSECURE_REGISTRY='--insecure-registry=192.168.50.130:5000'
# On an SELinux system, if you remove the --selinux-enabled option, you
# also need to turn on the docker_transition_unconfined boolean.
# setsebool -P docker_transition_unconfined 1
# Location used for temporary files, such as those created by
# docker load and build operations. Default is /var/lib/docker/tmp
# Can be overriden by setting the following environment variable.
# DOCKER_TMPDIR=/var/tmp
# Controls the /etc/cron.daily/docker-logrotate cron job status.
# To disable, uncomment the line below.
# LOGROTATE=false
#
# docker-latest daemon can be used by starting the docker-latest unitfile.
# To use docker-latest client, uncomment below line
#DOCKERBINARY=/usr/bin/docker-latest
" > /etc/sysconfig/docker
# restart the services and enable them at every boot
for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler docker flanneld; do
systemctl restart $SERVICES
systemctl enable $SERVICES
systemctl status $SERVICES
done
# register the node network range in etcd
etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'
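# sanity check (a sketch; assumes etcd is already answering on localhost):
# reading the key back should print the JSON written above
etcdctl get /atomic.io/network/config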
# uncomment to install a private registry
#docker run --restart=always -d -p 5000:5000 --name registry registry:2
echo "INSTALLATION DU MASTER TERMINEE"
# on redemmare
reboot
| true
|
9ba46dd3e45991eb40c2985cbfecfea46152a69c
|
Shell
|
opencrmitalia-official/validators
|
/server-check.sh
|
UTF-8
| 705
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# https://misc.flogisoft.com/bash/tip_colors_and_formatting
good() {
echo -e ">> $1: \e[1m\e[32m[OK!]\e[0m"
}
fail() {
echo -e "\e[31m==================================\e[0m"
echo -e "\e[31mFATAL ERROR with $1\e[0m"
echo -e "\e[31m==================================\e[0m"
}
release=$(lsb_release -r | grep "Release:" | cut -d: -f2 | xargs)
echo "Inizio fare di controllo..."
echo ""
if [[ "$release" = "18.04" ]]; then
good "Release di ubuntu corretta"
else
fail "Problemi con la relase di ubuntu ($release) necessaria la 18.04"
fi
if [[ "$release" = "19.04" ]]; then
good "Release di ubuntu corretta"
else
fail "Problemi con la relase di ubuntu necessaria la 18.04"
fi
| true
|
337b31ffe3aba199b9c1c234238822a2a88e1f0d
|
Shell
|
developgo/logpaste
|
/docker_entrypoint
|
UTF-8
| 1,183
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Exit build script on first failure.
set -e
# Exit on unset variable.
set -u
readonly DB_PATH="/app/data/store.db"
env_vars_to_flags() {
set +u
local flags=""
if [[ ! -z "${SITE_TITLE}" ]]; then
flags+=" -title \"${SITE_TITLE}\""
fi
if [[ ! -z "${SITE_SUBTITLE}" ]]; then
flags+=" -subtitle \"${SITE_SUBTITLE}\""
fi
if [[ "${SITE_SHOW_DOCUMENTATION}" == "false" ]]; then
flags+=" -showdocs=false"
fi
set -u
echo "${flags}"
}
# Set litestream configuration
cat > /etc/litestream.yml <<EOF
access-key-id: "${AWS_ACCESS_KEY_ID}"
secret-access-key: "${AWS_SECRET_ACCESS_KEY}"
region: "${AWS_REGION}"
dbs:
- path: "${DB_PATH}"
replicas:
- url: "${DB_REPLICA_URL}"
sync-interval: "${DB_SYNC_INTERVAL}"
EOF
# Echo commands to stdout.
set -x
# Restore database from S3.
if [[ "${CREATE_NEW_DB}" != 'true' ]]; then
litestream restore -v "${DB_PATH}"
fi
# Begin replication to S3 in the background.
# Note: It would be nicer to use the systemd service, but systemd
# is trickier within Docker.
litestream replicate "${DB_PATH}" "${DB_REPLICA_URL}" &
# Start server.
eval "/app/server $(env_vars_to_flags)"
| true
|
ee65e295d155b20d14660154f4d233f7c649df22
|
Shell
|
gov466/Linux-Bash-Scripting
|
/Functions/pipe.sh
|
UTF-8
| 333
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
## ls -1 lists one entry per line; sort -r reverses the order; head -3 keeps the first 3 results
FILES=$(ls -1 | sort -r | head -3)
COUNT=1
for FILE in $FILES
do
echo "File #$COUNT = $FILE"
((COUNT++))
done
exit 0
### display files in the current directory in descending alphabetical order (pipes)
| true
|
2060b96ccb74d18754dd1c2a903240ec8c8caa9d
|
Shell
|
FauxFaux/utils
|
/tags/aughiimask-20021127T0120Z/cvscat
|
UTF-8
| 206
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# $Id: cvscat,v 1.2 2002/07/15 13:21:51 sunny Exp $
if [ $# -le 1 ]; then
cat <<END
Syntax: cvscat file revision
END
exit 1
fi
cvs -d $(cat CVS/Root) co -p -r $2 $(cat CVS/Repository)/$1
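# Example (hypothetical file and revision): ./cvscat Makefile 1.4
# prints revision 1.4 of Makefile to stdout (via 'co -p'), using the repository in CVS/Root.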
| true
|
c40cbb2aa4de071157314266f3c4d801defb548c
|
Shell
|
chocolatkey/YahooBookstoreDB
|
/cron.sh
|
UTF-8
| 568
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Henry (chocolatkey) 2017-08-06
if [ $# -eq 0 ]
then
echo "YDB updater"
echo "Please provide ARGUMENT for additional DB file name"
else
python3.6 /home/media/manga/yrip.py -c true -f true -d true -a "$1"-cron.json -r 801000-820000
sudo chmod 0777 /home/media/manga/db.json
sudo cp -R /home/media/manga/"$1"-cron.json /var/solr
sudo su - solr -c "/opt/solr/bin/post -c ydb *-cron.json"
sleep 1
curl http://localhost:8983/solr/ydb/update?optimize=true
sudo rm -f "$1"-cron.json && sudo rm -f /var/solr/"$1"-cron.json
fi
| true
|
3af75e11c23306b5227c9030c154077d37b3144f
|
Shell
|
nw55/dotnet-integration-runescape
|
/build.sh
|
UTF-8
| 580
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
readonly THIS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly THIS_FILE="$THIS_DIR/$(basename "${BASH_SOURCE[0]}")"
artifactsDir="$THIS_DIR/artifacts"
if [ -d "$artifactsDir" ]; then
rm -R "$artifactsDir"
fi
build_number=${TRAVIS_BUILD_NUMBER:=1}
dotnet restore --verbosity normal /property:BuildNumber=$build_number
dotnet build --configuration Release --verbosity normal /property:BuildNumber=$build_number
dotnet pack --configuration Release --output "$artifactsDir" --no-build --verbosity normal /property:BuildNumber=$build_number
| true
|
7693fc2a0f4cf262cae77b79d53a40f6a1579c07
|
Shell
|
JacobFischer/CS5400-Puzzle
|
/run.sh
|
UTF-8
| 665
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
# HW2: solves puzzles 1-4 via A* GS
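# Usage sketch (assumed from the defaults below; 'bfs' is a hypothetical algorithm name):
# ./run.sh # run astar_gs on puzzles 1-4
# ./run.sh bfs puzzles/puzzle1.txt # override algorithm and puzzle list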
# default arguments
algorithm="astar_gs"
puzzles="puzzles/puzzle1.txt puzzles/puzzle2.txt puzzles/puzzle3.txt puzzles/puzzle4.txt"
# command line arg overrides
if [ -n "$1" ]; then
algorithm="$1"
fi
if [ -n "$2" ]; then
puzzles="${@:2}"
fi
echo "Algorithm: $algorithm"
echo "Puzzles: $puzzles"
echo "====================================================================="
for puzzle in $puzzles; do
echo "---------------------------------------------------------------------"
echo "Running program with algorithm '$algorithm' for puzzle '$puzzle'"
echo ""
python3 ./src/main.py $algorithm $puzzle
done
| true
|
19b48fa40de1e24099be7b6331afc6566784ba97
|
Shell
|
yunque/thinkingaboutthinking
|
/tips/dbg.sh
|
UTF-8
| 896
| 2.625
| 3
|
[
"CC0-1.0"
] |
permissive
|
# Find the ARM GCC toolchain directory
locate arm-none-eabi-gcc
# e.g. @ /home/imr/ti/ccsv6/tools/compiler/gcc-arm-none-eabi-4_8-2014q3/arm-none-eabi/
# Location of assembler (as), linker(ld), etc.
/usr/arm-linux-gnueabi/bin
# GDB
# basics @ http://www.thegeekstuff.com/2010/03/debug-c-program-using-gdb/
# 1. Load a program with $ "gdb /path/to/program" (the binary, built with -g)
# 2. Set breakpoints
# 3. Run program with command line arguments w/ $ "run -m /path/to/model.bsm" etc
# step into
s
# next
n
# set breakpoint
b filename.ext:line#
# delete breakpoint, cf. http://www.delorie.com/gnu/docs/gdb/gdb_32.html
clear filename.ext:line#
# print variable value
p VAR
# examine contents of address
x 0x7FFFFFF
# show current breakpoints
info breakpoints
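# Minimal worked session (a sketch; 'app' is a hypothetical program built with -g):
# gcc -g -o app app.c
# gdb ./app
# (gdb) b app.c:42
# (gdb) run
# (gdb) p some_var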
# Segmentation fault diagnosis
valgrind <cmd_that_segfaults>
valgrind --leak-check=full <cmd_that_segfaults>
# Heap memory usage
valgrind --tool=massif <cmd>
| true
|
3b4a0569afb211ce2358e62aa7a0811f65532b52
|
Shell
|
andreimunteanu/Bash_project
|
/src/main.sh
|
UTF-8
| 2,145
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
bold=`tput bold` # formats text in bold
normal=`tput sgr0` # resets text attributes
# the bash scripts that make up the project:
INIT_REPO=./init_repo.sh
COMMIT=./commit.sh
REMOVE=./remove.sh
REVERT=./revert.sh
STATUS=./status.sh
FILE_SEARCH=./file_search.sh
# functions that invoke the corresponding bash scripts
init_repo(){
echo "Enter <repo>"
read lista_parametri
$INIT_REPO $lista_parametri
}
commit(){
echo "Enter <repo>"
read lista_parametri
$COMMIT $lista_parametri
}
remove(){
echo "Enter file name"
read lista_parametri
$REMOVE $lista_parametri
}
revert(){
echo "Enter <repo> name"
read lista_parametri
$REVERT $lista_parametri
}
status(){
echo "Enter <repo> name"
read lista_parametri
$STATUS $lista_parametri
}
file_search(){
echo "Enter pattern"
read lista_parametri
$FILE_SEARCH $lista_parametri
}
# print the menu
stampa_menu(){
echo -e "\n"
# '\E[37;44m' = white text on a blue background; ${bold} and ${normal} use the variables initialized at the top of the script
echo -e " ${bold} Choose the operation to perform:${normal} "
echo -e "\t"'\E[37;44m'"${bold}<1>${normal} Init-repo"
echo -e "\t"'\E[37;44m'"${bold}<2>${normal} Commit"
echo -e "\t"'\E[37;44m'"${bold}<3>${normal} Remove"
echo -e "\t"'\E[37;44m'"${bold}<4>${normal} Revert"
echo -e "\t"'\E[37;44m'"${bold}<5>${normal} Status"
echo -e "\t"'\E[37;44m'"${bold}<6>${normal} File_search\n\n"
}
lista_parametri="" # stringa utilizzata nelle varie funzioni
scelta=1 # scelta inizializzato ad 1 per entrare nel while
clear # pulisce il terminale prima di stampare il menu per la prima volta
while [ "$scelta" != "0" ] # 0 = per uscire
do
stampa_menu
read -p "> " scelta # legge la scelta dell'utente dopo aver stampato il prompt >
case $scelta in
0) exit;;
1) init_repo;;
2) commit;;
3) remove;;
4) revert;;
5) status;;
6) file_search;;
*) echo "ERRORE: Valore inserito non valido." # in caso l'utente non avesse inserito un numero da 0 a 6
esac
done
| true
|
5f68d20093da15bf35cc3a3812110736f3793883
|
Shell
|
gliderlabs/glidergun
|
/src/env.bash
|
UTF-8
| 550
| 3.96875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
declare -a _env
env-import() {
declare var="$1" default="$2"
if [[ -z "${!var+x}" ]]; then
if [[ -z "${2+x}" ]]; then
echo "!! Imported variable $var must be set in profile or environment." | >&2 red
exit 2
else
export $var="$default"
fi
fi
_env+=($var)
}
env-show() {
declare desc="Shows relevant environment variables"
local longest=0
for var in "${_env[@]}"; do
if [[ "${#var}" -gt "$longest" ]]; then
longest="${#var}"
fi
done
for var in "${_env[@]}"; do
printf "%-${longest}s = %s\n" "$var" "${!var}"
done
}
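# Usage sketch (assumed; MY_VAR is hypothetical):
# env-import MY_VAR "default" # exports MY_VAR="default" when unset
# env-import MY_VAR # exits with status 2 when MY_VAR is unset and no default given
# env-show # prints every imported variable, name column aligned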
| true
|
065b11262ffcf09a79753860013fed1d848c96ec
|
Shell
|
echo9305/earth_config
|
/kubeapp-ide/helm/pull.sh
|
UTF-8
| 597
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
###########################################
##
## Copyright (2019,) Institute of Software
## Chinese Academy of Sciences
## [email protected]
##
##########################################
VALUE=gcr.azk8s.cn
KEY=gcr.io
while read line
do
img=$(echo $line | awk -F":" '{print$1}')
ver=$(echo $line | awk -F":" '{print$2}')
res=$(docker images | grep "$img" | grep "$ver" | grep -v grep)
if [[ -z $res ]]
then
name=${line//$KEY/$VALUE}
echo docker pull $name
docker pull $name
docker tag $name $line
docker rmi $name
fi
done < images.conf
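# images.conf is expected to hold one image reference per line, e.g. (assumed):
# gcr.io/google-containers/pause:3.1
# each missing gcr.io image is pulled through the gcr.azk8s.cn mirror,
# re-tagged under its original name, and the mirror tag removed.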
| true
|
ef10f294b74890e2885430415c428347f768a37f
|
Shell
|
deep-42-thought/archlinuxewe
|
/embree-isa/PKGBUILD
|
UTF-8
| 1,465
| 2.71875
| 3
|
[] |
no_license
|
# Contributor: Lukas Jirkovsky <[email protected]>
# Contributor: gucong <[email protected]>
# Maintainer: Erich Eckner <arch at eckner dot net>
pkgname=embree-isa
pkgver=3.13.3
pkgrel=3
pkgdesc="A collection of high-performance ray tracing kernels (with build-time ISA detection)"
arch=('x86_64')
url="https://embree.github.io/"
license=('Apache')
_pinned_dependencies=(
'gcc-libs=12.1.0'
'glibc>=2.31'
'tbb=2021.5.0'
)
depends=("${_pinned_dependencies[@]}")
provides=('embree')
conflicts=('embree')
makedepends=('cmake' 'ispc' 'freeglut' 'libxmu' 'openexr')
source=("embree-${pkgver}.tar.gz::https://github.com/embree/embree/archive/v${pkgver}.tar.gz")
sha512sums=('eef8d9101f0bf95d6706a495a9aa628c10749862aeb2baa6bba2f82fcc3a96467a28ca1f522d672eb5aa7b29824363674feda25832724da361b3334334a218cd')
build() {
cd "$srcdir/embree-$pkgver"
MAX_ISA="SSE2"
grep -q sse3 /proc/cpuinfo && MAX_ISA="SSE3"
grep -q sse4_1 /proc/cpuinfo && MAX_ISA="SSE4.1"
grep -q sse4_2 /proc/cpuinfo && MAX_ISA="SSE4.2"
grep -q avx /proc/cpuinfo && MAX_ISA="AVX"
grep -q avx2 /proc/cpuinfo && MAX_ISA="AVX2"
# ICC required for avx512 ?
echo MAX_ISA: $MAX_ISA
cmake . \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_INSTALL_LIBDIR=lib \
-DCMAKE_BUILD_TYPE=Release \
-DEMBREE_TUTORIALS=OFF \
-DEMBREE_MAX_ISA="$MAX_ISA"
make
}
package() {
cd "$srcdir/embree-$pkgver"
make DESTDIR="$pkgdir" install
}
| true
|
2efa5d4c211f63f3d64c5dd2b65839b52bf511a6
|
Shell
|
dronovep/instructions
|
/PhpVersion.sh
|
UTF-8
| 1,247
| 2.953125
| 3
|
[] |
no_license
|
# Check whether the EPEL repository (extra packages from the Fedora project) is present
rpm -qa | grep epel
# If the repository is missing, install it
sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
# Similarly, check for and install the REMI repository - a dedicated repository of various php versions
rpm -qa | grep remi
sudo dnf install https://rpms.remirepo.net/enterprise/remi-release-8.rpm
# Get the list of php module streams
dnf module list php
# If we need, for example, the php 8 module stream
sudo dnf module enable php:remi-8.0
# or, more likely, centos will warn about a conflict with the existing php version; in that case
sudo dnf module reset php:remi-8.0
# Finally, either install php if it is not installed yet, or update it the classic way via the package manager
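# e.g. (assumed package names):
# sudo dnf install php php-cli php-fpm
# or, if php is already present:
# sudo dnf update 'php*'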
| true
|
a2b85fb8d5ab19cc95f05003ff1b88d788e2a36d
|
Shell
|
jdelgadoalfonso/bscript
|
/reboot.sh
|
UTF-8
| 241
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
IP_FILE="$1"
IPS=()
while IFS='' read -r line || [[ -n "${line}" ]]; do
IPS+=(${line})
done < "${IP_FILE}"
for IP in "${IPS[@]}"
do
sshpass -p root ssh -o StrictHostKeyChecking=no -t root@${IP} ash -c "'reboot'"
done
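# Usage sketch (assumed): ./reboot.sh ips.txt
# where ips.txt lists one target IP per line; each host is rebooted over ssh
# using the hard-coded root password via sshpass.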
| true
|
e312e9d846e5010784dbecc34297613b45433e13
|
Shell
|
ksorat/Injection
|
/yellowstone/doH5pCat.sh
|
UTF-8
| 907
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
#
#BSUB -a poe # set parallel operating environment
#BSUB -P UJHB0003 # project code
#BSUB -J h5pC # job name
#BSUB -W 24:00 # wall-clock time (hrs:mins)
#BSUB -n 1 # number of tasks in job
#BSUB -q geyser # queue
#BSUB -R "span[ptile=1]"
#BSUB -e h5pC.%I.log # error file name in which %I is replaced by the job array index
#BSUB -o h5pC.%I.log # output file name in which %I is replaced by the job array index
export ComS="h5pcat.py -eq"
declare -a runs=("p_psInj" "O_psInj" "Hep_psInj" "Hepp_psInj")
export BASE="$HOME/Work/Injection"
export DATA="$BASE/psInj"
echo "Environment ..."
cd $DATA
module restore lfmtp
module list
echo "Running in $PWD"
echo "Running on host `hostname` on `date`"
#Loop through array of runs
for i in "${runs[@]}"
do
echo "Running $ComS on $i"
$ComS $i.*.h5part
done
| true
|
73b5e4593ec731dba1b61ecf4836f3fe82fe8ec0
|
Shell
|
willghatch/dotfileswgh
|
/commands/atrs
|
UTF-8
| 292
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# reschedule at commands
#
# duplicates header crap -- but that probably shouldn't change
# anyway...
if [[ -z "$1" || "$1" = "--help" ]]; then
echo "usage: $0 <at job id> <new time spec>" 1>&2
exit 1
fi
jobnum=$1
shift
at -c $jobnum | at "$@" && atrm $jobnum
| true
|
e3fba0045b800cd1f54e7710254b888a26887d7b
|
Shell
|
bkrishna2006/system
|
/ansible/roles/docker-registry/files/gencert.sh
|
UTF-8
| 610
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
common_name=$1
if [ -z "$common_name" ]; then
echo "You must enter common_name"
exit 1
fi
echo "Run file in {{ cert_dir }}"
if [ ! -f domain.key ] || [ ! -f domain.crt ]; then
openssl req \
-newkey rsa:4096 -nodes -sha256 -keyout domain.key \
-x509 -days 365 -out domain.crt -subj "/C=VN/ST=HN/L=Hanoi/O=Global IT/OU=IT Department/CN=${common_name}"
fi
echo "Copy the domain.crt file to /etc/docker/certs.d/${common_name}:5000/ca.crt on every Docker host. You do not need to restart Docker."
echo "Refer: https://docs.docker.com/registry/insecure/#troubleshooting-insecure-registry"
| true
|
14fa9064d744ebe96142075ca84a0d639333cbd6
|
Shell
|
dylannorthrup/dotfiles
|
/bin/auth-to-all-kube-clusters
|
UTF-8
| 364
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
cp /dev/null ~/.kube/config
set -x
for proj in $(gcloud projects list --format="value(projectId)"); do
for line in $(gcloud container clusters list --project="${proj}" --format="value[separator=;](name,zone)")
do
IFS=';' read CLUSTER ZONE <<< "$line"
gcloud container clusters get-credentials ${CLUSTER} --zone ${ZONE} --project="${proj}"
done
done
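# Sanity check (assumed): each get-credentials call adds a context to the
# freshly truncated ~/.kube/config, so listing contexts should show one entry
# per cluster across all projects:
# kubectl config get-contexts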
| true
|
52104eba3ec7c174face9a075d5bb325593bab5a
|
Shell
|
harshaswarrier/FossLab
|
/fourth.sh
|
UTF-8
| 485
| 2.796875
| 3
|
[] |
no_license
|
#! /bin/bash
speaker-test -t sine -f 1000 -l 1
echo "Deleting the file ls_output.txt ..."
sleep 5
rm -f ls_output.txt;
sleep 5
echo "Redirecting output of ls command ..."
sleep 5
ls > ls_output.txt;
sleep 5
echo "displaying the content of ls_output.txt ..."
sleep 5
cat ls_output.txt;
sleep 5
echo "Deleting the file ls_output.txt ..."
rm ls_output.txt;
echo "Executing the command ls ..."
ls
echo "My current working directory ..."
pwd
echo "Rebooting my PC ..."
| true
|
c1ff33dee6dfeca79a8bf3fe67e646859cd9bcc7
|
Shell
|
AlexJacobs95/PDS
|
/setup.sh
|
UTF-8
| 286
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
echo "Creating required directories"
mkdir -p dataset
mkdir -p dataset/features
echo "Installing all Python packages for the project"
sudo pip3 install -r requirements.txt
echo "Installing the English model for the Spacy library"
sudo python3 -m spacy download en
| true
|
fa1308683656cb2e7f4d015c8ace5f74273e0439
|
Shell
|
karma0/home-scripts
|
/.scripts/localeCheck
|
UTF-8
| 2,972
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#Connection location: function to test where I'm logging in from and what fallbacks should be used.
# There are a number of possibilities to check; we want to use RGB coloursets as much as possible, but have 256 fallbacks when we cannot.
# 1. On Soma (or another linux machine) with .Xresources available and running X and a local connection. [RGB]
# 2. On Soma (or another linux machine) with .Xresources available and running X and a remoting to another machine. [RGB]
# 3. On Soma (or another linux machine) with .Xresources available but NOT running X and a local (or remote) connection. [256]
# 4. Using putty with a saved, setup session, remote or local. [RGB]
# 5. Using putty without a saved session, remote or local. [256]
#Check answerback from terminal. Expecting ApokPuT from known putty sessions, ^[[?1;2c from urxvt (at least)
read -n7 -t1 -s -p $(echo -en "\005") answerBack 2>/dev/null
#First, are we remote or local?
if [ -n "$SSH_CONNECTION" ]; then
# remote
#Using RGB enabled putty session?
if [ "$answerBack" == "ApokPuT" ]; then
export apoklinonRGB=1
elif [ "$answerBack" == "ApokPuW" ]; then
export apoklinonRGB=1
#Workman keyboard
export keyboard=workman
else
#either a remote with X, or runlevel!=5, or a non-RGB putty session
#For the moment, I can't figure out a way to check the runlevel of the ssh client or whether .Xresources exists
#without invoking a reverse connection or using SendEnv. Many servers I use don't have SendEnv active, and this
#method, I feel, is sloppy. If anyone has a better idea of how to check this, please let me know.
#client=$(~/.scripts/getClientHost)
#if [ -n $client ] && [[ "$(ssh $client '~/.scripts/envData')" == "1,5" ]]; then #both Xresources and runlevel are good
#Instead of the above, I'm using a quick override - I set TERM=xterm-256color in bash_profile of machines I always remote into if
#urxvt sends its own name (which screws my emulation up on those machines that don't have urxvt)
if [ "$TERM" == "xterm-256color" ]; then
export apoklinonRGB=1
else
export apoklinonRGB=0
fi
fi
else
# local
#Is Xresources available?
if [ -f ~/.Xresources ]; then
# linux machine
#Runlevel is currently broken on Arch! https://bugs.archlinux.org/task/34657
#Check the runlevel
#if [[ $(who -r | awk '{print $2}') -eq 5 ]]; then
export apoklinonRGB=1
#else
# export apoklinonRGB=0
#fi
else
#Using RGB enabled putty session?
if [ "$answerBack" == "ApokPuT" ]; then
export apoklinonRGB=1
else
#This catches any machine not using a RGB putty enabled session, or linux machines without .Xresources, regardless of runlevel
export apoklinonRGB=0
fi
fi
fi
| true
|
0688a12597bc2492894449ae1de2e314fde3ffed
|
Shell
|
8l/lfscript
|
/scripts/blfs-13994-unchecked/acpid
|
UTF-8
| 1,288
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# The instructions in this file are extracted from
# 'Beyond Linux From Scratch' (2014-08-22 / r13994) but are modified for use
# with LFScript 4 which installs the software to a fake root directory.
#
# Beyond Linux From Scratch is released under the MIT license.
# Copyright (C) 2001-2014, The BLFS Development Team
WGETLIST="http://downloads.sourceforge.net/acpid2/acpid-2.0.22.tar.xz
http://www.linuxfromscratch.org/blfs/downloads/svn/blfs-bootscripts-20140810.tar.bz2"
MD5SUMLIST="c8ba756030d1b21fc973ec3d640f27f1
179a6c22d0f7d2619cba4eb794fdc1cb"
###############################################
installation() { # INSTALLING SYSTEM SOFTWARE #
###############################################
./configure --prefix=/usr --docdir=/usr/share/doc/acpid-2.0.22
make
make DESTDIR=${FAKEROOT} install
install -v -m755 -d ${FAKEROOT}/etc/acpi/events
cp -r samples ${FAKEROOT}/usr/share/doc/acpid-2.0.22
cat > ${FAKEROOT}/etc/acpi/events/lid << "EOF"
event=button/lid
action=/etc/acpi/lid.sh
EOF
cat > ${FAKEROOT}/etc/acpi/lid.sh << "EOF"
#!/bin/sh
/bin/grep -q open /proc/acpi/button/lid/LID/state && exit 0
/usr/sbin/pm-suspend
EOF
chmod +x ${FAKEROOT}/etc/acpi/lid.sh
includeBootscript acpid
#################
} # END OF FILE #
#################
| true
|
ef7319f8033dbaf0a22953b29e9e4e2123f7cb0d
|
Shell
|
jenv/jenv
|
/libexec/jenv-sh-shell
|
UTF-8
| 1,145
| 4.40625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Summary: Set or show the shell-specific Java version
#
# Usage: jenv shell <version>
# jenv shell --unset
#
# Sets a shell-specific Java version by setting the `JENV_VERSION'
# environment variable in your shell. This version overrides local
# application-specific versions and the global version.
#
# <version> should be a string matching a Java version known to jenv.
# The special version string `system' will use your default system Java.
# Run `jenv versions' for a list of available Java versions.
set -e
[ -n "$JENV_DEBUG" ] && set -x
# Provide jenv completions
if [ "$1" = "--complete" ]; then
echo --unset
echo system
exec jenv-versions --bare
fi
version="$1"
if [ -z "$version" ]; then
if [ -z "$JENV_VERSION" ]; then
echo "jenv: no shell-specific version configured" >&2
exit 1
else
echo "echo \"\$JENV_VERSION\""
exit
fi
fi
if [ "$version" = "--unset" ]; then
echo "unset JENV_VERSION"
exit
fi
# Make sure the specified version is installed.
if jenv-prefix "$version" >/dev/null; then
echo "export JENV_VERSION=\"${version}\""
else
echo "return 1"
exit 1
fi
| true
|
5dd49f08bfa02656d118978047bf01616f0c2205
|
Shell
|
duckdb/duckdb
|
/scripts/node_build_win.sh
|
UTF-8
| 320
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
node --version
npm --version
which node
cd tools/nodejs
make clean
./configure
npm install --build-from-source
npm test
npx node-pre-gyp package testpackage testbinary
if [[ "$GITHUB_REF" =~ ^(refs/heads/main|refs/tags/v.+)$ ]] ; then
npx node-pre-gyp publish
npx node-pre-gyp info
fi
| true
|
0f888dbcf4c960adfcad1d9182b772554a0f1439
|
Shell
|
collinjlesko/provision-aws-bastion
|
/scripts/gcloud-renew-gke-login.sh
|
UTF-8
| 2,227
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
exec 3>&1 4>&2
trap 'exec 2>&4 1>&3' 0 1 2 3
exec 1>>/tmp/gcloud-renew-gke-login-log.out 2>&1
### Script to renew GKE login configurations ###
# This script was added to root crontab during acl provisioning.
# Everything below will go to the file 'gcloud-renew-gke-login-log.out' on the /tmp directory
YLW='\033[1;33m'
NC='\033[0m'
CURRENT_DATE=$(date)
GRN='\033[0;32m'
numberofusers=$1
gcp_zone=$2
gcp_project=$3
prefix=$4
base=$5
password=$6
if [ -z $numberofusers ]; then
echo "Please provide the number of users as first parameter"
echo ""
echo "$ ./gcloud-renew-gke-login.sh 5"
exit 1
fi
if [ -z $gcp_zone ]; then
echo "Please provide the gcp zone as the second parameter"
echo ""
echo "$ ./gcloud-renew-gke-login.sh 5 us-central1-a"
exit 1
fi
if [ -z $gcp_project ]; then
echo "Please provide the gcp project as the third parameter"
echo ""
echo "$ ./gcloud-renew-gke-login.sh 5 us-central1-a detroit-acl-v2"
exit 1
fi
if [ -z $prefix ]; then
echo "Please provide the lab prefix as the fourth parameter"
echo ""
echo "$ ./gcloud-renew-gke-login.sh 5 us-central1-a detroit-acl-v2 acl-det-oct19"
exit 1
fi
if [ -z $base ]; then
echo "Please provide the username base as the fifth parameter"
echo ""
echo "$ ./gcloud-renew-gke-login.sh 5 us-central1-a detroit-acl-v2 acl-det-oct19 acllab"
exit 1
fi
if [ -z $password ]; then
echo "Please provide the user's password as the sixth parameter"
echo ""
echo "$ ./gcloud-renew-gke-login.sh 5 us-central1-a detroit-acl-v2 acl-det-oct19 acllab userpassword"
exit 1
fi
echo -e "${YLW}${CURRENT_DATE}${NC}"
echo "GCP zone: ${gcp_zone}"
echo "Project name: ${gcp_project}"
echo ""
# Loop through all acllab users and renew their GKE configuration
i=1
while [ $i -le $numberofusers ]
do
echo -e "${GRN}Renewing GKE login for user ${base}${i}${NC}"
sudo -u $base$i -p $password -H /snap/bin/gcloud auth activate-service-account --key-file=/home/$base$i/$prefix-$base$i-key.json
sudo -u $base$i -p $password -H /snap/bin/gcloud container clusters get-credentials $prefix-$base$i --zone $gcp_zone --project $gcp_project
echo ""
i=$((i+1))
done
echo ""
| true
|
9951aed338fe0221cd6087e6649e1257bf8d4b1e
|
Shell
|
grayasm/git-main
|
/script/i3/setup-i3.sh
|
UTF-8
| 1,463
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# verbose
set -x
# pwd in this script location
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# last directory must match "i3" -> run from git repository
if [ "`basename $DIR`" != "i3" ]; then
echo "run this script only from git repository"
exit
fi
if [ ! -f /usr/bin/i3 ]; then
echo "i3 not found at '/usr/bin/i3' ; install i3"
exit
fi
if [ ! -f /usr/bin/i3bar ]; then
echo "i3bar status bar program not found at '/usr/bin/i3bar'; install i3bar"
exit
fi
if [ ! -f /usr/bin/i3status ]; then
# i3status is an info output program
# $> i3status | dzen2 ; to get a statusbar
echo "i3status program not found at '/usr/bin/i3status'; install i3status"
exit
fi
if [ ! -f /usr/bin/feh ]; then
echo "feh wallpaper setting tool not found at '/usr/bin/feh'; install feh"
exit
#[copr-admiralnemo-feh]
#name=feh for i3 window manager
#baseurl=http://copr-be.cloud.fedoraproject.org/results/admiralnemo/i3wm-el7/epel-7-$basearch/
#failovermethod=priority
#enabled=1
#gpgcheck=1
#gpgkey=https://copr-be.cloud.fedoraproject.org/results/admiralnemo/i3wm-el7/pubkey.gpg
#priority=90
fi
mkdir -pv $HOME/.config/i3status
if [ -f $HOME/.config/i3status/config ]; then
cp $HOME/.config/i3status/config $HOME/.config/i3status/config.old
fi
cp ./i3status-config $HOME/.config/i3status/config
echo "please add content of i3-config at the end of ~/.config/i3/config"
| true
|
39ce4d69f549c2c0d4bde60471df9133e5f8ef6e
|
Shell
|
Ascend/ModelZoo-PyTorch
|
/ACL_PyTorch/built-in/cv/YoloX_Tiny_for_Pytorch/test/eval_acc.sh
|
UTF-8
| 1,112
| 2.96875
| 3
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
#!/bin/bash
set -eu
datasets_path="/root/datasets"
batch_size=1
mmdetection_path="/home"
for para in $*
do
if [[ $para == --datasets_path* ]]; then
datasets_path=`echo ${para#*=}`
fi
if [[ $para == --batch_size* ]]; then
batch_size=`echo ${para#*=}`
fi
if [[ $para == --mmdetection_path* ]]; then
mmdetection_path=`echo ${para#*=}`
fi
done
arch=`uname -m`
rm -rf val2017_bin
rm -rf val2017_bin_meta
python3 YOLOX_preprocess.py --image_src_path ${datasets_path}/val2017
python3 gen_dataset_info.py \
${datasets_path} \
${mmdetection_path}/configs/yolox/yolox_s_8x8_300e_coco.py \
val2017_bin val2017_bin_meta \
yolox.info yolox_meta.info \
640 640
if [ $? != 0 ]; then
echo "fail!"
exit -1
fi
rm -rf result
python3 -m ais_bench --model yolox.om --input val2017_bin --output ./ --output_dirname result --outfmt BIN
rm -rf results.txt
python YOLOX_postprocess.py --dataset_path ${datasets_path} --model_config ${mmdetection_path}/configs/yolox/yolox_tiny_8x8_300e_coco.py
if [ $? != 0 ]; then
echo "fail!"
exit -1
fi
echo "success"
| true
|
a274c5fcc4576c1253a458d781b623959dacc30d
|
Shell
|
girijamanoj/BTech-Labsheets
|
/OperatingSystems/count.sh
|
UTF-8
| 180
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# SCRIPT: count.sh
# USAGE : ./count.sh
# PURPOSE: To count the number of lines and words in a file
echo "Enter the file name "
read a
wc -l "$a"
wc -w "$a"
| true
|
e57ad89c14bbcd176441df038f1576200657e4c2
|
Shell
|
o2r-project/erc-checker
|
/etc/extract_original_paper_from_ERC.sh
|
UTF-8
| 1,477
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# (C) Copyright 2017 o2r-project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
printf "Beginning extraction. \n\n"
erc_ID="$1"
path_to_image="${2%/}/data/image.tar"
base_dir="$(pwd)"
echo "$base_dir"
if [ ! "$3" ] ; then
html_name="main.html" ;
else
html_name="$3" ;
fi
temp_path="/tmp/imageContentERC/$erc_ID/"
original_paper_specific_dir="original_papers/$erc_ID/"
mkdir -p $temp_path $base_dir/$original_paper_specific_dir
# extract image content to a temporary storage directory including some id
tar xvf $path_to_image -C $temp_path
# switcheroo over to the temp storage
cd $temp_path
# find all layer.tar balls, and for each found, while searching,
# copy main.html to a Original Papers' Storage Directory on the platform (permanent)
while
read -r result ;
do echo $result ;
tar xf $result && cp "erc/$html_name" $base_dir/$original_paper_specific_dir ;
printf "\n" ;
done < <(find . -name layer.tar)
rm -r $temp_path/* -f
| true
|
33e6d75c38260a8a8bdf87c7a571cbc6ca90f59c
|
Shell
|
kunpengcompute/hmpi
|
/config/ompi_check_gpfs.m4
|
UTF-8
| 2,765
| 2.8125
| 3
|
[
"BSD-3-Clause-Open-MPI"
] |
permissive
|
dnl -*- shell-script -*-
dnl
dnl Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
dnl University Research and Technology
dnl Corporation. All rights reserved.
dnl Copyright (c) 2004-2005 The University of Tennessee and The University
dnl of Tennessee Research Foundation. All rights
dnl reserved.
dnl Copyright (c) 2004-2018 High Performance Computing Center Stuttgart,
dnl University of Stuttgart. All rights reserved.
dnl Copyright (c) 2004-2006 The Regents of the University of California.
dnl All rights reserved.
dnl Copyright (c) 2018 University of Houston. All rights reserved.
dnl $COPYRIGHT$
dnl
dnl Additional copyrights may follow
dnl
dnl $HEADER$
dnl
# OMPI_CHECK_GPFS(prefix, [action-if-found], [action-if-not-found])
# --------------------------------------------------------
# check if GPFS support can be found. sets prefix_{CPPFLAGS,
# LDFLAGS, LIBS} as needed and runs action-if-found if there is
# support, otherwise executes action-if-not-found
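dnl Example invocation (a sketch; 'fs_gpfs' follows the usual OMPI component prefix style):
dnl OMPI_CHECK_GPFS([fs_gpfs],
dnl [fs_gpfs_happy="yes"],
dnl [fs_gpfs_happy="no"])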
AC_DEFUN([OMPI_CHECK_GPFS],[
check_gpfs_CPPFLAGS=
check_gpfs_LDFLAGS=
check_gpfs_LIBS=
check_gpfs_save_LIBS="$LIBS"
check_gpfs_save_LDFLAGS="$LDFLAGS"
check_gpfs_save_CPPFLAGS="$CPPFLAGS"
check_gpfs_configuration="none"
ompi_check_gpfs_happy="yes"
# Get some configuration information
AC_ARG_WITH([gpfs],
[AC_HELP_STRING([--with-gpfs(=DIR)],
[Build Gpfs support, optionally adding DIR/include, DIR/lib, and DIR/lib64 to the search path for headers and libraries])])
OPAL_CHECK_WITHDIR([gpfs], [$with_gpfs], [include/gpfs.h])
AS_IF([test "$with_gpfs" = "no"],
[ompi_check_gpfs_happy="no"],
[AS_IF([test -z "$with_gpfs" || test "$with_gpfs" = "yes"],
[ompi_check_gpfs_dir="/usr"],
[ompi_check_gpfs_dir=$with_gpfs])
if test -e "$ompi_check_gpfs_dir/lib64" ; then
ompi_check_gpfs_libdir="$ompi_check_gpfs_dir/lib64"
else
ompi_check_gpfs_libdir="$ompi_check_gpfs_dir/lib"
fi
# Add correct -I and -L flags
OPAL_CHECK_PACKAGE([$1], [gpfs.h], [gpfs], [gpfs_lib_init],
[], [$ompi_check_gpfs_dir], [$ompi_check_gpfs_libdir],
[ompi_check_gpfs_happy="yes"],
[ompi_check_gpfs_happy="no"])
])
AS_IF([test "$ompi_check_gpfs_happy" = "yes"],
[$2],
[AS_IF([test ! -z "$with_gpfs" && test "$with_gpfs" != "no"],
[AC_MSG_ERROR([GPFS support requested but not found. Aborting])])
$3])
])
| true
|
cb0b0a3d36ec3e52ea5a29faccd775d7359778ea
|
Shell
|
ferhaty/developer_mac
|
/install_on_my_mac.sh
|
UTF-8
| 1,408
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
### Manage the (developer) software on your Mac with this command.
### @bbaassssiiee
### https://github.com/bbaassssiiee/developer_mac
## download_apple_xcode: http://appstore.com
# You need to download XCode, or only the command line tools
xcrun -f git || (open https://developer.apple.com/xcode/downloads/ && exit 1)
## install_apple_xcode_command_line_tools:
# then you can install the command line tools you need
xcrun -f git ||sudo xcodebuild -license
xcrun -f git ||xcode-select --install
# To avoid pre-heartbleed SSL certificate errors bootstrap without check
echo insecure > ~/.curlrc
## install_homebrew: http://brew.sh
# Brew is the package manager of choice on the Mac
if [ ! -x /usr/local/bin/brew ]
then
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" < /dev/null
fi
## install_ansible: http://ansible.com
brew install --update ansible
## install_cask: http://caskroom.io
brew install --update caskroom/cask/brew-cask
##install_from_galaxy:
# re-use the role from 'Ansible for DevOps, Jeff Geerling, 2014'
ansible-galaxy install --force -r requirements.yml
##install_software_on_this_mac:
# specify the apps you want in vars/main.yml
ansible-playbook provisioner.yml -i inventory.ini --ask-sudo-pass
# To avoid post-heartbleed SSL certificate errors use default curl
rm ~/.curlrc
# Update this Mac immediately!
./update_my_mac.sh
| true
|
74d80616b3d1a966fc2585dce4849718f7afe9f5
|
Shell
|
istlab/ba-lab
|
/makelabs.sh
|
UTF-8
| 1,090
| 3.390625
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
mkdir html
html="html"
cp common/ba-lab-index.html $html
for dir in `find . -maxdepth 1 -type d|grep '[0-9]\{1,2\}-'|grep -v CVS|sed -e 's/\.\///g'`;
do
cp $dir/$dir.pdf $html
title=`cat $dir/$dir.tex|grep \\labtitle\}\{|perl -ne '$_ =~/\{([\+\-\*A-Za-z0-9 ]+)\}/; print $1;'`
version=`cat $dir/$dir.tex |grep Revision:|perl -ne '$_ =~/([0-9].[0-9]+)/; print $1."\n";'`
rawsize=`stat -f "%z" $dir/$dir.pdf`
size=$(echo "scale=0;$rawsize/1024"|bc -l)
link=`printf "\\\<li\\\>\\\<a href=\"%s\" alt=\"%s\"\>%s\\\<\/a\> \<font size\=\"-2\"\>\(Revision:%s %skb\)\<\/font\>\\\<\/li\\\>" $dir.pdf $dir.pdf "$title" $version $size`
echo $title $version $size
cat $html/ba-lab-index.html |sed "s/\<\!--$dir--\>/$link/" > ba-lab-index.html.tmp
mv -f ba-lab-index.html.tmp $html/ba-lab-index.html
done
cat $html/ba-lab-index.html |sed "s/\<\!--date--\>/$date/" > ba-lab-index.html.tmp
mv -f ba-lab-index.html.tmp $html/index.html
rm $html/ba-lab-index.html
scp html/* [email protected]:~/public_html/labs/ba-lab
rm -R $html
| true
|
6cdebab336f7950a08f7574424ab681ba92a8ed0
|
Shell
|
iftekharnaim/MSA-Scribe
|
/msa_astar/bin/shell_script_all.sh
|
UTF-8
| 730
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
start=0
end=360
beamsize=20.0
## declare an array variable
declare -a arr=(e6p11 e6p22 sch1p11 sch1p22)
declare -a arrchunk=(5 7 10 15 20 30 40 60)
#declare -a arrweight=(2 2.5 3 3.5 5 6 7)
declare -a arrweight=(1.7 1.75 1.8 2 2.5 3 4 5 6 7 8)
for weight in ${arrweight[@]}
do
## now loop through the dataset array
for i in ${arr[@]}
do
# loop through the chunksize array
for j in ${arrchunk[@]}
do
echo $i $j
# or do whatever with individual element of the array
command='./results_affine_same_closed/output_'$i'_'$j'_'$weight'_'$start'_'$end'.txt'
echo $command
java Main_class -w$weight -c$j -d/home/vax7/u6/inaim/MSA/data/$i -s$start -e$end -b$beamsize -go0.1 -ge0.1 -r1 > $command
done
done
done
| true
|
cd1ad7a2525319e4ed2df1949bc403ed524cd70d
|
Shell
|
danielyaa5/flightfutures
|
/scripts/Server.sh
|
UTF-8
| 199
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
cd "$parent_path"
. ~/.nvm/nvm.sh && nvm use stable && node -v && kill -9 $(lsof -t -i tcp:3031);
cd ../server && DEBUG=* npm start &
| true
|
a84fce73a1afc3cb84c1f7007463a347c346c2b2
|
Shell
|
occlum/occlum
|
/demos/grpc/grpc_musl/run_server_on_host.sh
|
UTF-8
| 200
| 2.703125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
INSTALL_DIR=/usr/local/occlum/x86_64-linux-musl
export PATH=$PATH:$INSTALL_DIR/bin
cd server
make -j$(nproc)
if [ $? -ne 0 ]
then
echo "demo make failed"
exit 1
fi
./greeter_server
| true
|
802863a888515cfa1dff6665b54dc4bf79065acc
|
Shell
|
ministry-of-silly-code/buddy_metarepo
|
/test_scripts/test_flow.sh
|
UTF-8
| 885
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
# Setup phase - Set the correct user.email and user.name and activate the ssh agent
eval "$(ssh-agent)" && ssh-add
git config --global user.email "$GIT_MAIL"
git config --global user.name "$GIT_NAME"
# Setup phase - Also remember to set your $WANDB_API_KEY (More info in the readme)
if [[ -z "$WANDB_API_KEY" ]]; then
echo set \"export WANDB_API_KEY=[your-wandb-key]\" which can be found here: https://wandb.ai/settings
exit
fi
# 1 - Create a new virtual env
virtualenv -q buddy-env > /dev/null
source ./buddy-env/bin/activate > /dev/null
# 2 - Clone [or create] your project, in this case I'm using a basic mnist_classifier
git clone -q -b $BUDDY_CURRENT_TESTING_BRANCH [email protected]:ministry-of-silly-code/examples.git
cd examples
# 3 - Install the dependencies
pip -q install -r ./requirements.txt
# Run your experiments
python ./mnist_classifier.py
| true
|
ba46298da6fcecf9803482bccc794290fb0d4989
|
Shell
|
estelle-tock/scripts
|
/tracking-imports/2020_03_05_prod_clutch_bar.sh
|
UTF-8
| 2,133
| 2.671875
| 3
|
[] |
no_license
|
# --------------------- DETAILS --------------------------
# Business Name - Clutch Bar
# Final Demo - 3160
# Prod ID - 8717
# Booking System - OPENTABLE_GUESTCENTER
# Config Type - Flex
# Reminder Texts - Yes
# Welcome Texts - YES
# PUSH CONFIGURATIONS
# from: 3159 (demo)
# to: 3160 (final demo)
# to: 8717 (prod)
# --------------------------------------------------------
# Optional Jenkins Cron for export/import configs
# https://cron.tocktix.com/job/Export%20configurations/build?delay=0sec
# Run Locally
@local.env.flags
com.tocktix.cron.dataimport.ImportJob
--businessId=1
--guest_csv=/Users/estelle/clutchbar_resos.csv
--reso_csv=/Users/estelle/empty.txt
--config_json="{formatVersion:OPENTABLE_GUESTCENTER,separator:',', guestDateFormat:YYYY_M_D_DASH,guestTimeFormat:H_M_A, quoteDetectionEnabled: false, smsReminderEnabled: true}"
scp -i ~/.ssh/google_compute_engine ~/clutchbar_resos.csv estelle@crawl-server:~/
ssh -i ~/.ssh/google_compute_engine crawl-server
sudo su robinanil
cp clutchbar_resos.csv ~
cd ~/importer/server
git checkout master
./gradlew regenerateSource shadowJar cronJar
./export_configuration.sh demo 3159 3159.demo.proto
./import_configuration.sh demo 3160 3159.demo.proto
# FINAL DEMO
./run_importer.sh -e demo -b 3160 -g ../../clutchbar_resos.csv -r ../../empty.txt -c "{formatVersion:OPENTABLE_GUESTCENTER,separator:',', guestDateFormat:YYYY_M_D_DASH,guestTimeFormat:H_M_A, quoteDetectionEnabled: false, smsReminderEnabled: true}"
# PRODUCTION
./import_configuration.sh prod 8717 3159.demo.proto
# Must Check BEFORE PROD:
# PUSHED CONFIGURATIONS FOR PROD (if necessary)
# ENABLED/DISABLED WELCOME TEXTS (-t true)
# NO GUESTS OR RESERVATIONS in dashboard prod (unless AM says ok)
# BUSINESS ID FOR PRODUCTION IS THE RIGHT ONE (!!!!!)
# IF BIG FILE USE nohup TO RUN IN BACKGROUND & DO NOT RE-RUN
./run_importer.sh -e prod -b 8717 -g ../../clutchbar_resos.csv -r ../../empty.txt -c "{formatVersion:OPENTABLE_GUESTCENTER,separator:',', guestDateFormat:YYYY_M_D_DASH,guestTimeFormat:H_M_A, quoteDetectionEnabled: false, smsReminderEnabled: true}" -t true
| true
|
e8eec0bf08abd52f208e81407781f898317f0570
|
Shell
|
Nogal/gamepad-switcher
|
/modules/xpad.sh
|
UTF-8
| 235
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Check if the gamepad module is loaded, if not, load it.
module=xpad
if lsmod | grep -q "^$module "
then echo "Xpad already loaded."
else echo "Controller has been returned to normal function."
sudo modprobe xpad
echo
fi
| true
|
02f09b49f20793088533ade92467c7654ce1f4d7
|
Shell
|
tomsmeding/latexmd
|
/latexmd
|
UTF-8
| 572
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
if test $# -ne 1; then
echo "Usage: $0 <file.md>"
exit 1
fi
resolved="$(readlink "$0")"
if test $? -ne 0; then mybase="$(dirname "$0")"; else mybase="$(dirname "$resolved")"; fi
mdfile="$1"
filebase="$(echo "$mdfile" | sed 's/\.[^.]\+$//')"
cat "$mdfile" | sed 's/`/\`/g' | sed 's/\$/`/g' | markdown | node "$mybase/htmltotex.js" >".$filebase.tex"
output="$(pdflatex ".$filebase.tex" </dev/null)"
texstatus=$?
rm ".$filebase.aux"
if test $texstatus -ne 0; then
echo "$output"
exit 1
fi
rm ".$filebase."{log,tex}
mv ".$filebase.pdf" "$filebase.pdf"
| true
|
93980febdb299f73be58fa326f94f2ead77bb3c9
|
Shell
|
PacktPublishing/Qt-5-Cookbook
|
/ch01/distributing-linux-raspberrypi-applications/application-launcher.sh
|
UTF-8
| 252
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
appname=`basename $0 | sed s,\.sh$,,`
dirname=`dirname $0`
tmp="${dirname#?}"
if [ "${dirname%$tmp}" != "/" ]; then
dirname=$PWD/$dirname
fi
export LD_LIBRARY_PATH=$dirname/lib
export QML2_IMPORT_PATH=$dirname/qml
$dirname/$appname "$@"
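# Expected bundle layout next to this launcher (inferred from the paths above;
# 'myapp' is a hypothetical name):
# myapp.sh - this script, renamed to match the binary
# myapp - the application binary
# lib/ - bundled Qt libraries (added to LD_LIBRARY_PATH)
# qml/ - bundled QML imports (QML2_IMPORT_PATH)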
| true
|
c5f97c76a5e0f6134e7b8226ead58a9e132a2edd
|
Shell
|
Odka33/b3c-parsage-2019
|
/parsage.sh
|
UTF-8
| 4,262
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/bash
#-*- coding: utf-8-*-
# ==============================================================================
# title :parsage.sh #
# description :Parse log file . #
# author :Odka33 - Elie BEN AYOUN #
# date :28/04/2019 #
# version :0.1 #
# usage : ./parsage.sh . #
# ==============================================================================
#NIKTO
#NIKTO IP/DNS
cat *_NIKTO.txt | grep -i "target host" | awk -F':' '{print $NF}' | sed 's/[^;]$/&,/' > NIKTO/nikto_ip.csv
#NIKTO PORT:
cat *_NIKTO.txt | awk '/Port/ {print $4}' | sed 's/[^;]$/&,/' > NIKTO/nikto_port.csv
#NIKTO NAME:
cat *_NIKTO.txt | grep "Nikto" | awk '{print $2, $3 }' | sed 's/[^;]$/&,/' > NIKTO/nikto_name.csv
#NIKTO DATE:
#date -r *_NIKTO.txt "+%M:%d:%Y" | sed 's/[^;]$/&,/' > NIKTO/nikto_date.csv
stat -c "%z" *_NIKTO.txt | awk '{print $1}' | sed 's/[^;]$/&,/' > NIKTO/nikto_date.csv
#NIKTO FILE LOCATION:
find /root/logs_scan/ -name "*_NIKTO.txt" | sed 's/[^;]$/&,/' > NIKTO/nikto_flocation.csv
#NIKTO SHASUM:
sha1sum *_NIKTO.txt | awk '{print $1}' | sed 's/[^;]$/&,/' > NIKTO/nikto_shasum.csv
#NIKTO CVSS:
cat *_NIKTO.txt | grep "OSVDB" | sed 'N;s/\n/ /' | sed 's/[^;]$/&,/' > NIKTO/nikto_cvss.csv
#NIKTO BREACH:
cat *_NIKTO.txt | grep "+ GET" | tr -d ',' | paste -d " " - - - - - - - - - - - - - - - | sed 's/[^;]$/&,/' > NIKTO/nikto_breach.csv
paste -d '' NIKTO/nikto_name.csv NIKTO/nikto_date.csv NIKTO/nikto_flocation.csv NIKTO/nikto_shasum.csv NIKTO/nikto_ip.csv NIKTO/nikto_port.csv NIKTO/nikto_cvss.csv NIKTO/nikto_breach.csv > $(date +%m.%d.%Y)_nikto.csv
#NMAP
#NMAP IP/DNS
cat *_NMAP.txt | grep -a "addr=" | cut -d'"' -f2 | sed 's/[^;]$/&,/' > NMAP/nmap_ip.csv
#NMAP PORT:
cat *_NMAP.txt | grep "portid=" | cut -d'"' -f4 | paste -d " " - - - - - - - - - - - | sed 's/[^;]$/&,/' > NMAP/nmap_port.csv
#NMAP SCAN_NAME:
cat *_NMAP.txt | grep "Nmap 7" | awk '{print $2, $3 }' | sed 's/[^;]$/&,/' > NMAP/nmap_name.csv
#NMAP SCAN_DATE:
#date -r *_NMAP.txt "+%M:%d:%Y %H:%M:%S" | sed 's/[^;]$/&,/' > NMAP/nmap_date.csv
stat -c "%z" *_NMAP.txt | awk '{print $1}' | sed 's/[^;]$/&,/' > NMAP/nmap_date.csv
#NMAP SCAN_LOCATION:
find /root/logs_scan/ -name "*_NMAP.txt" | sed 's/[^;]$/&,/' > NMAP/nmap_flocation.csv
#NMAP LOG_SHA1:
sha1sum *_NMAP.txt | awk '{print $1}' | sed 's/[^;]$/&,/' > NMAP/nmap_shasum.csv
#NMAP CVSS:
echo "-," > NMAP/nmap_cvss.csv
#NMAP BREACH:
cat *_NMAP.txt | grep "state" | awk '{print $7$8$9$10$11}' | paste -d " " - - - - - - - - - - - | sed 's/[^;]$/&,/' > NMAP/nmap_breach.csv
paste -d '' NMAP/nmap_name.csv NMAP/nmap_date.csv NMAP/nmap_flocation.csv NMAP/nmap_shasum.csv NMAP/nmap_ip.csv NMAP/nmap_port.csv NMAP/nmap_cvss.csv NMAP/nmap_breach.csv > $(date +%m.%d.%Y)_nmap.csv
#FIERCE
#FIERCE IP/DNS:
cat *_fierce.txt | awk '{print $2}' | grep "www" | sed 's/[^;]$/&,/' > FIERCE/fierce_ip.csv
#FIERCE NAME:
echo 'fierce,' > FIERCE/fierce_name.csv
#FIERCE PORT:
echo '53,' > FIERCE/fierce_port.csv
#FIERCE SCAN_DATE:
#date -r *_fierce.txt "+%M:%d:%Y" | sed 's/[^;]$/&,/' > FIERCE/fierce_date.csv
stat -c "%z" *_fierce.txt | awk '{print $1}' | sed 's/[^;]$/&,/' > NIKTO/nikto_date.csv
#FIERCE SCAN_LOCATION:
find /root/logs_scan/ -name "*_fierce.txt" | sed 's/[^;]$/&,/' > FIERCE/fierce_flocation.csv
#FIERCE LOG_SHA1:
sha1sum *_fierce.txt | awk '{print $1}' | sed 's/[^;]$/&,/' > FIERCE/fierce_shasum.csv
#FIERCE BREACH:
cat *_fierce.txt | awk '{print $2}' | grep "ns" | paste -d " " - - - - - - - - - - - - - | sed 's/[^;]$/&,/' > FIERCE/fierce_breach.csv
#FIERCE CVSS:
echo "-," > FIERCE/fierce_cvss.csv
paste -d '' FIERCE/fierce_name.csv FIERCE/fierce_date.csv FIERCE/fierce_flocation.csv FIERCE/fierce_shasum.csv FIERCE/fierce_ip.csv FIERCE/fierce_port.csv FIERCE/fierce_cvss.csv FIERCE/fierce_breach.csv > $(date +%m.%d.%Y)_fierce.csv
mkdir -p /root/logs_scan/$(date +%m.%d.%Y-%T)_scan/
paste -d '\n' *_fierce.csv *_nikto.csv *_nmap.csv > scanner_all.csv
cp scanner_all.csv *_scan/
| true
|
c3d5c92568e0e027037268694c9af39e17ed3c6f
|
Shell
|
axltxl/i3-desktop
|
/bootstrap
|
UTF-8
| 1,323
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Setup i3wm configuration
# FIXME: document me
set -e
# Absolute path to this script
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
packages=( stow
i3
i3lock-fancy
dex
xinit
x11-xserver-utils
xsel
xclip
xbacklight
xautolock
xsettingsd
xfonts-terminus
compton
rofi
rxvt-unicode
nitrogen
playerctl
build-essential
python3-venv
libpython3-dev
libiw-dev
libdbus-1-dev
libglib2.0-dev
)
# Install packages
sudo apt-get update
sudo apt-get -y install ${packages[@]}
# Install icon theme
# FIXME: document me
[[ ! -d ~/.icons ]] && mkdir ~/.icons
cd /tmp
rm -rf Tela-icon-theme
git clone https://github.com/vinceliuice/Tela-icon-theme.git
cd Tela-icon-theme; ./install.sh -purple --dest ~/.icons
# python venv
I3_VENV=~/.i3pyvenv
[[ -a "$I3_VENV" ]] && rm -rf $I3_VENV
mkdir $I3_VENV
python3 -m venv $I3_VENV
. $I3_VENV/bin/activate
pip3 install -r ${script_dir}/requirements.txt
# Stow directories
cd $script_dir
scripts/pkg add X11
scripts/pkg add i3
scripts/pkg add dunst
scripts/pkg add compton
scripts/pkg add urxvt
scripts/pkg add theme
| true
|
f19abbe7727fd065fb684a5e9765610563139059
|
Shell
|
jtaca/BSc-Computer-Engineering-Projects
|
/2nd_Year/SO/Trabalho_SO (Versão 10)(Entregue 21-10-2017)(1º Entrega)/cria_utilizador.sh
|
UTF-8
| 1,918
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
ficheiroPasswd=/etc/passwd
ficheiroUtilizadores=./utilizadores.txt
cicloUser=0
cicloTurma=0
cicloSaldo=0
numeroDeCaracteresMinimo=2
numeroDeLetrasParaTurma=4
touch $ficheiroUtilizadores
echo "Insira o numero de aluno válido por favor: "
read numberGiven
if [ $(cat $ficheiroPasswd | cut -d':' -f1 | grep "^a$numberGiven$") ];
then
nome=$(cat $ficheiroPasswd | grep "^a$numberGiven:" | cut -d':' -f5 | cut -d',' -f1)
if [ "$(cat $ficheiroUtilizadores | cut -d';' -f5 | grep "^a$numberGiven@" | wc -l)" == "0" ];
then
while [ $cicloUser = 0 ]; do
printf "Utilizador:"
read user
if [ -z "$(cat $ficheiroUtilizadores | cut -d';' -f1 | grep "^$user$")" ] && [ $(echo $user | wc -m) -ge $(($numeroDeCaracteresMinimo+1)) ]; then
cicloUser=1
else
echo "O Username que inserio já está a ser utilizado ou contem menos de $numeroDeCaracteresMinimo letras, tente novamente."
fi
done
printf "Password:"
read -s password
echo ""
while [ $cicloTurma = 0 ]; do
echo "Insira a turma a que pertence por favor (até $numeroDeLetrasParaTurma letras): "
read turma
if [ $(echo $turma | wc -m) -le $(($numeroDeLetrasParaTurma+1)) ]; then
cicloTurma=1
else
echo "A turma excede $numeroDeLetrasParaTurma letras, tente novamente."
fi
done
while [ $cicloSaldo = 0 ]; do
echo "Insira o saldo atual: "
read saldo
if [ "$(echo $saldo | tr "0-9" " " | wc -w)" = "0" ]; then
cicloSaldo=1
else
echo "Apenas insira números (positivos) no saldo. Tente novamente."
fi
done
if [ -z $saldo ]; then
saldo=0
fi
echo "$user;$password;$((1000000+$numberGiven));$nome;[email protected];$turma;$saldo" >> $ficheiroUtilizadores
echo "Parabéns conseguiu inserir o utilizador $nome"
else
echo "O utilizador já estava registado!"
fi
else
echo "Não foi inserido um numero válido... :/"
fi
| true
|
5ccf3607b63436c669e5541f663fd6d02c26ac56
|
Shell
|
flexflow/FlexFlow
|
/conda/build.sh
|
UTF-8
| 813
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /usr/bin/env bash
set -euo pipefail
# Cd into FF_HOME
cd "${BASH_SOURCE[0]%/*}/../"
# build flexflow
# "search and replace" bash syntax used below to make shellcheck happy.
# see here: https://wiki-dev.bash-hackers.org/syntax/pe
CXXFLAGS="${CXXFLAGS//-O2/}"
CXXFLAGS="${CXXFLAGS//-std=c++17/}"
CXXFLAGS="${CXXFLAGS//-DNDEBUG/}"
CXXFLAGS="${CXXFLAGS//-D_FORTIFY_SOURCE=2/}"
export CXXFLAGS
CPPFLAGS="${CPPFLAGS//-O2/}"
CPPFLAGS="${CPPFLAGS//-std=c++17/}"
CPPFLAGS="${CPPFLAGS//-DNDEBUG/}"
CPPFLAGS="${CPPFLAGS//-D_FORTIFY_SOURCE=2/}"
export CPPFLAGS
#export CUDNN_HOME=/projects/opt/centos7/cuda/10.1
#export CUDA_HOME=/projects/opt/centos7/cuda/10.1
export PROTOBUF_DIR=$BUILD_PREFIX
export FF_HOME=$SRC_DIR
export LG_RT_DIR=$SRC_DIR/legion/runtime
#export FF_ENABLE_DEBUG=1
#export DEBUG=0
cd python
make
| true
|
d515837218b2bc9154955b58cd0b61cfababa15c
|
Shell
|
genneko/freebsd-install-on-zfs
|
/install.sh
|
UTF-8
| 13,300
| 3.375
| 3
|
[] |
no_license
|
#
# install.sh
# FreeBSD ZFS custom installation script for use with bsdinstall.
#
# Usage
# First, create a config/script folder for this installation
# by executing prepare.sh script on a host such as your laptop.
#
# cd ~/work
# /somewhere/freebsd-install-on-zfs/prepare.sh myfolder
#
# Then, edit config (.cfg) and/or script (.scp) files as required.
#
# cd myfolder
# mv baremetal.cfg whatyoulike.cfg
# vi myserver.cfg
# vi init.scp
# vi base.scp
# vi pkg.scp
#
# Then spin up web server on the host.
# One way to do this is running the following command in
# the directory containing this script and config files.
#
# python -m SimpleHTTPServer
#
# Finally on the target host, do something like the following
# at the FreeBSD installer's shell prompt.
# (Assume that 192.168.10.120 is your laptop.)
#
# dhclient em0
# export NAMESERVER=192.168.10.1
# cd /tmp
# fetch http://192.168.10.120:8000/install.sh
# fetch http://192.168.10.120:8000/whatyoulike.cfg
# export CUSTOM_CONFIG_FILE=whatyoulike.cfg
# bsdinstall script install.sh
# less bsdinstall_log
# reboot
#
# Full descriptions are found on the following URL.
# https://github.com/genneko/freebsd-install-on-zfs
#
####################################################
# PREAMBLE
####################################################
. ${CUSTOM_CONFIG_DIR:-/tmp}/${CUSTOM_CONFIG_FILE:=install.cfg}
: ${CUSTOM_CONFIG_BASEURL:=http://192.168.10.120:8000}
: ${DISTRIBUTIONS:=base.txz kernel.txz}
: ${ZFSBOOT_DISKS:=ada0}
: ${ZFSBOOT_VDEV_TYPE:=stripe}
: ${ZFSBOOT_SWAP_SIZE:=0}
: ${ZFSBOOT_POOL_CREATE_OPTIONS:=-O compression=lz4 -O atime=off -O com.sun:auto-snapshot=true}
: ${ZFSBOOT_POOL_NAME:=zroot}
: ${ZFSBOOT_BEROOT_NAME:=ROOT}
: ${ZFSBOOT_BOOTFS_NAME:=default}
: ${ZFSBOOT_GELI_ENCRYPTION=}
if [ -z "$ZFSBOOT_DATASETS" ]; then
ZFSBOOT_DATASETS="
# DATASET OPTIONS (comma or space separated; or both)
# Boot Environment [BE] root and default boot dataset
/$ZFSBOOT_BEROOT_NAME mountpoint=none
/$ZFSBOOT_BEROOT_NAME/$ZFSBOOT_BOOTFS_NAME mountpoint=/
# Compress /tmp, allow exec but not setuid
# Omit from auto snapshot
/tmp mountpoint=/tmp,exec=on,setuid=off,com.sun:auto-snapshot=false
# Don't mount /usr so that 'base' files go to the BEROOT
/usr mountpoint=/usr,canmount=off
# Don't mount /usr/local too for the same reason.
/usr/local canmount=off
# Home directories separated so they are common to all BEs
/home mountpoint=/home
# Ports tree
/usr/ports setuid=off,com.sun:auto-snapshot=false
# Source tree (compressed)
/usr/src com.sun:auto-snapshot=false
/usr/obj com.sun:auto-snapshot=false
# Create /var and friends
/var mountpoint=/var,canmount=off
/var/audit exec=off,setuid=off
/var/crash exec=off,setuid=off,com.sun:auto-snapshot=false
/var/log exec=off,setuid=off
/var/mail atime=on
/var/tmp setuid=off,com.sun:auto-snapshot=false
" # END-QUOTE
fi
### DISTRIBUTIONS is exported from /usr/libexec/bsdinstall/script.
export nonInteractive="YES"
export CUSTOM_CONFIG_FILE
export CUSTOM_CONFIG_BASEURL
export ZFSBOOT_DISKS
export ZFSBOOT_VDEV_TYPE
export ZFSBOOT_SWAP_SIZE
export ZFSBOOT_POOL_CREATE_OPTIONS
export ZFSBOOT_POOL_NAME
export ZFSBOOT_BEROOT_NAME
export ZFSBOOT_BOOTFS_NAME
export ZFSBOOT_GELI_ENCRYPTION
export ZFSBOOT_DATASETS
####################################################
# POST INSTALLATION SETUP
####################################################
#!/bin/sh
#
# void load_script <filename>
#
load_script(){
local filename="$1"
if [ -n "$filename" ]; then
cd /tmp
fetch --no-proxy='*' ${CUSTOM_CONFIG_BASEURL}/${filename}
. ./${filename}
fi
}
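#
# Usage sketch (illustrative): "load_script base.scp" fetches
# ${CUSTOM_CONFIG_BASEURL}/base.scp into /tmp and sources it in the
# current shell, so any variables it sets remain visible here.
#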
#
# void load_file <filename> <destpath>
#
load_file(){
local filename="$1"
local destpath="$2"
if [ -n "$filename" -a -n "$destpath" ]; then
if [ -e "$destpath" ]; then
if [ ! -e "$destpath.dist" ]; then
bkpath="$destpath.dist"
else
bkpath="$destpath.$(date '+%s')"
fi
mv "$destpath" "$bkpath"
fi
fetch --no-proxy='*' -o "$destpath" ${CUSTOM_CONFIG_BASEURL}/${filename}
if [ $? -ne 0 -a ! -e "$destpath" -a -e "$bkpath" ]; then
mv "$bkpath" "$destpath"
fi
fi
}
_write_file(){
local flag="$1"
local destpath="$2"
local content="$3"
if [ -n "$destpath" ]; then
IFS_SAVE=$IFS
IFS=
if echo "$flag" | fgrep -q "insertnewline"; then
echo >> "$destpath"
fi
if echo "$flag" | fgrep -q "overwrite"; then
if [ -e "$destpath" ]; then
if [ ! -e "$destpath.dist" ]; then
bkpath="$destpath.dist"
else
bkpath="$destpath.$(date '+%s')"
fi
mv "$destpath" "$bkpath"
fi
echo "$content" > "$destpath"
else
echo "$content" >> "$destpath"
fi
IFS=$IFS_SAVE
fi
}
#
# void write_file <destpath> <content>
#
write_file(){
_write_file "" "$1" "$2"
}
# insert newline first
write_file_nl(){
_write_file "insertnewline" "$1" "$2"
}
# overwrite
write_file_new(){
_write_file "overwrite" "$1" "$2"
}
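# Illustrative examples (hypothetical values):
#   write_file /etc/rc.conf.local 'foo_enable="YES"'   # append
#   write_file_new /etc/motd "Welcome"                 # back up, then overwrite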
#
# Shorthands (PART1)
#
cf_rc=/etc/rc.conf
cf_sysctl=/etc/sysctl.conf
cf_loader=/boot/loader.conf
cf_resolv=/etc/resolv.conf
cf_sshd=/etc/ssh/sshd_config
#
# Create temporary /etc/resolv.conf for name resolution.
#
if [ -n "$NAMESERVER" ]; then
write_file_nl $cf_resolv "nameserver $NAMESERVER"
fi
#
# Load default configuration file.
#
load_script "${CUSTOM_CONFIG_FILE}"
: ${HOSTNAME:=freebsd}
: ${NIC_LIST=em0}
: ${IP_LIST=192.168.10.5}
: ${NETMASK_LIST=255.255.255.0}
: ${DEFAULTROUTER=192.168.10.1}
: ${SEARCHDOMAINS=example.com}
: ${NAMESERVER_LIST=192.168.10.1}
: ${DEFAULT_ROOT_PASSWORD:=root}
: ${DEFAULT_USER_GROUP_NAME:=users}
: ${DEFAULT_USER_GROUP_ID:=100}
: ${DEFAULT_USER_NAME:=freebsd}
: ${DEFAULT_USER_FULLNAME:=User &}
: ${DEFAULT_USER_ID:=500}
: ${DEFAULT_USER_PASSWORD:=freebsd}
: ${PKG_LIST=beadm sudo zfstools}
: ${ZVOL_SWAP_SIZE:=2G}
: ${KEYMAP=}
: ${TIME_ZONE=}
: ${PROXY_SERVER=}
: ${NO_PROXY=}
: ${SSH_AUTHORIZED_KEYS_FILE=}
: ${OPTIONAL_SCRIPT_INIT=}
: ${OPTIONAL_SCRIPT_BASE=}
: ${OPTIONAL_SCRIPT_PKG=}
export HOSTNAME
export NIC_LIST
export IP_LIST
export NETMASK_LIST
export DEFAULTROUTER
export SEARCHDOMAINS
export NAMESERVER_LIST
export DEFAULT_ROOT_PASSWORD
export DEFAULT_USER_GROUP_NAME
export DEFAULT_USER_GROUP_ID
export DEFAULT_USER_NAME
export DEFAULT_USER_FULLNAME
export DEFAULT_USER_ID
export DEFAULT_USER_PASSWORD
export PKG_LIST
export ZVOL_SWAP_SIZE
export KEYMAP
export TIME_ZONE
export PROXY_SERVER
export NO_PROXY
export SSH_AUTHORIZED_KEYS_FILE
export OPTIONAL_SCRIPT_INIT
export OPTIONAL_SCRIPT_BASE
export OPTIONAL_SCRIPT_PKG
#
# Shorthands (PART2)
#
username=$DEFAULT_USER_NAME
groupname=$DEFAULT_USER_GROUP_NAME
dir_user_home=/home/$username
dir_user_ssh=$dir_user_home/.ssh
dir_root_home=/root
dir_root_ssh=$dir_root_home/.ssh
cf_user_cshrc=$dir_user_home/.cshrc
cf_root_cshrc=$dir_root_home/.cshrc
cf_user_ssh_ak=$dir_user_ssh/authorized_keys
cf_root_ssh_ak=$dir_root_ssh/authorized_keys
tmp_cshrc=/cshrc.addon
#
# SNAPSHOT 00: (A) BASE SYSTEM INSTALLED.
#
zfs snapshot -r ${ZFSBOOT_POOL_NAME}/${ZFSBOOT_BEROOT_NAME}/${ZFSBOOT_BOOTFS_NAME}@install-00A-basesys-installed
#
# SCRIPT 00: ADDITIONAL INITIALIZATIONS via "init" script.
#
if [ -n "$OPTIONAL_SCRIPT_INIT" ]; then
load_script "$OPTIONAL_SCRIPT_INIT"
#
# SNAPSHOT 00: (B) INIT SCRIPT EXECUTED.
#
zfs snapshot -r ${ZFSBOOT_POOL_NAME}/${ZFSBOOT_BEROOT_NAME}/${ZFSBOOT_BOOTFS_NAME}@install-00B-script-init-done
fi
##############################################################
# PART 01 of 02: BASIC SYSTEM CONFIGURATIONS
##############################################################
#
# Is this on a virtual environment (hypervisor)?
#
hv=$(sysctl -n kern.vm_guest)
# kern.vm_guest reports "none" on bare metal; normalize it to an empty
# string so the [ -n "$hv" ] checks below only trigger on hypervisors.
if [ "$hv" = "none" ]; then
hv=""
fi
#
# /etc/sysctl.conf
#
if [ -n "$hv" ]; then
sysrc -f $cf_sysctl net.inet.tcp.tso=0
fi
#
# /boot/loader.conf
#
sysrc -f $cf_loader beastie_disable="NO"
sysrc -f $cf_loader autoboot_delay="3"
if [ -n "$hv" ]; then
if [ "xen" = "$hv" ]; then
sysrc -f $cf_loader console="vidconsole,comconsole"
else
sysrc -f $cf_loader console="vidconsole"
fi
fi
#
# /etc/rc.conf
#
sysrc zfs_enable="YES"
sysrc hostname="${HOSTNAME}"
hostname $HOSTNAME
if [ -n "$KEYMAP" ]; then
sysrc keymap="${KEYMAP}"
fi
if [ -n "$DEFAULTROUTER" ]; then
sysrc defaultrouter="${DEFAULTROUTER}"
fi
sysrc sshd_enable="YES"
sysrc dumpdev="NO"
sysrc ntpd_enable="YES"
sysrc ntpd_sync_on_start="YES"
if [ -n "$hv" ]; then
ifopt=" -tso"
else
ifopt=""
fi
i=1
for nic in $NIC_LIST; do
ip=$(echo $IP_LIST | cut -d " " -f $i)
mask=$(echo $NETMASK_LIST | cut -d " " -f $i)
if echo "$ip" | grep -qi "^\(DHCP\|SYNCDHCP\|NOSYNCDHCP\|NOAUTO\|WPA\|HOSTAP\)\$"; then
sysrc ifconfig_${nic}="$ip${ifopt}"
else
sysrc ifconfig_${nic}="inet $ip netmask ${mask:-255.255.255.0}${ifopt}"
fi
if [ $i -eq 1 ]; then
export NIC1=$nic
elif [ $i -eq 2 ]; then
export NIC2=$nic
fi
i=$((i + 1))
done
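# For example (hypothetical values), NIC_LIST="em0" with IP_LIST="DHCP" on a
# hypervisor yields: ifconfig_em0="DHCP -tso"; a static entry would look like
# ifconfig_em0="inet 192.168.10.5 netmask 255.255.255.0 -tso".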
#
# /etc/resolv.conf
#
write_file_new $cf_resolv "# auto-generated"
if [ -n "$SEARCHDOMAINS" ]; then
write_file $cf_resolv "search ${SEARCHDOMAINS}"
fi
for nameserver in $NAMESERVER_LIST; do
write_file $cf_resolv "nameserver ${nameserver}"
done
#
# Timezone
#
if [ -n "$TIME_ZONE" ]; then
cp /usr/share/zoneinfo/${TIME_ZONE} /etc/localtime
fi
#
# ZFS Pool's root dataset has no need to be mounted.
#
zfs set mountpoint=none ${ZFSBOOT_POOL_NAME}
#
# User configurations
#
echo ${DEFAULT_ROOT_PASSWORD} | pw usermod root -h 0 -s /bin/tcsh
pw groupadd -n ${DEFAULT_USER_GROUP_NAME} -g ${DEFAULT_USER_GROUP_ID}
echo ${DEFAULT_USER_PASSWORD} | pw useradd -n ${DEFAULT_USER_NAME} -c "${DEFAULT_USER_FULLNAME}" -u ${DEFAULT_USER_ID} -g ${DEFAULT_USER_GROUP_NAME} -G wheel -h 0 -m -s /bin/tcsh
#
# /etc/ssh/sshd_config
#
if [ -n "$SSH_AUTHORIZED_KEYS_FILE" ]; then
mkdir -p "$dir_user_ssh"
chown $username:$groupname $dir_user_ssh
chmod 700 $dir_user_ssh
load_file "$SSH_AUTHORIZED_KEYS_FILE" $cf_user_ssh_ak
chown $username:$groupname $cf_user_ssh_ak
chmod 600 $cf_user_ssh_ak
write_file $cf_sshd "ChallengeResponseAuthentication no"
fi
write_file $cf_sshd "AllowUsers $DEFAULT_USER_NAME"
#
# Shell configuration
#
cat <<-EOF> $tmp_cshrc
alias rm rm -i
alias mv mv -i
alias cp cp -i
alias ls ls -Fw
set noclobber
set ignoreeof
EOF
if [ -n "${PROXY_SERVER}" ]; then
cat <<-EOF>> $tmp_cshrc
setenv http_proxy ${PROXY_SERVER}
setenv https_proxy ${PROXY_SERVER}
setenv ftp_proxy ${PROXY_SERVER}
EOF
if [ -n "${NO_PROXY}" ]; then
cat <<-EOF>> $tmp_cshrc
setenv no_proxy "${NO_PROXY}"
EOF
fi
export http_proxy=${PROXY_SERVER}
export https_proxy=${PROXY_SERVER}
export ftp_proxy=${PROXY_SERVER}
if [ -n "${NO_PROXY}" ]; then
export no_proxy="${NO_PROXY}"
fi
fi
cat $tmp_cshrc >> $cf_root_cshrc
cat $tmp_cshrc >> $cf_user_cshrc
rm $tmp_cshrc
#
# crontabs
#
if [ ! -d /etc/cron.d ]; then
mkdir -p /etc/cron.d
fi
cat <<-'EOF'> /etc/cron.d/00zfstools
#
# Added for zfs-auto-snapshot(zfstools)
#
PATH=/etc:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
15,30,45 * * * * root /usr/local/sbin/zfs-auto-snapshot 15min 4
0 * * * * root /usr/local/sbin/zfs-auto-snapshot hourly 24
7 0 * * * root /usr/local/sbin/zfs-auto-snapshot -k daily 7
14 0 * * 7 root /usr/local/sbin/zfs-auto-snapshot -k weekly 4
28 0 1 * * root /usr/local/sbin/zfs-auto-snapshot -k monthly 12
EOF
#
# periodic.conf
#
sysrc -f /etc/periodic.conf daily_status_zfs_enable="YES"
#
# User temporary directories
#
mkdir -p $dir_root_home/tmp
mkdir -p $dir_user_home/tmp
chown $username:$groupname $dir_user_home/tmp
#
# Swap on ZFS volume
#
if [ -n "${ZVOL_SWAP_SIZE}" -a "${ZVOL_SWAP_SIZE}" != "0" ]; then
zfs create -V ${ZVOL_SWAP_SIZE} -o org.freebsd:swap=on -o checksum=off -o sync=disabled -o primarycache=none -o secondarycache=none -o com.sun:auto-snapshot=false ${ZFSBOOT_POOL_NAME}/swap
fi
#
# SNAPSHOT 01: (A) BASE SYSTEM CONFIGURATIONS DONE.
#
zfs snapshot -r ${ZFSBOOT_POOL_NAME}/${ZFSBOOT_BEROOT_NAME}/${ZFSBOOT_BOOTFS_NAME}@install-01A-basesys-configured
#
# SCRIPT 01: ADDITIONAL BASE-SYSTEM CONFIGURATIONS via "base" script.
#
if [ -n "$OPTIONAL_SCRIPT_BASE" ]; then
load_script "$OPTIONAL_SCRIPT_BASE"
#
# SNAPSHOT 01: (B) BASE SCRIPT EXECUTED.
#
zfs snapshot -r ${ZFSBOOT_POOL_NAME}/${ZFSBOOT_BEROOT_NAME}/${ZFSBOOT_BOOTFS_NAME}@install-01B-script-base-done
fi
##############################################################
# PART 02 of 02: BASIC PACKAGE INSTALLATION
##############################################################
if [ "xen" = "$hv" ]; then
PKG_LIST="${PKG_LIST} xe-guest-utilities"
sysrc xenguest_enable="YES"
sysrc xe_daemon_enable="YES"
fi
if [ -n "${PKG_LIST}" ]; then
export ASSUME_ALWAYS_YES=yes
pkg install ${PKG_LIST}
fi
#
# SNAPSHOT 02: (A) BASIC PACKAGES INSTALLED.
#
zfs snapshot -r ${ZFSBOOT_POOL_NAME}/${ZFSBOOT_BEROOT_NAME}/${ZFSBOOT_BOOTFS_NAME}@install-02A-basicpkg-installed
#
# SCRIPT 02: ADDITIONAL PACKAGE CONFIGURATIONS via "pkg" script.
#
if [ -n "$OPTIONAL_SCRIPT_PKG" ]; then
load_script "$OPTIONAL_SCRIPT_PKG"
#
# SNAPSHOT 02: (B) PKG SCRIPT EXECUTED.
#
zfs snapshot -r ${ZFSBOOT_POOL_NAME}/${ZFSBOOT_BEROOT_NAME}/${ZFSBOOT_BOOTFS_NAME}@install-02B-script-pkg-done
fi
#
# SNAPSHOT 03: (Z) INSTALLATION COMPLETE.
#
zfs snapshot -r ${ZFSBOOT_POOL_NAME}/${ZFSBOOT_BEROOT_NAME}/${ZFSBOOT_BOOTFS_NAME}@install-03Z-complete
| true |
b44c0f6366dbd91970bb3b0c55ad1d3597b5b590 | Shell | raghav007bisht/Intrusion-Detection-System-IDS-v5.0 | /IDS V5.0/ids/Firewall/Block_Website.sh | UTF-8 | 1,481 | 3.453125 | 3 | [] | no_license |
#!/bin/bash
# This file is part of Intrusion Detection System By - Raghav Bisht.
#
# Intrusion Detection System By - Raghav Bisht is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Intrusion Detection System By - Raghav Bisht is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
echo "Enter an option <1-3> to manage website blocking"
echo
echo "1 = Manually Blocking Website"
echo "2 = Manually Unblocking Website"
echo "3 = Exit "
echo "Enter :"
read NUM
if [ "$NUM" -eq 1 ]; then
echo "You have chosen to manually block a website:"
echo
echo "Enter The website name you want to block : "
read site
echo "0.0.0.0 $site" | sudo tee -a /etc/hosts > /dev/null
echo "127.0.0.1 $site" | sudo tee -a /etc/hosts > /dev/null
echo "[+] Website blocked successfully"
elif [ "$NUM" -eq 2 ]; then
echo "You have chosen to manually unblock a website:"
echo
gedit /etc/hosts
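# Non-interactive alternative (a sketch; assumes GNU sed and a $site variable
# holding the hostname that was blocked by option 1):
# sudo sed -i "/[[:space:]]$site\$/d" /etc/hosts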
elif [ "$NUM" -eq 3 ]; then
echo " Bye Bye "
exit 0
else
echo "Wrong Option Selected"
echo " Bye Bye "
exit 0
fi
| true |
403d8ac66528281022150a8a0258b0d8c06aff80 | Shell | orpheus/fabric | /scripts/buildNodes.sh | UTF-8 | 730 | 3.796875 | 4 | ["Apache-2.0", "CC-BY-4.0"] | permissive |
#!/bin/bash
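# Usage sketch (assumed calling convention):
#   ./buildNodes.sh --peer=2 --orderer=1
# creates peer-0, peer-1 and orderer-0 via buildPeer.sh / buildOrderer.sh.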
for i in "$@"
do
case $i in
-p=*|--peer=*)
PEER="${i#*=}"
shift
;;
-o=*|--orderer=*)
ORDERER="${i#*=}"
shift
;;
--default)
DEFAULT=YES
shift
;;
*)
;;
esac
done
echo "PEER = ${PEER}"
echo "ORDERER = ${ORDERER}"
if [ -n "$PEER" ]; then
for ((i=0;i<$PEER;i++)); do
echo
echo "Creating peer-$i"
buildPeer.sh $i
done
fi
if [ -n "$ORDERER" ]; then
for (( i=0; i < $ORDERER; i++ )); do
echo
echo "Creating orderer-$i"
buildOrderer.sh $i
done
fi
if [[ -n $1 ]]; then
echo "Last line of file specified as non-opt/last argument: $1"
fi
echo
| true |
363f636bb78e15feac4193b6915b16c4830d7890 | Shell | rlrq/MINORg_sh | /find_gRNA.sh | UTF-8 | 37,698 | 3 | 3 | [] | no_license |
#!/bin/bash
## ?? TODO: properly enable --query file to be used as background automatically without overwriting --background ## and --nonreference
## TODO: ensure that using --background doesn't override -a when --target is used (i.e. allow bg check in both user-provided background file and VdW's renseq if -b and -a used along with --target)
## TODO: update manual to reflect new background screening behaviour
ORIGINAL_DIR=$(pwd)
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
TOOLS_DIR="${SCRIPT_DIR}/tools"
## if no arguments, show manual
if [[ $# -eq 0 ]]; then
man -l ${SCRIPT_DIR}/MANUAL_find_gRNA.2 ## TODO lol
exit 1
fi
## minimum set
if [[ ${1} == 'minimumset' ]]; then
${TOOLS_DIR}/minimumset.sh ${@:2}
exit 1
fi
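## Example invocation (hypothetical gene ID, accession and output directory):
##   ./find_gRNA.sh -g AT1G72840 -a 6909 -d /path/to/out --domain NB-ARC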
## because aliases don't really work with non-interactive shell :(
get_seq=/mnt/chaelab/rachelle/scripts/get_seqs_generic/get_seqs.sh
aln2gff=/mnt/chaelab/rachelle/scripts/aln_to_gff/aln2gff.sh
## some built-in supported PSSMs
declare -A DOMAINS
# DOMAINS["TIR"]='366714';DOMAINS["RX-CC_like"]='392282';DOMAINS["CC"]='392282';DOMAINS["RPW8"]='384063';DOMAINS["NB-ARC"]='391514';DOMAINS["NBS"]='391514';DOMAINS["Rx_N"]='375519'
DOMAINS["TIR"]='366714';DOMAINS["RX-CC_like"]='392282';DOMAINS["CC"]='392282';DOMAINS["RPW8"]='384063';DOMAINS["NB-ARC"]='366375';DOMAINS["NBS"]='366375';DOMAINS["Rx_N"]='375519'
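## e.g. ${DOMAINS["NB-ARC"]} expands to CDD PSSM-Id 366375 (per the mapping above)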
HEADER_CDD="qseqid,sseqid,pident,length,qstart,qend"
HEADER_AL70="qseqid,sseqid,pident,length,sstart,send"
# DB_CDD_v3_17='/mnt/chaelab/shared/blastdb/RPSdb/Cdd'
DB_CDD_v3_18='/mnt/chaelab/shared/blastdb/Cdd.v3.18/Cdd'
DB_CDD=${DB_CDD_v3_18}
DB_AL70='/mnt/chaelab/shared/blastdb/anna_lena70.contigs/anna_lena70.contigs'
RPS_DB_DEFAULT=${DB_CDD}
TARGET_DB_DEFAULT=${DB_AL70}
CLUSTER_LOOKUP_DEFAULT='/mnt/chaelab/rachelle/data/NLR/cluster_aliases.txt'
CLUSTER_DIR_DEFAULT='/mnt/chaelab/rachelle/data/NLR/clusters/cluster_combined'
# DIR_DEFAULT="/mnt/chaelab/$(whoami)/find_gRNA"
DIR_DEFAULT="$(pwd)"
DOMAIN_DEFAULT="gene"
QUERY_DEFAULT='/mnt/chaelab/shared/anna_lena/anna_lena70.contigs.fasta'
BACKGROUND_DEFAULT="${QUERY_DEFAULT}"
REFERENCE_DEFAULT='/mnt/chaelab/shared/genomes/TAIR10/fasta/a_thaliana_all.fasta'
BED_DEFAULT='/mnt/chaelab/shared/genomes/TAIR10/features/TAIR10_GFF3_genes.bed'
SC_ALGO_DEFAULT='LAR'
PREFIX_DEFAULT='minorg'
PAM_DEFAULT='GG'
MISMATCH_DEFAULT=0
GAP_DEFAULT=0
MINID_DEFAULT=95
MINLEN_DEFAULT=0
MERGE_WITHIN_DEFAULT=100
LENGTH_DEFAULT=20
SEP_DEFAULT='.'
SETS_DEFAULT=1
MAX_CDS_INSERTION_DEFAULT=15
params="$@"
while (( "$#" )); do
case "$1" in
### -g|--gene and -c|--cluster can be either gene or cluster names (comma-separated), or paths to file containing newline (\n)-separated gene or cluster names
-g|--gene) GENE="${2}";; ## mutually exclusive with -c|--cluster and -f|--fasta
-c|--cluster) CLUSTER="${2}";; ## mutually exclusive with -g|--gene and -f|--fasta
-t|--target) TARGET="${2}";; ## mutually exclusive with -g|--gene -c|--cluster
-f|--fasta) FASTA="${2}";; ## -q|--query preferred
-q|--query) QUERY="${2}";;
-a|--acc) ACC="${2}";;
-i|--input) ACCS_F="${2}";;
-p|--pam) PAM="${2}";;
-l|--length) LENGTH="${2}";;
-d|--dir) DIR="${2}";;
# -n|--nonreference) NONREFERENCE="${2}";; ## -q|--query preferred
-r|--reference) REFERENCE="${2}";;
-b|--background) BACKGROUND="${2}";;
-e|--exclude) EXCLUDE="${2}";;
-s|--sets) SETS="${2}";;
--gff) GFF="${2}";;
--bed) BED="${2}";;
--prefix) PREFIX="${2}";;
--domain) DOMAIN="${2}";;
--db) RPS_DB="${2}";;
--mismatch) MISMATCH="${2}";;
--gap) GAP="${2}";;
--minid) MINID="${2}";;
--minlen) MINLEN="${2}";;
--merge-within) MERGE_WITHIN="${2}";;
--sc-algorithm) SC_ALGO="${2}";;
--cluster-lookup) CLUSTER_LOOKUP="${2}";;
--cluster-dir) CLUSTER_DIR="${2}";;
--target-db) TARGET_DB="${2}";;
--check-id-before-merge) CHECK_ID_PREMERGE='True';;
--max-cds-insertion) MAX_CDS_INSERTION="${2}";;
# --relax) RELAX='True';;
# --relax-cds-within) RELAX_CDS_WITHIN="${2}";;
--auto) AUTO='True';;
--check-reciprocal|--check-recip) CHECK_RECIP='True';;
--relax-reciprocal|--relax-recip) RELAX_RECIP='True'; CHECK_RECIP='True';;
--screen-ref) SCREEN_REF='True';;
--unmask-ref) MASK_REF='False'; UNMASK_REF='True';;
--skip-bg-check) CHECK_BG='False'; SKIP_BG_CHECK='True';; ## overrides --screen-ref
--pamless-bg-check) PAMLESS_BG_CHECK='True';; ## excludes off-target gRNA hits even if no PAM nearby
--merge-redundant) MERGE_REDUNDANT='True';;
# --by-gene) BY_GENE='True';; ## TODO: change this to --merge-redundant, where raising this flag merges identical protein sequences, and should allow domain search time to be shorted significantly if there are multiple redundancies. Bypass --by-gene because this seems to combine any overlapping CDS ranges, which could lead to problems if some CDS are in a different frame from an overlapping one (anyway, --by-gene currently breaks cuz of conversion from domain aa range to domain nt range due to different fasta sequence naming convention of --by-gene sequences
-h|--help) man -l ${SCRIPT_DIR}/MANUAL_find_gRNA.2; exit 0;;
--readme) cat ${SCRIPT_DIR}/README; exit 0;;
--aliases) cat /mnt/chaelab/rachelle/data/NLR/cluster_aliases.txt | \
sed 's/.txt//' | tr ' ' '\t'; exit 0;;
--accessions) cat /mnt/chaelab/shared/anna_lena/accession_map.txt; exit 0;;
--members) CHECK_MEMBERS="${2}";;
--attr-mod) ATTR_MOD="${2}";; ## see get_seqs_generic.sh for explanation
# --extend-gff) GFF_EXT="${2}";; ## TODO: implement this and --extend-bed
# --extend-bed) BED_EXT="${2}";;
--extend-cds) CDS_EXT="${2}";;
--extend-genome) GENOME_EXT="${2}";;
--sep) SEP="${2}";;
--report-bg) REPORT_BG='True';;
-v|--version) echo "MINORg v2.2"; exit 0;;
esac
shift
done
BED="${BED:-${BED_DEFAULT}}"
DIR="${DIR:-${DIR_DEFAULT}}"
CLUSTER_LOOKUP="${CLUSTER_LOOKUP:-${CLUSTER_LOOKUP_DEFAULT}}"
CLUSTER_DIR="${CLUSTER_DIR:-${CLUSTER_DIR_DEFAULT}}"
QUERY="${QUERY:-${FASTA}}"
QUERY="${QUERY:-${QUERY_DEFAULT}}"
# NONREFERENCE="${FASTA:-${QUERY}}"
# NONREFERENCE="${NONREFERENCE:-${NONREFERENCE_DEFAULT}}"
# BACKGROUND="${BACKGROUND:-${BACKGROUND_DEFAULT}}"
REFERENCE="${REFERENCE:-${REFERENCE_DEFAULT}}"
DOMAIN="${DOMAIN:-${DOMAIN_DEFAULT}}"
PREFIX="${PREFIX:-${PREFIX_DEFAULT}}"
SC_ALGO="${SC_ALGO:-${SC_ALGO_DEFAULT}}"
RPS_DB="${RPS_DB:-${RPS_DB_DEFAULT}}"
TARGET_DB="${TARGET_DB:-${TARGET_DB_DEFAULT}}"
PAM="${PAM:-${PAM_DEFAULT}}"
MISMATCH="${MISMATCH:-${MISMATCH_DEFAULT}}"
GAP="${GAP:-${GAP_DEFAULT}}"
MINID="${MINID:-${MINID_DEFAULT}}"
MINLEN="${MINLEN:-${MINLEN_DEFAULT}}"
MERGE_WITHIN="${MERGE_WITHIN:-${MERGE_WITHIN_DEFAULT}}"
LENGTH="${LENGTH:-${LENGTH_DEFAULT}}"
CHECK_ID_PREMERGE="${CHECK_ID_PREMERGE:-$(if [[ ${DOMAIN} == 'gene' ]]; then echo 'True'; else echo 'False'; fi)}"
CHECK_BG="${CHECK_BG:-True}"
SKIP_BG_CHECK="${SKIP_BG_CHECK:-False}"
PAMLESS_BG_CHECK="${PAMLESS_BG_CHECK:-False}"
AUTO="${AUTO:-False}"
RELAX="${RELAX:-False}"
RELAX_CDS_WITHIN="${RELAX_CDS_WITHIN:-None}"
BY_GENE="${BY_GENE:-False}"
SCREEN_REF="${SCREEN_REF:-False}"
MERGE_REDUNDANT="${MERGE_REDUNDANT:-False}"
MASK_REF="${MASK_REF:-${SCREEN_REF}}"
UNMASK_REF="${UNMASK_REF:-False}"
CHECK_RECIP="${CHECK_RECIP:-False}" ## check if each candidate target has a better bitscore to non-target genes, and if so remove it from the list of candidate targets
SEP="${SEP:-${SEP_DEFAULT}}"
SETS="${SETS:-${SETS_DEFAULT}}"
RELAX_RECIP="${RELAX_RECIP:-False}"
REPORT_BG="${REPORT_BG:-False}"
MAX_CDS_INSERTION="${MAX_CDS_INSERTION:-${MAX_CDS_INSERTION_DEFAULT}}"
## only instantiate BACKGROUND if not ( TARGET provided & ACCS not provided ) and --skip-bg-check not raised
if [[ "${CHECK_BG}" == "True" ]] && ! ( ! [ -z "${TARGET}" ] && [ -z "${ACC}" ] && [ -z "${ACCS_F}" ] ); then
## if BACKGROUND not provided, default to QUERY
BACKGROUND="${BACKGROUND:-${QUERY}}"
fi
## if checking members
if ! [ -z "${CHECK_MEMBERS}" ]; then
readarray -d ',' -t clusters_a <<< "$(tr '\n' ',' < ${CLUSTER_LOOKUP} | sed 's/,\+$//g')"
## get cluster gene members (use a lookup file)
for ref_cluster in "${clusters_a[@]}"; do
ref_cluster_a=( ${ref_cluster} )
if [[ ";${ref_cluster_a[2]};" =~ ";${CHECK_MEMBERS};" ]]; then
cat ${CLUSTER_DIR}/${ref_cluster_a[0]}
exit 0
fi
done
## if not found:
echo "${CHECK_MEMBERS} is not a recognised cluster name."
exit 1
## throw error if directory not provided
elif [ -z "${DIR}" ]; then
echo "Directory required. Please provide directory using '-d <path to directory>'"
exit 1
## throw error if GENE or CLUSTER or QUERY or TARGET are not provided
elif [ -z "${GENE}" ] && [ -z "${CLUSTER}" ] && [ -z "${TARGET}" ] && \
[[ "${QUERY}" == "${QUERY_DEFAULT}" ]]; then
echo "Gene ID(s) or cluster names or fasta file required. Please provide gene ID(s) using '-g <gene ID(s)>', cluster name(s) using '-c <cluster name(s)>', a fasta file to query using '-q <path to file>', or a fasta file of target sequences using '-t <path to file>'."
exit 1
## else if any combination of 2 or more of GENE or CLUSTER or TARGET (note that QUERY is compatible with CLUSTER or GENE but not with TARGET) are provided for some reason
## (commented-out alternative conditions kept for reference)
# (! [ -z "${GENE}" ] && (! [ -z "${TARGET}" ])) || \
# (! [ -z "${CLUSTER}" ] && (! [ -z "${TARGET}" ])) || \
elif (! [ -z "${GENE}" ] && ! [ -z "${CLUSTER}" ]) || \
(! [ -z "${TARGET}" ] && (! [[ "${QUERY}" == "${QUERY_DEFAULT}" ]])); then
# echo "Please only use either '-g <gene ID(s)>', '-c <cluster name(s)>', '-t <path to file>', and not 2 or more at the same time. These parameters are mutually exclusive. '-f <path to file>' is also mutually exclusive with '-t <path to file>'."
echo "'-g <gene ID(s)>' and '-c <cluster name(s)>' are mutually exclusive. '-t <path to file>' and '-q <path to file>' are also mutually exclusive."
exit 1
## else if both ACCS_F, ACC are provided for some reason
elif (! [ -z "${ACCS_F}" ] && ! [ -z "${ACC}" ]); then
echo "Please only use either '-a <accession number>' or '-i <path to file>', and not both at the same time. These parameters are mutually exclusive."
exit 1
# ## else if either gene or cluster is provided w/ target, but --skip-bg-check raised or --screen-ref not raised
# elif ( (! [ -z "${GENE}" ] || ! [ -z "${CLUSTER}" ] ) &&
# (! [ -z "${TARGET}" ] ) &&
# ( [[ "${SCREEN_REF}" == "False" ]] || [[ "${CHECK_BG}" == "False" ]] ) ); then
# echo "'-g <gene ID(s)>', '-c <cluster name(s)>' should not be used with '-t <path to file>' unless --screen-ref is raised (and --skip-bg-check is NOT raised)."
# exit 1
## else if either ACCS_F or ACC is provided w/ target, but --skip-bg-check raised
elif ( (! [ -z "${ACCS_F}" ] || ! [ -z "${ACC}" ] ) &&
(! [ -z "${TARGET}" ] ) && [[ "${CHECK_BG}" == "False" ]] ); then
echo "'-a <accession number(s)>', '-i <path to file>' should not be used with '-t <path to file>' unless --skip-bg-check is NOT raised."
exit 1
## else if either gene or cluster is provided, but accessions are missing
elif ( (! [ -z "${GENE}" ] || (! [ -z "${CLUSTER}" ])) && [ -z "${ACCS_F}" ] &&
[ -z "${ACC}" ] && [[ "${QUERY}" == "${QUERY_DEFAULT}" ]] ); then
echo "Fasta file or accession name(s) or number(s) required. Please provide a fasta file in which to query for target(s) using '-q <path to file>', a file of accession number(s) using '-i <path to file>', or a comma-separated list of accessions numbers using '-a <accession number>'"
exit 1
# elif ( (! [ -z "${CDS_EXT}" ] && [ -z "${GENOME_EXT}" ]) ||
# ( [ -z "${CDS_EXT}"] && (! [ -z "${GENOME_EXT}" ]))); then
# echo "'--extend-cds <path to file>' must be used with '--extend-genome <path to file>'."
# exit 1
elif ! [[ "${QUERY}" == "${QUERY_DEFAULT}" ]] && ! [ -f "${QUERY}" ]; then
echo "Fasta file ${QUERY} does not exist."
exit 1
elif ! [ -z "${TARGET}" ] && ! [ -f "${TARGET}" ]; then
echo "Target file ${TARGET} does not exist."
exit 1
elif ! [ -z "${CDS_EXT}" ] && ! [ -f "${CDS_EXT}" ]; then
echo "Fasta file ${CDS_EXT} (--extend-cds) does not exist."
exit 1
elif ! [ -z "${GENOME_EXT}" ] && ! [ -f "${GENOME_EXT}" ]; then
echo "Fasta file ${GENOME_EXT} (--extend-genome) does not exist."
exit 1
fi
## if --screen-ref raised and --skip-bg-check not raised, let user know that -g or -c will be used for bg check
if ( (! [ -z "${GENE}" ] || ! [ -z "${CLUSTER}" ] ) &&
(! [ -z "${TARGET}" ] ) && [[ "${SCREEN_REF}" == "True" ]] && [[ "${CHECK_BG}" == "True" ]] ); then
echo "--screen-ref raised: ${GENE} will be masked in the reference genome during background screening in reference."
fi
## convert values of variables storing file and directory paths to absolute paths
path_vars_to_convert=( "QUERY" "TARGET" "ACCS_F" "DIR" "REFERENCE" "BACKGROUND" "EXCLUDE" "BED" "RPS_DB" "CLUSTER_LOOKUP" "CLUSTER_DIR" "CDS_EXT" "GENOME_EXT" )
for varname in ${path_vars_to_convert[@]}; do
if ! [ -z "${!varname}" ] && ([ -f "${!varname}" ] || [ -d "${!varname}" ]); then
eval "${varname}=$(printf '%q' "$(realpath "${!varname}")")" ## %q guards paths with spaces
fi
done
## if acc == 'ref', set background to reference (for background checking reasons)
if [[ "${ACC}" == 'ref' ]]; then
BACKGROUND="${REFERENCE}"
fi
## move to output dir, create if doesn't exist
mkdir -p ${DIR}
cd ${DIR}
DIR=$(pwd)
echo "Output files will be generated in $(pwd)"
tmp_f=${DIR}/tmp.txt
tmp_f2=${DIR}/tmp2.txt
## write log
to_log=''
short_long=( "g,gene,GENE" "c,cluster,CLUSTER" "a,acc,ACC" "i,input,ACCS_F" "d,dir,DIR" "q,query,QUERY" "b,background,BACKGROUND" "r,reference,REFERENCE" "e,exclude,EXCLUDE" "s,sets,SETS" "p,pam,PAM" "l,length,LENGTH" )
long_only=( "bed,BED" "prefix,PREFIX" "domain,DOMAIN" "db,RPS_DB" "target-db,TARGET_DB" "mismatch,MISMATCH" "gap,GAP" "minlen,MINLEN" "minid,MINID" "merge-within,MERGE_WITHIN" "sc-algorithm,SC_ALGO" "cluster-lookup,CLUSTER_LOOKUP" "cluster-dir,CLUSTER_DIR" "check-id-before-merge,CHECK_ID_PREMERGE" "auto,AUTO" "pam,PAM" "length,LENGTH" "max-cds-insertion,MAX_CDS_INSERTION" "skip-bg-check,SKIP_BG_CHECK" "screen-ref,SCREEN_REF" "pamless-bg-check,PAMLESS_BG_CHECK" "unmask-ref,UNMASK_REF" "check-recip,CHECK_RECIP" "relax-recip,RELAX_RECIP" "attr-mod,ATTR_MOD" "extend-cds,CDS_EXT" "extend-genome,GENOME_EXT" )
for variable in ${short_long[@]}; do
readarray -d ',' -t v <<< "${variable}"
varname=${v[2]}
eval "var=\$${varname}"
if ! [ -z "${var}" ]; then
to_log+="-${v[0]}|--${v[1]}:\t${var}"
## ${varname::-1} drops the trailing newline left by readarray -d ','
varname_default=${varname::-1}_DEFAULT
eval "var_default=\$${varname_default}"
if ! [ -z "${var_default}" ] && [[ "${var}" == "${var_default}" ]]; then
to_log+=" (default)\n"
else
to_log+="\n"
fi
fi
done
for variable in ${long_only[@]}; do
readarray -d ',' -t v <<< "${variable}"
varname=${v[1]}
eval "var=\$${varname}"
if ! [ -z "${var}" ]; then
to_log+="--${v[0]}:\t${var}"
## as above, strip the trailing newline from readarray -d ',' before appending _DEFAULT
varname_default=${varname::-1}_DEFAULT
eval "var_default=\$${varname_default}"
if ! [ -z "${var_default}" ] && [[ "${var}" == "${var_default}" ]]; then
to_log+=" (default)\n"
else
to_log+="\n"
fi
fi
done
printf -- '%s\n\n%b' "${params}" "${to_log}" > ${DIR}/${PREFIX}_findgRNA.log
## generate extended GFF/BED & reference genome
if ! [ -z "${GENOME_EXT}" ] && ! [ -z "${CDS_EXT}" ]; then
dir_ext=${DIR}/extension
aln_bed=${dir_ext}/aln2gff_tmp.bed
new_bed=${dir_ext}/ext_$(date +%s).bed
new_ref=${dir_ext}/ext_reference.fasta
${aln2gff} -c ${CDS_EXT} -g ${GENOME_EXT} -d ${dir_ext} -o ${aln_bed} -s ${SEP} \
--outfmt bed --attr-mod "${ATTR_MOD}"
cat ${REFERENCE} <(echo) ${GENOME_EXT} > ${new_ref}
cat ${BED} <(echo) ${aln_bed} > ${new_bed}
REFERENCE=${new_ref}
BED=${new_bed}
fi
## generate common get_seq params
if [[ "${BY_GENE}" == "True" ]]; then
get_seq_common="${get_seq} --acc ref --reference ${REFERENCE} --attr-mod '${ATTR_MOD:-{}}' --no-bed --by-gene"
else
get_seq_common="${get_seq} --acc ref --reference ${REFERENCE} --attr-mod '${ATTR_MOD:-{}}' --no-bed"
fi
## conduct reciprocal blast
recip_blast () {
local output_dir=${1}
local output_pref=${2}
local genes=${3}
local accs_fa=${4}
## args 5 and 6 were unused (they were overwritten immediately below), so the
## paths are simply declared local and derived from output_dir/output_pref.
local recip_blast6
local recip_bed
echo "Filtering out candidate targets with higher similarity to non-target genes"
recip_blast6=${output_dir}/${output_pref}_targets_recip.tsv
recip_bed=${output_dir}/${output_pref}_targets_recip.bed
blastn -query ${accs_fa} -subject ${REFERENCE} -out ${recip_blast6} \
-outfmt '6 sseqid sstart send qseqid qstart qend bitscore'
python3 -c "f = open('${recip_blast6}', 'r'); data = [x[:-1].split('\t') for x in f.readlines()]; f.close(); output = [x[:1] + (x[1:3] if int(x[1]) < int(x[2]) else x[2:0:-1]) + x[3:] for x in data]; f = open('${recip_blast6}', 'w+'); f.write('\n'.join(['\t'.join(x) for x in output])); f.close()"
bedtools intersect -wao -a ${recip_blast6} -b ${BED} > ${recip_bed}
python3 -c "import sys; sys.path.append('${SCRIPT_DIR}/scripts'); from recip_filter import *; remove_non_max_bitscore('${accs_fa}', '${recip_bed}', '${genes}', relax=(${RELAX_RECIP}))"
rm ${recip_blast6} ${recip_bed}
}
## get reference fa
get_reference_fa () {
local genes=${1}
local out_dir=${2}
local out_pref=${3}
local fout=${4}
local fout_cds=${5}
local fout_pref=${6}
## reduce BED file
local red_bed=${out_dir}/tmp_$(date +%s).bed
local tmp_f=${out_dir}/tmp_$(date +%s)_genenames.txt
echo ${genes} | tr ',' '\n' > ${tmp_f}
/mnt/chaelab/rachelle/src/extract_gff_features.py ${BED} ${tmp_f} ${red_bed} BED
rm ${tmp_f}
local BED=${red_bed}
if ! [ -z "${7}" ] && [[ "${7}" != "gene" ]]; then ## check if domain provided
## parse domain
if [[ ! " ${!DOMAINS[@]} " =~ " ${7} " ]]; then
echo "${7} is not a supported domain name."
if [[ ${7} =~ ^[0-9]+$ ]]; then
echo "Attempting to parse ${7} as CDD PSSM-Id."
domain=${7}
else
echo "Unexpected domain input. Please provide a CDD PSSM-Id (numeric) or one of the following supported domain names: $(echo ${!DOMAINS[@]} | sed 's/ /, /g')"
exit 1
fi
else
domain=${DOMAINS[${7}]}
fi
## some file names
local aa_fasta=${out_dir}/ref/${out_pref}_ref_protein.fasta
local domains_tsv=${out_dir}/ref/${out_pref}_${DOMAIN}.tsv
## get protein sequences
echo "Extracting reference domain range(s)"
# $get_seq --bed ${BED} --gene ${genes} --acc ref --feature CDS --dir ${out_dir} --out ${aa_fasta} --translate --no-bed --attr-mod ${ATTR_MOD:-'{}'} > /dev/null
${get_seq_common} --gene ${genes} --feature CDS --dir ${out_dir} --out ${aa_fasta} --bed ${BED} \
--translate > /dev/null
## identify identical protein sequences and collapse identical ones temporarily
python3 -c "import sys; sys.path.append('/mnt/chaelab/rachelle/src'); from fasta_manip import *; dat = fasta_to_dict('${aa_fasta}'); identicals = {k: set(seqid for seqid, seq in dat.items() if str(seq) == str(v)) for k, v in dat.items()}; identical_sets = set(map(lambda x: tuple(sorted(x)), identicals.values())); dict_to_fasta({seqids[0]: dat[seqids[0]] for seqids in identical_sets}, '${tmp_f}'); open('${tmp_f2}', 'w+').write('\n'.join(['\t'.join(seqids) for seqids in identical_sets]))"
## identify domain positions in protein
rpsblast+ -db ${RPS_DB} -query ${tmp_f} -out ${domains_tsv} \
-outfmt "6 $(echo ${HEADER_CDD} | tr ',' ' ')"
## generate file for input as 'DOMAIN_F' to getSeq
awk -v d="${domain}" '{if ($2 ~ d) print $0 "\t" d}' ${domains_tsv} > ${tmp_f} ## filter (overwrite tmp_f as it's no longer needed)
## expand filtered output of rpsblast+ to sequences w/ identical protein
python3 -c "import sys; sys.path.append('/mnt/chaelab/rachelle/src'); from data_manip import *; dat = [line.split('\t') for line in splitlines('${tmp_f}')]; repr_map = [line.split('\t') for line in splitlines('${tmp_f2}')]; output = [[seqid] + line[1:] for seqids in repr_map for seqid in seqids for line in dat if line[0] == seqids[0]]; open('${tmp_f}', 'w+').write('\n'.join(['\t'.join(x) for x in output]))"
echo -e "$(echo ${HEADER_CDD},domain | tr ',' '\t')\n$(cat ${tmp_f})" > ${domains_tsv} ## add header
rm ${tmp_f} ${tmp_f2}
## get complete domain sequence
echo "Extracting reference domain sequence(s)"
# $get_seq --bed ${BED} --gene ${genes} --acc ref --feature CDS --dir ${out_dir} --out ${fout} --domain-file ${domains_tsv} --complete --minlen ${MINLEN} --domain ${domain} --qname-dname "('qseqid', 'domain')" --qstart-qend "('qstart', 'qend')" --adjust-dir --no-bed --attr-mod ${ATTR_MOD:-'{}'} > /dev/null
${get_seq_common} --gene ${genes} --feature CDS --dir ${out_dir} --bed ${BED} \
--domain-file ${domains_tsv} --minlen ${MINLEN} --domain ${domain} \
--qname-dname "('qseqid', 'domain')" --qstart-qend "('qstart', 'qend')" \
--adjust-dir --out ${fout} --complete > /dev/null
## get CDS-only
# $get_seq --bed ${BED} --gene ${genes} --acc ref --feature CDS --dir ${out_dir} --out ${fout_cds} --domain-file ${domains_tsv} --minlen ${MINLEN} --domain ${domain} --qname-dname "('qseqid', 'domain')" --qstart-qend "('qstart', 'qend')" --adjust-dir --no-bed --attr-mod ${ATTR_MOD:-'{}'} > /dev/null
${get_seq_common} --gene ${genes} --feature CDS --dir ${out_dir} --bed ${BED} \
--domain-file ${domains_tsv} --minlen ${MINLEN} --domain ${domain} \
--qname-dname "('qseqid', 'domain')" --qstart-qend "('qstart', 'qend')" \
--adjust-dir --out ${fout_cds} > /dev/null
rm ${domains_tsv} ## remove temporary file(s)
else
## just get complete CDS + CDS
echo "Getting reference gene sequence(s)"
local fout_bed=${fout_pref}_complete.bed
# $get_seq --bed ${BED} --gene ${genes} --acc ref --feature CDS --dir ${out_dir} --out ${fout} --complete --bed-out ${fout_bed} --adjust-dir --attr-mod ${ATTR_MOD:-'{}'} > /dev/null
${get_seq_common} --gene ${genes} --feature CDS --dir ${out_dir} --bed ${BED} --adjust-dir \
--out ${fout} --bed-out ${fout_bed} --complete > /dev/null
local fout_bed_cds=${fout_pref}_CDS.bed
# $get_seq --bed ${BED} --gene ${genes} --acc ref --feature CDS --dir ${out_dir} --out ${fout_cds} --bed-out ${fout_bed_cds} --adjust-dir --attr-mod ${ATTR_MOD:-'{}'} > /dev/null
${get_seq_common} --gene ${genes} --feature CDS --dir ${out_dir} --bed ${BED} --adjust-dir \
--out ${fout_cds} --bed-out ${fout_bed_cds} > /dev/null
fi
rm ${red_bed}
}
## parse accession IDs into Python-format tuple
if ! [ -z "${ACCS_F}" ]; then ## if file provided
sed -i 's/\r//g' ${ACCS_F}
accs_tuple="('$(awk 'NF > 0' ${ACCS_F} | awk 'NR>1{print PREV} {PREV=$0} END{printf("%s",$0)}' | cat | tr '\n' ',' | sed 's/,/\x27,\x27/g')',)"
elif [[ "${ACC}" == 'ref' ]]; then
accs_tuple="()"
elif [[ "${ACC}" == 'all' ]]; then
accs_tuple="('$(cat /mnt/chaelab/shared/anna_lena/accession_map.txt | sed 's/,.\+$//' | sed 's/[^0-9]//' | tr '\n' ',' | sed 's/,$//' | sed 's/,/\x27,\x27/g')')"
elif ! [ -z "${ACC}" ]; then
accs_tuple="('$(echo ${ACC} | sed 's/,/\x27,\x27/g')',)"
else
accs_tuple="()"
fi
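## e.g. (illustrative) ACC="6909,9481" yields accs_tuple="('6909','9481',)";
## an input file via -i is read the same way, one accession per line.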
## if --screen-ref raised and --skip-bg-check not raised, let user know that -a will be used for bg check
if ( ( ! [ -z "${ACCS_F}" ] || ! [ -z "${ACC}" ] ) &&
(! [ -z "${TARGET}" ] ) && [[ "${QUERY}" == "${QUERY_DEFAULT}" ]] &&
[[ "${CHECK_BG}" == "True" ]] ); then
echo "--skip-bg-check not raised: ${accs_tuple} sequences in Van de Weyer et al.'s (2019) RenSeq dataset will be screened for off-targets during background screening."
fi
## if --screen-ref raised and --skip-bg-check not raised, let user know that -g/-c will be used for bg check
if ( ( ! [ -z "${GENE}" ] || ! [ -z "${CLUSTER}" ] ) &&
(! [ -z "${TARGET}" ] ) && [[ "${QUERY}" == "${QUERY_DEFAULT}" ]] &&
[[ "${CHECK_BG}" == "True" ]] ); then
echo "--skip-bg-check not raised: ${CLUSTER}${GENE} in Van de Weyer et al.'s (2019) RenSeq dataset will be masked for off-targets during background screening."
fi
## associative array for <output prefix>:<fasta file> combination
declare -a fasta_a
## if target file provided as query
if ! [ -z "${TARGET}" ] && ( ( [[ "${SCREEN_REF}" == 'False' ]] &&
[ -z "${CLUSTER}" ] && [ -z "${GENE}" ] ) ||
[[ "${CHECK_BG}" == 'False' ]] ); then
mkdir -p ${DIR}/${PREFIX}
fasta_a+=("${DIR}/${PREFIX} ${PREFIX} ${TARGET}")
## else if cluster or gene specified
elif ! [ -z "${CLUSTER}" ] || ! [ -z "${GENE}" ]; then
declare -A genes_a
if ! [ -z "${CLUSTER}" ]; then
## extract cluster members
if [ -f "${CLUSTER}" ]; then
readarray -d ',' -t clusters <<< "$(tr '\n' ',' < ${CLUSTER} | sed 's/,\+$//g')"
else
readarray -d , -t clusters <<< ${CLUSTER}
fi
## read cluster lookup table
readarray -d ',' -t clusters_a <<< "$(tr '\n' ',' < ${CLUSTER_LOOKUP} | sed 's/,\+$//g')"
for cluster in ${clusters[@]}; do
fout_pref=${PREFIX}_${cluster}
## get cluster gene members (use a lookup file)
for ref_cluster in "${clusters_a[@]}"; do
ref_cluster_a=( ${ref_cluster} )
if [[ ";${ref_cluster_a[2]};" =~ ";${cluster};" ]]; then
## add prefix (key) and members (value) to associative array
cluster_members_f=${CLUSTER_DIR}/${ref_cluster_a[0]}
genes_a["${fout_pref}"]="$(tr '\n' ',' < ${cluster_members_f} | sed 's/,\+$//g')"
continue 2
fi
done
## if not found:
echo "${cluster} is not a recognised cluster name. Please use a different alias or -f <path to fasta file> or -g <comma-separated gene(s)> instead."
done
elif ! [ -z "${GENE}" ]; then
## if gene names provided, get sequences of genes
if [ -f "${GENE}" ]; then
genes_a["${PREFIX}"]="$(tr '\n' ',' < ${GENE} | sed 's/,\+$//g')"
else
genes_a["${PREFIX}"]=${GENE}
fi
fi
## get fasta_a
for group in ${!genes_a[@]}; do
output_dir=${DIR}/${group}
output_pref=${group}_${DOMAIN}
ref_fa=${output_dir}/ref/${group}_ref_${DOMAIN}_complete.fasta
cds_fa=${output_dir}/ref/${group}_ref_${DOMAIN}_CDS.fasta
fout_pref=${output_dir}/ref/${group}_ref_${DOMAIN}
accs_blast6=${output_dir}/${output_pref}_targets.tsv
accs_fa=${output_dir}/${output_pref}_targets.fasta
align_fa=${output_dir}/${group}_${DOMAIN}_mafft.fa
almask_fa=${output_dir}/${group}_${DOMAIN}_toMask.fa
genes=${genes_a["${group}"]} ## comma-separated!
## make directories
mkdir -p ${output_dir}/ref
## get reference sequences
echo "Retrieving reference Col-0 sequence(s)"
## get ref domain seqs
get_reference_fa "${genes}" ${output_dir} ${group} ${ref_fa} ${cds_fa} ${fout_pref} ${DOMAIN}
## duplicate reference complete files into accs_fa and rename sequences if accs == ref
## if using '-a ref', ${ref_fa} is the target (${accs_fa})
if [[ "${ACC}" == 'ref' ]]; then
cp ${ref_fa} ${accs_fa}
elif [[ "${accs_tuple}" != '()' ]]; then
## blast to AL70 database
echo "Searching for homologues in ${accs_tuple}"
blastn -query ${ref_fa} -db ${DB_AL70} -outfmt "6 $(echo ${HEADER_AL70} | tr ',' ' ')" \
-out ${accs_blast6}
if [ $(wc -l < ${accs_blast6}) -gt 1 ]; then
## add header
echo -e "$(echo ${HEADER_AL70} | tr ',' '\t')\n$(cat ${accs_blast6})" > ${accs_blast6}
## extract sequences from required accessions
echo "Extracting sequences for homologues in ${accs_tuple}"
python3 -c "import sys; sys.path.append('${SCRIPT_DIR}/scripts'); from extract_domain_from_AL70 import *; main(blast6_fname='${accs_blast6}', accIDs=${accs_tuple}, fout='${accs_fa}', fasta='${QUERY_DEFAULT}', min_id=${MINID}, min_len=${MINLEN}, merge_within_range=${MERGE_WITHIN}, check_id_before_merge=(${CHECK_ID_PREMERGE}))"
else
## remove the useless file
## - so we can use [ -z {accs_blast6} ] later to check if this step succeeded
rm ${accs_blast6}
## if this step is used for target discovery (i.e. not using -t/-q/-f),
## # exit because we can't continue w/ no hits.
if ( [ -z "${TARGET}" ] && [[ "${QUERY}" == "${QUERY_DEFAULT}" ]] ); then
echo "No blast hits, exiting programme."
exit 2
fi
fi
## check if best scoring hits are the same as target genes
if ( [ -f ${accs_blast6} ] &&
[ ${CHECK_RECIP} == 'True' ] && ! [ -z ${REFERENCE} ] && ! [ -z ${BED} ] ) ; then
recip_blast ${output_dir} ${output_pref} ${genes} ${accs_fa}
fi
fi
## if user isn't using -t/-f/-q, (i.e. user is searching AL70 database or ref),
## # align these AL70 homologues/reference genes
if ( [ -z "${TARGET}" ] && [[ "${QUERY}" == "${QUERY_DEFAULT}" ]] ); then
if [ $(grep '>' ${accs_fa} | wc -l) -lt 1 ]; then
echo "No targets found, exiting programme."
exit 2
fi
## align CDS to complete CDS
echo "Aligning reference CDS and reference complete CDS"
mafft --quiet ${cds_fa} > ${tmp_f}
mv ${tmp_f} ${align_fa}
mafft --quiet --add ${ref_fa} ${align_fa} > ${tmp_f}
echo "Aligning target sequences to reference sequences"
mafft --quiet --adjustdirectionaccurately --add ${accs_fa} ${tmp_f} > ${align_fa}
rm ${tmp_f}
fasta_a+=("${output_dir} ${output_pref} ${accs_fa} ${align_fa} ${ref_fa} ${cds_fa}")
# for_fasta_a="${output_dir} ${output_pref} ${accs_fa} ${align_fa} ${ref_fa} ${cds_fa}"
else
## if using QUERY or TARGET, then we're only in here to get al_mask
mv ${accs_fa} ${almask_fa}
fi
if [ -f "${accs_blast6}" ]; then
rm ${accs_blast6} ## remove file
fi
## if using QUERY, blast to QUERY
if [[ "${QUERY}" != "${QUERY_DEFAULT}" ]]; then
echo "Searching for homologues in ${QUERY}"
blastn -query ${ref_fa} -subject ${QUERY} -outfmt "6 $(echo ${HEADER_AL70} | tr ',' ' ')" \
-out ${accs_blast6}
if [ $(wc -l < ${accs_blast6}) -le 1 ]; then
echo "No blast hits, exiting programme."
exit 2
fi
## add header
echo -e "$(echo ${HEADER_AL70} | tr ',' '\t')\n$(cat ${accs_blast6})" > ${accs_blast6}
## extract sequences
echo "Extracting sequences for homologues in ${QUERY}"
python3 -c "import sys; sys.path.append('${SCRIPT_DIR}/scripts'); from extract_domain_from_AL70 import *; main(blast6_fname='${accs_blast6}', accIDs=('.',), fout='${accs_fa}', fasta='${QUERY}', min_id=${MINID}, min_len=${MINLEN}, merge_within_range=${MERGE_WITHIN}, check_id_before_merge=(${CHECK_ID_PREMERGE}), pattern=lambda accID:accID)"
## check if best scoring hits are the same as target genes
if ( [ ${CHECK_RECIP} == 'True' ] && ! [ -z ${REFERENCE} ] && ! [ -z ${BED} ] ) ; then
recip_blast ${output_dir} ${output_pref} ${genes} ${accs_fa}
fi
if [ $(grep '>' ${accs_fa} | wc -l) -lt 1 ]; then
echo "No targets found, exiting programme."
exit 2
fi
## align CDS to complete CDS
echo "Aligning reference CDS and reference complete CDS"
mafft --quiet ${cds_fa} > ${tmp_f}
mv ${tmp_f} ${align_fa}
mafft --quiet --add ${ref_fa} ${align_fa} > ${tmp_f}
echo "Aligning target sequences to reference sequences"
mafft --quiet --adjustdirectionaccurately --add ${accs_fa} ${tmp_f} > ${align_fa}
rm ${tmp_f}
fasta_a+=("${output_dir} ${output_pref} ${accs_fa} ${align_fa} ${ref_fa} ${cds_fa} ${almask_fa}")
if [ -f "${accs_blast6}" ]; then
rm ${accs_blast6} ## remove file
fi
## if using TARGET and SCREEN_REF, generate relevant entry and move on (don't do homologue discovery)
elif (! [ -z "${TARGET}" ] ); then
if [ -f "${align_fa}" ]; then
rm ${align_fa} ## reset align_fa to non-existent
fi
if [[ "${SCREEN_REF}" == "True" ]]; then
## we're only here because we need the reference sequences
fasta_a+=("${DIR}/${PREFIX} ${PREFIX} ${TARGET} ${align_fa} ${ref_fa} ${cds_fa} ${almask_fa}")
else
## use non-existent file align_fa as placeholder
fasta_a+=("${DIR}/${PREFIX} ${PREFIX} ${TARGET} ${align_fa} ${align_fa} ${align_fa} ${almask_fa}")
fi
fi
done
fi
## work on fasta_a
for entry in "${fasta_a[@]}"; do
echo "Finding gRNA"
entry_a=( $entry ) ## output_dir, prefix, fasta, alignment (also fasta)
# echo "import sys; sys.path.append('${SCRIPT_DIR}/scripts'); from find_common_gRNA import *; find_cluster_gRNA_in_acc('${entry_a[2]}', ${accs_tuple}, '${entry_a[0]}', background_usr_fname='${BACKGROUND}', manual_check=(not (${AUTO})), fout_pref='${entry_a[1]}', sc_algorithm='${SC_ALGO}', accs_background_fname='${BACKGROUND}', max_mismatch=${MISMATCH}, max_gap=${GAP}, pam='${PAM}', gRNA_len=int(${LENGTH}), alignment_fname='${entry_a[3]}', exclude='${EXCLUDE}', relax=(${RELAX}), reference_fasta = '${REFERENCE}', mask_reference='${entry_a[4]}', cds_fasta='${entry_a[5]}', complete_fasta='${entry_a[4]}', check_bg=${CHECK_BG})"
# python3 -c "import sys; sys.path.append('${SCRIPT_DIR}/scripts'); from find_common_gRNA import *; find_cluster_gRNA_in_acc('${entry_a[2]}', ${accs_tuple}, '${entry_a[0]}', background_usr_fname='${BACKGROUND}', manual_check=(not (${AUTO})), fout_pref='${entry_a[1]}', sc_algorithm='${SC_ALGO}', accs_background_fname='${BACKGROUND}', max_mismatch=${MISMATCH}, max_gap=${GAP}, pam='${PAM}', gRNA_len=int(${LENGTH}), alignment_fname='${entry_a[3]}', exclude='${EXCLUDE}', relax=(${RELAX}), reference_fasta = '${REFERENCE}', mask_reference='${entry_a[4]}', cds_fasta='${entry_a[5]}', complete_fasta='${entry_a[4]}', check_bg=${CHECK_BG})"
## changed mask_reference to mask_reference=False
# python3 -c "import sys; sys.path.append('${SCRIPT_DIR}/scripts'); from find_common_gRNA import *; find_cluster_gRNA_in_acc('${entry_a[2]}', ${accs_tuple}, '${entry_a[0]}', background_usr_fname='${BACKGROUND}', manual_check=(not (${AUTO})), fout_pref='${entry_a[1]}', sc_algorithm='${SC_ALGO}', accs_background_fname='${BACKGROUND_DEFAULT}', max_mismatch=${MISMATCH}, max_gap=${GAP}, pam='${PAM}', gRNA_len=int(${LENGTH}), alignment_fname='${entry_a[3]}', exclude='${EXCLUDE}', relax=(${RELAX}), relax_cds_within=(${RELAX_CDS_WITHIN}), reference_fasta = '${REFERENCE}', mask_reference=${MASK_REF}, screen_reference=${SCREEN_REF}, cds_fasta='${entry_a[5]}', complete_fasta='${entry_a[4]}', check_bg=${CHECK_BG}, num_sets=${SETS}, report_bg=${REPORT_BG}, nonref_mask_fname = '${entry_a[6]}')" ## TODO: implement -a -g bg check even w/ -q or -t
python3 -c "import sys; sys.path.append('${SCRIPT_DIR}/scripts'); from find_common_gRNA import *; find_cluster_gRNA_in_acc('${entry_a[2]}', ${accs_tuple}, '${entry_a[0]}', background_usr_fname='${BACKGROUND}', manual_check=(not (${AUTO})), fout_pref='${entry_a[1]}', sc_algorithm='${SC_ALGO}', accs_background_fname='${BACKGROUND_DEFAULT}', max_mismatch=${MISMATCH}, max_gap=${GAP}, pam='${PAM}', gRNA_len=int(${LENGTH}), alignment_fname='${entry_a[3]}', exclude='${EXCLUDE}', max_cds_insertion=${MAX_CDS_INSERTION}, reference_fasta = '${REFERENCE}', mask_reference=${MASK_REF}, screen_reference=${SCREEN_REF}, cds_fasta='${entry_a[5]}', complete_fasta='${entry_a[4]}', check_bg=${CHECK_BG}, pamless_bg_check=${PAMLESS_BG_CHECK}, num_sets=${SETS}, report_bg=${REPORT_BG}, nonref_mask_fname = '${entry_a[6]}')" ## TODO: implement -a -g bg check even w/ -q or -t
done
if ! [ -z "${dir_ext}" ]; then
rm -r ${dir_ext}
fi
exit 0
| true |
0072642a5ae991efa126dd99cb71e1a2ea38aff8 | Shell | Urs-Bruelhart/beer-garden | /resources/centos6/after_install.sh | UTF-8 | 3,423 | 3.515625 | 4 | ["MIT"] | permissive |
APP_NAME="beer-garden"
GROUP=$APP_NAME
USER=$APP_NAME
APP_HOME="/opt/${APP_NAME}"
PID_HOME="/var/run/${APP_NAME}"
CONFIG_HOME="$APP_HOME/conf"
LOG_HOME="$APP_HOME/log"
BIN_HOME="$APP_HOME/bin"
PLUGIN_LOG_HOME="$LOG_HOME/plugins"
PLUGIN_HOME="$APP_HOME/plugins"
BARTENDER_CONFIG="${CONFIG_HOME}/bartender-config"
BARTENDER_LOG_CONFIG="${CONFIG_HOME}/bartender-logging-config.json"
BARTENDER_LOG_FILE="$LOG_HOME/bartender.log"
BREW_VIEW_CONFIG="${CONFIG_HOME}/brew-view-config"
BREW_VIEW_LOG_CONFIG="${CONFIG_HOME}/brew-view-logging-config.json"
BREW_VIEW_LOG_FILE="$LOG_HOME/brew-view.log"
case "$1" in
1)
# This is an initial install
# Create the beer-garden group/user if they do not exist
/usr/bin/getent group $GROUP > /dev/null || /usr/sbin/groupadd -r $GROUP
/usr/bin/getent passwd $USER > /dev/null || /usr/sbin/useradd -r -d $APP_HOME -s /sbin/nologin -g $GROUP $USER
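# getent exits non-zero when the entry is missing, so the "||" branch creates
# the group/user; this keeps the scriptlet safe to re-run.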
# Create required directories (mkdir -p is a no-op for existing ones)
mkdir -p "$CONFIG_HOME" "$LOG_HOME" "$PLUGIN_LOG_HOME" "$PLUGIN_HOME" "$PID_HOME"
# Generate logging configs if they don't exist
if [ ! -f "$BARTENDER_LOG_CONFIG" ]; then
"$APP_HOME/bin/generate_bartender_log_config" \
--log-config-file "$BARTENDER_LOG_CONFIG" \
--log-file "$BARTENDER_LOG_FILE" \
--log-level "WARN"
fi
if [ ! -f "$BREW_VIEW_LOG_CONFIG" ]; then
"$APP_HOME/bin/generate_brew_view_log_config" \
--log-config-file "$BREW_VIEW_LOG_CONFIG" \
--log-file "$BREW_VIEW_LOG_FILE" \
--log-level "WARN"
fi
# Enforce .yaml extension for yaml config files
if [ -f "$BARTENDER_CONFIG.yml" ]; then
mv "$BARTENDER_CONFIG.yml" "$BARTENDER_CONFIG.yaml"
fi
if [ -f "$BREW_VIEW_CONFIG.yml" ]; then
mv "$BREW_VIEW_CONFIG.yml" "$BREW_VIEW_CONFIG.yaml"
fi
# Generate application configs if they don't exist
# Migrate them if they do, converting to yaml if necessary
if [ -f "$BARTENDER_CONFIG.yaml" ]; then
"$APP_HOME/bin/migrate_bartender_config" -c "$BARTENDER_CONFIG.yaml"
elif [ -f "$BARTENDER_CONFIG.json" ]; then
"$APP_HOME/bin/migrate_bartender_config" -c "$BARTENDER_CONFIG.json" -t "yaml"
else
"$APP_HOME/bin/generate_bartender_config" \
-c "$BARTENDER_CONFIG.yaml" -l "$BARTENDER_LOG_CONFIG" \
--plugin-local-directory "$PLUGIN_HOME" \
--plugin-local-log-directory "$PLUGIN_LOG_HOME"
fi
if [ -f "$BREW_VIEW_CONFIG.yaml" ]; then
"$APP_HOME/bin/migrate_brew_view_config" -c "$BREW_VIEW_CONFIG.yaml"
elif [ -f "$BREW_VIEW_CONFIG.json" ]; then
"$APP_HOME/bin/migrate_brew_view_config" -c "$BREW_VIEW_CONFIG.json" -t "yaml"
else
"$APP_HOME/bin/generate_brew_view_config" \
-c "$BREW_VIEW_CONFIG.yaml" -l "$BREW_VIEW_LOG_CONFIG"
fi
;;
2)
# This is an upgrade, nothing to do
;;
esac
chown -hR ${USER}:${GROUP} "$APP_HOME"
| true |
bbf5a25b18549b949e13f7cf42cab7093d78ebf8 | Shell | pashadag/Tools | /validation/makeReport | UTF-8 | 18,534 | 2.828125 | 3 | [] | no_license |
#!/bin/sh
runname=$1
chrname=$2
results_dir=${SVN_BASE}/cnv/results/${chrname}/working.$1
echo "Report for run $1 and chromosome $2" > report.txt
echo "Figure 1: Size correlation between our calls and the Database of Genomic Variants (DGV)" >> report.txt
cp ${results_dir}/val/${chrname}.cnvs.merged.cnvs-dgv_all.p2v.val..eps ./figure1.eps
echo "Figure 2: Zoom in on Figure 1. " >> report.txt
cp ${results_dir}/val/${chrname}.cnvs.merged.cnvs-dgv_all.v2p.val.?*.eps ./figure2.eps
echo "Figure 3: Length distribution of our calls" >> report.txt
cp ${results_dir}/val/${chrname}.cnvs.merged.cnvs.lendist.eps ./figure3.eps
echo "Figure 4: The sensitivity and specificty as measured with respect to decreasing confidence levels of our calls." >> report.txt
cp ${results_dir}/val/decrConf.roc.eps ./figure4.eps
cp ${results_dir}/val/summary.txt .
cp ${results_dir}/val/smother.txt .
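# The extractions below repeat one pattern: locate a row in summary.txt, take
# the last line of its block, and format "overlap / total = pct". A helper of
# this shape (a sketch only; not used below) could replace the pipelines:
#   ratio() { grep -A 2 "$1" summary.txt | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'; }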
gain_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-dgv_all.gain" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
gain_loss=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-dgv_all.loss" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
gain_all=`cat summary.txt | grep -v "dgv_all.loss" | grep -v "dgv_all.gain" | grep -A 2 ".cnvs.gain.cnvs-dgv_all" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
loss_gain=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-dgv_all.gain" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
loss_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-dgv_all.loss" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
loss_all=`cat summary.txt | grep -v "dgv_all.loss" | grep -v "dgv_all.gain" | grep -A 2 ".cnvs.loss.cnvs-dgv_all" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
all_gain=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-dgv_all.gain" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
all_loss=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-dgv_all.loss" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
all_all=`cat summary.txt | grep -v gain | grep -v loss | grep -v gain | grep -A 2 ".cnvs.merged.cnvs-dgv_all" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
sv_gain=`cat summary.txt | grep -A 2 "svs.merged-dgv_all.gain" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
sv_loss=`cat summary.txt | grep -A 2 "svs.merged-dgv_all.loss" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
#sv_indel=`cat summary.txt | grep -A 2 "svs-dgv_indel" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
sv_all=`cat summary.txt | grep -v loss | grep -v gain | grep -A 2 "svs.merged-dgv_all" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
sv_coll=`cat summary.txt | grep -A 2 "svs.merged-coll_ins" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
sv_abc8=`cat summary.txt | grep -A 2 "svs.merged-kidd_ins_abc8_all" | tail -1 | awk '{ if (($9 + $8) > 0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
shuf_gain=`cat summary.txt | grep -v "dgv_all.loss" | grep -v "dgv_all.gain" | grep -A 2 ".cnvs.gain.shuffle.cnvs-dgv_all" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
shuf_loss=`cat summary.txt | grep -v "dgv_all.loss" | grep -v "dgv_all.gain" | grep -A 2 ".cnvs.loss.shuffle.cnvs-dgv_all" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
shuf_gain_g=`cat summary.txt | grep -A 2 ".cnvs.gain.shuffle.cnvs-dgv_all.gain" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
shuf_loss_g=`cat summary.txt | grep -A 2 ".cnvs.loss.shuffle.cnvs-dgv_all.gain" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
shuf_gain_l=`cat summary.txt | grep -A 2 ".cnvs.gain.shuffle.cnvs-dgv_all.loss" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
shuf_loss_l=`cat summary.txt | grep -A 2 ".cnvs.loss.shuffle.cnvs-dgv_all.loss" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
shuf_ins_all=`cat summary.txt | grep -v "dgv_all.loss" | grep -v "dgv_all.gain" | grep -A 2 "svs.shuffle-dgv_all" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
shuf_ins_gain=`cat summary.txt | grep -A 2 "svs.shuffle-dgv_all.gain" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
shuf_ins_loss=`cat summary.txt | grep -A 2 "svs.shuffle-dgv_all.loss" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
shuf_ins_coll=`cat summary.txt | grep -A 2 "svs.shuffle-coll_ins_all" | tail -1 | awk '{ print $3" / "$2" = "$4"%" }'`
echo >> report.txt
echo >> report.txt
echo "True positive rates of our CNV calls, compared against the DGV. The ratio is between our predictions that overlap DGV and our total number of predictions." >> report.txt
echo >> report.txt
echo " DGV gain DGV loss DGV all coll_Ins " >> report.txt
echo "Our gain " ${gain_gain} " " ${gain_loss} " " ${gain_all} >> report.txt
echo "Our loss " ${loss_gain} " " ${loss_loss} " " ${loss_all} >> report.txt
echo "Our all " ${all_gain} " " ${all_loss} " " ${all_all} >> report.txt
echo "Our ins " ${sv_gain} " " ${sv_loss} " " ${sv_all} " " ${sv_coll} >> report.txt
echo "Shuf gain " ${shuf_gain_g} " " ${shuf_gain_l} " " ${shuf_gain} >> report.txt
echo "Shuf loss " ${shuf_loss_g} " " ${shuf_loss_l} " " ${shuf_loss} >> report.txt
echo "Shuf ins " ${shuf_ins_gain} " " ${shuf_ins_loss} " " ${shuf_ins_all} " " ${shuf_ins_coll} >> report.txt
echo >> report.txt
echo "Percentage of Kidd's insertions on ABC8 that overlap our insertions is ${sv_abc8}." >> report.txt
loss_smother=`cat smother.txt | grep -A 1 ".cnvs.loss.cnvs" | tail -1 | awk '{print $2}'`
gain_smother=`cat smother.txt | grep -A 1 ".cnvs.gain.cnvs" | tail -1 | awk '{print $2}'`
all_smother=`cat smother.txt | grep -A 1 ".cnvs.merged.cnvs" | tail -1 | awk '{print $2}'`
svs_smother=`cat smother.txt | grep -A 1 ".svs" | tail -1 | awk '{print $2}'`
coll_smother=`cat ${SVN_BASE}/cnv/datasets/smother.txt | grep -A 1 "coll_ins" | tail -1 | awk '{print $2}'`
dgv_loss_smother=`cat ${SVN_BASE}/cnv/datasets/smother.txt | grep -A 1 "dgv_all.loss" | tail -1 | awk '{print $2}'`
dgv_gain_smother=`cat ${SVN_BASE}/cnv/datasets/smother.txt | grep -A 1 "dgv_all.gain" | tail -1 | awk '{print $2}'`
dgv_all_smother=`cat ${SVN_BASE}/cnv/datasets/smother.txt | grep -v gain | grep -v loss | grep -A 1 "dgv_all" | tail -1 | awk '{print $2}'`
kidd_loss_smother=`cat ${SVN_BASE}/cnv/datasets/smother.txt | grep -A 1 "kidd_loss_all" | tail -1 | awk '{print $2}'`
kidd_gain_smother=`cat ${SVN_BASE}/cnv/datasets/smother.txt | grep -A 1 "kidd_gain_all" | tail -1 | awk '{print $2}'`
kidd_all_smother=`cat ${SVN_BASE}/cnv/datasets/smother.txt | grep -A 1 "kidd_all" | tail -1 | awk '{print $2}'`
cooper_all_smother=`cat ${SVN_BASE}/cnv/datasets/smother.txt | grep -A 1 "cooper.all.hg18" | tail -1 | awk '{print $2}'`
svs_indel_smother=`cat ${SVN_BASE}/cnv/datasets/smother.txt | grep -A 1 "dgv_indel" | tail -1 | awk '{print $2}'`
echo >> report.txt
echo Percentage of genome covered >> report.txt
echo >> report.txt
echo " us DGV Kidd " >> report.txt
echo "loss : " ${loss_smother} " " ${dgv_loss_smother} " " ${kidd_loss_smother} "" >> report.txt
echo "gain : " ${gain_smother} " " ${dgv_gain_smother} " " ${kidd_gain_smother} "" >> report.txt
echo " all : " ${all_smother} " " ${dgv_all_smother} " " ${kidd_all_smother} "" >> report.txt
echo >> report.txt
echo >> report.txt
echo "Dataset Smother" >> report.txt
echo ".svs " ${svs_smother} >> report.txt
echo "coll_ins " ${coll_smother} >> report.txt
echo "cooper " ${cooper_all_smother} >> report.txt
#kgg=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-kidd_gain" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
kgl=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-kidd_loss" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
#klg=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-kidd_gain" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
kll=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-kidd_loss" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
#kag=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-kidd_gain" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
kal=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-kidd_loss" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
echo >> report.txt
echo "Percentage of Kidd's calls on ABC8 that are overlapped by our calls" >> report.txt
echo >> report.txt
echo " Our gain Our loss Our all " >> report.txt
#echo "Kidd gain " ${kgg} " " ${klg} " " ${kag} >> report.txt
echo "Kidd loss " ${kgl} " " ${kll} " " ${kal} >> report.txt
#kgg=`cat summary.txt | grep -A 2 ".cnvs.gain.shuffle.cnvs-kidd_gain" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
kgl=`cat summary.txt | grep -A 2 ".cnvs.gain.shuffle.cnvs-kidd_loss" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
#klg=`cat summary.txt | grep -A 2 ".cnvs.loss.shuffle.cnvs-kidd_gain" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
kll=`cat summary.txt | grep -A 2 ".cnvs.loss.shuffle.cnvs-kidd_loss" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
echo >> report.txt
echo "Percentage of Kidd's calls on ABC8 that are overlapped by the shuffled calls" >> report.txt
echo >> report.txt
echo " Shuffle gain Shuffle loss " >> report.txt
#echo "Kidd gain " ${kgg} " " ${klg} >> report.txt
echo "Kidd loss " ${kgl} " " ${kll} >> report.txt
c_dup_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-cooper.dup.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_del_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-cooper.del.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_null_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-cooper.null.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_all_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-cooper.all.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_dup_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-cooper.dup.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_del_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-cooper.del.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_null_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-cooper.null.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_all_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-cooper.all.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_dup_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-cooper.dup.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_del_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-cooper.del.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_null_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-cooper.null.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
c_all_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-cooper.all.hg18" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_0_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-mcc.0.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_0_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-mcc.0.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_0_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-mcc.0.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_1_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-mcc.1.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_1_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-mcc.1.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_1_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-mcc.1.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_2_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-mcc.2.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_2_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-mcc.2.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_2_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-mcc.2.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_3_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-mcc.3.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_3_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-mcc.3.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_3_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-mcc.3.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_4_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-mcc.4.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_4_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-mcc.4.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_4_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-mcc.4.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_5_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-mcc.5.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_5_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-mcc.5.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_5_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-mcc.5.s.j.cnvs" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_min_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-mcc.min" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_min_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-mcc.min" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_min_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-mcc.min" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_max_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-mcc.max" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_max_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-mcc.max" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_max_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-mcc.max" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_mima_gain=`cat summary.txt | grep -A 2 ".cnvs.gain.cnvs-mcc.mima" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_mima_loss=`cat summary.txt | grep -A 2 ".cnvs.loss.cnvs-mcc.mima" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
m_mima_all=`cat summary.txt | grep -A 2 ".cnvs.merged.cnvs-mcc.mima" | tail -1 | awk '{ if (($9 + $8) >0 ) print $8 " / " $8 + $9 " = " $8 / ($9+$8); else print " NA" }'`
echo >> report.txt
echo "Percentage of Cooper et al's calls on ABC8 that are overlapped by our calls" >> report.txt
echo >> report.txt
echo " Our gain Our loss Our all " >> report.txt
echo "Cooper duplications " ${c_dup_gain} " " ${c_dup_loss} " " ${c_dup_all} >> report.txt
echo "Cooper homozygous dels " ${c_null_gain} " " ${c_null_loss} " " ${c_null_all} >> report.txt
echo "Cooper hemizyogou dels " ${c_del_gain} " " ${c_del_loss} " " ${c_del_all} >> report.txt
echo "Cooper all " ${c_all_gain} " " ${c_all_loss} " " ${c_all_all} >> report.txt
echo "McCarroll 0 " ${m_0_gain} " " ${m_0_loss} " " ${m_0_all} >> report.txt
echo "McCarroll 1 " ${m_1_gain} " " ${m_1_loss} " " ${m_1_all} >> report.txt
echo "McCarroll 2 " ${m_2_gain} " " ${m_2_loss} " " ${m_2_all} >> report.txt
echo "McCarroll 3 " ${m_3_gain} " " ${m_3_loss} " " ${m_3_all} >> report.txt
echo "McCarroll 4 " ${m_4_gain} " " ${m_4_loss} " " ${m_4_all} >> report.txt
echo "McCarroll 5 " ${m_5_gain} " " ${m_5_loss} " " ${m_5_all} >> report.txt
echo "McCarroll min " ${m_min_gain} " " ${m_min_loss} " " ${m_min_all} >> report.txt
echo "McCarroll max " ${m_max_gain} " " ${m_max_loss} " " ${m_max_all} >> report.txt
echo "McCarroll min+max " ${m_mima_gain} " " ${m_mima_loss} " " ${m_mima_all} >> report.txt
| true
|
1af40beaf6cc950290f4773a4af39a46ff941443
|
Shell
|
msimonin/vagrant-g5k
|
/lib/vagrant-g5k/util/launch_vm.sh
|
UTF-8
| 1,866
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script was originally borrowed from pmorillo.
# Thanks to him!
# I've made some additions, though :)
# ARGS
# $1: cpu demand
# $2: mem demand
# $3: net type {BRIDGE, NAT}
#    BRIDGE: $4 is the subnet file; remaining args are extra net/drive options
#    NAT:    remaining args are net/drive options
set -x
function net_bridge() {
SUBNET_FILE=$1
# As we chose a stateless design, let's calculate the IP and MAC here,
# assuming we got at least a /22 subnet
ipnumber=$(($OAR_JOB_ID % 1022))
IP_MAC=$(cat $SUBNET_FILE|head -n $((ipnumber + 1))|tail -n 1)
IP_ADDR=$(echo $IP_MAC|awk '{print $1}')
MAC_ADDR=$(echo $IP_MAC|awk '{print $2}')
# create tap
TAP=$(sudo create_tap)
# return the specific net string of the kvm command
echo "-net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no"
}
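# For reference: net_bridge expects the subnet file to contain one "IP MAC"
# pair per line (this is what the head/tail/awk extraction above assumes);
# the addresses below are hypothetical examples:
#   10.158.0.1 00:16:3e:00:00:01
#   10.158.0.2 00:16:3e:00:00:02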
# CPU demand
if [ "$1" == "-1" ]
then
SMP=$(nproc)
else
SMP=$1
fi
echo "SMP = $SMP"
shift
# Memory demand
KEEP_SYSTEM_MEM=1 # Gb
if [ "$1" == "-1" ]
then
TOTAL_MEM=$(cat /proc/meminfo | grep -e '^MemTotal:' | awk '{print $2}')
VM_MEM=$(( ($TOTAL_MEM / 1024) - $KEEP_SYSTEM_MEM * 1024 ))
else
VM_MEM=$1
fi
echo "VM_MEM = $VM_MEM"
shift
# net demand
net=""
if [ "$1" == "BRIDGE" ]
then
shift
net=$(net_bridge $@)
echo $(hostname)
echo $net
shift
else
shift
net=""
fi
# Directory for qcow2 snapshots
export TMPDIR=/tmp
# Clean shutdown of the VM at the end of the OAR job
clean_shutdown() {
echo "Caught shutdown signal at $(date)"
echo "system_powerdown" | nc -U /tmp/vagrant-g5k.$OAR_JOB_ID.mon
}
trap clean_shutdown 12
# Launch virtual machine
kvm -m $VM_MEM -smp cores=$SMP,threads=1,sockets=1 -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -monitor unix:/tmp/vagrant-g5k.$OAR_JOB_ID.mon,server,nowait -localtime -enable-kvm $net $@ &
wait
| true
|
9eda763d14877b6de09b6c2534fec3182cecc36e
|
Shell
|
Kartik1801/Linux-Shell-Programming-Lab
|
/lab-11-3.sh
|
UTF-8
| 191
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
#Sum of Array Elements
echo "Enter No of Elements"
read length
sum=0
echo "Enter elements:"
for ((i=0;i<length;i++))
do
    read num[$i]
    sum=$(($sum+${num[i]}))
done
echo "Sum = "$sum
| true
|
c16813dd102efb52622fdcb9b29c3d05a6f82efd
|
Shell
|
orez-/520proj
|
/joos/jjoos-scc-2/Bin/internal/srcdescent.sh
|
UTF-8
| 354
| 3.71875
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/sh
#
# descent.sh - customizable recursive source tree descent
#
ROOT=$JOOSDIR/src
visit() {
# TODO: make copy of this file and fill this function
echo `basename $1`
}
# 1 param: directory
descent_rec() {
echo descending $1...
for f in `ls $1`; do
visit $1/$f
if test -d $1/$f; then
descent_rec $1/$f
fi
done
}
descent_rec $ROOT
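# Example of a filled-in visit function (a sketch, not part of the original
# template): print Java sources together with their sizes.
#   visit() {
#     case "$1" in
#       *.java) echo "`basename $1` (`wc -c < $1` bytes)" ;;
#     esac
#   }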
| true
|
b11aeb3007cb9670b26629b32e9df5a87c79f2a8
|
Shell
|
mmatuska/oss-fuzz
|
/projects/mruby/build.sh
|
UTF-8
| 1,290
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -eu
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# build project
export LD=clang
export LDFLAGS="$CFLAGS"
./minirake clean && ./minirake -j$(nproc) all
# build fuzzers
FUZZ_TARGET=$SRC/mruby/oss-fuzz/mruby_fuzzer.c
name=$(basename $FUZZ_TARGET .c)
$CC -c $CFLAGS -Iinclude \
${FUZZ_TARGET} -o $OUT/${name}.o
$CXX $CXXFLAGS $OUT/${name}.o $LIB_FUZZING_ENGINE -lm \
$SRC/mruby/build/host/lib/libmruby.a -o $OUT/${name}
rm -f $OUT/${name}.o
# dict and config
cp $SRC/mruby/oss-fuzz/config/mruby.dict $OUT
cp $SRC/mruby/oss-fuzz/config/mruby_fuzzer.options $OUT
# seeds
find $SRC/mruby_seeds -exec zip -ujq \
$OUT/mruby_fuzzer_seed_corpus.zip "{}" \;
| true
|
e8cd409bffabea727932636c8265176bb3891eac
|
Shell
|
BiomedicalMachineLearning/CAGE_Analysis
|
/CAGE_Mapping/Mapping.sh
|
UTF-8
| 1,007
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
#PBS -N "map"
#PBS -l select=1:ncpus=8:mem=50GB
#PBS -l walltime=8:00:00
module load bwa/0.7.15
module load samtools
Wdir=/path/to/inputs
cd $Wdir
#map
for fastqfile in `ls fastq_files/*_merged.fastq.gz`; do
bwa mem -t 8 Homo_sapiens.GRCh38.dna.primary_assembly.fa $fastqfile > ${fastqfile}.se.sam
samtools view -bS ${fastqfile}.se.sam > ${fastqfile}.se.bam
samtools view -q10 -b ${fastqfile}.se.bam >${fastqfile}.se.q10.bam
samtools sort ${fastqfile}.se.q10.bam>${fastqfile}.se.q10.sorted.bam
samtools index ${fastqfile}.se.q10.sorted.bam
done
#get mapping statistics all reads
for file in *se.bam; do newname1=`basename $file .fastq.gz.se.bam`; echo $file >>${newname1}.mapped.qc; samtools flagstat ${file} >>${newname1}.mapped.qc; done
#get uniquely mapped CAGE tags with a minimum mapping quality of 10
for file in *q10.sorted.bam; do newname2=`basename $file .fastq.gz.se.q10.sorted.bam`; echo $file >>${newname2}.mapped.qc; samtools flagstat ${file} >>${newname2}.mapped.qc; done
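# A quick way to pull the overall mapping rate out of the .qc files written
# above -- a sketch that relies on the standard "... mapped (xx.xx% ...)" line
# printed by samtools flagstat:
for qcfile in *.mapped.qc; do
    echo -n "$qcfile: "
    grep 'mapped (' $qcfile | head -1
done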
| true
|
79689f5c0f87ff438a4088a091472f365564da63
|
Shell
|
IBM-Cloud/secure-file-storage
|
/app/generate_yaml.sh
|
UTF-8
| 1,376
| 3.453125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
set -o pipefail
if [[ -z "$INGRESS_SUBDOMAIN" ]]; then
echo "INGRESS_SUBDOMAIN must be in the environment"
exit 1
fi
if [[ -z "$IMAGE_REPOSITORY" ]]; then
echo "IMAGE_REPOSITORY must be in the environment"
exit 1
fi
if [[ -z "$INGRESS_SECRET" ]]; then
echo "INGRESS_SECRET must be in the environment"
exit 1
fi
if [[ -z "$BASENAME" ]]; then
export BASENAME=secure-file-storage
fi
if [[ -z "$TARGET_NAMESPACE" ]]; then
export TARGET_NAMESPACE=default
fi
if [[ -z "$PUBLIC_CERT_ID" ]] && [[ -z "$SECRETS_MANAGER_API_URL" ]] && [[ -z "$MYDOMAIN" ]]; then
cat secure-file-storage.template.yaml | \
envsubst '$IMAGE_NAME $INGRESS_SECRET $INGRESS_SUBDOMAIN $IMAGE_PULL_SECRET $IMAGE_REPOSITORY $TARGET_NAMESPACE $BASENAME' > secure-file-storage.yaml
exit
fi
if [[ -z "$PUBLIC_CERT_ID" ]]; then
echo "PUBLIC_CERT_ID must be in the environment"
exit 1
fi
if [[ -z "$SECRETS_MANAGER_API_URL" ]]; then
echo "SECRETS_MANAGER_API_URL must be in the environment"
exit 1
fi
if [[ -z "$MYDOMAIN" ]]; then
echo "MYDOMAIN must be in the environment"
exit 1
fi
cat secure-file-storage.template.yaml | \
sed -e 's/^# //' |
envsubst '$PUBLIC_CERT_ID $SECRETS_MANAGER_API_URL $MYDOMAIN $IMAGE_NAME $INGRESS_SECRET $INGRESS_SUBDOMAIN $IMAGE_PULL_SECRET $IMAGE_REPOSITORY $TARGET_NAMESPACE $BASENAME' > secure-file-storage.yaml
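# Minimal example supplying the three required variables checked above (all
# values are placeholders; the envsubst lists show the full set of variables
# that get substituted into the template):
#   INGRESS_SUBDOMAIN=example.containers.appdomain.cloud \
#   IMAGE_REPOSITORY=registry.example.com/myns \
#   INGRESS_SECRET=my-ingress-secret \
#   ./generate_yaml.sh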
| true
|
0fae013d679a6a5d144349c6a56871eb7836e0cd
|
Shell
|
xing/fpm-fry
|
/lib/fpm/fry/templates/debian/before_install.erb
|
UTF-8
| 138
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
case "$1" in
install)
<%= install.join("\n") %>
;;
upgrade)
<%= upgrade.join("\n") %>
;;
*)
exit 1
;;
esac
| true
|
06378e3600233d51e8e2ed0eca6c96f1aeb566e5
|
Shell
|
uchan-nos/mikanos-build
|
/devenv/make_image.sh
|
UTF-8
| 646
| 3.828125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh -ex
if [ $# -lt 3 ]
then
echo "Usage: $0 <image name> <mount point> <.efi file> [another file]"
exit 1
fi
DEVENV_DIR=$(dirname "$0")
DISK_IMG=$1
MOUNT_POINT=$2
EFI_FILE=$3
ANOTHER_FILE=$4
if [ ! -f $EFI_FILE ]
then
echo "No such file: $EFI_FILE"
exit 1
fi
rm -f $DISK_IMG
qemu-img create -f raw $DISK_IMG 200M
mkfs.fat -n 'MIKAN OS' -s 2 -f 2 -R 32 -F 32 $DISK_IMG
$DEVENV_DIR/mount_image.sh $DISK_IMG $MOUNT_POINT
sudo mkdir -p $MOUNT_POINT/EFI/BOOT
sudo cp $EFI_FILE $MOUNT_POINT/EFI/BOOT/BOOTX64.EFI
if [ "$ANOTHER_FILE" != "" ]
then
sudo cp $ANOTHER_FILE $MOUNT_POINT/
fi
sleep 0.5
sudo umount $MOUNT_POINT
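# Example invocation (file names are placeholders):
#   ./make_image.sh disk.img ./mnt BOOTX64.EFI kernel.elf
# This creates a 200M FAT image, copies the EFI binary to /EFI/BOOT/BOOTX64.EFI
# inside it, and copies the optional extra file to the image root.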
| true
|
7da580b00323510a76e57177452f5f3c816909ee
|
Shell
|
darthdeus/devops
|
/install.sh
|
UTF-8
| 491
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
DEVOPS="$HOME/.devops"
if [ ! -z "$HOST_COLOR" ]; then
echo "$HOST_COLOR" > "$HOME/.host_color"
fi
curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
mkdir -p "$HOME/.vim/backup"
if [ ! -d "$DEVOPS" ]; then
git clone https://github.com/darthdeus/devops.git "$DEVOPS"
fi
ln -nsf "$DEVOPS/vimrc" "$HOME/.vimrc"
ln -nsf "$DEVOPS/zshrc" "$HOME/.zshrc"
ln -nsf "$DEVOPS/tmux.conf" "$HOME/.tmux.conf"
| true
|
9a52d0e8f20764d3d0896439caf756a7cc864868
|
Shell
|
kbrock/dotfiles
|
/bashrc.d/perf.bash
|
UTF-8
| 246
| 2.59375
| 3
|
[] |
no_license
|
# determine the query count of an html file
# it is off by 12
function qcount() { sed -E "s/\"formatted_command/\\`echo -e '\n\r'`&/g" $1 |wc -l ; }
alias profile='beer mini_profiler --storage Redis --storage-options db=2 --collapse Rendering'
| true
|
423b134bd322212986336836ec05f0243e3402e4
|
Shell
|
Fluepke/luca-web-clone
|
/scripts/yarnAll.sh
|
UTF-8
| 188
| 2.515625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
set -euo pipefail
SERVICES="backend contact-form health-department locations scanner webapp"
for SERVICE in ${SERVICES}
do
pushd "services/$SERVICE"
yarn "$@"
popd
done
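# Example: run "yarn install" in every service listed above:
#   ./scripts/yarnAll.sh install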
| true
|
73f7e2ff2ba940bbdb17a7f45e1f474eb8ab47a3
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/python-pomegranate/PKGBUILD
|
UTF-8
| 1,074
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
pkgname=python-pomegranate
pkgver=0.6.1
pkgrel=1
pkgdesc="Pomegranate is a graphical models library for Python, implemented in Cython for speed."
arch=('i686' 'x86_64') # package contains arch-dependent libs
url="http://pypi.python.org/pypi/pomegranate/"
license=('MIT')
depends=('python' 'cython' 'python-networkx' 'python-joblib' 'python-numpy' 'python-scipy')
makedepends=('python-setuptools')
source=("https://pypi.python.org/packages/e4/e1/376a1f6eaa41172f120663dccf4d974db2db5c79216340a1bd009c5e7285/pomegranate-${pkgver}.tar.gz"
"LICENSE") # License copied from the GitHub repo <https://github.com/jmschrei/pomegranate>
sha256sums=('3a7f004daed0e00e4b4f75000311d55503a9cb4fb06bea2a8262ce1f68245ece'
'8c335bd762f024a24b8faf505d269a12916c7ff607e8fec2c40470bdec4a58fa')
build() {
cd ${pkgname/python-}-${pkgver}
python setup.py build
}
package() {
cd ${pkgname/python-}-${pkgver}
python setup.py install --root="${pkgdir}" --optimize=1 --skip-build
install -Dm644 ../LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
| true
|
ea456deff1dbba25f12ffb2c6dc16f34069f144d
|
Shell
|
heroku/hk
|
/contrib/hk-bash-completion.sh
|
UTF-8
| 453
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
_hk_commands()
{
hk help commands|cut -f 2 -d ' '
}
_hk()
{
cur=${COMP_WORDS[COMP_CWORD]}
prev=${COMP_WORDS[COMP_CWORD-1]}
if [ $COMP_CWORD -eq 1 ]; then
COMPREPLY=( $( compgen -W "$(_hk_commands)" $cur ) )
elif [ $COMP_CWORD -eq 2 ]; then
case "$prev" in
help)
COMPREPLY=( $( compgen -W "$(_hk_commands)" $cur ) )
;;
esac
fi
}
complete -F _hk -o default hk
| true
|
6625508e2bd5dacb01630b57b634d5c71dae983f
|
Shell
|
lzh9102/bash-countdown
|
/countdown.sh
|
UTF-8
| 7,030
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# A simple countdown timer written in bash script
# Author: Timothy Lin <[email protected]>
# Date: 2011-07-21
# Messages
INTERRUPT_MSG="Count down stopped by user interrupt."
TIMEUP_MSG="Time is up."
# Constants
SEC_PER_MIN=60
SEC_PER_HOUR=`expr $SEC_PER_MIN \* 60`
SEC_PER_DAY=`expr $SEC_PER_HOUR \* 24`
SEC_PER_WEEK=`expr $SEC_PER_DAY \* 7`
PAT_WDHMS="^([0-9]+):([0-9]+):([0-9]+):([0-9]+):([0-9]+)$"
PAT_DHMS="^([0-9]+):([0-9]+):([0-9]+):([0-9]+)$"
PAT_HMS="^([0-9]+):([0-9]+):([0-9]+)$"
PAT_MS="^([0-9]+):([0-9]+)$"
PAT_S="^([0-9]+)$"
NOW=`date +%s`
####################################################################
function show_hint {
echo "Usage: $(basename $0) [-f] <duration|-d date> [-q] [-t title] [-m message] [-e command]"
echo "Examples:"
echo " $(basename $0) 30 # delay 30 seconds"
echo " $(basename $0) 1:20:30 # delay 1 hour 20 minutes and 30 seconds"
echo " $(basename $0) -d 23:30 # delay until 11:30 PM"
echo "Options:"
echo " -f Force execute. This option must be locate before <-d date>."
echo " -q Quiet. Don't print message on exit."
echo " -t title Show the title at the top of the screen."
echo " -m message Show the message at the bottom of the screen."
echo " -e command Execute command on timeup. The command will not be executed on cancel."
}
####################################################################
# function to get seconds from (weeks,days,hours,minutes,seconds)
# usage: print_seconds week days hours minutes seconds
function print_seconds {
if [ $# -ne 5 ]; then # check for error
echo "Error: function print_seconds takes 5 parameters"
exit 1
fi
# weeks, days
result=`expr $1 \* $SEC_PER_WEEK + $2 \* $SEC_PER_DAY`
# hours, minutes, seconds
result=`expr $result + $3 \* $SEC_PER_HOUR + $4 \* $SEC_PER_MIN + $5`
echo $result
}
####################################################################
# function to correct date by trying to add some time to it
# usage: correct_date_sec seconds
function correct_date_sec {
final=$1
if [ $final -gt 0 ]; then echo $final; return; fi
final=`expr $1 + $SEC_PER_DAY`
if [ $final -gt 0 ]; then echo $final; return; fi
final=`expr $1 + $SEC_PER_WEEK`
if [ $final -gt 0 ]; then echo $final; return; fi
echo "0"
}
####################################################################
# Parse command line options
sec_rem=0 # remaining seconds
param_prev="" # previous parameters
while [ $# -gt 0 ]; do
param=$1
shift
if [ "${param:0:1}" == "-" ]; then # skip options such as -d
if [ "$param" == "-f" ]; then # force execute
NO_CONFIRM=true
elif [ "$param" == "-q" ]; then # quiet, no output on exit
NO_OUTPUT=true
fi
param_prev=$param
continue
fi
case "$param_prev" in
-d) # assign a date
UNTIL=`date -d "$param" +%s`
if [ $? -ne 0 ]; then
exit 1
fi
sec_rem=`expr $UNTIL - $NOW`
if [ $sec_rem -lt 1 ]; then
sec_rem=`correct_date_sec $sec_rem`
if [ $sec_rem -lt 1 ]; then
echo "Error: The date $param is already history."
exit 1
fi
if [ -z "$NO_CONFIRM" ]; then # there's no "-f" option
# confirm for the correction
echo "Warning: The given date is assumed to be: `date -d now\ +$sec_rem\ sec`"
echo "Place an option -f before -d to suppress this warning"
read -n 1 -p "Still proceed [Y]/n?" ch
echo
if [ "$ch" == "n" ] || [ "$ch" == "N" ]; then
exit 1
fi
ch=""
fi
fi
;;
-t) # set title
TITLE="$param"
;;
-m) # set message
MESSAGE="$param"
;;
-e) # execute command on timeup
EXECUTE="$param"
;;
*) # assign a time
# identify the time format and calculate number of seconds by print_seconds
if [[ "$param" =~ $PAT_WDHMS ]]; then # W:D:H:M:S
sec_rem=`print_seconds ${BASH_REMATCH[1]} ${BASH_REMATCH[2]} ${BASH_REMATCH[3]} \
${BASH_REMATCH[4]} ${BASH_REMATCH[5]}`
elif [[ "$param" =~ $PAT_DHMS ]]; then # D:H:M:S
sec_rem=`print_seconds 0 ${BASH_REMATCH[1]} ${BASH_REMATCH[2]} ${BASH_REMATCH[3]} \
${BASH_REMATCH[4]}`
elif [[ "$param" =~ $PAT_HMS ]]; then # H:M:S
sec_rem=`print_seconds 0 0 ${BASH_REMATCH[1]} ${BASH_REMATCH[2]} ${BASH_REMATCH[3]}`
elif [[ "$param" =~ $PAT_MS ]]; then # M:S
sec_rem=`print_seconds 0 0 0 ${BASH_REMATCH[1]} ${BASH_REMATCH[2]}`
elif [[ "$param" =~ $PAT_S ]]; then # S
sec_rem=`print_seconds 0 0 0 0 ${BASH_REMATCH[1]}`
else
echo "Error: Incorrect time format: $param"
exit 1
fi
;;
esac
param_prev="" # clear the previous parameter
done
####################################################################
# check whether a correct time is assigned
if [ $sec_rem -eq 0 ]; then
show_hint
exit 1
fi
# calculate the date when time up
until_date=`expr $NOW + $sec_rem`
####################################################################
# cleanup function
# usage: cleanup_and_exit exitcode [message]
function cleanup_and_exit {
tput cnorm # restore cursor
stty echo # restore keyboard echo
clear
if [ -z $NO_OUTPUT ] && [ ! -z "$2" ]; then # print message
echo $2
fi
if [ $1 -eq 0 ] && [ ! -z "$EXECUTE" ]; then # execute command on timeup
eval $EXECUTE
fi
exit $1
}
trap 'cleanup_and_exit 1 "$INTERRUPT_MSG"' INT # set the cleanup function to be the Control+C handler
####################################################################
clear
tput civis # hide cursor
stty -echo # disable keyboard echo
# count down
while [ 0 -eq 0 ]; do
sec_rem=`expr $until_date - $(date +%s)` # calculate remaining seconds
if [ $sec_rem -lt 1 ]; then
break
fi
# Calculate the date of timeout once
if [ -z "$TIMEOUT_DATE" ]; then
TIMEOUT_DATE=`date -d "now +$sec_rem sec"`
fi
# derive weeks/days/hours/minutes/seconds directly with modulo arithmetic
interval=$sec_rem
seconds=`expr $interval % 60`
minutes=`expr $interval % 3600 / 60`
hours=`expr $interval % 86400 / 3600`
days=`expr $interval % 604800 / 86400`
weeks=`expr $interval / 604800`
if [ ! -z "$TITLE" ]; then # print the title if it exists
echo "$TITLE"
fi
echo "Now: $(date)" # print date
echo "Until: $TIMEOUT_DATE" # print timeup
echo "------------------------------------ "
echo "Weeks: $weeks "
echo "Days: $days "
echo "Hours: $hours "
echo "Minutes: $minutes "
echo "Seconds: $seconds "
echo " "
if [ ! -z "$EXECUTE" ]; then
echo "Programs to execute on timeup:"
echo " $EXECUTE"
echo
fi
echo "Press [q] to stop counting "
echo " "
if [ ! -z "$MESSAGE" ]; then # print the message
echo "$MESSAGE"
fi
tput home # move cursor back to (0,0)
# wait for 0.9 second and monitor user input
read -n 1 -t 0.9 ch
if [ "$ch" == "q" ]; then
cleanup_and_exit 1 "$INTERRUPT_MSG"
fi
done
cleanup_and_exit 0 "$TIMEUP_MSG"
| true
|
a5a17ea194bf35ebfe4268e48d8df71d4a197f0e
|
Shell
|
amaurybsouza/public-bash-scripts
|
/tail-color-log.sh
|
UTF-8
| 954
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# @author Ryan McIntyre
# log like: /c/wamp/www/proto/portal/log/frontend_local.log
ME="$(basename "$(test -L "$0" && readlink "$0" || echo "$0")")"
MY_DIR=`dirname $0`
if [ -e $MY_DIR/unix-color-codes-not-escaped.sh ]; then
. $MY_DIR/unix-color-codes-not-escaped.sh
fi
if [ $# == 0 ]; then
echo -e "${On_Red}No log file given, usage: $ME <FILE_NAME>${Color_Off}"
exit 1
else
LOG_FILE=$1
echo "Showing tail of: $LOG_FILE"
fi
tail -f $LOG_FILE | awk '
/200 OK/ {print "\033[32m" $0 "\033[39m"; next}
/View "Success"/ {print "\033[33m" $0 "\033[39m"; next}
/sfPatternRouting/ {print "\033[35m" $0 "\033[39m"; next}
/Doctrine_Connection/ {print "\033[36m" $0 "\033[39m"; next}
/err/ {print "\033[31m" $0 "\033[39m"; next}
/severe/ {print "\033[31m" $0 "\033[39m"; next}
/debug/ {print "\033[34m" $0 "\033[39m"; next}
1 {print}
'
# sources:
## http://stackoverflow.com/questions/192292/bash-how-best-to-include-other-scripts
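# To highlight an additional pattern, add a rule before the final catch-all
# "1 {print}" line, e.g. (hypothetical pattern, shown in yellow):
#   /warn/ {print "\033[33m" $0 "\033[39m"; next}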
| true
|
52c2d44b97fe4f10d2d58952aacd7ae724d536fa
|
Shell
|
tulanthoar/prezto
|
/runcoms/funcs/man
|
UTF-8
| 133
| 2.984375
| 3
|
[] |
no_license
|
man() {
local width=$(tput cols)
[[ $width -gt $MANWIDTH ]] && width=$MANWIDTH
env LC_CTYPE=C MANWIDTH=$width man "$@"
}
| true
|
7367e10fec538e14c0faaccd85978f12e422bcb8
|
Shell
|
0xmzn/Hackerrank-Shell-Solutions
|
/ArraysInBash/Slice-an-Array.sh
|
UTF-8
| 77
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
arr=($(cat))
for i in {3..7}
do
echo -n "${arr[$i]} "
done
| true
|
f229b4f8a13c8602f5e43c8a32a949262e6dba47
|
Shell
|
RGM-OSC/nagios
|
/SOURCES/nagios-rgm/nagiosconf.sh
|
UTF-8
| 423
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# appliance group and user
APPLIANCEGRP="rgm"
# nagios paths
rgmdir="/srv/rgm"
linkdir="${rgmdir}/nagios"
chown -R nagios:${APPLIANCEGRP} ${linkdir}*
chmod 775 ${linkdir}/etc
chmod 775 ${linkdir}/etc/objects
chmod 775 ${linkdir}/var/log/spool/checkresults/
systemctl enable nagios.service &>/dev/null
systemctl start nagios.service &>/dev/null
/usr/sbin/usermod -g ${APPLIANCEGRP} -G apache nagios &>/dev/null
| true
|
ef41410af05df23d190f8794ba0a3116edf411b2
|
Shell
|
mlocati/docker-php-extension-installer
|
/scripts/tests/stomp
|
UTF-8
| 278
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
. "$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)/_bootstrap.sh"
rc=0
if test -z "$(php --ri stomp | grep 'SSL Support => enabled')"; then
echo 'stomp has not been compiled with SSL support' >&2
rc=1
else
echo 'stomp has been compiled with SSL support'
fi
exit $rc
| true
|
acbd3c72b336538d1cea37113d83b1b790ee855a
|
Shell
|
svrist/itu
|
/adm/assignment1/countmyrun.sh
|
UTF-8
| 965
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
#python run.py [options]
#options:
#-h, --help : this help message
#-t, --threads= : number of swap threads (1..59)
#-s, --swaps= : total number of swaps (< 1000)
#-r, --runs= : number of repetitions (< 100)
#-i, --isol= : isolation level ('UR', 'CS', 'RS','RR')
#-o, --output= : path to output file (result.txt)
dd=`date "+%s"`
if [ -d output ];then
echo "Moving old output to output-$dd"
mv output output-$dd
fi
mkdir output
run=12
s=100
isols=( CS RR )
threads=( 1 2 5 10 25 50 )
swaps=( 400 400 400 400 400 400 )
count=0
for t in ${threads[@]}
do
echo "Threads $t (run:$run, swaps:${swaps[$count]})"
for i in ${isols[@]}
do
echo "Isolation $i (${swaps[$count]})"
python origrun.py -r$run --threads=$t --isol=$i --swaps=${swaps[$count]} -o output/sum-t$t$i.txt 2>>runlog.log > output/timing-t$t$i.txt
date >> runlog.log
sh clean.sh >> runlog.log
done
let count++
done
echo "Done. Exit"
| true
|
b4b977b582b360153a56c528bb2480b4c980b7ca
|
Shell
|
dalmirdasilva/glcd
|
/make.sh
|
UTF-8
| 992
| 2.921875
| 3
|
[] |
no_license
|
INCLUDES="-I/home/dalmir/Dropbox/Microcontrollers/picnix/ \
-I/home/dalmir/storage/github/pic/sdcc/driver/graphic/lcd/ \
-I/home/dalmir/storage/github/pic/sdcc/library/graphic/lcd/ \
-I/home/dalmir/storage/github/pic/sdcc/driver/graphic/glcd/ \
-I/home/dalmir/storage/github/pic/sdcc/library/graphic/glcd/ \
-I/home/dalmir/storage/github/pic/sdcc/include/util/step_report/ \
-I/home/dalmir/storage/github/pic/sdcc/include/ "
BUILD_DIR=build
TARGET=main
ARCH=-mpic16
CHIP=-p18f4550
LKR="-Wl,-s,/home/dalmir/storage/github/glcd/custom_linker.lkr"
echo "Making..."
if [ ! -d $BUILD_DIR ]; then
mkdir $BUILD_DIR
fi
rm -f $BUILD_DIR/*.c
rm -f $BUILD_DIR/*.h
cp *.c $BUILD_DIR
cp *.h $BUILD_DIR
cd $BUILD_DIR
echo "Entering in the build dir (${BUILD_DIR})"
echo -n "Compiling..."
sdcc --opt-code-size $LKR $TARGET.c $INCLUDES $ARCH $CHIP
echo "ok."
cp $TARGET.hex ..
cd ..
echo "Exiting the build dir (${BUILD_DIR})"
echo "done."
| true
|
281415bc1994f117cab5f3d7a4cdfbbf820b7755
|
Shell
|
suizman/dform
|
/push.sh
|
UTF-8
| 599
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Exit script on command fail
set -e
# Auto export variables
set -a
# Exit if an unset variable is referenced
set -u
# Read the version once instead of re-parsing the file for every command
version=`sed '/^\s*$/d' version | tail -n1`
# Tag as latest and push both versions
echo "Tag suizman/dform:$version as suizman/dform:latest"
docker tag suizman/dform:$version suizman/dform:latest
echo "Pushing suizman/dform:latest"
docker push suizman/dform:latest && \
echo "Pushing suizman/dform:$version"
docker push suizman/dform:$version
| true
|
f6f4ee6832f6bd60173d773b2e575a767d3ec732
|
Shell
|
upcloudcom/dev-ops-node
|
/tools/transferFile.sh
|
UTF-8
| 1,100
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/expect
#Shell command to transfer file using scp on Linux
#Windows is not supported yet
#Usage:
#  localFile remoteAddr remoteFile user password isToRemote
set localFile [lindex $argv 0]
set remoteAddr [lindex $argv 1]
set remoteFile [lindex $argv 2]
set user [lindex $argv 3]
set password [lindex $argv 4]
set isToRemote [lindex $argv 5]
if {$isToRemote == "1"} {
send_user "Transfer file to remote machine\n"
set dirName [exec dirname $remoteFile]
# Disable StrictHostKeyChecking
exec echo -e "Host $remoteAddr\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config
# Clean and recreate the directory
spawn ssh $user@$remoteAddr "rm -rf $dirName && mkdir -p $dirName"
set timeout 1800
expect {
"*assword:" {send "$password\r";exp_continue}
}
spawn scp $localFile $user@$remoteAddr:$remoteFile
} else {
exec echo -e "Host $remoteAddr\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config
send_user "Transfer file from remote machine\n"
spawn scp $user@$remoteAddr:$remoteFile $localFile
}
set timeout 1800
expect {
"*assword:" {send "$password\r";exp_continue}
}
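# Example (all values are placeholders):
#   ./transferFile.sh ./backup.tar 192.168.1.10 /tmp/backup.tar deploy s3cret 1
# copies ./backup.tar to 192.168.1.10:/tmp/backup.tar; pass anything other
# than 1 as the last argument to copy from the remote machine instead.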
| true
|
8007b3302a6973430894996c64e3041d1f9d81b4
|
Shell
|
Mityai/contests
|
/inno/summer-shop-2017/05:07/kthstat/src/doall_old.sh
|
UTF-8
| 2,516
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
logfile=tests.comments
function die {
echo "$*"
exit 1
}
tn=1
function makeTest
{
fn=`printf "%02d" $tn`
echo $fn : $*
echo $fn : $* >> $logfile
$* > $fn || exit
let tn=$tn+1
}
function compile
{
echo compile $1
g++ -O2 -Wall $1.cpp -o $1.exe || exit
}
compile gen_rand
compile gen_rand_wide_range
compile gen_rand_only_find
echo "make tests"
echo . > $logfile
makeTest "./gen_rand.exe 239 10 10 1000000000"
makeTest "./gen_rand.exe 239 100 100 1000000000"
makeTest "./gen_rand.exe 239 1000 1000 1000000000"
makeTest "./gen_rand.exe 239 10000 1000 1000000000"
makeTest "./gen_rand.exe 239 100000 1000 1000000000"
makeTest "./gen_rand.exe 239 100000 1000 1"
makeTest "./gen_rand_wide_range.exe 23917 10 10 1000000000"
makeTest "./gen_rand_wide_range.exe 23917 100 100 1000000000"
makeTest "./gen_rand_wide_range.exe 23917 1000 1000 1000000000"
makeTest "./gen_rand_wide_range.exe 23917 10000 1000 1000000000"
makeTest "./gen_rand_wide_range.exe 23917 100000 1000 1000000000"
makeTest "./gen_rand_wide_range.exe 23917 100000 1000 1"
makeTest "./gen_rand_only_find.exe 239 10 10 1000000000"
makeTest "./gen_rand_only_find.exe 239 100 100 1000000000"
makeTest "./gen_rand_only_find.exe 239 1000 1000 1000000000"
makeTest "./gen_rand_only_find.exe 239 10000 10000 1000000000"
makeTest "./gen_rand_only_find.exe 239 100000 100000 1000000000"
makeTest "./gen_rand_only_find.exe 239 100000 100000 1"
makeTest "./gen_rand.exe 2391 10 10 1000000000"
makeTest "./gen_rand.exe 2391 100 100 1000000000"
makeTest "./gen_rand.exe 2391 1000 1000 1000000000"
makeTest "./gen_rand.exe 2391 10000 10000 1000000000"
makeTest "./gen_rand.exe 2391 100000 100000 1000000000"
makeTest "./gen_rand.exe 2391 100000 100000 1"
makeTest "./gen_rand_wide_range.exe 2392 10 10 1000000000"
makeTest "./gen_rand_wide_range.exe 2392 100 100 1000000000"
makeTest "./gen_rand_wide_range.exe 2392 1000 1000 1000000000"
makeTest "./gen_rand_wide_range.exe 2392 10000 10000 1000000000"
makeTest "./gen_rand_wide_range.exe 2392 100000 100000 1000000000"
makeTest "./gen_rand_wide_range.exe 2392 100000 100000 1"
echo "Compiling solution"
g++ -O2 -Wall -I "../graders" -o "sol.exe" "../graders/grader.cpp" "../solutions/kthstat_ra_n_sqrt_nlogn.cpp" || die "Unable to compile"
for f in ?? ; do
echo "Running on test $f"
./sol.exe < $f > $f.a || die "Unable to run"
done
echo "move tests"
testdir="../tests"
rm -f -r $testdir
mkdir $testdir || fail
mv ?? ??.a ../tests/ || exit 1
echo "clean"
rm -f gen_rand.exe gen_rand_wide_range.exe gen_rand_only_find.exe sol.exe
| true
|
b747f137af8663d0d6e5f2a98bf7843a704ff84d
|
Shell
|
Circuitsoft/sensei_mesh
|
/reprog.sh
|
UTF-8
| 644
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
CHANNEL=39
make || exit 1
function program {
  local ARGS=""
  if [ -c "$1" ]; then
    if [ "$3" == "clock_master" ]; then
      ARGS="--no-sleeping"
    fi
    make install SERIAL_PORT=$1 && ./pyaci/configure_sensor.py --channel $CHANNEL $ARGS -d "$1" "$2"
    if [ "$3" == "clock_master" ]; then
      ./pyaci/set_time.py -d "$1"
    fi
  else
    echo "Skipping $1 (not plugged in?)"
  fi
}
program /dev/cu.usbserial-DN00D34P 1 &
program /dev/cu.usbserial-DN00CSZ7 2 &
program /dev/cu.usbserial-DO00C2G2 3 &
program /dev/cu.usbserial-FTZ86FTC 12 &
program /dev/cu.usbserial-AI04QL7P 30 clock_master &
program /dev/cu.usbserial-A105RB12 31 &
wait
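# To flash an additional board, add another call above following the same
# pattern (device path and sensor id are placeholders):
#   program /dev/cu.usbserial-XXXXXXXX 42 &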
| true
|
3524103b8f7a05af997ceaa808850642e9402a63
|
Shell
|
hamilton2br/pos-memreader
|
/dumper.sh
|
UTF-8
| 816
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
#reading the container's memory via nsenter
grep rw-p /proc/$1/maps | sed -n 's/^\([0-9a-f]*\)-\([0-9a-f]*\) .*$/\1 \2/p' | while read start stop; do gdb --batch --pid $1 -ex "x/4096wx 0x$start"; done > $1.dump
#reading the memory of the container's process
#grep rw-p /proc/$1/maps | sed -n 's/^\([0-9a-f]*\)-\([0-9a-f]*\) .*$/\1 \2/p' | while read start stop; do gdb --batch --pid $1 -ex "dump memory $1-$start-$stop.dump 0x$start 0x$stop"; done
#attempting, so far without success, to read the memory of a process which is running inside a container
#grep rw-p /proc/$1/maps | sed -n 's/^\([0-9a-f]*\)-\([0-9a-f]*\) .*$/\1 \2/p' | while read start stop; do docker -H tcp://127.0.0.1:2375 exec -i furious_newton /usr/bin/gdb --batch --pid $1 -ex "dump memory $1-$start-$stop.dump 0x$start 0x$stop"; done
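# To obtain the host-side PID ($1) of a process running inside a container,
# the container's init PID can be looked up first (standard docker command;
# "furious_newton" is the container name used in the comment above):
#   docker inspect -f '{{.State.Pid}}' furious_newton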
| true
|
29985017ada20fd550fe8915162a284031abbdfd
|
Shell
|
machine-intelligence/arbital-open-source
|
/scripts/create_db.sh
|
UTF-8
| 1,169
| 3.953125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Creates xelaie DB and tables on MySQL server at localhost.
source init.sh || exit
HOST=localhost
read -r -p "This script will DROP ALL DB DATA and rebuild the database at ${HOST}. Is this your intent? [y/N] " response
if [[ ! $response =~ ^([yY][eE][sS]|[yY])$ ]]; then
exit
fi
DB_NAME=$(cfg mysql.database)
DB_USER=$(cfg mysql.user)
ROOT_PW=$(cfg mysql.root.password)
USER_PW=$(cfg mysql.password)
echo "Creating DB ${DB_NAME}@${HOST}.."
mysql --host ${HOST} -u root -p"${ROOT_PW}" -e "DROP DATABASE IF EXISTS ${DB_NAME}; CREATE DATABASE IF NOT EXISTS ${DB_NAME} DEFAULT CHARACTER SET utf8mb4 DEFAULT COLLATE utf8mb4_general_ci; USE ${DB_NAME};"
echo "Creating user ${DB_USER}.."
# Note that "GRANT" also creates the user, if necessary (no point in using "CREATE USER"):
# http://justcheckingonall.wordpress.com/2011/07/31/create-user-if-not-exists/
mysql --host ${HOST} -u root -p"${ROOT_PW}" -e "GRANT ALL ON ${DB_NAME}.* TO '${DB_USER}'@'%' IDENTIFIED BY '${USER_PW}';"
SCHEMAS=schemas/*.sql
for f in $SCHEMAS; do
echo "Importing schema ${f}.."
cat ${f} | mysql --host ${HOST} -u ${DB_USER} -p${USER_PW} ${DB_NAME}
done
echo "All done."
| true
|
f4bca9ed896d5c013ab4d774ea18f11cb50507f2
|
Shell
|
tushar8871/Shell_Pprograms
|
/RepetationSelection/primeNumInRange.sh
|
UTF-8
| 291
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash -x
echo "Prime number between range"
read -p "Enter num1 " num1
read -p "Enter num2 " num2
for (( i=$num1; $i<$num2; i++ ))
do
    count=0;
    for (( j=1; $j<=$i; j++ ))
    do
        if [ $(($i%$j)) -eq 0 ]
        then
            ((count++));
        fi
    done
    if [ $count -eq 2 ]
    then
        echo $i
    fi
done
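# A faster variant only needs to test divisors up to i/2 and can stop at the
# first hit -- a sketch, otherwise equivalent to the loop above:
#   is_prime() {
#       [ $1 -lt 2 ] && return 1
#       for (( j=2; j<=$1/2; j++ )); do
#           [ $(($1%$j)) -eq 0 ] && return 1
#       done
#       return 0
#   }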
| true
|
bbbd8957de7ee1c348b574ecdb62cf0cdf324647
|
Shell
|
5l1v3r1/pooldetective
|
/docker-coindaemons/fto/prepare.sh
|
UTF-8
| 349
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
FILE="$PWD/futurocoind"
if [ ! -f "$FILE" ]; then
git clone --depth=1 https://github.com/futuro-coin/Futuro-binaries
cd Futuro-binaries
tar -xvf futurocoincore-1.1.0-ubuntu18_04-x86_64.tar.xz
mv futurocoincore-1.1.0-ubuntu18_04-x86_64/futurocoin* ..
rm ../futurocoin-qt
cd ..
rm -rf Futuro-binaries
fi
| true
|
010797b03b2f1bf18feb8255f53182b9fad391d9
|
Shell
|
pdxgx/ri-tests
|
/data_preparation/processing/shortread/HX1/scripts/run_irfinders.sh
|
UTF-8
| 1,031
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env
# Author: Sean Maden
#
# Run IRFinder-S from a Docker image. Follow instructions in the GH repo
# to download the software and save its annotation files to the
# `annodpath` variable. With the docker image we use -r and -d flags
# to specify the local paths to the intron annotations references and the
# output directory where results files will be saved.
#
# Docker setup:
# 1. get the IRFinder-S image
# > sudo docker pull cloxd/irfinder:2.0
#
# 2. run the IRFinder-S image
# > sudo docker run cloxd/irfinder:2.0 --help
#
# manage paths
srrid=SRR2911306 # set run id
bamfpath=RI_benchmarking_BAMs/$srrid'.sorted.bam'
outdpath=RI_benchmarking_irfinders/
outrunpath=$outdpath$srrid/
annopath=RI_benchmarking_resources/gencode_v35_annotation_files/IRFinder_annotation
# navigate to main dir
cd /eternity/data/
# view docstrings
sudo docker run cloxd/irfinder:2.0 --help
# run irfinders using local annotation files
sudo docker run -w $PWD -v $PWD:$PWD cloxd/irfinder:2.0 -m BAM -r $annopath -d $outrunpath $bamfpath
| true
|
3976bcba0380c590846a0e84d73bd0389ab657d3
|
Shell
|
fisherevans/SmartShift
|
/server/bin/undeploy
|
UTF-8
| 463
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
project="$1"
if [ ! "$project" == "accounts" ] && [ ! "$project" == "business" ] ; then
echo "Please pass a project to build: accounts business"
exit 1
fi
running=`ps -ef | grep org.apache.catalina.startup.Bootstra[p] | wc -l`
if [ ! $running == "0" ] ; then
echo "Tomcat is currently running..."
tomcat stop
fi
echo "Undeploying WAR file: $project.war"
rm -f $STOM/webapps/$project.war
if [ ! $running == "0" ] ; then
tomcat start
fi
| true
|
7424583141962c6c90c6fb952a79132bf2d4e527
|
Shell
|
GewoonMaarten/Waterschade-project
|
/scripts/install.sh
|
UTF-8
| 1,927
| 3.984375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if [ $# -eq 0 ]; then
>&2 echo "No arguments supplied"
>&2 echo "Possible arguments are: kernel, boot, dependencies, dhcp"
exit
fi
if grep -q kernel <(echo $@); then
    if [ ! -f /etc/modules ]; then
        >&2 printf "Couldn't find kernel module file\nManually add dwc2 to modules"
    else
        if grep -q "dwc2" /etc/modules; then
            >&2 printf "dwc2 already in kernel modules"
        else
            echo "dwc2" | tee -a /etc/modules
        fi
        if grep -q "g_ether" /etc/modules; then
            >&2 printf "g_ether already in kernel modules"
        else
            echo "g_ether" | tee -a /etc/modules
            echo "installed kernel module"
        fi
    fi
fi
if grep -q boot <(echo $@); then
    if [ ! -f /boot/config.txt ]; then
        >&2 printf "Boot config file does not exist\nNot installing boot"
    elif grep -q -P "(#\s*dtoverlay\s*=\s*\".*\"|dtoverlay\s*=\s*\"\s*\")" /boot/config.txt; then
        >&2 printf "dtoverlay already in boot config\nManually add \"dwc2\" to that list"
    else
        echo "dtoverlay=dwc2" | tee -a /boot/config.txt
        echo "installed boot options"
    fi
fi
if grep -q dependencies <(echo $@); then
apt install python3 python3-pip isc-dhcp-server
pip3 install flask
fi
if grep -q dhcp <(echo $@); then
if [ -f /etc/dhcp/dhcpd.conf ]; then
printf "subnet 10.50.40.0 netmask 255.255.255.0{\n\trange 10.50.40.10 10.50.40.20\n}" >> /etc/dhcp/dhcpd.conf
echo "Installed dhcp config"
else
>&2 printf "Couldn't find isc-dhcp-server configuration, are you sure it's installed?"
fi
if [ -f /etc/dhcpcd.conf ]; then
printf "interface usb0\nstatic ip_address=10.50.40.1" >> /etc/dhcpcd.conf
echo "Installed network interface config"
else
>&2 printf "Couldn't set static ip.\nPlease set ip of interface usb0 to '10.50.40.1'"
fi
fi
echo
| true
|
528007da5e892a62909ae97044e83b6700a1ec00
|
Shell
|
artujose/EPAM_EpicGamesTask
|
/Solution - 1a/copy_logs_remotely.sh
|
UTF-8
| 879
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
remote_host=$1
dest_dir=$2
user_name=$3
user_password=$4
log_date=$(date +"%Y_%m_%d")
log_folder="logs_folder_"$log_date
today=$(date +"%d")
#we could certainly use the $APACHE_LOG_DIR var if it exists in the ENV
apache_log_dir=/var/log/apache2
#make a local folder to copy all log files matching the condition
mkdir -p $apache_log_dir/$log_folder
#pick up all TODAY's log files from 18 to 21 hrs
for file in $apache_log_dir/*"$today"_18.log $apache_log_dir/*"$today"_19.log $apache_log_dir/*"$today"_20.log $apache_log_dir/*"$today"_21.log
do
#check if the file exists, it could be the file was not created for a certain time
if [ -e "$file" ]
then
cp $file $apache_log_dir/$log_folder
fi
done
#see README.txt, for 'key-based authentication' approach instead of sshpass
sshpass -p "$user_password" scp -r $apache_log_dir/$log_folder $user_name@$remote_host:/$dest_dir
| true
|
1b771aa530bd4edd6fa25957c76208d2c73d3bde
|
Shell
|
somasis/s6-exherbo
|
/libexec/s6/init2.sh
|
UTF-8
| 189
| 2.5625
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Try state-normal, else fallback to state-basic
if s6-rc-db list bundles | grep -xFq state-normal;then
s6-rc -u change state-normal
else
s6-rc -u change state-basic
fi
| true
|
259bd2515c517abdbec58c2cdc4c6f031286aa0b
|
Shell
|
ltr01/dotfiles
|
/bash/.alias.sh
|
UTF-8
| 6,093
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
# useful dockers
alias phpserver='docker run --rm -p 2000:80 -v "$PWD":/var/www/html mastermindzh/php-xdebug'
alias nodeserver='docker run --rm -p 3000:3000 -v "$PWD":/app mastermindzh/generic_node'
alias reactserver='docker run --rm -p 8080:8080 -v "$PWD":/app mastermindzh/generic_node'
alias mongoserver='docker run -d --rm -p 27017:27017 --name mongo-server -e MONGO_INITDB_ROOT_USERNAME=admin -e MONGO_INITDB_ROOT_PASSWORD=123 -v ~/.db/mongo:/data/db mongo'
alias sqlserver='docker run -d --rm --name sql-server -e "ACCEPT_EULA=Y" -e "SA_PASSWORD=Your_Password123" -p 1433:1433 -v ~/.db/mssql:/var/opt/mssql microsoft/mssql-server-linux'
# useful docker commands
alias stop-dockers='docker stop $(docker ps -aq)'
alias docker-clean-containers='docker container prune -f --filter "until=48h"'
alias docker-clean-images='docker image prune -a -f --filter "until=48h"'
alias docker-clean-volumes='docker volume prune -f --filter "label!=keep"'
alias docker-clean-networks='docker network prune -f --filter "until=24h"'
alias docker-clean-all='stop-dockers && docker-clean-containers && docker-clean-images && docker-clean-volumes && docker-clean-networks'
# Kubernetes commands
alias mkubectl='microk8s.kubectl'
alias kubestart='microk8s.start'
alias kubestop='microk8s.stop'
#dotnet core
alias efupdate="dotnet ef database update"
alias efmigrate="dotnet ef migrations add"
alias efremove="dotnet ef migrations remove"
alias dotnetnew="dotnet new webapi -o "
# git
alias gitremovelocalbranches='git branch --merged | egrep -v "(^\*|master|dev)" | xargs git branch -d'
alias untangle-line-endings='find ./ -type f -exec dos2unix {} \;'
alias undo-commit='git reset --soft HEAD^'
## pacman and trizen
alias aur='trizen --noconfirm'
alias update='trizen -Syyu --noconfirm'
alias remove-orphans='sudo pacman -Rns $(pacman -Qtdq)'
alias updatekeys='sudo pacman-key --refresh-key'
alias clean-pacmancache='sudo paccache -rk 1 && sudo paccache -ruk0'
## systeminfo
alias meminfo='free -mth'
alias cpuinfo='lscpu'
alias hddinfo='df -h'
alias temp='watch "sensors | grep Core"'
alias internalip=$'ip route get 8.8.8.8 | awk \'NR==1 {print $NF}\''
alias preferredapps='exo-preferred-applications'
#show 5 most memory consuming apps
alias psmem='ps auxf | sort -nr -k 5 | head -n 5'
##utility
alias nmapscan='nmap -n -sP'
alias pia='nohup sh /opt/pia/run.sh &>/dev/null & disown'
alias wifimenu='nm-connection-editor'
alias findcrlf='find . -path node_modules -prune -o -not -type d -exec file "{}" ";" | grep -E "BOM|CRLF"'
alias fixcrlf='findcrlf > /tmp/crlftolf && cat /tmp/crlftolf | while read line; do CUTLINE=$(echo $line | cut -f1 -d":") && dos2unix $CUTLINE; done'
alias enable-wifi='sudo ip link set wlp2s0 up'
alias scan-wifi='sudo iw dev wlp2s0 scan'
alias pretty-json='python -m json.tool'
alias addpgpkey='gpg --recv-keys'
alias clean-trash='sudo rm -rf ~/.local/share/Trash/*'
alias clean-journal='sudo journalctl --vacuum-time=2d'
alias clean-all='clean-trash && clean-journal && clean-pacmancache && docker-clean-all'
alias dotnet-install='~/.dotnet-install.sh --install-dir /usr/share/dotnet/ -channel'
alias mountshares='sudo bash ~/dotfiles/bash/mounts.sh'
# cli tools
alias crypto='curl -s rate.sx?qF | head -n -2 | tail -n +10'
# show file content without comment lines
alias nocomment='grep -Ev '\''^(#|$)'\'''
# list files/dirs on separate lines
alias list='find ./ -maxdepth 1 -printf "%f\n"'
#show directories
alias dirs='ls -FlA | grep :*/'
#show executables
alias execx='ls -FlA | grep -v \*'
#ls -al
alias la='ls -al'
# show external ip
alias cmyip='curl -s http://ipecho.net/plain; echo'
## default command fixes :P
alias mkdir='mkdir -p'
alias wget='wget -c'
alias ls='ls -l --color=auto'
alias installed='sudo pacman -Qetq'
alias aurinstalled='sudo pacman -Qmq'
alias sudo='sudo '
alias markdown-toc='markdown-toc --bullets="-" -i'
alias tree='tree --dirsfirst'
alias handbrake='ghb'
# grub
alias update-grub='grub-mkconfig -o /boot/grub/grub.cfg'
## Functions
# function to cd up a couple of times
# USAGE: up 3 (goes up 3 directories)
up(){
DEEP=$1;
[ -z "${DEEP}" ] && { DEEP=1; };
for i in $(seq 1 ${DEEP}); do
cd ../;
done;
}
# function to extract ... well anything really
extract () {
if [ -f $1 ] ; then
case $1 in
*.tar.bz2) tar xvjf $1 ;;
*.tar.gz) tar xvzf $1 ;;
*.bz2) bunzip2 $1 ;;
*.rar) unrar x $1 ;;
*.gz) gunzip $1 ;;
*.tar) tar xvf $1 ;;
*.tbz2) tar xvjf $1 ;;
*.tgz) tar xvzf $1 ;;
*.zip) unzip $1 ;;
*.Z) uncompress $1 ;;
*.7z) 7z x $1 ;;
*) echo "don't know how to extract '$1'..." ;;
esac
else
echo "'$1' is not a valid file!"
fi
}
# function to return uptime in a human readable format
myuptime () {
uptime | awk '{ print "Uptime:", $3, $4, $5 }' | sed 's/,//g'
return;
}
# function to check whether a specific host is up
isup(){
if ! [ -z "$1" ]; then
ping -c 3 $1 > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "$1 seems to be offline";
else
echo "$1 seems to be online";
fi
fi
}
# function to print a line across the screen
printLine(){
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
}
# function to kill a port
killport () {
if [ -z "$1" ] ; then
echo "please specify a port to kill"
else
fuser -k $1/tcp
fi
}
# function to switch kubernetes namespace
kubeswitch () {
if [ -z "$1" ] ; then
echo "please specify a namespace to switch to"
else
kubectl config set-context --current --namespace=$1
fi
}
# function to switch to a different azure kubernetes cluster
azkubeswitch () {
if [ -z "$2" ] ; then
echo "please execute with the following params: azkubeswitch {resourcegroupname} {clustername}"
else
az aks get-credentials --resource-group $1 --name $2
fi
}
| true
|
4805c471bcf6ebe613cd68787f52416c96e47bf5
|
Shell
|
CliveCarrington/raspberryCamera
|
/bin/statusCamera.sh
|
UTF-8
| 315
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
cd ~/Code/raspberryCamera/bin
echo "Current Activity"
ps -ef | grep piCamera.py
tail ./log/images.log
ls -lrt ./images | tail -5
### Checking run status
status=`/bin/ps -ef | grep piCamera.py | wc -l`
if [ $status -eq "1" ]
then
echo "Need to start"
./startPiCamera.sh >./log/images.log &
fi
| true
|
5e5dc94f8188c86cef7b029c3fe3210ce6c7765a
|
Shell
|
datasets-at/mi-jenkins
|
/customize
|
UTF-8
| 1,019
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/bash
PATH=/opt/local/gnu/bin:/opt/local/bin:/opt/local/sbin:/usr/bin:/usr/sbin
JENKINS_VERSION='1.609.3'
# Exit if any commands fail
set -o errexit
# Ensure we have updated standard packages
echo "* Updating standard packages.";
pkg_delete -v nodejs smtools zoneinit
pkg_add -v nodejs smtools zoneinit
npm install jsontool -g
# Configuring image specific packages
echo "* Configuring image specific packages.";
# Create user for jenkins
echo "* create user for jenkins"
/usr/sbin/groupadd -g 10001 jenkins
/usr/sbin/useradd -u 10001 -g jenkins -d /home/jenkins -m -s /usr/bin/bash jenkins
# Download and enable jenkins
echo "* download and enable jenkins"
/usr/bin/wget -c -O /opt/jenkins.war http://mirrors.jenkins-ci.org/war-stable/${JENKINS_VERSION}/jenkins.war
svccfg import /root/jenkins.xml
# Create ssl folder for nginx
echo "* create ssl folder for nginx"
mkdir -p /opt/local/etc/nginx/ssl
# Clean up
echo "* Cleaning up."
rm -rf /root/*
# Prepare image for provisioning
sm-prepare-image -y
| true
|
32f56f69c5a9b5efc798a7e1a6e52398a73d32ac
|
Shell
|
thz/retain
|
/make-more.sh
|
UTF-8
| 156
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
name=retain
build() {
export GOOS=$1 ; shift
export GOARCH=$1 ; shift
go build -o $name-$GOOS-$GOARCH
}
build solaris amd64
build linux arm64
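# Additional targets follow the same pattern, e.g. (hypothetical):
#   build darwin arm64
# which would produce ./retain-darwin-arm64.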
| true
|
8f6ac0ab3f8aa5beba25046df812f7bb073a558e
|
Shell
|
d-henness/aux
|
/BS_sub_auto.sh
|
UTF-8
| 5,292
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
kind='both' # can be equal to 'm' for minus sym, 'p' for plus sym, or 'both' for both sym
sym=''
nuc=''
exepath=/home/dhenness/prophet/exe
ver=dfratomgpu_binarysearch2.x
alljobs=false
bottom=false
wtime=01:00:00
while [ $# -gt 0 ]
do
val=$1
shift
case $val in
"-a")
alljobs=true
;;
"-b")
bottom=true
;;
"-gauss")
nuc='gauss_'
;;
"-point")
nuc='point_'
;;
"-S")
sym='S'
low=$1
high=$2
shift
shift
;;
"-P")
sym='P'
low=$1
high=$2
shift
shift
;;
"-D")
sym='D'
low=$1
high=$2
shift
shift
;;
"-F")
sym='F'
low=$1
high=$2
shift
shift
;;
"-t")
wtime=$1
shift
;;
"-k")
kind=$1
shift
;;
*)
job=$val
;;
esac
done
if [[ -z $nuc ]]; then
echo No nuc entered
exit
fi
if [ "$alljobs" = true ]; then
job=`ls *inp`
elif [ -z "$job" ]; then
echo no job given
exit
fi
for file in $job; do
dir=`echo $file | sed 's/_.*$//'`
mkdir $dir
cp $file $dir/$file-orig
OLDS=`echo $file | sed -e "s/^.*$nuc//" -e 's/s.*$//'`
OLDPm=`echo $file | sed -e "s/^.*$nuc.*s//" -e 's/p.*$//'`
OLDPp=`echo $file | sed -e "s/^.*$nuc.*p-//" -e 's/p.*$//'`
OLDDm=`echo $file | sed -e "s/^.*$nuc.*p+//" -e 's/d.*$//'`
OLDDp=`echo $file | sed -e "s/^.*$nuc.*d-//" -e 's/d.*$//'`
OLDFm=`echo $file | sed -e "s/^.*$nuc.*d+//" -e 's/f.*$//'`
OLDFp=`echo $file | sed -e "s/^.*$nuc.*f-//" -e 's/f.*$//'`
NEWS=$OLDS
NEWPm=$OLDPm
NEWPp=$OLDPp
NEWDm=$OLDDm
NEWDp=$OLDDp
NEWFm=$OLDFm
NEWFp=$OLDFp
oldbs=`grep 'nbs' $file | sed -e 's/^.*nbs=//' -e 's/.end//'`
if [ "$bottom" = true ]; then
oldbottom=`grep 'start' $file | sed -e 's/^.*start=//' -e 's/.end//'`
OLDSB=`echo $oldbottom | awk '{print $1}'`
OLDPBm=`echo $oldbottom | awk '{print $2}'`
OLDPBp=`echo $oldbottom | awk '{print $3}'`
OLDDBm=`echo $oldbottom | awk '{print $4}'`
OLDDBp=`echo $oldbottom | awk '{print $5}'`
OLDFBm=`echo $oldbottom | awk '{print $6}'`
OLDFBp=`echo $oldbottom | awk '{print $7}'`
NEWSB=$OLDSB
NEWPBm=$OLDPBm
NEWPBp=$OLDPBp
NEWDBm=$OLDDBm
NEWDBp=$OLDDBp
NEWFBm=$OLDFBm
NEWFBp=$OLDFBp
fi
echo Changing directory to $dir
cd $dir
rm *.inp
rm *.sh
i=$low
while [ $i -le $high ]; do
if [ $sym == 'S' ]; then
let NEWS=$OLDS-$i
elif [ $sym == 'P' ] && [ $OLDPp -ne 0 ]; then
if [ "$kind" = 'm' ] || [ "$kind" = 'both' ]; then
let NEWPm=$OLDPm-$i
fi
if [ "$kind" = 'p' ] || [ "$kind" = 'both' ]; then
let NEWPp=$OLDPp-$i
fi
elif [ $sym == 'D' ] && [ $OLDDp -ne 0 ]; then
if [ "$kind" = 'm' ] || [ "$kind" = 'both' ]; then
let NEWDm=$OLDDm-$i
fi
if [ "$kind" = 'p' ] || [ "$kind" = 'both' ]; then
let NEWDp=$OLDDp-$i
fi
elif [ $sym == 'F' ] && [ $OLDFp -ne 0 ]; then
if [ "$kind" = 'm' ] || [ "$kind" = 'both' ]; then
let NEWFm=$OLDFm-$i
fi
if [ "$kind" = 'p' ] || [ "$kind" = 'both' ]; then
let NEWFp=$OLDFp-$i
fi
fi
if [ $sym == 'S' ] && [ "$bottom" = true ]; then
let NEWSB=$OLDSB+$i
elif [ $sym == 'P' ] && [ "$bottom" = true ] && [ $OLDPp -ne 0 ]; then
if [ "$kind" = 'm' ] || [ "$kind" = 'both' ]; then
let NEWPBm=$OLDPBm+$i
fi
if [ "$kind" = 'p' ] || [ "$kind" = 'both' ]; then
let NEWPBp=$OLDPBp+$i
fi
elif [ $sym == 'D' ] && [ "$bottom" = true ] && [ $OLDDp -ne 0 ]; then
if [ "$kind" = 'm' ] || [ "$kind" = 'both' ]; then
let NEWDBm=$OLDDBm+$i
fi
if [ "$kind" = 'p' ] || [ "$kind" = 'both' ]; then
let NEWDBp=$OLDDBp+$i
fi
elif [ $sym == 'F' ] && [ "$bottom" = true ] && [ $OLDFp -ne 0 ]; then
if [ "$kind" = 'm' ] || [ "$kind" = 'both' ]; then
let NEWFBm=$OLDFBm+$i
fi
if [ "$kind" = 'p' ] || [ "$kind" = 'both' ]; then
let NEWFBp=$OLDFBp+$i
fi
fi
newfile=$dir'_'$nuc$NEWS's'$NEWPm'p-'$NEWPp'p+'$NEWDm'd-'$NEWDp'd+'$NEWFm'f-'$NEWFp'f+.inp'
if [ $OLDFp -ne 0 ]; then
newbs=$NEWS' '$NEWPm' '$NEWPp' '$NEWDm' '$NEWDp' '$NEWFm' '$NEWFp' '
elif [ $OLDDp -ne 0 ]; then
newbs=$NEWS' '$NEWPm' '$NEWPp' '$NEWDm' '$NEWDp' '
elif [ $OLDPp -ne 0 ]; then
newbs=$NEWS' '$NEWPm' '$NEWPp' '
else
newbs=$NEWS' '
fi
if [ "$bottom" = true ];then
if [ $OLDFp -ne 0 ]; then
newbottom=$NEWSB' '$NEWPBm' '$NEWPBp' '$NEWDBm' '$NEWDBp' '$NEWFBm' '$NEWFBp' '
elif [ $OLDDp -ne 0 ]; then
newbottom=$NEWSB' '$NEWPBm' '$NEWPBp' '$NEWDBm' '$NEWDBp' '
elif [ $OLDPp -ne 0 ]; then
newbottom=$NEWSB' '$NEWPBm' '$NEWPBp' '
else
newbottom=$NEWSB' '
fi
fi
cp $file-orig $newfile
sed -i -e "s/$oldbs/$newbs/" $newfile
if [ "$bottom" = true ];then
sed -i -e "s/$oldbottom/$newbottom/" $newfile
fi
make_gpu_job.sh -tt ${newfile%????} -pr $exepath/$ver \'$newfile\' -t $wtime
let i=$i+1
done
ls *_sub.sh > jobsub.sh
sed -i 's/^/sbatch /' jobsub.sh
chmod +x jobsub.sh
./jobsub.sh
cd ..
done
| true
|
00dedf2e8da3e7567193d3ad449d3722cc0fb472
|
Shell
|
nasa/GMSEC_API
|
/csharp/gmsec5/fixOther.sh
|
UTF-8
| 4,636
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Copyright 2007-2023 United States Government as represented by the
# Administrator of The National Aeronautics and Space Administration.
# No copyright is claimed in the United States under Title 17, U.S. Code.
# All Rights Reserved.
GENERATED_SRC="interfaces/BinaryField.cs"
OLD_TEXT_1="public SWIGTYPE_p_GmsecBinaryData GetValue"
NEW_TEXT_1="public byte\[\] GetValue"
OLD_TEXT_2="public BinaryField(string name, byte\[\] blob, int length, bool IsHeader) : this(GmsecPINVOKE.new_BinaryField__SWIG_0(name, blob, length, IsHeader), true) {"
NEW_TEXT_2="public BinaryField(string name, byte\[\] blob, bool IsHeader) : this(GmsecPINVOKE.new_BinaryField__SWIG_0(name, blob, blob.Length, IsHeader), true) {"
OLD_TEXT_3="public BinaryField(string name, byte\[\] blob, int length) : this(GmsecPINVOKE.new_BinaryField__SWIG_1(name, blob, length), true) {"
NEW_TEXT_3="public BinaryField(string name, byte\[\] blob) : this(GmsecPINVOKE.new_BinaryField__SWIG_1(name, blob, blob.Length), true) {"
if [[ "$GMSEC_PLATFORM" == "linux"* ]] || [[ "$GMSEC_PLATFORM" == "aarch64" ]]; then
echo "Patching $GENERATED_SRC for $GMSEC_PLATFORM..."
sed -i "s/$OLD_TEXT_1/$NEW_TEXT_1/g" $GENERATED_SRC
sed -i "s/$OLD_TEXT_2/$NEW_TEXT_2/g" $GENERATED_SRC
sed -i "s/$OLD_TEXT_3/$NEW_TEXT_3/g" $GENERATED_SRC
elif [[ "$GMSEC_PLATFORM" == "macosx"* ]]; then
echo "Patching $GENERATED_SRC for MacOS..."
sed -i '' "s/$OLD_TEXT_1/$NEW_TEXT_1/g" $GENERATED_SRC
sed -i '' "s/$OLD_TEXT_2/$NEW_TEXT_2/g" $GENERATED_SRC
sed -i '' "s/$OLD_TEXT_3/$NEW_TEXT_3/g" $GENERATED_SRC
else
echo "Warning: $GENERATED_SRC is not being patched!"
fi
GENERATED_SRC="interfaces/ConfigFile.cs"
OLD_TEXT_1="public void AddSubscriptionEntry(SWIGTYPE_p_gmsec__api5__ConfigFile__SubscriptionEntry entry) {"
NEW_TEXT_1="public void AddSubscriptionEntry(SubscriptionEntry entry) {"
OLD_TEXT_2="GmsecPINVOKE.ConfigFile_AddSubscriptionEntry(swigCPtr, SWIGTYPE_p_gmsec__api5__ConfigFile__SubscriptionEntry.getCPtr(entry));"
NEW_TEXT_2="GmsecPINVOKE.ConfigFile_AddSubscriptionEntry(swigCPtr, SubscriptionEntry.getCPtr(entry));"
OLD_TEXT_3="public SWIGTYPE_p_gmsec__api5__ConfigFile__SubscriptionEntry LookupSubscriptionEntry(string name) {"
NEW_TEXT_3="public SubscriptionEntry LookupSubscriptionEntry(string name) {"
OLD_TEXT_4="SWIGTYPE_p_gmsec__api5__ConfigFile__SubscriptionEntry ret = new SWIGTYPE_p_gmsec__api5__ConfigFile__SubscriptionEntry(GmsecPINVOKE.ConfigFile_LookupSubscriptionEntry(swigCPtr, name), false);"
NEW_TEXT_4="SubscriptionEntry ret = new SubscriptionEntry(GmsecPINVOKE.ConfigFile_LookupSubscriptionEntry(swigCPtr, name), false);"
if [[ "$GMSEC_PLATFORM" == "linux"* ]] || [[ "$GMSEC_PLATFORM" == "aarch64" ]]; then
sed -i "s/$OLD_TEXT_1/$NEW_TEXT_1/g" $GENERATED_SRC
sed -i "s/$OLD_TEXT_2/$NEW_TEXT_2/g" $GENERATED_SRC
sed -i "s/$OLD_TEXT_3/$NEW_TEXT_3/g" $GENERATED_SRC
sed -i "s/$OLD_TEXT_4/$NEW_TEXT_4/g" $GENERATED_SRC
elif [[ "$GMSEC_PLATFORM" == "macosx"* ]]; then
sed -i '' "s/$OLD_TEXT_1/$NEW_TEXT_1/g" $GENERATED_SRC
sed -i '' "s/$OLD_TEXT_2/$NEW_TEXT_2/g" $GENERATED_SRC
sed -i '' "s/$OLD_TEXT_3/$NEW_TEXT_3/g" $GENERATED_SRC
sed -i '' "s/$OLD_TEXT_4/$NEW_TEXT_4/g" $GENERATED_SRC
else
echo "$WRAPPED_SRC is not being patched!"
fi
GENERATED_SRC="interfaces/Message.cs"
OLD_TEXT_1="public bool AddField(string name, byte\[\] blob, int len) {"
NEW_TEXT_1="public bool AddField(string name, byte\[\] blob) {"
OLD_TEXT_2="bool ret = GmsecPINVOKE.Message_AddField__SWIG_1(swigCPtr, name, blob, len);"
NEW_TEXT_2="bool ret = GmsecPINVOKE.Message_AddField__SWIG_1(swigCPtr, name, blob, blob.Length);"
if [[ "$GMSEC_PLATFORM" == "linux"* ]] || [[ "$GMSEC_PLATFORM" == "aarch64" ]]; then
sed -i "s/$OLD_TEXT_1/$NEW_TEXT_1/g" $GENERATED_SRC
sed -i "s/$OLD_TEXT_2/$NEW_TEXT_2/g" $GENERATED_SRC
elif [[ "$GMSEC_PLATFORM" == "macosx"* ]]; then
sed -i '' "s/$OLD_TEXT_1/$NEW_TEXT_1/g" $GENERATED_SRC
sed -i '' "s/$OLD_TEXT_2/$NEW_TEXT_2/g" $GENERATED_SRC
else
echo "$WRAPPED_SRC is not being patched!"
fi
GENERATED_SRC="interfaces/GmsecPINVOKE.cs"
OLD_TEXT_1="\[global::System.Runtime.InteropServices.DllImport(\"Gmsec\""
NEW_TEXT_1="\[global::System.Runtime.InteropServices.DllImport(\"libgmsec_csharp\""
if [[ "$GMSEC_PLATFORM" == "linux"* ]] || [[ "$GMSEC_PLATFORM" == "aarch64" ]]; then
sed -i "s/$OLD_TEXT_1/$NEW_TEXT_1/g" $GENERATED_SRC
elif [[ "$GMSEC_PLATFORM" == "macosx"* ]]; then
sed -i '' "s/$OLD_TEXT_1/$NEW_TEXT_1/g" $GENERATED_SRC
else
echo "$WRAPPED_SRC is not being patched!"
fi
exit 0
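# Possible refactor (sketch, not part of the original script): the platform
# branches above differ only in sed's in-place flag, so a small wrapper would
# remove the duplication:
#   sed_inplace() {
#       if [[ "$GMSEC_PLATFORM" == "macosx"* ]]; then
#           sed -i '' "$@"
#       else
#           sed -i "$@"
#       fi
#   }
#   # usage: sed_inplace "s/$OLD_TEXT_1/$NEW_TEXT_1/g" $GENERATED_SRC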
| true
|
bdae27799c357eb56fe37259a2d9da448b45a433
|
Shell
|
bf-rmzi/sumologic-aws-lambda
|
/awsautoenableS3Logging/test/TestTemplate.sh
|
UTF-8
| 1,848
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# bash is required: this script uses arrays and [[ ]]
export AWS_REGION="ap-south-1"
export AWS_PROFILE="personal"
# App to test
export AppName="tag"
export InstallTypes=("s3" "s3existing" "vpc" "vpcexisting" "alb" "albexisting")
export BucketName="sumologiclambdahelper-${AWS_REGION}"
export FilterExpression=".*"
for InstallType in "${InstallTypes[@]}"
do
export BucketPrefix=${InstallType}"-LOGS/"
if [[ "${InstallType}" == "s3" ]]
then
export EnableLogging="S3"
export TaggingResourceOptions="New"
elif [[ "${InstallType}" == "s3exiting" ]]
then
export EnableLogging="S3"
export TaggingResourceOptions="Existing"
elif [[ "${InstallType}" == "vpc" ]]
then
export EnableLogging="VPC"
export TaggingResourceOptions="New"
elif [[ "${InstallType}" == "vpcexisting" ]]
then
export EnableLogging="VPC"
export TaggingResourceOptions="Existing"
elif [[ "${InstallType}" == "alb" ]]
then
export EnableLogging="ALB"
export TaggingResourceOptions="New"
elif [[ "${InstallType}" == "albexisting" ]]
then
export EnableLogging="ALB"
export TaggingResourceOptions="Existing"
export BucketPrefix=${InstallType}"-LOGS"
else
echo "No Valid Choice."
fi
# Stack Name
export stackName="${AppName}-${InstallType}"
aws cloudformation deploy --region ${AWS_REGION} --profile ${AWS_PROFILE} --template-file ../auto_enable_s3_alb.template.yaml \
--capabilities CAPABILITY_IAM CAPABILITY_AUTO_EXPAND CAPABILITY_NAMED_IAM --stack-name "${AppName}-${InstallType}" \
--parameter-overrides EnableLogging="${EnableLogging}" TaggingResourceOptions="${TaggingResourceOptions}" \
FilterExpression="${FilterExpression}" BucketName="${BucketName}" BucketPrefix="${BucketPrefix}" &
export ExistingResource="No"
done
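# The deploys above are backgrounded with '&'; adding a final 'wait' here would
# block until every stack has finished deploying (optional):
# wait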
| true
|
b8d152e9798a7f5572f2b28c864a1d820c5e9109
|
Shell
|
laujonat/dotfiles
|
/scripts/install_go.sh
|
UTF-8
| 200
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
VERSION=1.17
OS=linux
ARCH=amd64
cd $HOME
wget https://storage.googleapis.com/golang/go$VERSION.$OS-$ARCH.tar.gz
tar -xvf go$VERSION.$OS-$ARCH.tar.gz
mv go go-$VERSION
sudo mv go-$VERSION /usr/local
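# Assumed follow-up (not in the original script): add the toolchain to PATH,
# e.g. in ~/.bashrc:
#   export PATH=$PATH:/usr/local/go-$VERSION/bin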
| true
|
87ebb79da0a8975f99346240f87d887d0e15e6a0
|
Shell
|
sabram/scripts
|
/findt
|
UTF-8
| 191
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#Find Text in files
echo "Searching for files containing the text '$1' in current dir and all sub-dirs"
find . -type f -not -iwholename '*.git*' -exec grep -il "$1" {} \;
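# Example: findt TODO   # lists files under . containing "TODO", skipping .git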
| true
|
6330fd354cb9b8dddcc615f21ae2ef27d39e7325
|
Shell
|
wying3/cnf-features-deploy
|
/ztp/ztp-playbooks/common-roles/offline-mirror-olm/templates/upgrade-mirror.sh.j2
|
UTF-8
| 744
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Disconnected Operator Catalog Mirror and Minor Upgrade
# Variables to set, suit to your installation
export OCP_REGISTRY=quay.io/openshift-release-dev/ocp-release-nightly
export OCP_RELEASE={{ ocp_release_nightly_version }}
export LOCAL_REGISTRY={{ provisioner_cluster_registry_var }}
export LOCAL_REPOSITORY=ocp4
export AUTH_FILE="{{ pull_secret_path }}"
# generate new auth file with merged creds
jq -s '.[0] * .[1]' $AUTH_FILE $XDG_RUNTIME_DIR/containers/auth.json > ./merged_creds.json
oc adm -a ./merged_creds.json release new --insecure \
--from-release={{ sno_image }} \
--mirror=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \
--to-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:{{ sno_image.split(':')[1] }}
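# Optional verification (assumption, not part of the template): inspect the
# mirrored release before pointing an upgrade at it:
#   oc adm release info ${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:<tag>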
| true
|
d531e1f187d981902e282b4a712a7e1b78a70031
|
Shell
|
BenMusch/microblog
|
/bin/deploy
|
UTF-8
| 799
| 2.734375
| 3
|
[] |
no_license
|
#! /bin/bash
set -e
echo 'Setting env...'
export MIX_ENV=prod
echo 'Copying config from server...'
sshpass -p $PASSWORD scp -o StrictHostKeyChecking=no ben@$IP:~/www/microblog-config/prod.secret.exs ./config/prod.secret.exs
# create release
echo 'Compiling assets...'
cd assets
npm install
./node_modules/brunch/bin/brunch b -p
cd ..
yes | mix phx.digest
echo 'Creating release...'
yes | mix release --env=prod
echo 'Installing release on server...'
sshpass -p $PASSWORD scp -o StrictHostKeyChecking=no \
_build/prod/rel/microblog/releases/0.0.1/microblog.tar.gz ben@$IP:~/www/microblog
sshpass -p $PASSWORD ssh -o StrictHostKeyChecking=no ben@$IP \
'cd ~/www/microblog; tar xzvf microblog.tar.gz; export PORT=8000; ./bin/microblog migrate; ./bin/microblog restart'
echo 'Deploy complete!'
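# Example invocation (PASSWORD and IP are expected in the environment; values
# here are placeholders):
#   PASSWORD=... IP=203.0.113.10 ./bin/deploy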
| true
|
e64dd2701d14465d601e7fef5696207641a339f0
|
Shell
|
wki/MyHomeBinaries
|
/bin/make_pod.sh
|
UTF-8
| 1,266
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Generate PDF files from .pod/.pm files
#
# my lazy script for generating all (at least for me) relevant pod documents
# converted to PDF in a tree that is parallel to the CPAN installation
# not perfect but works for me.
#
# pod2pdf is required, please install from CPAN first!
# http://search.cpan.org/~jonallen/pod2pdf-0.42
#
#
# some variables to allow customization
#
pod_dir=`dirname $(perldoc -l Catalyst)`
pdf_dir=~/Desktop/CPAN
mkdir -p $pdf_dir
for dir in $pod_dir/darwin-2level $pod_dir; do
cd $dir
for file in `find Mojo* SQL Moose* Catalyst* DateTime* DBIx HTML/Form* PSGI* Plack* Web* XML* -type f | grep -v DateTime/Locale/`
do
dir=`dirname $file`
pdf=${file/.*/.pdf}
# echo "file=$file, dir=$dir, pdf=$pdf"
if [[ ! -f "$pdf_dir/$pdf" || "$pdf_dir/$pdf" -ot "$file" ]] ; then
pod_nr_text_lines=`pod2text $file | wc -l`
if (( pod_nr_text_lines == 0 )); then
rm -f $pdf_dir/$pdf
else
echo "generating $file"
mkdir -p $pdf_dir/$dir
pod2pdf $file > $pdf_dir/$pdf
fi
else
echo "nothing to do for $file"
fi
done
done
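# Note: pod2pdf must be installed first (see header), and `perldoc -l Catalyst`
# is assumed to resolve, i.e. Catalyst itself must already be installed.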
| true
|
d725f110cf322d6b22df5a67a993a88e0979ecb9
|
Shell
|
mdyaseenahmed/USP_Lab
|
/Lab - 04/factorial.sh
|
UTF-8
| 137
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
echo "Enter a Number : "
read n
fact=1
while [ $n -gt 1 ]
do
fact=$((fact * n))
n=$((n - 1))
done
echo "Factorial: $fact"
| true
|
e64425aa0b326dd045ff0e3be5cf862c168b1e52
|
Shell
|
Mithul/HackathonS
|
/cs.sh
|
UTF-8
| 342
| 3.015625
| 3
|
[] |
no_license
|
# build a "-e <user>" grep expression for every local account
s=''
for i in $(cut -d: -f1 /etc/passwd)
do
s=$s' -e '$i
done
echo $s
while inotifywait -q -e modify /var/log/auth.log > /dev/null; do
echo "/var/log/auth.log changed"
op=`sudo tail -3 /var/log/auth.log | grep $s -o --color`
echo $op
# build the query string in a separate variable so the grep
# expression list in $s is not clobbered between iterations
q='?users='
for j in $op
do
q=$q$j','
done
echo $q
curl localhost:3000/userTrack$q
done
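# The resulting request looks like: curl localhost:3000/userTrack?users=alice,bob,
# where the names are whatever grep matched in the last three auth.log lines.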
| true
|
c90c7ebceea5900fb35bae193491112f8bec55bb
|
Shell
|
Privet-mir/demo
|
/educhain_production/deploy_org/orderer.sh
|
UTF-8
| 1,277
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
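# Usage: ./orderer.sh <number-of-orderers>, e.g. ./orderer.sh 3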
echo -e "\e[34m Install Kafka chart \e[0m"
helm install incubator/kafka -n kafka-hlf --namespace orderer -f ../helm_values/kafka-hlf.yaml
echo -e "\e[34m Please be patient Kafka chart is getting install it migth take upto 10 min\e[0m"
# sleep 500
kubectl wait --for=condition=ready --timeout=800s -n orderer pod/kafka-hlf-0
kubectl wait --for=condition=ready --timeout=800s -n orderer pod/kafka-hlf-1
NUM=$1
for i in $(seq 1 $NUM)
do
echo -e "\e[34m Deploy Orderer$i \e[0m"
echo -e "\e[34m Deploy Orderer$i helm chart \e[0m"
helm install -n educhain${i} hyperledger-charts/orderer --namespace orderer -f ../helm_values/ord${i}.yaml
# sleep 30
ORD_POD=$(kubectl get pods --namespace orderer -l "app=orderer,release=educhain$i" -o jsonpath="{.items[0].metadata.name}")
kubectl wait --for=condition=ready --timeout=220s -n orderer pod/$ORD_POD
kubectl logs -n orderer $ORD_POD | grep 'Starting orderer'
done
helm install -n ordcli ../../educhain-ordcli --namespace orderer -f ../helm_values/ordcli.yaml
ORD_POD=$(kubectl get pods --namespace orderer -l "app=orderer,release=ordcli" -o jsonpath="{.items[0].metadata.name}")
kubectl wait --for=condition=ready --timeout=220s -n orderer pod/$ORD_POD
kubectl logs -n orderer $ORD_POD | grep 'Starting orderer'
| true
|