1596 lines
34 KiB
Bash
Executable File
1596 lines
34 KiB
Bash
Executable File
#!/usr/bin/env bash
|
|
|
|
# WARNING: functions do not test for permissions (mostly).
|
|
|
|
# Print the broker's usage summary: options first, then one
# usage/description pair per command via print_help_item.
print_help () {
    echo "Usage: ./broker.sh [<options>] <command> [<args>]"
    echo ""
    echo "Options: "
    echo " -t, --testing Report operations without executing them."
    echo " -e, --env Set the environment variables from data."
    echo ""
    echo "Commands: "
    local usages=(
        "info <deal name>"
        "complete_local <deal name>"
        "stop_container <deal name>"
        "restart_all_containers [<root dir>]"
        "stop_all_containers [<root dir>]"
        "setup_local"
        "install_on_nodes <cluster> <deal>"
        "remote_node_setup <cluster>"
    )
    local summaries=(
        "Collects all information for deal and prints it."
        "Updates and installs e.g. gitlab"
        "Stops a single container, e.g. 'gitlab'"
        "Restarts all containers or all containers under root dir."
        "Stops all containers or all containers under root dir."
        "Installs environment data and software requirements locally."
        "Installs fresh deal across nodes in range."
        "Remotely updates adamocomp on nodes."
    )
    local i
    for i in "${!usages[@]}"; do
        print_help_item "${usages[$i]}" "${summaries[$i]}"
    done
}

# Render one command entry: usage line, then an indented summary.
print_help_item () {
    printf ' %-69.69s \n\t\t %-61.61s \n\n' "$1" "$2"
}
|
|
|
|
# These positional arguments get run at the end of the script.
POSITIONAL=()
# These options get passed to brokers on remote systems.
PASS_OPTS=()

# Walk the raw argument list: recognized flags are consumed here;
# everything else is preserved, in order, in POSITIONAL.
while [[ $# -gt 0 ]]; do
    key="$1"

    case $key in
        -t|--testing)
            # Dry-run mode: functions report instead of executing.
            TESTING_MODE="TRUE"
            PASS_OPTS+=("-t")  # forward the flag to remote brokers
            shift # past option
            ;;
        # Set environment variables in data when running commands.
        -e|--env)
            SET_ENV="TRUE"
            PASS_OPTS+=("-e")
            shift
            ;;
        -h|--help)
            print_help
            exit 0
            ;;
        -l|--lib)
            # Takes a value.  NOTE(review): LIBPATH is not read
            # anywhere in the visible portion of this script.
            LIBPATH="$2"
            shift # past argument
            shift # past value
            ;;
        # Don't use.
        -d|--debug)
            DEBUG=TRUE
            shift # past argument
            ;;
        *) # unknown option
            POSITIONAL+=("$1") # save it in an array for later
            shift # past argument
            ;;
    esac
done
|
|
|
|
######################################################################
# Directory and config structure

# Absolute directory containing this script, independent of how it
# was invoked.
program_root="$(\
    cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd\
)"
self=${program_root}/broker.sh
# Releases are unpacked here before being moved into target_root.
stage_root="${program_root}/staging"
target_root=${ADAMOCOMP_HOME:-"/"}
config_dir=${ADAMOCOMP_CONFDIR:-"${program_root}"}

# In testing mode, capture stderr of data lookups in a timestamped
# file under errors/; otherwise discard it.
if [[ "$TESTING_MODE" == "TRUE" ]]; then
    mkdir -p "${program_root}/errors"
    errfile="${program_root}/errors/`date '+%D.%H:%M:%S'|
    sed 's|/|.|g'`"
else
    errfile="/dev/null"
fi
|
|
|
|
|
|
# Non-public configurations; eventually should be off-loaded to Vault.
data_file="${config_dir}/data.json"
data_json="$(jq '.' ${data_file} 2>>$errfile)"

# Prefer a data.json sitting next to the script, when one exists.
if [[ -r "${program_root}/data.json" ]]; then
    data_file="${program_root}/data.json"
    data_json="$(jq '.' ${data_file} 2>>$errfile)"
fi

######################################################################
# Data interface

# data-tool.js mediates all reads of data.json; -p points it at
# the chosen file.
datatool="${program_root}/data-tool.js -p ${data_file}"
editor="/usr/bin/nano"

if [[ $DEBUG == "TRUE" ]]; then
    datatool="${datatool} -d"
fi
|
|
|
|
# Open the given file(s) in the configured interactive editor.
user_edit () {
    local chosen_editor=$editor
    $chosen_editor "$@"
}
|
|
|
|
# Print the octal permission bits (e.g. 644) of the given path.
# Fix: quote the argument so paths containing spaces or glob
# characters are passed to stat intact.
get_filemode () {
    stat -c '%a' "$1"
}
|
|
|
|
# Accumulate "--publish <mapping>" pairs for the standard ports into
# the global opt_str.  Note: the result is stored, not printed.
option_str_from_arr () {
    opt_str=""
    flag="--publish"
    options=("443:443" "80:80" "22:22")
    for option in "${options[@]}"; do
        opt_str+=" ${flag} ${option} "
    done
}
|
|
|
|
# Read a JSON document on stdin and print its length (jq 'length').
# Outputs "0" and returns 1 when jq produces nothing (invalid JSON).
# Fix: the original guard was [[ size == "" ]], which compares the
# literal word "size" against the empty string and is therefore
# always false — the failure branch could never run.
jq_array_size () {
    local json size
    json=$(cat)
    size=$(echo "$json" | jq 'length' 2>>"$errfile")
    if [[ -z "$size" ]]; then
        echo 0
        return 1
    fi
    echo "$size"
    return
}
|
|
|
|
# Read a JSON array on stdin and emit one element per line, suitable
# for readarray.  Emits a single empty line when the input is not
# valid JSON.
json_to_bash_array () {
    local json=`cat`
    # jq -e validates the document without printing it.
    if jq -e . >/dev/null 2>&1 <<<"$json"; then
        echo $json|jq -cr '.[]'
    else
        echo ""
    fi
    return
}
|
|
|
|
# Number of elements in the data array addressed by key path "$*".
data_array_size () {
    get_data $*|jq_array_size
}

# Last valid zero-based index of the data array at key path "$*".
data_array_seq_end () {
    echo $(( $(data_array_size $*) - 1 ))
}

# Elements of the data array at key path "$*", one per line.
get_data_array () {
    get_data "$*"|jq -cr '.[]'
}

# First match for key path "$*" from data-tool, as a raw value.
get_data () {
    ${datatool} get "$*"|jq -r '.[0]'
}

# JSON array of the keys of the object at key path "$*".
get_keys () {
    ${datatool} get "$*"|jq -r '.[0]|keys'
}

# Key at index $1 of the object addressed by the remaining args.
get_key () {
    get_keys "${@:2}"|jq -r ".[$1]"
}

# Number of keys of the object at key path "$*".
count_keys () {
    get_keys "$*"|jq_array_size
}

# Fully-qualified data path of the first node matching "$*".
get_path () {
    ${datatool} path "$*"|jq -r '.[0]'
}

# Number of hosts in the named cluster.
get_cluster_size () {
    get_hosts "$*"|jq_array_size
}

# JSON array of host names for the named cluster.
get_hosts () {
    ${datatool} hosts "$*"|jq '.[0]'
}

# Host names of the cluster, one per line (empty on invalid JSON).
get_hosts_array () {
    get_hosts "$*"|json_to_bash_array
}
|
|
|
|
|
|
#for i in `seq 1 $cluster_size`; do
#local host_name=$(get_key $((i-1)) $cluster_name)

# Exit status 0 when "$*" identifies a known deal in data.
deal_exists () {
    ${datatool} valid_deal "$*"
}

# Exit status 0 when "$*" identifies a known cluster.
cluster_exists () {
    ${datatool} valid_cluster "$*"
}

# Exit status 0 when "$*" identifies a known host.
host_exists () {
    ${datatool} valid_host "$*"
}
|
|
|
|
# FQDN of the host named by "$*"; prints nothing for unknown hosts.
get_remote_host () {
    if host_exists $*; then
        get_data $* remote host
    fi
}

# SSH port of the host named by "$*"; prints nothing when unknown.
get_remote_ssh_port () {
    if host_exists $*; then
        get_data $* remote ports ssh
    fi
}

# Remote login user for the host; falls back to the global $dealer
# when no user is recorded in data.
get_remote_user () {
    local user=$(get_data $* remote user)
    if [[ -z $user ]]; then
        user=$dealer
    fi
    echo $user
}
|
|
|
|
######################################################################
|
|
# Formatted printing and debug.
|
|
|
|
# Prints a table of variable names and values to stdout.
# Each argument is a VARIABLE NAME; values are read via indirection.
pr_vars () {
    printf "%s \n" "----------------------------"
    pr_vars_no_break "$@"
}

# Same as pr_vars but without the leading separator line.
pr_vars_no_break () {
    for k in "$@"; do
        len=$(echo "${!k}"|wc -m)
        # Long values are truncated from the LEFT so the tail
        # (usually the interesting end of a path) stays visible.
        # NB: a bare name inside [[ ... -gt ... ]] is evaluated
        # arithmetically, so "len" here reads the variable.
        if [[ len -gt 50 ]]; then
            begin=$(( len - 51 ))
            val=$(expr substr "${!k}" $begin $len )
        else
            val="${!k}"
        fi
        printf '%-15.15s : %.51s \n' "$k" "${val[*]}"
    done
}
|
|
|
|
# Debug that prints relevant environment information.
# NOTE(review): "logger" is not a variable set in the visible file;
# possibly "logtool" was intended.
print_env () {
    pr_vars TESTING_MODE errfile
    pr_vars program_root config_dir logger datatool
    pr_vars stage_root target_root
    pr_vars data_file
    pr_vars dealer dealermail
    pr_vars github_url github_owner github_token
}
|
|
|
|
# Takes a json array and prints it as a flat string.
strip_json_formatting () {
    # One sed invocation with chained expressions: drop quotes,
    # commas and backslashes, then brackets, then braces.  The
    # unquoted echo also collapses whitespace runs, exactly as the
    # original multi-process pipeline did.
    echo $@ | sed -e 's/[",\]//g' \
                  -e 's/\[//g' \
                  -e 's/\]//g' \
                  -e 's/{//g' \
                  -e 's/}//g'
}
|
|
|
|
# Compose a container name: "<parent>-<node>" when the node name
# contains a digit, otherwise just the node name.
format_container_name () {
    case "$2" in
        *[0-9]*)
            echo "${1}-${2}"
            ;;
        *)
            echo "${2}"
            ;;
    esac
    return
}
|
|
|
|
# Join two path fragments, resolving ".." components of the second
# against the first and collapsing "/./" and "//" runs.
# A second argument of "null" (a literal jq null) or "" yields $1.
concat_paths () {
    if [[ "${2}" == "null" ]] || [[ -z "${2}" ]]; then
        echo "${1}"
    else
        local result="${1}/${2}"
        # Count ".." occurrences in $2 (awk field-splits on the
        # escaped "\.\." pattern; NF-1 is the number of matches).
        local roots=$(
            awk -F"\\\.\\\." '{print NF-1}' <<< "${2}")
        if [[ "$roots" -gt 0 ]]; then
            # One dirname to drop the ".." component itself, then
            # one more per ".." to ascend.
            result=$(dirname "$result")
            for i in `seq ${roots}`; do
                result=$(dirname "$result")
            done
        fi
        echo "$result"|sed 's|/./\+|/|g'|sed 's|//\+|/|g'
    fi
    return
}
|
|
|
|
######################################################################
|
|
# Environment Setup
|
|
|
|
# Identity used for git commits and as the default remote user.
dealer='caes'
dealermail='caes@adamonet'

# NOTE(review): syslog_dir is never assigned in the visible file, so
# this resolves to "/bash-logger/syslog.sh" — confirm where it is set.
logtool="${syslog_dir}/bash-logger/syslog.sh"
|
|
|
|
# Export every "NAME=value" line stored under etc.environment in data.
export_env () {
    local lines=()
    readarray -t lines < <(get_data_array etc environment)
    for line in "${lines[@]}"; do
        #if [[ $TESTING_MODE == "TRUE" ]]; then
        # echo export $line
        #fi
        # NB: $line is unquoted, so a value containing whitespace
        # would be split — assumes simple NAME=value pairs.
        export $line
    done
}
|
|
|
|
# Placeholder: intended to persist environment variables system-wide.
install_environment_vars () {
    :
    # Should check /etc/environment and add lines
    # only if they don't already exist.
    # Need to write a function to do this for a file
    # in the general case to proceed.
}
|
|
|
|
# Placeholder for configuring npm behind a proxy.
set_npm_proxy () {
    # Not relying on this at the moment.
    :
    #npm config set proxy http://some.proxy:83
    #npm config set https-proxy http://some.proxy:83
}

# Placeholder for installing shell completions.
export_completions () {
    :
}
|
|
|
|
# Refresh this checkout (and with it data.json) from git.
# $1: optional git ref to check out instead of master.
update_data () {
    git_update_self $1
}
|
|
|
|
# Fetch and unpack the bash-logger release.  The implementation is
# currently disabled; only the notice is printed.
install_logger () {
    echo "Logger not currently installed."
    # rm -rf\
    # ADAMONET-bash-logger-*\
    # bash-logger\
    # bash-logger.tar\
    # 2> /dev/null
    # get_release bash-logger
    # tar xf bash-logger.tar
    # rm bash-logger.tar
    # mv ADAMONET-bash-logger-* bash-logger
    # return
}
|
|
|
|
# Send a message to syslog via the external log tool, tagged
# "[adamocomp]".
syslog () {
    ${logtool} "[adamocomp] ""$@"
}

# NOTE(review): syslog_local is set here but never read in the
# visible file — presumably consumed by the log tool's environment.
if [[ "$TESTING_MODE" == "TRUE" ]]; then
    syslog_local="TRUE"
fi
|
|
|
|
# Refresh the apt package index, non-interactively.
apt_update () {
    sudo apt update -y
}
|
|
|
|
# Install the tools this broker depends on (jq, logger, curl, node)
# when their binaries are missing.
# Fixes: the curl check/install was duplicated verbatim; apt installs
# now pass -y so the function runs unattended, consistent with the
# nodejs step below.
install_prereqs () {
    if [[ ! -x /usr/bin/jq ]]; then
        sudo apt install -y jq
    fi
    if [[ ! -x /usr/bin/logger ]]; then
        sudo apt install -y logger
    fi
    if [[ ! -x /usr/bin/curl ]]; then
        sudo apt install -y curl
    fi
    if [[ ! -x /usr/bin/node ]]; then
        # NodeSource setup script registers the apt repo for Node 14.
        curl -sL https://deb.nodesource.com/setup_14.x | bash -
        sudo apt install -y nodejs npm
    fi
}
|
|
|
|
# Prepare the local checkout: update from git, (re)install the
# logger, refresh npm dependencies, and restore the caller's cwd.
# $1: optional git ref passed through to update_data.
# Fix: the cd is now guarded so a missing program_root cannot make
# the update/npm steps run in the caller's directory.
setup_env () {
    owd=`pwd`
    cd "$program_root" || return
    #set_npm_proxy
    update_data $1
    install_logger
    npm up
    cd "$owd"
}
|
|
|
|
# Public command: install environment data and software locally.
setup_local () {
    setup_env $*
}
|
|
|
|
######################################################################
|
|
# Global constructions.
|
|
|
|
# Apply one operation to every deal-info global:
# an argument starting with "u" unsets them, "p" prints them.
dofor_deal_info () {
    local command=""
    case "$*" in
        "u"*) command="unset" ;;
        "p"*) command="pr_vars" ;;
    esac
    if [[ $command != "" ]]; then
        ${command} deal_valid deal_node parent deal_name issue_path
        ${command} use_tools toolset missing_tools
        ${command} deal_repo deal_branch deal_tag use_npm
        ${command} docker_image docker_options
        ${command} tool_path tool_src
        ${command} stage_node close_node stage_path close_path
        ${command} tool_target tool_stage
    fi
}
|
|
|
|
# Unset every deal-info global.
clear_deal_info () {
    dofor_deal_info "unset"
}

# Print every deal-info global.
pr_deal_info () {
    dofor_deal_info "pr_vars"
}

# might as well run this at runtime
clear_deal_info
|
|
|
|
# Populate the deal-info globals (deal_node, parent, deal_name,
# repo/branch/tag, docker settings, stage/close paths, tool flags)
# for the deal matching "$*".  Sets deal_valid="TRUE" on success;
# returns 2 when no such deal exists.
construct_deal_info () {
    clear_deal_info
    if deal_exists "$*"; then
        deal_node=$(get_path $*)
        parent=$(basename $(dirname $deal_node))
        deal_name=$(basename $deal_node)
        issue_path=$(get_data "$parent $deal_name issue_path")

        toolset=$(get_data "$parent $deal_name toolset")
        tool_path=$(get_data "$parent $deal_name tool_path")
        tool_src="${program_root}/toolsets/${toolset}"

        docker_image=$(get_data "$parent $deal_name docker image")
        docker_options=$(get_data "$parent $deal_name docker options")

        # Normalize the repo name: strip a trailing ".git" and a
        # leading "ADAMONET/" owner prefix (sed delimited by '\').
        deal_repo=$(get_data "$parent $deal_name repo"|
            sed 's/.git$//'|sed 's\^ADAMONET/\\'
        )
        deal_branch=$(get_data "$parent $deal_name branch")
        deal_tag=$(get_data "$parent $deal_name tag")
        use_npm=$(get_data "$parent $deal_name npm_setup")

        # Staged copy vs. final ("close") install locations.
        stage_node=$(concat_paths $stage_root $deal_node)
        close_node=$(concat_paths $target_root $deal_node)
        stage_path=$(concat_paths $stage_node $issue_path)
        close_path=$(concat_paths $close_node $issue_path)
        tool_target=$(concat_paths $close_node $tool_path)
        tool_stage=$(concat_paths $stage_node $tool_path)

        if [[ $tool_path == "null" ]] || [[ -z $tool_path ]]; then
            unset use_tools missing_tools
        else
            use_tools="TRUE"
            # missing_tools flags that no *.sh script exists at the
            # deployed tool location yet.
            if ls ${tool_target}/*.sh >> /dev/null 2> /dev/null;
            then
                unset missing_tools
            else
                missing_tools="TRUE"
            fi
        fi
        deal_valid="TRUE"
    else
        return 2
    fi
    return 0
}
|
|
|
|
|
|
# Forget the current cluster selection and empty the host list.
clear_host_list () {
    unset cluster_name cluster_size
    host_list=()
}

# Append one host name to the global host_list (also leaves the
# global $host pointing at it, as before).
push_host () {
    host=$1
    host_list=("${host_list[@]}" "$host")
}
|
|
|
|
# Print the cluster name and, for each host in host_list, its FQDN
# and SSH port as recorded in data.
pr_host_list () {
    pr_vars cluster_name
    for host in "${host_list[@]}"; do
        local fqdn=$(get_remote_host $host)
        local ssh_port=$(get_remote_ssh_port $host)
        pr_vars host fqdn ssh_port
    done
}
|
|
|
|
# This fills in fqdn_list using arguments.
# Populates cluster_name, cluster_size and host_list for cluster $1.
# Returns 12 for a malformed name, 2 for an unknown cluster, 0 else.
# NOTE(review): the name is misspelled ("construst"); callers all use
# the same spelling, so renaming would require touching every call.
construst_cluster_info () {
    clear_host_list

    cluster_name=$1;shift

    if [[ ! $cluster_name =~ ^[a-zA-Z]+$ ]]; then
        >&2 echo "Cluster argument needs to be string."
        return 12
    fi

    if cluster_exists $cluster_name; then
        cluster_size=$(get_cluster_size $cluster_name)
        local hosts=()
        readarray -t hosts < <(get_hosts_array $cluster_name)
        # A single empty entry means the "cluster" is a lone host
        # that goes by the cluster's own name.
        if [[ ${#hosts[@]} -lt 2 ]] && [[ -z ${hosts[0]} ]]; then
            push_host "$cluster_name"
        else
            for host in "${hosts[@]}"; do
                push_host "$host"
            done
        fi
        return 0
    else
        >&2 echo "Invalid cluster."
        return 2
    fi
}
|
|
|
|
######################################################################
|
|
# Informatics
|
|
|
|
# Collect and print everything known about the deal named by "$*".
info () {
    construct_deal_info $*
    pr_deal_info
    return
}

# Collect and print host details for the cluster named by "$*".
cluster_info () {
    construst_cluster_info $*
    pr_host_list
}
|
|
|
|
######################################################################
|
|
# Downloading releases/clones from git repos.
|
|
|
|
# GitHub/GitLab API coordinates extracted from configuration JSON.
# NOTE(review): githubenv_json is never assigned in the visible file,
# so these currently evaluate to empty/"null" — confirm its source.
github_url="$(echo $githubenv_json|jq -r '.base_url')"
github_owner="$(echo $githubenv_json|jq -r '.repo_owner')"
github_token="$(echo $githubenv_json|jq -r '.access_token')"
|
|
|
|
# Register the dealer identity in the global git configuration so
# commits made by this script are attributed consistently.
git_identify_global () {
    git config --global user.name "${dealer}"
    git config --global user.email "${dealermail}"
}
|
|
|
|
# Update this checkout in place: stash local edits, fetch, check out
# $1 (or master), then merge.  Restores the caller's cwd.
git_update_self () {
    owd=`pwd`
    cd ${program_root}
    git_identify_global
    # Stash so a dirty working tree cannot block the checkout.
    git stash
    git fetch
    if [[ ! -z $1 ]]; then
        git checkout $1
    else
        git checkout master
    fi
    # Bare "git merge" merges the branch's configured upstream.
    git merge
    cd ${owd}
}
|
|
|
|
# Prints json object of repo release.
# $1: repository name; $2: release identifier (e.g. "latest").
get_release_data () {
    local repo_name="${1}"
    local release_id="${2}"
    local endpoint="${github_url}/repos/${github_owner}/${repo_name}/releases/${release_id}"
    curl -s \
        -H "Authorization: token ${github_token}"\
        "${endpoint}"
    #|jq -r
}
|
|
|
|
# Retrieves latest release of deal.
# Usage: get_release <repo name> [<tag>]
# Downloads release tarball as "<repo name>.tar".
get_release () {
    local repo="${1}"
    local release="${2:-latest}"
    local release_data=$(get_release_data "$repo" "$release")
    local tarball_url=$(echo "$release_data"|jq -r '.tarball_url')

    if [[ "$TESTING_MODE" == "TRUE" ]]; then
        pr_vars repo release tarball_url release_data
    else
        # -L follows the API's redirect to the actual tarball.
        curl -s \
            -H "Authorization: token ${github_token}" \
            -L -o "${1}.tar" $tarball_url
    fi
}
|
|
|
|
# Retrieves clone of branch in repo.
# Usage: get_clone <repo name> <branch>
# Extracts clone to directory of same name, I guess.
# Needs to be changed to use token authentication.
get_clone () {
    local repo="${1}"
    local branch="${2:-master}"
    if [[ "$TESTING_MODE" == "TRUE" ]]; then
        pr_vars repo branch
        return 0
    else
        # NB: not local, and the token is embedded in the clone URL.
        github_root="${github_token}@gitlab.adamonet"
        git clone \
            --single-branch \
            --branch $branch \
            "https://${github_root}/${github_owner}/${repo}.git"
        return
    fi
}
|
|
|
|
######################################################################
|
|
# Container operations.
|
|
|
|
# Expand the deal's "docker options" data into a flat CLI string.
# Each options entry is an array whose element 0 is the flag and
# whose remaining elements are values; the flag is re-emitted before
# every value.  Relies on deal_name set by construct_deal_info.
docker_options_string () {
    num_opt=$(data_array_seq_end ${deal_name} docker options)
    for i in `seq 0 $num_opt`; do
        num_args=$(data_array_seq_end ${deal_name} docker options $i)
        for j in `seq 1 $num_args`; do
            printf "%s "\
                "$(get_data ${deal_name} docker options $i 0)"
            printf "%s "\
                "$(get_data ${deal_name} docker options $i $j)"
        done
    done
    return
}
|
|
|
|
# Build the docker image for the deal matching "$*" from its issue
# directory under target_root.  Returns 2 when target_root is not
# accessible, 10 when the deal is unknown.
build_container () {
    if [[ ! -x $target_root ]]; then
        >&2 echo "Cannot access root at ${target_root}."
        return 2
    fi
    if deal_exists "$*"; then
        local deal_node=`${datatool} path "$*"|jq -r '.[0]'`
        local parent=$(basename $(dirname $deal_node))
        local node=$(basename $deal_node)
        local issue_path=$(get_data "$parent $node issue_path")
        # Image tag uses ":" where the container name uses "-".
        local name=$(format_container_name $parent $node|sed 's/-/:/')
        local build_node="${target_root}/${deal_node}"
        local build_dir=$(concat_paths $build_node $issue_path)
        # NOTE(review): queries "$parent $name repo" (other lookups
        # use the node name) and re-pipes a raw value through jq —
        # confirm the intended key path.
        local repo=$(get_data "$parent $name repo"|jq -r '.[0]')

        pr_vars name repo build_dir

        if [[ "$TESTING_MODE" == "TRUE" ]]; then
            pr_vars deal_node parent node issue_path \
                name build_node build_dir repo
            return 0
        fi

        # BuildKit is required for --ssh key forwarding.
        if DOCKER_BUILDKIT=1 docker build \
            --ssh id_rsa=~/.ssh/id_rsa --no-cache \
            --tag ${name} ${build_dir}
        then
            syslog "Image $name rebuilt from $deal_repo."
        else
            syslog "Error rebuilding image $name."
        fi
        return
    else
        >&2 echo "Deal not found."
        return 10
    fi
}
|
|
|
|
# start_container <directory containing a dockerfile> [<script name>]
# Runs the image named after the directory pair, detached, with the
# project's standard DNS/restart/memory settings and a CID file
# written next to the dockerfile.
start_container () {
    local container_dir=$1
    local script_name=$2

    # NOTE(review): script_name is defaulted but never used below.
    if [ -z $script_name ]; then script_name="prod"; fi

    container_parent_dir=$(
        cd "$(dirname $container_dir)" >/dev/null && pwd \
    )

    local parent=$(basename "$container_parent_dir")
    local node=$(basename "$container_dir")
    local container_name="$parent-$node"
    local container_logs_dir=$container_dir/logs
    local run_name=$(format_container_name $parent $node)
    # Image tag uses ":" where the run name uses "-".
    local name=$(echo $run_name|sed 's/-/:/')
    # NOTE(review): port is only printed, and publish_string is not
    # assigned anywhere in the visible file — confirm its source.
    local port=10000

    if [ ! -d "$container_logs_dir" ]; then
        mkdir -p $container_logs_dir
    fi

    pr_vars name port

    docker run -d \
        -p "${publish_string}" \
        --name "$run_name" \
        --cidfile "${1}/${run_name}.cid" \
        -v "$container_logs_dir:/workdir/logs/:rw" \
        -e "ADAMO_HOST=$(hostname)" \
        --dns=19.13.0.246 \
        --dns=19.69.0.246 \
        --dns-search=some.dns \
        --dns-search=another.dns \
        --restart=on-failure:10 \
        --memory 100M \
        --memory-reservation 20M \
        "$name"

    return
}
|
|
|
|
stop_container () {
    # Use either a direct path to the CID file or a directory
    # If directory, all CID files found under it will be stopped.
    local cid_path=$1

    #Array of CID files to stop
    local cid_files=()

    #printf "Stopping container(s) at path specified: $cid_path\n"

    if [ -f "$cid_path" ] ; then # if file, add file
        cid_files+=("$cid_path")
    elif [ -d "$cid_path" ]; then # if dir, add all *.cid files
        for f in $(find "$cid_path" -type f -name "*.cid"); do
            #echo "Found CID file: $f"
            cid_files+=($f)
        done
    fi

    if [ ${#cid_files[@]} -gt 0 ]; then
        for file in ${cid_files[*]} ; do
            #printf "Stopping container"
            #prinft " [$(basename $file)]...\n"
            # Kill and remove the container, then drop its CID file.
            cid=`cat "$file"`
            docker kill $cid
            docker rm $cid
            rm $file
        done
    else
        printf "No CID files found (no containers stopped).\n"
    fi
}
|
|
|
|
# Stop every container recorded by a CID file under the given root
# directory, defaulting to the program root when none is given.
stop_all_containers() {
    local root=${1:-$program_root}
    stop_container "$root"
    return
}
|
|
|
|
# Run every restart_container.sh found under $1, or under this
# script's own directory when no root is given.
restart_all_containers () {
    local container_root
    if [[ -z $1 ]]; then
        container_root=$(dirname "$0")
        syslog "Restarting all containers."
    else
        container_root=$1
        syslog "Restarting all containers under $container_root."
    fi
    for f in $( \
        find $container_root -type f -name "restart_container.sh" \
    ); do
        $f
    done
    return
}
|
|
|
|
######################################################################
|
|
# Local dealing
|
|
|
|
# Execute each "apt <args>" line stored under the deal's apt key.
# In testing mode the commands are echoed instead of executed.
run_apt () {
    if construct_deal_info $*; then
        local lines=()
        readarray -t lines < <(get_data_array $* apt)
        for line in "${lines[@]}"; do
            if [[ $TESTING_MODE == "TRUE" ]]; then
                echo sudo apt $line
            else
                sudo apt $line
            fi
        done
    fi
}
|
|
|
|
# Run the deal's docker image detached, with CLI options expanded
# from data.  Echoes the command instead in testing mode.
docker_run_from_data () {
    dealkeypath="$*"
    # env exported here since this is for bootstrapping
    export_env
    if construct_deal_info $dealkeypath; then
        local docker_opts="$(docker_options_string $dealkeypath)"
        pr_vars deal_name docker_image docker_opts
        if [[ $TESTING_MODE == "TRUE" ]]; then
            echo docker run -d $docker_opts \
                --name $deal_name $docker_image
        else
            docker run -d $docker_opts \
                --name $deal_name $docker_image
        fi
    fi
}
|
|
|
|
# Start the deal's existing (stopped) container by name.
docker_start_from_data () {
    dealkeypath="$*"
    # env exported here since this is for bootstrapping
    export_env
    if construct_deal_info $dealkeypath; then
        pr_vars deal_name docker_image
        if [[ $TESTING_MODE == "TRUE" ]]; then
            echo docker start $deal_name
        else
            docker start $deal_name
        fi
    fi
}
|
|
|
|
# Stop and remove the deal's container by name.
docker_rm_from_data () {
    dealkeypath="$*"
    # env exported here since this is for bootstrapping
    export_env
    if construct_deal_info $dealkeypath; then
        pr_vars deal_name docker_image
        if [[ $TESTING_MODE == "TRUE" ]]; then
            echo docker stop $deal_name
            echo docker rm $deal_name
        else
            docker stop $deal_name
            docker rm $deal_name
        fi
    fi
}
|
|
|
|
# Ensure the deal's issue_path exists and is writable (creating it
# with sudo when needed); in testing mode only report its mode.
# Fix: the original guard was `if $deal_valid; then`, which EXECUTES
# the variable's value ("TRUE") as a command — that fails on systems
# without a TRUE binary, so the body never ran.  Test the variable
# instead, as every other caller in this file does.
prepare_issue_path () {
    if [[ $deal_valid ]]; then
        if [[ -x $issue_path && -w $issue_path ]]; then
            return 0
        else
            if [[ "$TESTING_MODE" == "TRUE" ]]; then
                perms=$(get_filemode $issue_path)
                pr_vars issue_path perms
            else
                sudo mkdir -p $issue_path
                sudo chmod u=wrx $issue_path
            fi
        fi
    fi
}
|
|
|
|
# Remove the staging tree; in testing mode only report it.
# Always returns 0 (a missing stage is not an error).
# Fix: quote stage_root and refuse to run rm -rf when it is empty —
# the unquoted original could have expanded to a bare "rm -rf".
clear_stage () {
    if [[ $TESTING_MODE == "TRUE" ]]; then
        pr_vars stage_root
        return 0
    fi
    if [[ -n $stage_root && -x $stage_root ]]; then
        rm -rf "$stage_root"
    fi
    return 0
}
|
|
|
|
# Merge the staged tree into target_root, preserving modes.
# Returns 1 when target_root is unusable; a missing stage is
# treated as success (nothing to install).
install_stage () {
    mkdir -p $target_root
    if [[ ! -x $target_root ]] || [[ ! -w $target_root ]]; then
        >&2 echo "Cannot access root at ${target_root}."
        return 1
    fi

    if [[ -x $stage_root ]]; then
        pr_vars stage_root target_root
        #cp -R --preserve=mode $stage_root/* $target_root
        # Trailing slash: copy the stage's CONTENTS, not the dir.
        rsync -a $stage_root/ $target_root
        return 0
    else
        return 0
    fi
}
|
|
|
|
# Install the staged tree into the target, then remove the stage.
apply_stage () {
    install_stage
    clear_stage
}
|
|
|
|
# Download the deal's release tarball (or — currently disabled —
# clone its branch) into the staging tree at stage_path.  Requires
# construct_deal_info to have populated the deal globals.
# Returns: 0 ok; 2 bad data; 5/1 stage path problems; 8 no update
# source; 9 clone path disabled; 6 invalid deal.
stage_release () {
    if [[ $deal_valid ]]; then
        if [[ -z $issue_path ]] || [[ -z $deal_repo ]]\
            || [[ -z $deal_branch ]] || [[ -z $deal_node ]]; then
            >&2 echo "Error in data or API not found."
            return 2
        fi

        pr_vars stage_path
        # Dry run: describe what would be fetched and stop.
        if [[ "$TESTING_MODE" == "TRUE" ]]; then
            pr_vars stage_node issue_path
            if [[ "$deal_tag" == "null" ]]; then
                if [[ "$deal_branch" == "null" ]]; then
                    echo "No valid release data."
                else
                    echo ""
                    echo "I want to clone using: "
                    get_clone ${deal_repo}
                fi
            else
                echo ""
                echo "I want to release using: "
                get_release ${deal_repo} ${deal_tag}
            fi
            return 0
        fi

        mkdir -p $stage_path

        if [[ ! -e ${stage_path} ]]; then
            >&2 echo "Problem accessing release path ${stage_path}."
            return 5
        fi

        if [[ ! -w $stage_path ]]; then
            >&2 echo "Cannot write to release path."
            return 1
        fi

        # Perfect world: should go into container,
        # npm update, npm test, if npm tests pass, then

        if [[ "$deal_tag" == "null" ]]; then
            if [[ "$deal_branch" == "null" ]]; then
                >&2 echo "No valid update source in data."
                return 8
            else
                >&2 echo "Deal by git clone currently disabled."
                return 9
                #get_clone ${deal_repo}
                #mv "${deal_repo}/*" "${stage_path}/"
            fi
        else
            # Fetch and unpack into the stage, then clean up the
            # extracted "<owner>-<repo>-<sha>" directory and tarball.
            get_release ${deal_repo} ${deal_tag}
            rm -rf "${github_owner}-${deal_repo}-*"
            tar xf "${deal_repo}.tar"
            cp -R $(echo ${github_owner}-${deal_repo}-*/*)\
                "${stage_path}/"
            rm -rf "$(echo ${github_owner}-${deal_repo}-*)"
            rm "${deal_repo}.tar"
        fi
        return 0
    else
        >&2 echo "Cannot stage invalid deal."
        return 6
    fi
}
|
|
|
|
# Copy the deal's toolset scripts (plus this broker) into the stage.
stage_scripts () {
    # Currently disabled and toolset must be deployed manually.
    # The early return intentionally leaves the rest as dead code.
    return 0
    if [[ $deal_valid ]]; then
        pr_vars tool_src tool_path
        if [[ "$TESTING_MODE" == "TRUE" ]]; then
            pr_vars tool_stage
        else
            if [[ ! -z $tool_stage ]]; then
                mkdir -p $tool_stage
                if [[ ! -w $tool_stage ]]; then
                    >&2 echo "Cannot write to tools path."
                    return 2
                fi
            fi

            if [[ ! -x $tool_src ]]; then
                >&2 echo "Cannot access toolset from $tool_src."
                return 3
            fi

            if [[ ! -r $tool_src ]] ; then
                >&2 echo "Cannot read toolset source dir."
                return 4
            fi

            cp --preserve=mode \
                $tool_src/*.sh $tool_stage
            cp --preserve=mode \
                ${program_root}/broker.sh $tool_stage
        fi
    else
        >&2 echo "Cannot stage tools for invalid deal."
        return 5
    fi
    return
}
|
|
|
|
######################################################################
|
|
# Remote dealing
|
|
|
|
# Run a command on a named host over ssh with a forced TTY.
# $1: host key in data; remaining args: the remote command line.
ssh_tty_to_host () {
    local host=$1
    shift
    if host_exists $host; then
        local fqdn=$(get_remote_host $host)
        local port=$(get_remote_ssh_port $host)
        local user=$(get_remote_user $host)
        local cmd="$*"
        pr_vars fqdn port user cmd
        # ssh -o option names are case-insensitive, so port= works.
        ssh -tt -o port=${port} ${user}@${fqdn} "$*"
    fi
}
|
|
|
|
# Recursively copy the CONTENTS of a local directory into a
# directory on the named host.  Testing mode skips the transfer
# (details were already printed).
scp_contents_to_host () {
    local host=$1
    local source_dir=$2
    local target_dir=$3
    if host_exists $1; then
        local fqdn=$(get_remote_host $host)
        local port=$(get_remote_ssh_port $host)
        local user=$(get_remote_user $host)
        pr_vars host fqdn port source_dir target_dir
        if [[ $TESTING_MODE == "TRUE" ]]; then
            :
        else
            scp -r -o port=${port} $source_dir/* \
                "${user}@${fqdn}:${target_dir}/"
        fi
    fi
}
|
|
|
|
# Copy a single local file into a directory on the named host.
# Testing mode skips the transfer.
scp_to_host () {
    local host=$1
    local source_file=$2
    local target_dir=$3
    if host_exists $1; then
        local fqdn=$(get_remote_host $host)
        local port=$(get_remote_ssh_port $host)
        local user=$(get_remote_user $host)
        pr_vars host fqdn port source_file target_dir
        if [[ $TESTING_MODE == "TRUE" ]]; then
            :
        else
            scp -o port=${port} $source_file\
                "${user}@${fqdn}:${target_dir}/"
        fi
    fi

}
|
|
|
|
# Show "docker ps" on every node of cluster $1, optionally filtered
# through grep by $2.
docker_ps () {
    if construst_cluster_info $1; then
        shift
        if [[ -z $1 ]]; then
            execute_on_nodes "docker ps"
        else
            execute_on_nodes "docker ps|grep $1"
        fi
    fi
}
|
|
|
|
# Fetch a file from the first host of the current cluster.
# NOTE(review): this looks unfinished — fqdn_list/ssh_port_list are
# never populated in the visible file (other code uses host_list),
# $host is whatever an earlier loop left behind, and "$(unknown)"
# invokes a nonexistent command where the remote path should be.
# Confirm intent before relying on this function.
download_file () {
    local filename=$1
    pr_vars filename
    if [[ "$TESTING_MODE" == "TRUE" ]]; then
        pr_vars ${fqdn_list[0]} filename
    else
        local user=$(get_remote_user $host)
        pr_vars host
        scp -o port="${ssh_port_list[0]}" \
            "${user}@${fqdn_list[0]}:$(unknown)" ./
    fi
    return
}
|
|
|
|
# Copy a local directory's contents to the same target directory on
# every host in host_list, creating the directory first and listing
# it afterwards for verification.
distribute_files_in_dir () {
    local source_dir=$1
    local target_dir=$2
    if [[ "$TESTING_MODE" == "TRUE" ]]; then
        for host in ${host_list[@]}; do
            pr_vars host source_dir target_dir
        done
    else
        for host in "${host_list[@]}"; do
            pr_vars host source_dir target_dir
            ssh_tty_to_host "$host" "mkdir -p ${target_dir}"
            scp_contents_to_host "$host" "$source_dir" "$target_dir"
            ssh_tty_to_host "$host" "ls -lh ${target_dir}"
        done
    fi
    return
}
|
|
|
|
# Copy one file to the same target directory on every host in
# host_list (populated beforehand by construst_cluster_info).
# Fix: scp_to_host was invoked with NO arguments, so its host_exists
# guard failed and nothing was ever copied; pass host, source file
# and target directory through, mirroring distribute_files_in_dir.
distribute_file () {
    local source_file=$1
    local target_dir=$2
    if [[ "$TESTING_MODE" == "TRUE" ]]; then
        for host in ${host_list[@]}; do
            pr_vars host source_file target_dir
        done
    else
        for host in ${host_list[@]}; do
            pr_vars host source_file target_dir
            ssh_tty_to_host "$host" "mkdir -p ${target_dir}"
            scp_to_host "$host" "$source_file" "$target_dir"
        done
    fi
    return
}
|
|
|
|
# Invoke this broker remotely (with forwarded -t/-e options) on
# every host in host_list, from the standard install location.
remote_broker () {
    execute_on_nodes "cd /opt/adamocomp; ${rem_broker_cmd} $*"
}

# Run an arbitrary command over ssh on every host in host_list.
execute_on_nodes () {
    command=$1
    for host in ${host_list[@]}; do
        ssh_tty_to_host "$host" $command
    done
}
|
|
|
|
# Like docker_run_from_data, but executes the docker run on the
# remote host named by $1 instead of locally.
docker_run_from_data_on_host () {
    local host=$1
    shift
    local dealkeypath="$*"
    # env exported here since this is for bootstrapping
    export_env
    if construct_deal_info $dealkeypath; then
        local docker_opts="$(docker_options_string $dealkeypath)"
        pr_vars deal_name docker_image docker_opts
        if [[ $TESTING_MODE == "TRUE" ]]; then
            ssh_tty_to_host $host "echo docker run -d $docker_opts \
                --name $deal_name $docker_image"
        else
            ssh_tty_to_host $host "docker run -d $docker_opts \
                --name $deal_name $docker_image"
        fi
    fi
}
|
|
|
|
# Like docker_rm_from_data, but stops and removes the deal's
# container on the remote host named by $1.
docker_destroy_from_data_on_host () {
    local host=$1
    shift
    local dealkeypath="$*"
    # env exported here since this is for bootstrapping
    export_env
    if construct_deal_info $dealkeypath; then
        pr_vars deal_name docker_image
        if [[ $TESTING_MODE == "TRUE" ]]; then
            ssh_tty_to_host $host "echo docker stop $deal_name;
                echo docker rm $deal_name"
        else
            ssh_tty_to_host $host "docker stop $deal_name;
                docker rm $deal_name"
        fi
    fi
}
|
|
|
|
######################################################################
|
|
# Local automation
|
|
|
|
# Set up this node locally (git/logger/npm refresh), then clear the
# staging area.
# Fix: the testing-mode comparison used lowercase "true" while the
# option parser sets TESTING_MODE="TRUE", so dry-run never engaged.
# NOTE(review): "pr_vars 1" prints the literal string "1" via
# indirection — likely meant to name a real variable; confirm.
local_node_setup () {
    if [[ $TESTING_MODE == "TRUE" ]]; then
        pr_vars 1
    else
        setup_env $1
        clear_stage
    fi
}
|
|
|
|
# Selfishly performs chmod and chown operations on
# existing container structure to ensure proper permissions
# under the deploy user used during remote deployments.
# Fix: the testing-mode comparison used lowercase "true" while the
# option parser sets TESTING_MODE="TRUE", so dry-run never engaged.
# NOTE(review): rootdirs, userdirs and locallinkedconfs are not
# assigned anywhere in the visible file — confirm where they come
# from before running this in anger.
local_node_repair () {
    local user=`whoami`

    executables=()

    if [[ $TESTING_MODE == "TRUE" ]]; then
        pr_vars rootdirs userdirs executables
    else
        # System-owned directories: root ownership, world-readable.
        for dir in ${rootdirs[@]}; do
            sudo chown -R root:root $dir
            sudo chmod 755 $dir
            ls -lhd $dir
            ls -lh $dir
        done

        # Re-link configs to the copies kept in this checkout.
        for file in ${locallinkedconfs[@]}; do
            sudo rm $file
            sudo ln -s ${program_root}/$(basename $file) $file
            ls -lhd $file
        done

        # Deploy-user-owned directories.
        for dir in ${userdirs[@]}; do
            sudo chown -R ${user}:users $dir
            ls -lhd $dir
            ls -lh $dir
        done

        sudo chmod 755 ${executables[*]}
    fi
}
|
|
|
|
# Install the deal's toolset scripts straight into tool_target,
# bypassing the stage.  Returns 2 when the destination is not
# writable, 3 when the deal lookup fails.
# NOTE(review): the "Cannot access toolset." message is attached to
# the construct_deal_info failure branch, not to the tool_src check
# above it — the wording and the condition don't quite match.
install_local_scripts () {
    if construct_deal_info $*; then
        pr_vars tool_src tool_target
        if [[ -x $tool_src ]] && [[ -r $tool_src ]]; then
            mkdir -p $tool_target
            if [[ -x $tool_target ]] && [[ -w $tool_target ]]
            then
                if [[ $TESTING_MODE == "TRUE" ]]; then
                    echo cp ${tool_src}/* $tool_target/
                else
                    cp ${tool_src}/* $tool_target/
                fi
            else
                >&2 echo "Cannot write to destination."
                return 2
            fi
        fi
    else
        >&2 echo "Cannot access toolset."
        return 3
    fi
    return
}
|
|
|
|
# Full local deployment of a deal: stop its container, stage and
# install the release, npm-update if configured, restart, and log.
# Returns 2 when the deal search string matches nothing.
complete_local () {
    if construct_deal_info $@;then
        if [[ $TESTING_MODE == "TRUE" ]]; then
            # NOTE(review): "repo" is not a variable set by
            # construct_deal_info (it sets deal_repo) — prints empty.
            pr_vars parent deal_name repo close_path tool_target\
                use_tools use_npm
        else
            pr_vars parent deal_name repo close_path tool_target\
                use_tools use_npm
            clear_stage
            if [[ $use_tools ]]; then
                ${tool_target}/stop_container.sh
                # Re-stage tool scripts when the target has none yet.
                if [[ $missing_tools ]]; then
                    stage_scripts
                fi
            fi
            stage_release
            apply_stage
            if [[ $use_npm ]]; then
                owd=`pwd`
                cd ${close_path}
                npm update
                cd $owd
            fi
            if [[ $use_tools ]]; then
                ${tool_target}/start_container.sh
            fi
            # Note this assumes latest since the latest
            # tag is currently baked into the code.
            report_version="Latest tagged release of ${deal_repo}"
            syslog "${report_version} installed at '${close_path}'."
        fi
    else
        >&2 echo "Invalid deal search string: $*."
        return 2
    fi
    return
}
|
|
|
|
|
|
# Update an already-deployed deal in place: stage and install the
# new release, cycle its container, npm-update if configured, log.
# Returns 2 when the deal search string matches nothing.
update_local () {
    if construct_deal_info $@;then
        if [[ $TESTING_MODE == "TRUE" ]]; then
            pr_vars parent deal_name repo deal_node close_path use_npm
        else
            pr_vars parent deal_name repo deal_node close_path use_npm
            clear_stage
            stage_release
            install_stage
            if [[ $use_tools ]]; then
                ${tool_target}/stop_container.sh
            fi
            if [[ $use_npm ]]; then
                owd=`pwd`
                cd ${close_path}
                npm update
                cd $owd
            fi
            if [[ $use_tools ]]; then
                ${tool_target}/start_container.sh
            fi
            # Note this assumes latest since the latest
            # tag is currently baked into the code.
            report_version="Latest tagged release of ${deal_repo}"
            syslog "${report_version} updated at '${close_path}'."
        fi
    else
        >&2 echo "Invalid deal search string: $*."
        return 2
    fi
    return
}
|
|
|
|
# Intended container-update flow for API deals; the working steps
# are commented out, so this currently only validates and prints.
update_api_container () {
    construct_deal_info $@
    if [[ $deal_valid ]]; then
        pr_vars parent deal_name repo close_path tool_target use_npm
        if [[ "$TESTING_MODE" == "TRUE" ]]; then
            pr_vars close_node close_path
            return 0
        else
            #clear_stage
            #stage_deployment "$*"
            #install_stage
            #clear_stage
            #build_container "$*"
            #${build_dir}/restart_container.sh
            :
        fi
    else
        >&2 echo "Invalid deal identifier."
    fi
    return
}
|
|
|
|
######################################################################
# Remote automation
# Recommended working ssh key

# Base command used to invoke this broker on remote nodes; the option
# flags collected in PASS_OPTS (e.g. -t, -e) are appended so remote
# runs behave like the local one.
rem_broker_cmd="/opt/adamocomp/broker.sh"
qa_source="1"
prod_source="4"

for opt in "${PASS_OPTS[@]}"; do
    rem_broker_cmd="${rem_broker_cmd} ${opt}"
done
|
|
|
|
# Runs docker_run_from_data on remote hosts for some deal.
# This is essentially meant for bootstrapping.
# Arguments: $1 = cluster identifier; remaining args = deal selector.
remote_docker_run () {
    if construst_cluster_info "$1"; then
        shift
        remote_broker "docker_run_from_data $*"
    fi
}
|
|
|
|
# Runs docker_start_from_data on remote hosts for some deal.
# Arguments: $1 = cluster identifier; remaining args = deal selector.
remote_docker_start () {
    if construst_cluster_info "$1"; then
        shift
        remote_broker "docker_start_from_data $*"
    fi
}
|
|
|
|
# Replaces a running container on remote hosts: removes it, runs a
# fresh one from data, then lists containers for visual verification.
# Arguments: $1 = cluster identifier; remaining args = deal selector.
remote_docker_replace () {
    if construst_cluster_info "$1"; then
        shift
        remote_broker "docker_rm_from_data $*"
        remote_broker "docker_run_from_data $*"
        execute_on_nodes "docker ps"
    fi
}
|
|
|
|
# Full bootstrap for fresh nodes: create directories, copy this tool
# and its data file over, install prerequisites, then run local node
# setup on each node.
remote_node_bootstrap () {
    remote_node_init "$@"
    remote_copy_self "$@"
    # NOTE(review): remote_prereq_install is invoked twice; presumably
    # the first pass installs something the second pass needs (e.g.
    # package sources) — confirm the duplication is intentional.
    remote_prereq_install "$@"
    remote_prereq_install "$@"
    remote_node_setup "$@"
}
|
|
|
|
# Prepares nodes for dealing by installing software.
# Creates the adamocomp home, config, and gitlab directories on every
# node and hands their ownership to the remote user, then lists them
# for verification.
remote_node_init () {
    construst_cluster_info "$1"
    # NOTE(review): $host is not set in this function; it appears to be
    # a side effect of construst_cluster_info — confirm.
    # Split declaration from assignment so a get_remote_user failure
    # is not masked by `local`'s own exit status.
    local user
    user=$(get_remote_user $host)
    execute_on_nodes \
        "sudo mkdir -p $ADAMOCOMP_HOME;
        sudo mkdir -p $ADAMOCOMP_CONFDIR;
        sudo mkdir -p $GITLAB_HOME;
        sudo chown -R ${user}:${user} $ADAMOCOMP_HOME;
        sudo chown -R ${user}:${user} $ADAMOCOMP_CONFDIR;
        sudo chown -R ${user}:${user} $GITLAB_HOME;
        ls -lhd $ADAMOCOMP_HOME;
        ls -lhd $ADAMOCOMP_CONFDIR;
        ls -lhd $GITLAB_HOME;"
}
|
|
|
|
# Copies this tool's program directory and its data file out to every
# node in the cluster.
remote_copy_self () {
    if construst_cluster_info "$1"; then
        distribute_files_in_dir "${program_root}" "$ADAMOCOMP_HOME"
        distribute_file "${data_file}" "$ADAMOCOMP_CONFDIR"
    fi
}
|
|
|
|
# Updates package lists and installs prerequisite software on each
# node via the remote broker.
remote_prereq_install () {
    if construst_cluster_info "$1"; then
        remote_broker apt_update
        remote_broker install_prereqs
    fi
}
|
|
|
|
# Remotely runs local node setup for a deal on each node.
# Arguments: $1 = cluster identifier, $2 = deal selector.
remote_node_setup () {
    # Guard on cluster resolution, consistent with the sibling
    # remote_* helpers (previously the broker ran unconditionally).
    if construst_cluster_info "$1"; then
        remote_broker "local_node_setup $2"
    fi
}
|
|
|
|
# Runs repair operations on each node.
remote_node_repair () {
    # "$@" preserves each argument as its own word (the unquoted $*
    # would re-split arguments containing spaces).
    construst_cluster_info "$@"
    execute_on_nodes \
        "${rem_broker_cmd} local_node_repair"
}
|
|
|
|
# Opens the given remote file(s) in $editor under sudo on every node.
# Arguments: $1 = cluster identifier; remaining args = file paths.
remote_sudo_edit () {
    if construst_cluster_info "$1"; then
        shift
        execute_on_nodes "sudo $editor $*"
    fi
}
|
|
|
|
# Opens the given remote file(s) in $editor on every node of the
# cluster named by the first argument.
remote_edit () {
    construst_cluster_info $1 || return 0
    shift
    execute_on_nodes "$editor $*"
}
|
|
|
|
# Runs an arbitrary command on every node of a cluster.
# Arguments: $1 = cluster identifier; remaining args form the command.
cluster_run () {
    local cluster=$1; shift
    construst_cluster_info "$cluster"
    execute_on_nodes \
        "$*"
}
|
|
|
|
# Only one node number instead of seq range.
# Downloads a file from the cluster's node(s).
# Arguments: $1 = cluster identifier, $2 = remote file path.
cluster_get () {
    local cluster=$1;shift;
    if construst_cluster_info "$cluster"; then
        download_file "$1"
    else
        >&2 echo "Invalid node identification."
    fi
}
|
|
|
|
# Leftover sketch of a single-source-node variant of cluster_get:
#if construst_cluster_info $cluster $source_node $source_node; then
#    download_file $file
|
# Sends file to nodes.
# Arguments: $1 = cluster identifier, $2 = local file,
#            $3 = remote destination.
cluster_send () {
    local cluster=$1;shift
    if construst_cluster_info "$cluster"; then
        distribute_file "$1" "$2"
    else
        >&2 echo "Invalid node identification."
    fi
}
|
|
|
|
# This should only be used if you know what you are doing,
# and prompts for a password at each node.
# Distributes an environment file to every node's home directory and
# moves it into the config directory with root ownership.
# Arguments: $1 = cluster identifier, $2 = local env file path.
distribute_env_file () {
    local cluster=$1; shift

    local filename
    filename=$(basename "$1")

    if [[ "$TESTING_MODE" == "TRUE" ]]; then
        # Was `pr_vars path`, but `path` is never set in this function;
        # report the variables actually in use instead.
        pr_vars cluster filename
    fi

    construst_cluster_info "$cluster"
    # NOTE(review): testing mode only prints above — the distribution
    # below still runs; confirm downstream helpers honor TESTING_MODE.
    distribute_file "$1" "~"
    execute_on_nodes "chmod 644 $filename;
        sudo chown root:root $filename;
        sudo mv $filename ${config_dir};
        ls -lh ${config_dir};
        "
}
|
|
|
|
# Fetches a file from a cluster, opens it in the user's editor, and
# optionally redistributes the edited copy back to every node.
# NOTE(review): the guard uses cluster_exists while most siblings use
# construst_cluster_info — confirm both validate equivalently.
cluster_edit () {
    local cluster=$1; shift
    if cluster_exists $cluster; then
        local file=$1;shift
        local source_file=$(basename $file)
        local target_dir=$(dirname $file)
        # Pull the current remote copy into the working directory.
        cluster_get $cluster $file
        user_edit $source_file
        # Single-keypress confirmation before pushing the edit out.
        read -p "Do you want to distribute this file? (y/n)"\
            -n 1 -r
        echo ""
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            cluster_send $cluster $source_file $target_dir
        fi
        # Remove the local working copy regardless of the answer.
        rm $source_file
    fi
}
|
|
|
|
# Used to install toolset before a new deal is installed across
# the cluster.
# Arguments: $1 = cluster identifier; remaining args = deal selector.
# Returns:   0 on success, 2 on invalid cluster, 3 on invalid deal,
#            4 when the toolset source is not accessible.
install_scripts_on_nodes () {
    local cluster=$1; shift
    if construct_deal_info "$@"; then
        pr_vars tool_src tool_target
        if [[ -x $tool_src ]] && [[ -r $tool_src ]]; then
            if construst_cluster_info "$cluster"; then
                distribute_files_in_dir "$tool_src" "$tool_target"
            else
                >&2 echo "Invalid node identification."
                return 2
            fi
        else
            # Previously this case failed silently while the invalid-deal
            # branch below printed "Cannot access toolset".
            >&2 echo "Cannot access toolset."
            return 4
        fi
    else
        >&2 echo "Invalid deal search string: $*."
        return 3
    fi
    return
}
|
|
|
|
# Update a deal across all nodes between
# and incl. numbers.
# ./broker.sh <start num> <end num>
# DOES NOT VALIDATE YET.
install_on_nodes () {
    local loc=$1;shift;local frontnode=$1;shift;local endnode=$1;shift

    # Whether this deal builds from a Dockerfile (first match in data).
    uses_dockerfile=$(
        ${datatool} get "$* use_dockerfile"|jq -r '.[0]')

    if [[ "$TESTING_MODE" == "TRUE" ]]; then
        # NOTE(review): `master` is not set in this function; presumably
        # populated elsewhere or a leftover — confirm.
        pr_vars uses_dockerfile master
    fi

    # Gating against dockerfile usage due to a bug.
    unset uses_dockerfile

    construst_cluster_info $loc $frontnode $endnode
    # With the gate above, uses_dockerfile is always empty, so the
    # complete_local branch is currently the only one taken.
    if [[ "$uses_dockerfile" == "true" ]]; then
        execute_on_nodes \
            "${rem_broker_cmd} update_api_container $*"
    else
        execute_on_nodes \
            "${rem_broker_cmd} complete_local $*"
    fi
}
|
|
|
|
# Runs an npm update on a container and restarts it.
# Arguments: $1 = cluster, $2 = first node, $3 = last node,
#            remaining args = deal selector.
update_on_nodes () {
    local loc=$1;shift;local frontnode=$1;shift;local endnode=$1;shift
    construst_cluster_info "$loc" "$frontnode" "$endnode"
    execute_on_nodes \
        "${rem_broker_cmd} update_local $*"
}
|
|
|
|
# Restart a container on a series of nodes.
# ./broker.sh <start num> <end num>
# DOES NOT VALIDATE YET.
restart_on_nodes () {
    local loc=$1;shift;local frontnode=$1;shift;local endnode=$1;shift

    # Resolve the deal's install path (first match) from data.
    path=$(${datatool} path "$*"|jq -r '.[0]')

    if [[ "$TESTING_MODE" == "TRUE" ]]; then
        pr_vars path
    fi

    # NOTE(review): testing mode only prints above — the restart below
    # still runs; confirm execute_on_nodes honors TESTING_MODE.
    construst_cluster_info $loc $frontnode $endnode
    # Escaped \${ADAMOCOMP_HOME} is expanded on the remote host, while
    # ${path} is expanded locally before the command is sent.
    execute_on_nodes "if \
        [[ -x \${ADAMOCOMP_HOME}/${path}/restart_container.sh ]]; then
        \${ADAMOCOMP_HOME}/${path}/restart_container.sh;
        fi
        "
}
|
|
|
|
# This part checks that the data.json file is
# accessible, and finally executes the command given at the
# shell prompt.
if [[ ! -r "$data_file" ]]; then
    if [[ ! -e "$data_file" ]]; then
        update_data
    fi
    # NOTE(review): this only warns — execution continues even without
    # a readable data.json; confirm that is intended.
    if [[ ! -r "$data_file" ]]; then
        >&2 echo "Could not locate or update data.json."
    fi
fi

# Always export environment variables from data, for now.
export_env

# Run command if one exists.
if [[ ${#POSITIONAL[@]} -gt 0 ]]; then
    # Quote the argument expansion so arguments containing spaces
    # reach the command intact; the command name itself stays unquoted.
    ${POSITIONAL[0]} "${POSITIONAL[@]:1}"
fi