├── .gitignore ├── BaRe ├── bkup_cron.sh ├── handleBackup.sh ├── handleRestore.sh ├── misc │ └── 50-server.cnf ├── rSYNC.sh ├── ros.sh ├── trimLog.sh └── utils.sh ├── LICENSE ├── README.md ├── config.sample ├── docs ├── ExecutionLog.txt ├── ExecutionLog_1.png ├── ExecutionLog_2.png ├── ExecutionLog_3.png ├── ExecutionLog_4.png ├── ExecutionLog_5.png └── ExecutionLog_6.png ├── envars.example.sh ├── makeAskPassEmitter.sh ├── makeEnvarsFile.sh ├── makeMariaDBRestartScript.sh ├── master ├── makeMasterMariaDBScript.sh ├── makeMasterMariaDBconfPatch.sh ├── makeMasterTasks.sh └── prepareMaster.sh ├── prepareMasterAndSlave.sh ├── ros.sh └── slave ├── makeSlaveMariaDBScript.sh ├── makeSlaveMariaDBconfPatch.sh ├── makeSlaveTasks.sh └── prepareSlave.sh /.gitignore: -------------------------------------------------------------------------------- 1 | envars.sh 2 | DemoNote.txt 3 | -------------------------------------------------------------------------------- /BaRe/bkup_cron.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | export SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | 5 | source ${SCRIPT_DIR}/envars.sh; 6 | 7 | declare IS_CRON=""; 8 | if [[ ! -z ${1} ]]; then IS_CRON="${1}"; fi; 9 | 10 | debug() { 11 | if [ "${IS_CRON}" == "RunFromCron" ]; then return; fi; 12 | echo -e "${1}"; 13 | } 14 | 15 | 16 | START_DATE="2022-01-01 01:01:30"; 17 | # START_DATE="2020-01-01 01:01:30"; 18 | toDate=$(date -d "${START_DATE}" +%s) 19 | 20 | END_DATE=$(date); 21 | endDate=$(date -d "${END_DATE}" +%s) 22 | debug "endDate = ${endDate}"; 23 | # echo -e "endDate = ${endDate}"; 24 | 25 | declare bkupsDirectory="${TARGET_BENCH}/BKP"; 26 | declare bkupsLogFile="${bkupsDirectory}/NotesForBackups.txt"; 27 | declare debugMode=True; 28 | 29 | # writeToLog() { 30 | # declare report=$(date --date="@${1}" "+%Y%m%d_%H%M%S-${ERPNEXT_SITE_URL}.tgz # %I %p %Z, %A %B %d, %Y") 31 | # debug "${report}" >> ${bkupsLogFile}; 32 | # } 33 | 34 | # writeTgz() { 35 | # local theTimeStamp=${1} 36 | # local tgz=$(date --date="@${theTimeStamp}" "+%Y%m%d_%H%M%S-${ERPNEXT_SITE_URL}.tgz") 37 | # local dtsttmp=$(date --date="@${theTimeStamp}"); 38 | # # debug "dtsttmp = ${dtsttmp}"; 39 | # touch -d "${dtsttmp}" ${bkupsDirectory}/${tgz} 40 | # } 41 | 42 | declare firstHourOfDay=8; 43 | declare lastHourOfDay=21; 44 | declare lastDayOfWeek=5; 45 | 46 | declare Comment=""; 47 | declare cntr=0 48 | declare delta=2 49 | declare incr="hour" 50 | 51 | declare theTimeStamp=""; 52 | declare Hour=""; 53 | declare DoW=""; 54 | declare Comment=""; 55 | processEvent() { 56 | local theTimeStamp=${1}; 57 | # debug "${theTimeStamp}"; 58 | Hour=$(date --date="@${theTimeStamp}" "+%H"); 59 | DoW=$(date --date="@${theTimeStamp}" "+%u"); 60 | 61 | when=$(date --date="@${theTimeStamp}" "+%Y-%m-%d %R %Z"); 62 | if (( 10#${Hour} < ${firstHourOfDay})); then 63 | debug "Skipped cron job. Too early : ${when}"; 64 | elif (( 10#${Hour} > ${lastHourOfDay})); then 65 | debug "Skipped cron job. Too late : ${when}"; 66 | elif (( 10#${DoW} > ${lastDayOfWeek})); then 67 | debug "Skipped cron job. 
Weekend : ${when}"; 68 | else 69 | # local Comment=$(date --date="@${1}" "+%Y%m%d_%H%M%S-${ERPNEXT_SITE_URL}.tgz # %I %p %Z, %A %B %d, %Y") 70 | local Comment=$(date --date="@${1}" "+# %I %p %Z, %A %B %d, %Y") 71 | debug "Doing cron job.\nLog record: ${Comment}"; 72 | ${SCRIPT_DIR}/handleBackup.sh "${Comment}" &>/dev/null; 73 | # ${SCRIPT_DIR}/handleBackup.sh "${Comment}"; 74 | # writeToLog "${theTimeStamp}"; 75 | # writeTgz "${theTimeStamp}"; 76 | fi; 77 | } 78 | 79 | purgeLog() { 80 | # cat ${bkupsLogFile}; 81 | > ${bkupsLogFile}; 82 | } 83 | 84 | createTestRecords() { 85 | local START_DATE=${1}; 86 | local endDate=$(date -d "${2}" "+%s"); 87 | pushd ${SCRIPT_DIR} >/dev/null; 88 | purgeLog; 89 | # debug "${START_DATE} ${cntr} ${incr}"; 90 | local toDate=$(date -d "${START_DATE} ${cntr} ${incr}" "+%s") 91 | # debug "toDate = ${toDate}"; 92 | # debug "endDate = ${endDate}"; 93 | until [ ${toDate} -ge ${endDate} ]; do 94 | toDate=$(date -d "${START_DATE} ${cntr} ${incr}" +%s) 95 | processEvent ${toDate}; 96 | cntr=$((${delta}+${cntr})) 97 | done 98 | popd >/dev/null; 99 | } 100 | 101 | 102 | if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then 103 | debug "Cron job started......"; 104 | 105 | TZ="America/Guayaquil" 106 | # Get the current time in the zone 107 | zone=$(TZ=${TZ} date "+%Y%m%d %R") 108 | zone_time=$(date -d "${zone}" "+%s") 109 | debug "Zone '${TZ}' time is ${zone_time}"; 110 | processEvent ${zone_time}; 111 | 112 | 113 | # SD=$(date --date="${START_DATE}" "+Start date: %Y-%m-%d %R %Z (%A %B %d, %Y)"); 114 | # ED=$(date --date="${END_DATE}" "+ End date: %Y-%m-%d %R %Z (%A %B %d, %Y)"); 115 | # debug "\n${SD}\n${ED}"; 116 | # createTestRecords "${START_DATE}" "${END_DATE}"; 117 | 118 | debug "Cron job terminated\n\n"; 119 | fi; 120 | 121 | -------------------------------------------------------------------------------- /BaRe/handleBackup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export CURR_SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" && pwd ) 6 | export SCRIPT_NAME=$( basename ${0#-} ); 7 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 8 | 9 | # echo -e "SCRIPT_DIR ${SCRIPT_DIR}"; 10 | # echo -e "CURR_SCRIPT_DIR ${CURR_SCRIPT_DIR}"; 11 | # echo -e "SCRIPT_NAME ${SCRIPT_NAME}"; 12 | # echo -e "THIS_SCRIPT ${THIS_SCRIPT}"; 13 | 14 | source ${CURR_SCRIPT_DIR}/utils.sh; 15 | 16 | 17 | declare TO="${BACKUP_DIR}" 18 | declare FROM="${TARGET_BENCH}/${BACKUPS_PATH}"; 19 | 20 | declare RPRT="backup_report.txt"; 21 | declare DB_NAME=""; 22 | 23 | declare PREFIX="./${BACKUPS_PATH}/" 24 | declare COMMENT=""; 25 | 26 | if [[ -z ${1} ]]; then 27 | echo -e "Usage: ${SCRIPT_NAME} \"Obligatory comment in double quotes\" 28 | "; 29 | exit; 30 | else 31 | COMMENT="${1}"; 32 | fi; 33 | 34 | 35 | pushd ${TARGET_BENCH} >/dev/null; 36 | SOURCE_HOST=${TARGET_HOST}; 37 | 38 | echo -e "\n - Backing up \"${COMMENT}\" for site ${SOURCE_HOST} (in ${BACKUP_DIR})."; 39 | 40 | mkdir -p ${BACKUP_DIR}; 41 | 42 | DB_NAME=$(jq -r .db_name sites/${SOURCE_HOST}/${SITE_CONFIG}); 43 | 44 | echo -e " - Backup command is:\n ==> bench --site ${SOURCE_HOST} backup --with-files > ${TMP_DIR}/${RPRT};"; 45 | echo -e " - Will archive database (${DB_NAME}) and files to ${FROM}"; 46 | echo -e " - Will write log result to ${TMP_DIR}/${RPRT}"; 47 | 48 | echo -e " started ..."; 49 | bench --site ${SOURCE_HOST} backup --with-files > ${TMP_DIR}/${RPRT}; 50 | echo -e 
" ... done"; 51 | 52 | line=$(grep Config ${TMP_DIR}/${RPRT} | cut -d ' ' -f 4) 53 | # echo -e "\nline : ${line}" 54 | prefix="./${ERPNEXT_SITE_URL}/private/backups/" 55 | suffix="-site_config_backup.json" 56 | part=${line#"$prefix"} 57 | # echo -e "part : ${part}" 58 | BACKUP_FILE_UID=${part%"$suffix"} 59 | echo -e "BACKUP_FILE_UID : ${BACKUP_FILE_UID}" 60 | 61 | pushd sites/${SOURCE_HOST} >/dev/null; 62 | VIEWS_FILE_NAME="${BACKUP_FILE_UID}-views.ddl" 63 | pushd private/backups >/dev/null; 64 | echo -e " - Saving database views constructors as '${VIEWS_FILE_NAME}' to site backups directory."; 65 | mysql -AD ${DB_NAME} \ 66 | --skip-column-names \ 67 | --batch \ 68 | -e 'select CONCAT("DROP TABLE IF EXISTS ", TABLE_NAME, "; CREATE OR REPLACE VIEW ", TABLE_NAME, " AS ", VIEW_DEFINITION, "; ") as ddl FROM information_schema.views WHERE table_schema = (SELECT database() FROM dual)' \ 69 | > ${VIEWS_FILE_NAME}; 70 | popd >/dev/null; 71 | popd >/dev/null; 72 | 73 | popd >/dev/null; 74 | 75 | 76 | echo -e "\n - Re-packaging database backup."; 77 | 78 | echo -e " - Comment :: \"${COMMENT}\""; 79 | echo -e " - Source : ${FROM}"; 80 | echo -e " - Dest : ${TO}"; 81 | echo -e " - Name : ${BACKUP_FILE_UID}"; 82 | 83 | # tree -L 2 ${TARGET_BENCH}; 84 | 85 | # echo -e "${pYELLOW}----------------- handleBackup Curtailed --------------------------${pDFLT}"; 86 | # # ls -la; 87 | # # hostname; 88 | # pwd; 89 | # exit; 90 | 91 | pushd ${FROM} >/dev/null; 92 | echo -e " - Compression command is:\n ==> tar zcvf ${TO}/${BACKUP_FILE_UID}.tgz ./${BACKUP_FILE_UID}*"; 93 | echo -e " started ..."; 94 | tar zcvf ${TO}/${BACKUP_FILE_UID}.tgz ./${BACKUP_FILE_UID}* >/dev/null; 95 | rm -f ./${BACKUP_FILE_UID}*; 96 | echo -e " ... done"; 97 | popd >/dev/null; 98 | 99 | pushd ${TO} >/dev/null; 100 | # pwd; 101 | # ls -la 102 | echo -e "${BACKUP_FILE_UID}.tgz" > ProdBckup.txt; 103 | cp ProdBckup.txt BACKUP.txt; 104 | touch NotesForBackups.txt; 105 | echo -e "${BACKUP_FILE_UID}.tgz ${COMMENT}" >> NotesForBackups.txt; 106 | echo -e "\n - The 5 most recent logged repackaging results in '$(pwd)/${pGOLD}NotesForBackups.txt${pDFLT}' are :${pGOLD}"; 107 | tail -n 5 NotesForBackups.txt; 108 | popd >/dev/null; 109 | 110 | seconds=$(($(date +'%s') - $start)); 111 | 112 | 113 | echo -e "\n\n${pGREEN}Backup process completed! 
Elapsed time, $(secs_to_human $seconds) seconds 114 | 115 | ${pDFLT}"; 116 | -------------------------------------------------------------------------------- /BaRe/handleRestore.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export CURR_SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" && pwd ) 6 | export SCRIPT_NAME=$( basename ${0#-} ); 7 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 8 | 9 | # echo -e "SCRIPT_DIR ${SCRIPT_DIR}"; 10 | # echo -e "CURR_SCRIPT_DIR ${CURR_SCRIPT_DIR}"; 11 | # echo -e "SCRIPT_NAME ${SCRIPT_NAME}"; 12 | # echo -e "THIS_SCRIPT ${THIS_SCRIPT}"; 13 | 14 | source ${CURR_SCRIPT_DIR}/utils.sh; 15 | 16 | 17 | declare SITE_ALIAS="${ERPNEXT_SITE_URL//./_}"; 18 | 19 | declare TMP_BACKUP_DIR="${TMP_DIR}/BKP"; 20 | declare BACKUP_DIR="${TARGET_BENCH}/BKP"; 21 | declare BACKUP_FILE_NAME_HOLDER="${BACKUP_DIR}/BACKUP.txt"; 22 | declare ACTIVE_DATABASE=""; 23 | 24 | function getNewSiteNameFromFileName() { 25 | declare filefrag=$(echo -e "${1}" | grep -o "\-.*\."); 26 | filefrag=${filefrag#"-"}; 27 | BACKUP_FILE_SITE_NAME=${filefrag%"."}; 28 | } 29 | 30 | function repackageWithCorrectedSiteName() { 31 | echo -e " The backup is from a different ERPNext site."; 32 | declare BACKUP_FILE_NAME=${BACKUP_FILE_FULL_NAME%".tgz"} 33 | rm -fr *; 34 | tar zxvf ${BACKUP_DIR}/${BACKUP_FILE_FULL_NAME} > /dev/null; 35 | echo -e " Will rename all backup files ...${pFAINT_BLUE}"; 36 | gunzip ${BACKUP_FILE_NAME}-database.sql.gz >/dev/null; 37 | # ls -la ${BACKUP_FILE_NAME}-*; 38 | 39 | declare OLD_FILE=""; 40 | declare NEW_FILE=""; 41 | 42 | OLD_FILE="${BACKUP_FILE_NAME}-database.sql"; 43 | NEW_FILE=${OLD_FILE/${BACKUP_FILE_SITE_NAME}/"${SITE_ALIAS}"}; 44 | echo " - '${OLD_FILE}' becomes '${NEW_FILE}'." 45 | mv ${OLD_FILE} ${NEW_FILE}; 46 | # ls -la; 47 | gzip ${NEW_FILE}; 48 | 49 | 50 | OLD_FILE="${BACKUP_FILE_NAME}-files.tar"; 51 | NEW_FILE=${OLD_FILE/${BACKUP_FILE_SITE_NAME}/"${SITE_ALIAS}"}; 52 | echo " - '${OLD_FILE}' becomes '${NEW_FILE}'." 53 | mv ${OLD_FILE} ${NEW_FILE}; 54 | 55 | OLD_FILE="${BACKUP_FILE_NAME}-private-files.tar"; 56 | NEW_FILE=${OLD_FILE/${BACKUP_FILE_SITE_NAME}/"${SITE_ALIAS}"}; 57 | echo " - '${OLD_FILE}' becomes '${NEW_FILE}'." 58 | mv ${OLD_FILE} ${NEW_FILE}; 59 | 60 | OLD_FILE="${BACKUP_FILE_NAME}-site_config_backup.json"; 61 | NEW_FILE=${OLD_FILE/${BACKUP_FILE_SITE_NAME}/"${SITE_ALIAS}"}; 62 | echo " - '${OLD_FILE}' becomes '${NEW_FILE}'." 63 | mv ${OLD_FILE} ${NEW_FILE}; 64 | 65 | 66 | echo -e "${pDFLT} - patch site name with sed. --> '${NEW_FILE}' from '${OLD_SITE_URL//_/.}' to '${ERPNEXT_SITE_URL}' "; 67 | 68 | sed -i "s/${OLD_SITE_URL//_/.}/${ERPNEXT_SITE_URL}/g" ${NEW_FILE}; 69 | # jq -r . ${NEW_FILE}; 70 | 71 | OLD_FILE="${BACKUP_FILE_NAME}.tgz"; 72 | NEW_FILE=${OLD_FILE/${BACKUP_FILE_SITE_NAME}/"${SITE_ALIAS}"}; 73 | 74 | echo -e "${pDFLT} - Creating new package from repackaged contents of '${BACKUP_FILE_FULL_NAME}'." 75 | tar zcvf ${BACKUP_DIR}/${NEW_FILE} ${NEW_FILE%".tgz"}-* >/dev/null; 76 | 77 | echo -e " Resulting file is -" 78 | echo -e " - ${BACKUP_DIR}/${NEW_FILE}"; 79 | 80 | echo -e " - Writing new package file name into file name holder : '${BACKUP_FILE_NAME_HOLDER}'." 
81 | echo -e "${NEW_FILE}" > ${BACKUP_FILE_NAME_HOLDER}; 82 | 83 | # echo -e "\n${pRED}----------------- Restore handler curtailed --------------------------${pDFLT}\n" 84 | # pwd; 85 | # ls -la; 86 | # exit; 87 | } 88 | 89 | export GOOGLE_SOCIAL_LOGIN_PACKAGE="GSLP.json"; 90 | function buildGoogleSocialLoginPackage() { 91 | local CLIENT_ID="$(echo ${GOOGLE_SL_PARMS} | jq -r .client_id)"; 92 | local CLIENT_SECRET="$(echo ${GOOGLE_SL_PARMS} | jq -r .client_secret)"; 93 | cat > ${TMP_DIR}/${GOOGLE_SOCIAL_LOGIN_PACKAGE} </dev/null; 220 | 221 | echo -e " - Getting backup file name from name holder file: '${BACKUP_FILE_NAME_HOLDER}'"; 222 | if [ ! -f ${BACKUP_FILE_NAME_HOLDER} ]; then 223 | echo -e "\n* * * ERROR: Backup file name file '${BACKUP_FILE_NAME_HOLDER}'! * * * \n"; 224 | exit; 225 | fi; 226 | 227 | local BACKUP_FILE_FULL_NAME=$(cat ${BACKUP_FILE_NAME_HOLDER}); 228 | local BACKUP_FILE_DATE=$(echo ${BACKUP_FILE_FULL_NAME} | cut -d - -f 1); 229 | 230 | 231 | if [ ! -f ${BACKUP_DIR}/${BACKUP_FILE_FULL_NAME} ]; then 232 | echo -e "\n* * * Backup '${BACKUP_FILE_FULL_NAME}' was not found at $(pwd)! * * * \n"; 233 | else 234 | echo -e " - Process archive file: '${BACKUP_FILE_FULL_NAME}' into '$(pwd)'."; 235 | 236 | getNewSiteNameFromFileName ${BACKUP_FILE_FULL_NAME}; 237 | 238 | echo -e " - Does site name, '${BACKUP_FILE_SITE_NAME}', extracted from backup file full name, match this site '${SITE_ALIAS}' ??"; 239 | if [[ "${BACKUP_FILE_SITE_NAME}" != "${SITE_ALIAS}" ]]; then 240 | export OLD_SITE_URL="${BACKUP_FILE_SITE_NAME}"; 241 | repackageWithCorrectedSiteName; 242 | fi; 243 | 244 | echo -e " - Commencing decompression. Command is: 245 | tar zxvf ${BACKUP_DIR}/${BACKUP_FILE_FULL_NAME}${pGOLD}"; 246 | 247 | tar zxvf ${BACKUP_DIR}/${BACKUP_FILE_FULL_NAME}; 248 | echo -e " ${pDFLT}"; 249 | 250 | fi; 251 | popd >/dev/null; 252 | 253 | 254 | 255 | # echo -e "\n${pRED}----------------- * Restore handler curtailed * --------------------------${pDFLT} 256 | # ${BACKUP_DIR}/${BACKUP_FILE_FULL_NAME}"; 257 | # ls -la ${BACKUP_DIR}/${BACKUP_FILE_FULL_NAME}; 258 | # exit; 259 | 260 | pushd ${TARGET_BENCH} >/dev/null; 261 | if [[ "X${BACKUP_FILE_DATE}X" != "XX" ]]; then 262 | echo -e "\n - Backup to be restored: ${TMP_BACKUP_DIR}/${BACKUP_FILE_DATE}-${SITE_ALIAS}*"; 263 | declare BUSQ="${TMP_BACKUP_DIR}/${BACKUP_FILE_DATE}-${SITE_ALIAS}-database.sql.gz"; 264 | declare BUPU="${TMP_BACKUP_DIR}/${BACKUP_FILE_DATE}-${SITE_ALIAS}-files.tar"; 265 | declare BUPR="${TMP_BACKUP_DIR}/${BACKUP_FILE_DATE}-${SITE_ALIAS}-private-files.tar"; 266 | declare BUSC="${TMP_BACKUP_DIR}/${BACKUP_FILE_DATE}-${SITE_ALIAS}-site_config_backup.json"; 267 | 268 | # declare PASS=" --mariadb-root-password ${EXISTING_DB_PASSWORD}"; 269 | declare PASS=" --mariadb-root-password ${MYPWD}"; 270 | declare DATA=" ${BUSQ}"; 271 | declare FILE=" --with-public-files ${BUPU}"; 272 | declare PRIV=" --with-private-files ${BUPR}"; 273 | 274 | # echo -e "\n/* ~~~~~~~~ Stopping ERPNext ~~~~~~~~~ */"; 275 | # sudo -A supervisorctl status all; 276 | 277 | 278 | echo -e " - Should '${SITE_CONFIG}' of '${ERPNEXT_SITE_URL}' be overwritten?\n Restore parameters file = '${RESTORE_SITE_CONFIG}'"; 279 | 280 | if [[ "${RESTORE_SITE_CONFIG}" == "yes" ]]; then 281 | pushd "./sites/${ERPNEXT_SITE_URL}/" >/dev/null; 282 | declare SITE_CONFIG_COPY_NAME=""; 283 | SITE_CONFIG_COPY_NAME="${SITE_CONFIG%%.*}_$(date "+%Y-%m-%d_%H.%M").${SITE_CONFIG##*.}"; 284 | echo -e " - Creating dated safety copy of '${SITE_CONFIG}' :: ${SITE_CONFIG_COPY_NAME}."; 285 | cp 
${SITE_CONFIG} ${SITE_CONFIG_COPY_NAME}; 286 | 287 | declare NEW_USER=$(jq -r .db_name ${BUSC}); 288 | declare NEW_PWD=$(jq -r .db_password ${BUSC}); 289 | declare OLD_PWD=$(jq -r .db_password ${SITE_CONFIG}); 290 | 291 | declare SHOW_PWD="********"; 292 | 293 | echo -e " - Should 'db_password' of site '${ERPNEXT_SITE_URL}' be overwritten?\n Keep current database password = '${KEEP_SITE_PASSWORD}'"; 294 | if [[ "${KEEP_SITE_PASSWORD}" == "yes" ]]; then 295 | 296 | # SHOW_PWD="${OLD_PWD}"; 297 | 298 | echo -e " Writing current database password into new site configuration '${BUSC}'."; 299 | sed -i "s/.*\"db_password\":.*/ \"db_password\": \"${OLD_PWD}\",/" ${BUSC}; 300 | else 301 | 302 | # SHOW_PWD=${NEW_PWD}; 303 | 304 | echo -e " Setting new database user '${NEW_USER}' & password '${SHOW_PWD}' into current database."; 305 | mariadb -AD mysql --skip-column-names --batch \ 306 | -e "select \" [ set password for '${NEW_USER}'@'localhost' = PASSWORD('${NEW_PWD}') ]\""; 307 | mariadb -AD mysql --skip-column-names --batch \ 308 | -e "set password for '${NEW_USER}'@'localhost' = PASSWORD('${NEW_PWD}');"; 309 | fi; 310 | 311 | popd >/dev/null; 312 | 313 | echo -e " - Overwriting './sites/${ERPNEXT_SITE_URL}/${SITE_CONFIG}' with ${SITE_CONFIG} from backup."; 314 | cp ${BUSC} ./sites/${ERPNEXT_SITE_URL}/${SITE_CONFIG} 315 | 316 | else 317 | echo -e " Won't overwrite '${SITE_CONFIG}'"; 318 | fi; 319 | 320 | ACTIVE_DATABASE=$(jq -r .db_name ./sites/${ERPNEXT_SITE_URL}/${SITE_CONFIG}); 321 | echo -e "\n - Restoring database ${ACTIVE_DATABASE}. Command is: 322 | ==> bench --site ${ERPNEXT_SITE_URL} --force restore --mariadb-root-password ${SHOW_PWD} \\ 323 | ${FILE} \\ 324 | ${PRIV} \\ 325 | ${DATA}${pFAINT_BLUE}"; 326 | 327 | # echo -e "\n${pRED}----------------- * Restore handler curtailed * --------------------------${pDFLT}\n${EXISTING_DB_PASSWORD}"; 328 | # exit; 329 | 330 | echo -e "${pDFLT} started ...${pFAINT_BLUE}"; 331 | bench --site ${ERPNEXT_SITE_URL} --force restore ${PASS} ${FILE} ${PRIV} ${DATA}; 332 | echo -e "${pDFLT} ... restored"; 333 | 334 | # CURRENT_ERPNEXT_VERSION=$(bench version); 335 | CURRENT_ERPNEXT_VERSION=$(bench version | grep erpnext | cut -d' ' -f 2 | cut -d'.' -f 1); 336 | echo -e "\n - Found current site version :: ${CURRENT_ERPNEXT_VERSION}"; 337 | 338 | pushd ./sites/${ERPNEXT_SITE_URL}/private/files >/dev/null; 339 | if [[ -f erpnextVersion.txt ]]; then 340 | OLD_SITE_ERPNEXT_VERSION=$(echo -e "$(cat erpnextVersion.txt | cut -d' ' -f 2)" | cut -d'.' -f 1); 341 | echo -e "\n - Found version of old site :: ${OLD_SITE_ERPNEXT_VERSION}"; 342 | else 343 | echo -e "${pYELLOW}* * * WARNING: Unable to determine ERPNext version of old site * * * . 344 | Will assume it is version 13. 345 | A file './sites/${ERPNEXT_SITE_URL}/private/files/erpnextVersion.txt' was expected but not found. 346 | The file should contain, for example, \"erpnext 13.18.7\". 347 | ${pDFLT}"; 348 | # OLD_SITE_ERPNEXT_VERSION=${CURRENT_ERPNEXT_VERSION}; 349 | OLD_SITE_ERPNEXT_VERSION=13; 350 | fi; 351 | 352 | # echo -e "\n - Restoring database views"; 353 | # echo -e "${pDFLT} started ..."; 354 | # mysql -AD ${ACTIVE_DATABASE} < ./ddlViews.sql; 355 | # echo -e " ... 
restored"; 356 | popd >/dev/null; 357 | 358 | 359 | rm -fr ${BUSQ}; 360 | rm -fr ${BUPU}; 361 | rm -fr ${BUPR}; 362 | rm -fr ${BUSC}; 363 | 364 | else 365 | echo -e "ERROR: Cannot find files from decompressed archive '${BACKUP_FILE_FULL_NAME}';"; 366 | fi; 367 | popd >/dev/null; 368 | 369 | } 370 | 371 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 372 | echo -e "\n - Restoring backup ..."; 373 | 374 | restoreDatabase ${1}; 375 | 376 | if [[ 1 == 1 ]]; then 377 | if [[ ${CURRENT_ERPNEXT_VERSION} -gt ${OLD_SITE_ERPNEXT_VERSION} ]]; then 378 | # echo -e "\n - Must run bench 'migrate' and 'clear-cache'.${pFAINT_BLUE}"; 379 | 380 | echo -e "${pYELLOW} - Correcting V13 to V14 discrepancies.${pDFLT} "; 381 | echo -e "${pGOLD} ~ Migrate.${pDFLT} "; 382 | bench --site ${ERPNEXT_SITE_URL} migrate; 383 | echo -e "${pGOLD} ~ Clear cache.${pDFLT} "; 384 | bench --site ${ERPNEXT_SITE_URL} clear-cache; 385 | echo -e "${pGOLD} ~ Enable Scheduler.${pDFLT} "; 386 | bench --site ${ERPNEXT_SITE_URL} enable-scheduler; 387 | 388 | fi; 389 | echo -e "\n - Restarting ERPNext${pFAINT_BLUE}"; 390 | sudo -A supervisorctl start all; 391 | echo -e "${pDFLT} restarted"; 392 | 393 | echo -e "\n - Delaying for restart to complete..."; 394 | sleep 10; 395 | 396 | # pushd ${TARGET_BENCH} >/dev/null; 397 | # pushd ./sites/${ERPNEXT_SITE_URL}/private/files >/dev/null; 398 | # # OLD_SITE_ERPNEXT_VERSION=$(echo -e "$(cat erpnextVersion.txt | cut -d' ' -f 2)" | cut -d'.' -f 1); 399 | # # echo -e "\n - Found version of old site :: ${OLD_SITE_ERPNEXT_VERSION}"; 400 | 401 | # echo -e "\n - Restoring database views to database :: ${ACTIVE_DATABASE}"; 402 | # echo -e "${pDFLT} started ..."; 403 | # cat ./ddlViews.sql; 404 | # mysql -AD ${ACTIVE_DATABASE} < ./ddlViews.sql; 405 | # echo -e " ... restored"; 406 | # popd >/dev/null; 407 | # popd >/dev/null; 408 | 409 | 410 | # echo -e "\n${pRED}----------------- Restore handler curtailed --------------------------${pDFLT} 411 | # OLD_SITE_ERPNEXT_VERSION = ${OLD_SITE_ERPNEXT_VERSION} 412 | # CURRENT_ERPNEXT_VERSION = ${CURRENT_ERPNEXT_VERSION} 413 | # "; 414 | # pwd; 415 | # # whoami; 416 | # exit; 417 | 418 | echo -e "\n - Restoring Social Login ..."; 419 | restoreSocialLoginConfig; 420 | echo -e "${pDFLT} restored"; 421 | else 422 | echo -e "\n - Restarting ERPNext ${pRED}*** SKIPPED ***${pDFLT}"; 423 | fi; 424 | 425 | 426 | seconds=$(($(date +'%s') - ${start})); 427 | echo -e "\n\n${pGREEN}Restore completed. Elapsed time, $(secs_to_human ${seconds}) seconds 428 | 429 | ${pDFLT}"; 430 | 431 | else 432 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 433 | fi 434 | -------------------------------------------------------------------------------- /BaRe/misc/50-server.cnf: -------------------------------------------------------------------------------- 1 | # 2 | # These groups are read by MariaDB server. 
3 | # Use it for options that only the server (but not clients) should see 4 | 5 | # this is read by the standalone daemon and embedded servers 6 | [server] 7 | 8 | # this is only for the mysqld standalone daemon 9 | [mysqld] 10 | 11 | # 12 | # * Basic Settings 13 | # 14 | 15 | #user = mysql 16 | pid-file = /run/mysqld/mysqld.pid 17 | basedir = /usr 18 | #datadir = /var/lib/mysql 19 | #tmpdir = /tmp 20 | 21 | # Broken reverse DNS slows down connections considerably and name resolve is 22 | # safe to skip if there are no "host by domain name" access grants 23 | #skip-name-resolve 24 | 25 | # Instead of skip-networking the default is now to listen only on 26 | # localhost which is more compatible and is not less secure. 27 | bind-address = 127.0.0.1 28 | 29 | # 30 | # * Fine Tuning 31 | # 32 | 33 | #key_buffer_size = 128M 34 | #max_allowed_packet = 1G 35 | #thread_stack = 192K 36 | #thread_cache_size = 8 37 | # This replaces the startup script and checks MyISAM tables if needed 38 | # the first time they are touched 39 | #myisam_recover_options = BACKUP 40 | #max_connections = 100 41 | #table_cache = 64 42 | 43 | # 44 | # * Logging and Replication 45 | # 46 | 47 | # Both location gets rotated by the cronjob. 48 | # Be aware that this log type is a performance killer. 49 | # Recommend only changing this at runtime for short testing periods if needed! 50 | #general_log_file = /var/log/mysql/mysql.log 51 | #general_log = 1 52 | 53 | # When running under systemd, error logging goes via stdout/stderr to journald 54 | # and when running legacy init error logging goes to syslog due to 55 | # /etc/mysql/conf.d/mariadb.conf.d/50-mysqld_safe.cnf 56 | # Enable this if you want to have error logging into a separate file 57 | #log_error = /var/log/mysql/error.log 58 | # Enable the slow query log to see queries with especially long duration 59 | #slow_query_log_file = /var/log/mysql/mariadb-slow.log 60 | #long_query_time = 10 61 | #log_slow_verbosity = query_plan,explain 62 | #log-queries-not-using-indexes 63 | #min_examined_row_limit = 1000 64 | 65 | # The following can be used as easy to replay backup logs or for replication. 66 | # note: if you are setting up a replication slave, see README.Debian about 67 | # other settings you may need to change. 68 | #server-id = 1 69 | #log_bin = /var/log/mysql/mysql-bin.log 70 | expire_logs_days = 10 71 | #max_binlog_size = 100M 72 | 73 | # 74 | # * SSL/TLS 75 | # 76 | 77 | # For documentation, please read 78 | # https://mariadb.com/kb/en/securing-connections-for-client-and-server/ 79 | #ssl-ca = /etc/mysql/cacert.pem 80 | #ssl-cert = /etc/mysql/server-cert.pem 81 | #ssl-key = /etc/mysql/server-key.pem 82 | #require-secure-transport = on 83 | 84 | # 85 | # * Character sets 86 | # 87 | 88 | # MySQL/MariaDB default is Latin1, but in Debian we rather default to the full 89 | # utf8 4-byte character set. See also client.cnf 90 | character-set-server = utf8mb4 91 | collation-server = utf8mb4_unicode_ci 92 | 93 | # 94 | # * InnoDB 95 | # 96 | 97 | # InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/. 98 | # Read the manual for more InnoDB related options. There are many! 99 | # Most important is to give InnoDB 80 % of the system RAM for buffer use: 100 | # https://mariadb.com/kb/en/innodb-system-variables/#innodb_buffer_pool_size 101 | #innodb_buffer_pool_size = 8G 102 | 103 | # this is only for embedded server 104 | [embedded] 105 | 106 | # This group is only read by MariaDB servers, not by MySQL. 
107 | # If you use the same .cnf file for MySQL and MariaDB, 108 | # you can put MariaDB-only options here 109 | [mariadb] 110 | 111 | # This group is only read by MariaDB-10.6 servers. 112 | # If you use the same .cnf file for MariaDB of different versions, 113 | # use this group for options that older servers don't understand 114 | [mariadb-10.6] 115 | -------------------------------------------------------------------------------- /BaRe/rSYNC.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | export SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"; 4 | export SCRIPT_NAME=$(basename "$0"); 5 | 6 | export REMOTE_PROJECT_DIR="returnable"; 7 | 8 | source ../envars.sh; 9 | 10 | declare SLAVE_HOST_ALIAS=${SERVER}; 11 | declare SLAVE_HOST_USR=${ERP_USER_NAME}; 12 | declare SLAVE_BENCH_NAME=${TARGET_BENCH_NAME}; 13 | 14 | export REMOTE_PROJECT="${SLAVE_HOST_ALIAS}:/home/${SLAVE_HOST_USR}/${SLAVE_BENCH_NAME}/BaRe"; 15 | 16 | 17 | if [[ -z ${1} ]]; then 18 | echo -e "Usage: ./rSync y" 19 | echo -e "Will synchronize this directory '$(pwd)' with :: ${REMOTE_PROJECT}"; 20 | exit; 21 | else 22 | echo -e "Synching this directory '$(pwd)' with remote directory :: ${REMOTE_PROJECT}"; 23 | fi; 24 | 25 | while inotifywait -qqr -e close_write,move,create,delete ./*; do 26 | rsync -rzavx --update . ${REMOTE_PROJECT}; 27 | done; 28 | 29 | # echo -e "\n\n/* ~~~~~~~~~ Curtailed ~~~~~~~ ${SCRIPT_DIR}/${SCRIPT_NAME} ~~~~~~~~ */ 30 | 31 | # "; 32 | # exit; 33 | -------------------------------------------------------------------------------- /BaRe/ros.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # set -e; 4 | 5 | WATCH_DIRECTORY=$1; 6 | shift; 7 | 8 | EVENT_TASK=$1; 9 | shift; 10 | 11 | IGNORE_PATHS="$@"; 12 | 13 | function listVariables() { 14 | echo -e "Variables :: 15 | WATCH_DIRECTORY = ${WATCH_DIRECTORY}; 16 | EVENT_TASK = ${EVENT_TASK}; 17 | "; 18 | } 19 | 20 | function doIt() { 21 | sleep 1; 22 | ${EVENT_TASK}; 23 | }; 24 | 25 | echo -e "\nros.sh -- Run On Save : Executes the indicated command when any file is changed in the indicated directory. 26 | Usage: ./ros.sh . \"ls -la\";\n\n"; 27 | 28 | declare PKG="inotify-tools"; 29 | 30 | if ! dpkg-query -l ${PKG} &>/dev/null; then 31 | echo "Attempting to install '${PKG}'" 32 | if sudo apt-get -y install ${PKG} &>/dev/null; then 33 | echo -e "Hmmm."; 34 | else 35 | echo -e "Required repositories are not available. Running 'apt-get update'"; 36 | sudo apt-get update; 37 | echo "\n\nAgain attempting to install '${PKG}'\n" 38 | sudo apt-get -y install ${PKG}; 39 | echo "\nInstalled '${PKG}'\n" 40 | fi; 41 | fi; 42 | declare PKG="tree"; 43 | dpkg-query -l ${PKG} &>/dev/null || sudo apt-get -y install ${PKG}; 44 | 45 | echo "Run-On-Save will execute : '${EVENT_TASK}'"; 46 | listVariables; 47 | 48 | doIt; 49 | while true #run indefinitely 50 | do 51 | inotifywait -qqr -e close_write,move,create,delete ${IGNORE_PATHS} ${WATCH_DIRECTORY} && doIt; 52 | done 53 | -------------------------------------------------------------------------------- /BaRe/trimLog.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | abort() { 5 | echo >&2 ' 6 | *************** 7 | *** ABORTED *** 8 | *************** 9 | ' 10 | echo "An error occurred. Exiting..." 
>&2 11 | exit 1 12 | } 13 | 14 | trap 'abort' 0 15 | 16 | export SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 17 | 18 | source ${SCRIPT_DIR}/envars.sh; 19 | 20 | set -e; 21 | 22 | echo -e "trimLog.sh"; 23 | 24 | declare TMP_DIR="/dev/shm"; 25 | 26 | declare WORK_SCRIPT_NAME="copyBackUps.sh"; 27 | 28 | declare WORK_DIR="${TMP_DIR}/Work"; 29 | declare WORK_FILE_NAME="backUpsWork.log"; 30 | declare WORK_FILE="${WORK_DIR}/${WORK_FILE_NAME}"; 31 | declare WORK_SCRIPT="${WORK_DIR}/${WORK_SCRIPT_NAME}"; 32 | 33 | declare BKP_DIR="${TARGET_BENCH}/BKP"; 34 | declare BKP_FILE_NAME="NotesForBackups.txt"; 35 | declare BKP_LATEST_NAME="BACKUP.txt"; 36 | declare BKP_FILE="${BKP_DIR}/${BKP_FILE_NAME}"; 37 | declare BKP_LATEST="${BKP_DIR}/${BKP_LATEST_NAME}"; 38 | 39 | extractDate() { 40 | declare strDate=${1}; 41 | local __rslt=${2} 42 | 43 | declare theDay=$(echo "${strDate}" | cut -c1-8) 44 | declare theHour=$(echo "${strDate}" | cut -c10-11) 45 | declare theMinute=$(echo "${strDate}" | cut -c12-13) 46 | declare theSecond=$(echo "${strDate}" | cut -c14-15) 47 | 48 | local fileTimeStamp=$(date -d "${theDay} ${theHour}:${theMinute}:${theSecond}" "+%s") 49 | eval $__rslt="'${fileTimeStamp}'" 50 | } 51 | 52 | processRequiredFile() { 53 | local backupRecord=${1}; 54 | local backupFile=${backupRecord%%#*} 55 | echo -e "cp ${BKP_DIR}/${backupFile} ${WORK_DIR}" >> ${WORK_SCRIPT}; 56 | echo -e "${backupRecord} ${2}" >> ${WORK_FILE}; 57 | } 58 | 59 | secureRecentBackups() { 60 | declare workDate="${1}"; 61 | declare limitEpoch=$(date --date="${workDate}" "+%s"); 62 | echo -e "Securing recent ERPNext database backup files 63 | between ${limitEpoch} ($(date --date="${workDate}" "+%B %d, %Y")) and now $(date "+%s")"; 64 | 65 | BIFS=${IFS}; 66 | IFS=$'\n' 67 | 68 | declare limit=4; 69 | declare counter=0; 70 | 71 | tac ${BKP_FILE} | while read backupRecord; do 72 | 73 | recordTimeStamp=${backupRecord%%-*} 74 | # echo "Record times stamp is : "${recordTimeStamp}; 75 | extractDate "${recordTimeStamp}" recordEpoch; 76 | # echo -e "record epoch = ${recordEpoch} vs limit epoch = ${limitEpoch}"; 77 | 78 | if [[ "${limitEpoch}" > "${recordEpoch}" ]]; then exit; fi; 79 | 80 | echo -e "Put aside back up: ${backupRecord}"; 81 | processRequiredFile "${backupRecord}" "Back up"; 82 | 83 | if [[ ${counter} -ge ${limit} ]]; then exit; fi; 84 | 85 | ((counter+=1)) 86 | 87 | done; 88 | 89 | IFS=${BIFS}; 90 | 91 | echo -e "\n..........................................\n\n"; 92 | } 93 | 94 | secureRequiredOlderBackups() { 95 | declare workDate="${1}"; 96 | echo "Trimming ERPNext database backup files before $(date --date="${workDate}" "+%B %d %Y")."; 97 | 98 | BIFS=${IFS}; 99 | IFS=$'\n' 100 | 101 | declare LAST_BKUP_NO=""; 102 | declare LAST_DAY__NO=""; 103 | declare LAST_WEEK_NO=""; 104 | declare LAST_MNTH_NO=""; 105 | declare LAST_YEAR_NO=""; 106 | 107 | declare BKUP_NO=""; 108 | declare DAY__NO=""; 109 | declare WEEK_NO=""; 110 | declare MNTH_NO=""; 111 | declare YEAR_NO=""; 112 | 113 | declare limit=10000; 114 | declare counter=0; 115 | # declare RECENT_BKUP=0; 116 | declare RECENT__DAY=0; 117 | declare RECENT_WEEK=0; 118 | declare RECENT_MNTH=0; 119 | declare RECENT_YEAR=0; 120 | 121 | declare limitEpoch=$(date --date="${workDate}" "+%s"); 122 | 123 | tac ${BKP_FILE} | while read backupRecord; do 124 | recordTimeStamp=${backupRecord%%-*} 125 | # echo "Record times stamp is : "${recordTimeStamp}; 126 | extractDate "${recordTimeStamp}" recordEpoch; 127 | # echo -e "record epoch = ${recordEpoch}"; 
128 | if [[ ${recordEpoch} -lt ${limitEpoch} ]]; then 129 | 130 | BKUP_NO=$(date --date="@${recordEpoch}" "+%H") 131 | DAY__NO=$(date --date="@${recordEpoch}" "+%w") 132 | WEEK_NO=$(date --date="@${recordEpoch}" "+%U") 133 | MNTH_NO=$(date --date="@${recordEpoch}" "+%m") 134 | YEAR_NO=$(date --date="@${recordEpoch}" "+%Y") 135 | 136 | if [[ ${counter} -eq 0 ]]; then 137 | LAST_BKUP_NO=${BKUP_NO} 138 | LAST_DAY__NO=${DAY__NO} 139 | LAST_WEEK_NO=${WEEK_NO} 140 | LAST_MNTH_NO=${MNTH_NO} 141 | LAST_YEAR_NO=${YEAR_NO} 142 | fi; 143 | 144 | echo -e "${YEAR_NO} ${MNTH_NO} ${WEEK_NO} ${DAY__NO} ${BKUP_NO} (${counter}/${limit})"; 145 | 146 | if [[ "10#${YEAR_NO}" != "10#${LAST_YEAR_NO}" ]]; then 147 | echo -e "Year : ${YEAR_NO}/${LAST_YEAR_NO} (${backupRecord})"; 148 | processRequiredFile "${backupRecord}" "Year"; 149 | RECENT_YEAR=1; 150 | else 151 | if [[ ${RECENT_YEAR} -lt 1 ]]; then 152 | if [[ "10#${MNTH_NO}" != "10#${LAST_MNTH_NO}" ]]; then 153 | echo -e "Month : ${MNTH_NO}/${LAST_MNTH_NO} (${backupRecord})"; 154 | processRequiredFile "${backupRecord}" "Month"; 155 | RECENT_MNTH=1; 156 | else 157 | if [[ ${RECENT_MNTH} -lt 1 ]]; then 158 | if [[ "10#${WEEK_NO}" != "10#${LAST_WEEK_NO}" ]]; then 159 | echo -e "Week : ${WEEK_NO}/${LAST_WEEK_NO} (${backupRecord})"; 160 | processRequiredFile "${backupRecord}" "Week"; 161 | RECENT_WEEK=1; 162 | else 163 | if [[ ${RECENT_WEEK} -lt 1 ]]; then 164 | if [[ "10#${DAY__NO}" != "10#${LAST_DAY__NO}" ]]; then 165 | echo -e "Day : ${DAY__NO}/${LAST_DAY__NO} (${backupRecord})"; 166 | processRequiredFile "${backupRecord}" "Day"; 167 | RECENT__DAY=1; 168 | else 169 | if [[ ${RECENT__DAY} -lt 1 ]]; then 170 | if [[ "10#${BKUP_NO}" != "10#${LAST_BKUP_NO}" ]]; then 171 | echo -e "Back up : ${BKUP_NO}/${LAST_BKUP_NO} (${backupRecord})"; 172 | processRequiredFile "${backupRecord}" "Back up"; 173 | # RECENT_BKUP=1; 174 | fi; 175 | fi; 176 | fi; 177 | fi; 178 | fi; 179 | fi; 180 | fi; 181 | fi; 182 | fi; 183 | 184 | LAST_BKUP_NO=${BKUP_NO} 185 | LAST_DAY__NO=${DAY__NO} 186 | LAST_WEEK_NO=${WEEK_NO} 187 | LAST_MNTH_NO=${MNTH_NO} 188 | LAST_YEAR_NO=${YEAR_NO} 189 | 190 | ((counter+=1)); 191 | if [[ ${counter} -ge ${limit} ]]; then exit; fi; 192 | else 193 | echo "Record times stamp is : "${recordTimeStamp}; 194 | fi; 195 | 196 | done; 197 | 198 | IFS=${BIFS}; 199 | 200 | echo -e "\n..........................................\n\n"; 201 | } 202 | 203 | if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then 204 | declare workDate=$(date -d "-10 days" "+%A %B %d, %Y"); 205 | 206 | if [ ! -f "${BKP_FILE}" ]; then 207 | echo "File \""${BKP_FILE}"\" does not exist"; 208 | exit; 209 | fi 210 | 211 | echo -e "Trimming .... 
212 | mkdir -p ${WORK_DIR}; 213 | > ${WORK_FILE}; 214 | > ${WORK_SCRIPT}; 215 | chmod +x ${WORK_SCRIPT}; 216 | "; 217 | 218 | mkdir -p ${WORK_DIR}; 219 | > ${WORK_FILE}; 220 | > ${WORK_SCRIPT}; 221 | chmod +x ${WORK_SCRIPT}; 222 | 223 | secureRecentBackups "${workDate}"; 224 | 225 | secureRequiredOlderBackups "${workDate}"; 226 | 227 | echo -e "Executing generated script '${WORK_SCRIPT}' to collect required backup files."; 228 | ${WORK_SCRIPT} 229 | 230 | echo -e "Replacing backup log file '${BKP_FILE}' with '${WORK_FILE}'"; 231 | # tac ${WORK_FILE} 232 | echo -e "tac ${WORK_FILE} > ${BKP_FILE}"; 233 | tac ${WORK_FILE} > ${BKP_FILE}; 234 | # head -n 10 ${BKP_FILE}; 235 | # echo -e "==========\n\n"; 236 | 237 | echo -e "Deleting all backups in permanent directory..."; 238 | rm -f ${BKP_DIR}/20*.tgz; 239 | 240 | echo -e "Copying require backups into permanent directory..."; 241 | cp ${WORK_DIR}/20*.tgz ${BKP_DIR}; 242 | 243 | echo -e "Noting most recent backup ..."; 244 | tail -n 1 ${BKP_FILE} > ${BKP_LATEST}; 245 | 246 | fi; 247 | 248 | trap : 0 249 | 250 | echo >&2 ' 251 | ************ 252 | *** DONE *** 253 | ************ 254 | ' 255 | -------------------------------------------------------------------------------- /BaRe/utils.sh: -------------------------------------------------------------------------------- 1 | export start=$(date +'%s'); 2 | export seconds= 3 | 4 | secs_to_human() { 5 | echo "$(( ${1} / 3600 ))h $(( (${1} / 60) % 60 ))m $(( ${1} % 60 ))s" 6 | } 7 | 8 | 9 | export pRED="\033[1;40;31m"; 10 | export pYELLOW="\033[1;40;33m"; 11 | export pGOLD="\033[0;40;33m"; 12 | export pFAINT_BLUE="\033[0;49;34m"; 13 | export pGREEN="\033[1;40;32m"; 14 | export pDFLT="\033[0m"; 15 | export pBG_YLO="\033[1;43;33m"; 16 | 17 | export ENVARS="envars.sh"; 18 | export ENVIRONMENT_VARIABLES="${CURR_SCRIPT_DIR}/${ENVARS}"; 19 | 20 | if [[ -L ${ENVIRONMENT_VARIABLES} ]]; then 21 | if [[ -e ${ENVIRONMENT_VARIABLES} ]]; then 22 | echo -e "\n\n${pGREEN}Loading environment variables from '${ENVIRONMENT_VARIABLES}'${pDFLT}"; 23 | source ${ENVIRONMENT_VARIABLES}; 24 | else 25 | echo -e "${pRED} The local symlink '${ENVIRONMENT_VARIABLES}' to a file of environment variables is broken. Cannot proceed.${pDFLT}"; 26 | exit 1; 27 | fi; 28 | else 29 | echo -e "${pRED} A required symlink '${ENVIRONMENT_VARIABLES}' to a file of environment variables was not found. Cannot proceed.${pDFLT}"; 30 | exit 1; 31 | fi; 32 | 33 | declare TARGET_HOST=${ERPNEXT_SITE_URL}; 34 | 35 | declare SITES="sites"; 36 | declare SITE_PATH="${SITES}/${TARGET_HOST}"; 37 | declare PRIVATE_PATH="${SITE_PATH}/private"; 38 | declare BACKUPS_PATH="${PRIVATE_PATH}/backups"; 39 | declare FILES_PATH="${PRIVATE_PATH}/files"; 40 | 41 | declare TMP_DIR="/dev/shm"; 42 | 43 | declare BACKUP_DIR="${TARGET_BENCH}/BKP"; 44 | 45 | declare SITE_CONFIG="site_config.json"; 46 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Martin H. 
Bramwell
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ERP Next Fail Over
2 | 
3 | ### What?
4 | *ERPNextFailOver* is a tool to automate setting up Master / Slave continuous replication for ERPNext.
5 | 
6 | To be more precise, it sets up MySQL/MariaDB replication, but it also provides you with tools to help you migrate site-specific details, such as `site_config.json`. You will also find `handleBackup.sh` and `handleRestore.sh` included, which take care of backing up an origin site and restoring it to a destination with a different site name and URL. This includes correcting the Social Login details, which must correspond to the site name in use (Google only, so far).
7 | 
8 | ### Why?
9 | 
10 | You can certainly set up this kind of replication manually, but the docs typically cover many options and alternatives, making it time-consuming to work through what is appropriate for a single ERPNext installation. This tool gives you a minimal starting configuration, which you can enhance as needed.
11 | 
12 | For replication to start correctly, you want the master and slave databases to be nearly identical. In particular, they must both manage the same transaction log, and the "position tag" in the master's log must be the same as, or ahead of, the slave's log position tag. Setting up replication requires a sequence of operations that must be performed correctly, or you end up with a setup that seems right but doesn't start.
13 | 
14 | ## How?
15 | 
16 | To begin with, you should have three independent devices running Ubuntu Linux:
17 | 
18 | 1. Your workstation
19 | 2. The master host running ERPNext v13
20 | 3. The slave host running ERPNext v13
21 | 
22 | The entire setup and installation is driven from your workstation according to the variables you set in a shell script file: `envars.sh`.
23 | 
24 | For our use we have, for example, a master VPS rented in North America and a slave VPS somewhere in Europe. An equally valid setup would be to use QEMU/KVM, VirtualBox, etc. to create two virtual machines inside your workstation. The point is to have two distinct target machines, each with ERPNext installed on Ubuntu Linux.
25 | 
26 | You will not need to log into either of the other two *unless you use UID/PWD access with SSH*! These scripts require PKI-based SSH access, so you **will** need to prepare for that.
27 | 
28 | ## envars.sh
29 | 
30 | You need only concern yourself with two files:
31 | 
32 | - `prepareMasterAndSlave.sh`
33 | - `envars.sh`
34 | 
35 | The script, `prepareMasterAndSlave.sh`, does all the work according to the environment variable settings you make in `envars.sh`.
36 | 
37 | To get started you will need to copy `envars.example.sh` to `envars.sh` and adjust the values to conform to your configuration (a sketch of typical contents follows below).
38 | 
39 | It is recommended, but not necessary, to set up a host alias for both targets using `${HOME}/.ssh/config` (see the example below).
40 | 
41 | When `envars.sh` is correctly prepared you just need to run `prepareMasterAndSlave.sh`, which handles everything else. `prepareMasterAndSlave.sh` is *idempotent*, meaning you can rerun it as often as needed and (while `envars.sh` remains unaltered) you will always get the same result.
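The exact variable set is defined by `envars.example.sh`, which remains the authoritative template; the sketch below is purely illustrative. The names shown all appear in this repository's scripts and README, but the values are placeholders you must replace with your own.

```shell
# Illustrative sketch only -- copy envars.example.sh and edit that file;
# this is NOT the complete or authoritative variable list.
export MASTER_HOST_USR="admin";             # login user on the master host
export SLAVE_HOST_USR="adm";                # login user on the slave host
export ERPNEXT_SITE_URL="erp.example.com";  # the ERPNext site name / URL
export TARGET_BENCH_NAME="frappe-bench";    # name of the Frappe Bench directory
export RESTORE_SITE_CONFIG="yes";           # overwrite site_config.json when restoring?
export KEEP_SITE_PASSWORD="yes";            # keep the destination's current db_password?
```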
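The PKI-based SSH access is easiest to manage with host aliases and `IdentityFile` entries in `${HOME}/.ssh/config`. The entries below are hypothetical (the host, user and key-file names simply echo the ones visible in the execution log further down); substitute your own hosts and keys.

```
# Hypothetical ~/.ssh/config entries; adjust Host, User and IdentityFile to your setup.
Host loso.erpnext.host
    User admin
    IdentityFile ~/.ssh/admin_loso_erpnext_host

Host stg.erpnext.host
    User adm
    IdentityFile ~/.ssh/adm_stg_erpnext_host
```

With the keys loaded into `ssh-agent`, a connectivity check of the form `ssh admin@loso.erpnext.host "whoami"` (the same test the script itself runs, as seen in the log) should succeed without any password prompt.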
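The script performs its own replication check at the end of the run (visible at the bottom of the sample log). If you later want to re-check by hand, something like the following sketch works, assuming key-based SSH to the slave and passwordless `sudo` access to its MariaDB root account:

```shell
# Manual health check of the slave (illustrative; hosts and users are examples).
# Both Slave_IO_Running and Slave_SQL_Running should report "Yes".
ssh adm@stg.erpnext.host 'sudo mysql -e "SHOW SLAVE STATUS\G"' \
  | grep -E 'Slave_IO_Running|Slave_SQL_Running|Last_IO_Error'
```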
42 | 
43 | ## After execution
44 | 
45 | The script `prepareMasterAndSlave.sh` does most of its work in `/dev/shm`, a standard shared-memory ramdisk. Rebooting destroys that content, but the home directory of the ERPNext user will have some leftover files, which the script does not purge yet.
46 | 
47 | Two new directories will be created in the Frappe Bench directory: `BaRe` and `BKP`. `BaRe` contains the backup and restore handlers. `BKP` contains backup archives and some pointer files.
48 | 
49 | `/etc/mysql/mariadb.conf.d/50-server.cnf` will have been patched.
50 | 
51 | Uncomplicated Firewall (`ufw`) will have a new rule allowing the slave host to connect to port 3306.
52 | 
53 | The slave user will be granted the replication privilege in the master MariaDB.
54 | 
55 | `xmlstarlet` and `jq` will be installed to facilitate extracting info from XML and JSON files.
56 | 
57 | The slave database will have been replaced by a complete copy of the master database.
58 | 
59 | Parts of the slave file `site_config.json` will have been altered.
60 | 
61 | `${HOME}/.profile` will contain a new line: "`export SUDO_ASKPASS=/home/admin/.ssh/.supwd.sh;`" and a new file, `.supwd.sh`, will be stored in `${HOME}/.ssh`. That file contains the sudo password for `${MASTER_HOST_USR}` or `${SLAVE_HOST_USR}` as appropriate.
62 | 
63 | 
64 | 
65 | 
66 | ## Log of a complete error-free execution
67 | 
68 | The following is a plain-text log of a single, complete, error-free execution.
69 | 
70 | For something a bit easier to read, look for screenshots of a colorized terminal session in the `docs` directory.
71 | 
72 | ```shell
73 | 
74 | you@xub22:~/projects/ERPNextFailOver$ ./prepareMasterAndSlave.sh;
75 | 
76 | 
77 | ----------------------------- Starting -----------------------------------
78 | 
79 | 
80 | Checking presence of 'xmlstarlet' tool.
81 | dpkg-query: no packages found matching xmlstarlet
82 | 
83 | * * * Do you accept to install 'xmlstarlet' ( https://en.wikipedia.org/wiki/XMLStarlet ) * * *
84 | Type 'y' to approve, or any other key to quit : y
85 | Ok.
86 | Reading package lists... Done
87 | Building dependency tree... Done
88 | Reading state information... Done
89 | The following packages were automatically installed and are no longer required:
90 |   libjq1 libonig5
91 | Use 'sudo apt autoremove' to remove them.
92 | The following NEW packages will be installed:
93 |   xmlstarlet
94 | 0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded.
95 | Need to get 265 kB of archives.
96 | After this operation, 631 kB of additional disk space will be used.
97 | Get:1 http://ca.archive.ubuntu.com/ubuntu jammy/universe amd64 xmlstarlet amd64 1.6.1-2.1 [265 kB] 98 | Fetched 265 kB in 1s (430 kB/s) 99 | Selecting previously unselected package xmlstarlet. 100 | (Reading database ... 178079 files and directories currently installed.) 101 | Preparing to unpack .../xmlstarlet_1.6.1-2.1_amd64.deb ... 102 | Unpacking xmlstarlet (1.6.1-2.1) ... 103 | Setting up xmlstarlet (1.6.1-2.1) ... 104 | Processing triggers for man-db (2.10.2-1) ... 105 | Processing triggers for doc-base (0.11.1) ... 106 | Processing 1 added doc-base file... 107 | 108 | - Installed 'xmlstarlet' 109 | 110 | 111 | Checking presence of 'jq' tool. 112 | dpkg-query: no packages found matching jq 113 | 114 | * * * Do you accept to install 'jq' * * * 115 | Type 'y' to approve, or any other key to quit : y 116 | Ok. 117 | Reading package lists... Done 118 | Building dependency tree... Done 119 | Reading state information... Done 120 | The following NEW packages will be installed: 121 | jq 122 | 0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded. 123 | Need to get 52.5 kB of archives. 124 | After this operation, 102 kB of additional disk space will be used. 125 | Get:1 http://ca.archive.ubuntu.com/ubuntu jammy/main amd64 jq amd64 1.6-2.1ubuntu3 [52.5 kB] 126 | Fetched 52.5 kB in 0s (179 kB/s) 127 | Selecting previously unselected package jq. 128 | (Reading database ... 178326 files and directories currently installed.) 129 | Preparing to unpack .../jq_1.6-2.1ubuntu3_amd64.deb ... 130 | Unpacking jq (1.6-2.1ubuntu3) ... 131 | Setting up jq (1.6-2.1ubuntu3) ... 132 | Processing triggers for man-db (2.10.2-1) ... 133 | 134 | - Installed 'jq' 135 | 136 | 137 | Loading dependencies ... 138 | - Sourced 'makeMasterTasks.sh' from 'prepareMasterAndSlave.sh' 139 | - Sourced 'makeMasterMariaDBconfPatch.sh' from 'prepareMasterAndSlave.sh' 140 | - Sourced 'makeMasterMariaDBScript.sh' from 'prepareMasterAndSlave.sh' 141 | - Sourced 'prepareMaster.sh' from 'prepareMasterAndSlave.sh' 142 | - Sourced 'makeSlaveTasks.sh' from 'prepareMasterAndSlave.sh' 143 | - Sourced 'makeSlaveMariaDBconfPatch.sh' from 'prepareMasterAndSlave.sh' 144 | - Sourced 'makeSlaveMariaDBScript.sh' from 'prepareMasterAndSlave.sh' 145 | - Sourced 'makeSlaveMariaDBScript.sh' from 'prepareMasterAndSlave.sh' 146 | - Sourced 'makeMariaDBRestartScript.sh' from 'prepareMasterAndSlave.sh' 147 | - Sourced 'makeAskPassEmitter.sh' from 'prepareMasterAndSlave.sh' 148 | - Sourced 'makeEnvarsFile.sh' from 'prepareMasterAndSlave.sh' 149 | 150 | Was host alias use specified? 151 | - Found 'ssh-agent' already running. 152 | 153 | Adding Master host PKI key to agent 154 | Enter passphrase for /home/you/.ssh/admin_loso_erpnext_host: 155 | Identity added: /home/you/.ssh/admin_loso_erpnext_host (water.iridium.blue@gmail.com) 156 | 157 | Adding Slave host PKI key to agent 158 | Enter passphrase for /home/you/.ssh/adm_stg_erpnext_host: 159 | Identity added: /home/you/.ssh/adm_stg_erpnext_host (X22_VM) 160 | 161 | Testing connectivity ... 
162 | - testing with command : 'ssh admin@loso.erpnext.host "whoami"' 163 | - testing with command : 'ssh adm@stg.erpnext.host "whoami"' 164 | 165 | No initial configuration errors found 166 | -- o 0 o -- 167 | 168 | Ready to prepare Master/Slave replication: 169 | - Master: 170 | - User: admin 171 | - Host: loso.erpnext.host has address 185.34.136.36 172 | - Slave: 173 | - User: adm 174 | - Host: stg.erpnext.host has address 85.239.234.6 175 | 176 | Press any key to proceed : 177 | | 178 | | 179 | V 180 | Making generic host-specific scripts 181 | - For Master 182 | - Making MariaDB restart script :: /dev/shm/M_work/restartMariaDB.sh 183 | - Making password emitter script :: /dev/shm/M_work/.supwd.sh 184 | - Making environment variables file for backup and restore functions (/dev/shm/M_work/BaRe/Master_envars.sh) 185 | - For Slave 186 | - Making MariaDB restart script :: /dev/shm/S_work/restartMariaDB.sh 187 | - Making password emitter script :: /dev/shm/S_work/.supwd.sh 188 | - Making environment variables file for backup and restore functions (/dev/shm/S_work/BaRe/Slave_envars.sh) 189 | 190 | 191 | Preparing master ... 192 | Moving backup and restore handlers 'handleBackup.sh' to transfer directory '/dev/shm/M_work' 193 | - Making Master Tasks script :: /dev/shm/M_work/masterTasks.sh. 194 | - Making MariaDB script :: '/dev/shm/M_work/setUpSlave.sql'. 195 | - Making MariaDB config patch :: '/dev/shm/M_work/master_50-server.cnf.patch'. 196 | 197 | Uploading Master tasks files 'M_work.tgz' to 'admin@loso.erpnext.host:/dev/shm'. 198 | Extracting content from uploaded file 'M_work.tgz' on Master. 199 | Executing script 'masterTasks.sh' on Master. 200 | - Testing 'SUDO_ASKPASS' capability. ( SUDO_ASKPASS = >< ) 201 | - Configuration allows ASKPASS creation. 202 | - Found password in configuration file. Trying uploaded ASK_PASS emmitter. 203 | - 'SUDO_ASKPASS' environment variable is correct. 204 | 205 | 206 | - Installing dependencies. 207 | dpkg-query: no packages found matching xmlstarlet 208 | Scanning processes... 209 | Scanning linux images... 210 | 211 | Running kernel seems to be up-to-date. 212 | 213 | No services need to be restarted. 214 | 215 | No containers need to be restarted. 216 | 217 | No user sessions are running outdated binaries. 218 | 219 | No VM guests are running outdated hypervisor (qemu) binaries on this host. 220 | 221 | - Installed xmlstarlet 222 | - Found jq already installed 223 | - Making ERPNext supervisor restart script :: '/home/admin/restartERPNextSupervisor.sh' 224 | - Checking Frappe Bench directory location :: '/home/admin/frappe-bench-LENH' 225 | - Moving Backup and Restore handlers from '/dev/shm/M_work/BaRe' to Frappe Bench directory 226 | - Stopping ERPNext on Master ... 
227 | 228 | electronic_vouchers-service-Logichem: stopped 229 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-schedule: stopped 230 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-cache: stopped 231 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-queue: stopped 232 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-socketio: stopped 233 | frappe-bench-LENH-web:frappe-bench-LENH-node-socketio: stopped 234 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-default-worker-0: stopped 235 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-short-worker-0: stopped 236 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-long-worker-0: stopped 237 | frappe-bench-LENH-web:frappe-bench-LENH-frappe-web: stopped 238 | 239 | Stopped 240 | 241 | - Configuring MariaDB Master for replication. (/etc/mysql/mariadb.conf.d/50-server.cnf) 242 | - Getting database name for site 'loso.erpnext.host' from '/home/admin/frappe-bench-LENH/sites/loso.erpnext.host/site_config.json'. 243 | - Providing 'binlog-do-db' with its value (_091b776d72ba8e16), in patch file '/dev/shm/M_work/master_50-server.cnf.patch'. 244 | - Patching '50-server.cnf' with '/dev/shm/M_work/master_50-server.cnf.patch'. 245 | 246 | patching file 50-server.cnf 247 | 248 | Patched 249 | 250 | - Restarting MariaDB 251 | - Taking backup of Master database ... 252 | 253 | 254 | Loading environment variables from '/home/admin/frappe-bench-LENH/BaRe/envars.sh ' 255 | 256 | - Backing up "Pre-replication baseline" for site loso.erpnext.host (in /home/admin/frappe-bench-LENH/BKP). 257 | - Saving database views constructors to site private files directory. (db: _091b776d72ba8e16) 258 | - Backup command is: 259 | ==> bench --site loso.erpnext.host backup --with-files > /dev/shm/backup_report.txt; 260 | - Will archive database (_091b776d72ba8e16) and files to /home/admin/frappe-bench-LENH/sites/loso.erpnext.host/private/backups 261 | - Will write log result to /dev/shm/backup_report.txt 262 | started ... 263 | ... done 264 | 265 | - Re-packaging database backup. 266 | - Comment :: "Pre-replication baseline" 267 | - Source : /home/admin/frappe-bench-LENH/sites/loso.erpnext.host/private/backups 268 | - Dest : /home/admin/frappe-bench-LENH/BKP 269 | - Name : 20221218_191404-loso_erpnext_host 270 | - Compression command is: 271 | ==> tar zcvf /home/admin/frappe-bench-LENH/BKP/20221218_191404-loso_erpnext_host.tgz ./20221218_191404-loso_erpnext_host* 272 | started ... 273 | ... done 274 | 275 | - The 5 most recent logged repackaging results in '/home/admin/frappe-bench-LENH/BKP/NotesForBackups.txt' are : 276 | Friday morning. :: 20221209_091609-loso_erpnext_host.tgz 277 | Saturday morning. :: 20221210_070824-loso_erpnext_host.tgz 278 | Wednesday morning :: 20221214_082704-loso_erpnext_host.tgz 279 | Thursday morning :: 20221215_063242-loso_erpnext_host.tgz 280 | Pre-replication baseline :: 20221218_191404-loso_erpnext_host.tgz 281 | 282 | Backup process completed! Elapsed time, 0h 0m 38s seconds 283 | - Backup name is : '20221218_191404-loso_erpnext_host.tgz' 284 | - Enabling Slave user access and reading status of Master 285 | - Log FILE :: mariadb-bin.000020 286 | - Log file POSITION :: 344 287 | - Restrict to DATABASE :: _091b776d72ba8e16 288 | - Open MySql port 3306 for remote host :: 85.239.234.6 289 | Rule added 290 | - Stopping MariaDB so that the backup can be restored on the Slave. 291 | - Packaging results into :: '/dev/shm/M_rslt.tgz' 292 | Purging temporary files from Master. 
293 | 294 | Completed remote job : '/dev/shm/M_work/masterTasks.sh'. 295 | 296 | 297 | Connection to loso.erpnext.host closed. 298 | Downloading Master status file 'M_rslt.tgz' to '/dev/shm'. 299 | 300 | Ready to 'prepareSlave' 301 | Preparing slave ... 302 | - Extracting Master status values 303 | - Log FILE :: mariadb-bin.000020 304 | - Log file POSITION :: 344 305 | - Moving backup and restore handlers 'handleBackup.sh' to transfer directory '/dev/shm/M_work' 306 | - Making Slave Tasks script :: /dev/shm/S_work/slaveTasks.sh 307 | - Copy backup of Master ('20221218_191404-loso_erpnext_host.tgz') to Slave work directory. 308 | - Making MariaDB script :: /dev/shm/S_work/setUpSlave.sql 309 | - Making MariaDB config patch :: '/dev/shm/S_work/50-server.cnf.patch'. 310 | - Packaging Slave work files ('S_work.tgz') from '/dev/shm/S_work' in '/dev/shm' ... 311 | - Purging existing Slave work files from 'adm@stg.erpnext.host:/dev/shm' 312 | - Uploading Slave work files 'S_work.tgz' to 'adm@stg.erpnext.host:/dev/shm' 313 | - Extracting content from uploaded file 'S_work.tgz' on Slave ... 314 | - Executing script 'slaveTasks.sh' on Slave 315 | - Testing 'SUDO_ASKPASS' capability. ( SUDO_ASKPASS = >< ) 316 | - Configuration allows ASKPASS creation. 317 | - Found password in configuration file. Trying uploaded ASK_PASS emmitter. 318 | - 'SUDO_ASKPASS' environment variable is correct. 319 | 320 | 321 | - Installing dependencies. 322 | dpkg-query: no packages found matching xmlstarlet 323 | Reading package lists... 324 | Building dependency tree... 325 | Reading state information... 326 | The following NEW packages will be installed: 327 | xmlstarlet 328 | 0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded. 329 | Need to get 265 kB of archives. 330 | After this operation, 631 kB of additional disk space will be used. 331 | Get:1 http://archive.ubuntu.com/ubuntu jammy/universe amd64 xmlstarlet amd64 1.6.1-2.1 [265 kB] 332 | Fetched 265 kB in 1s (284 kB/s) 333 | Selecting previously unselected package xmlstarlet. 334 | (Reading database ... 125258 files and directories currently installed.) 335 | Preparing to unpack .../xmlstarlet_1.6.1-2.1_amd64.deb ... 336 | Unpacking xmlstarlet (1.6.1-2.1) ... 337 | Setting up xmlstarlet (1.6.1-2.1) ... 338 | Processing triggers for man-db (2.10.2-1) ... 339 | 340 | Running kernel seems to be up-to-date. 341 | 342 | No services need to be restarted. 343 | 344 | No containers need to be restarted. 345 | 346 | No user sessions are running outdated binaries. 347 | 348 | No VM guests are running outdated hypervisor (qemu) binaries on this host. 349 | 350 | - Installed xmlstarlet 351 | - Found jq already installed 352 | - Checking Frappe Bench directory location :: '/home/adm/frappe-bench-SERPHT' 353 | - Moving Backup and Restore handlers from '/dev/shm/S_work/BaRe' to Frappe Bench directory 354 | - Stopping ERPNext on Slave ... 
355 | electronic_vouchers-service-Logichem: stopped 356 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-cache: stopped 357 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-socketio: stopped 358 | frappe-bench-SERPHT-web:frappe-bench-SERPHT-node-socketio: stopped 359 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-schedule: stopped 360 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-queue: stopped 361 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-short-worker-0: stopped 362 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-default-worker-0: stopped 363 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-long-worker-0: stopped 364 | frappe-bench-SERPHT-web:frappe-bench-SERPHT-frappe-web: stopped 365 | 366 | Stopped 367 | 368 | - Move backup files from '/dev/shm/S_work' to backup directory '/home/adm/frappe-bench-SERPHT/BKP' 369 | Moving ... 370 | - 'BACKUP.txt' 371 | - '20221218_191404-loso_erpnext_host.tgz' 372 | 373 | - Ensuring MariaDB is running 374 | SCRIPT_DIR /home/adm/frappe-bench-SERPHT/BaRe 375 | CURR_SCRIPT_DIR /home/adm/frappe-bench-SERPHT/BaRe 376 | SCRIPT_NAME handleRestore.sh 377 | THIS_SCRIPT handleRestore.sh 378 | 379 | 380 | Loading environment variables from '/home/adm/frappe-bench-SERPHT/BaRe/envars.sh ' 381 | 382 | - Restoring backup ... 383 | - File locations used: 384 | - SITE_PATH = sites/stg.erpnext.host 385 | - PRIVATE_PATH = sites/stg.erpnext.host/sites/stg.erpnext.host/private 386 | - BACKUPS_PATH = sites/stg.erpnext.host/sites/stg.erpnext.host/private/backups 387 | - FILES_PATH = sites/stg.erpnext.host/sites/stg.erpnext.host/private/files 388 | 389 | - SITE_ALIAS = stg_erpnext_host 390 | - TMP_BACKUP_DIR = /dev/shm/BKP 391 | - BACKUP_DIR = /home/adm/frappe-bench-SERPHT/BKP 392 | - BACKUP_FILE_NAME_HOLDER = /home/adm/frappe-bench-SERPHT/BKP/BACKUP.txt 393 | 394 | Got MariaDB password from '/home/adm/frappe-bench-SERPHT/BaRe/../sites/stg.erpnext.host/site_config.json'. 395 | - Ensuring work directory exists 396 | - Getting backup file name from name holder file: '/home/adm/frappe-bench-SERPHT/BKP/BACKUP.txt' 397 | - Process archive file: '20221218_191404-loso_erpnext_host.tgz' into '/dev/shm/BKP'. 398 | - Does site name, 'loso_erpnext_host', extracted from backup file full name, match this site 'stg_erpnext_host' ?? 399 | The backup is from a different ERPNext site. 400 | Will rename all backup files ... 401 | - '20221218_191404-loso_erpnext_host-database.sql' becomes '20221218_191404-stg_erpnext_host-database.sql'. 402 | - '20221218_191404-loso_erpnext_host-files.tar' becomes '20221218_191404-stg_erpnext_host-files.tar'. 403 | - '20221218_191404-loso_erpnext_host-private-files.tar' becomes '20221218_191404-stg_erpnext_host-private-files.tar'. 404 | - '20221218_191404-loso_erpnext_host-site_config_backup.json' becomes '20221218_191404-stg_erpnext_host-site_config_backup.json'. 405 | - patch site name with sed. --> '20221218_191404-stg_erpnext_host-site_config_backup.json' from 'loso.erpnext.host' to 'stg.erpnext.host' 406 | - Creating new package from repackaged contents of '20221218_191404-loso_erpnext_host.tgz'. 407 | Resulting file is - 408 | - /home/adm/frappe-bench-SERPHT/BKP/20221218_191404-stg_erpnext_host.tgz 409 | - Writing new package file name into file name holder : '/home/adm/frappe-bench-SERPHT/BKP/BACKUP.txt'. 410 | - Commencing decompression. 
Command is: 411 | tar zxvf /home/adm/frappe-bench-SERPHT/BKP/20221218_191404-loso_erpnext_host.tgz 412 | ./20221218_191404-loso_erpnext_host-database.sql.gz 413 | ./20221218_191404-loso_erpnext_host-files.tar 414 | ./20221218_191404-loso_erpnext_host-private-files.tar 415 | ./20221218_191404-loso_erpnext_host-site_config_backup.json 416 | 417 | - Backup to be restored: /dev/shm/BKP/20221218_191404-stg_erpnext_host* 418 | - Should 'site_config.json' of 'stg.erpnext.host' be overwritten? 419 | Restore parameters file = 'yes' 420 | - Creating dated safety copy of 'site_config.json' :: site_config_2022-12-19_01.16.json. 421 | - Should 'db_password' of site 'stg.erpnext.host' be overwritten? 422 | Keep current database password = 'yes' 423 | Writing current database password into new site configuration '/dev/shm/BKP/20221218_191404-stg_erpnext_host-site_config_backup.json'. 424 | - Overwriting './sites/stg.erpnext.host/site_config.json' with site_config.json from backup. 425 | 426 | - Restoring database _091b776d72ba8e16. Command is: 427 | ==> bench --site stg.erpnext.host --force restore --mariadb-root-password ******** \ 428 | --with-public-files /dev/shm/BKP/20221218_191404-stg_erpnext_host-files.tar \ 429 | --with-private-files /dev/shm/BKP/20221218_191404-stg_erpnext_host-private-files.tar \ 430 | /dev/shm/BKP/20221218_191404-stg_erpnext_host-database.sql.gz 431 | started ... 432 | *** Scheduler is disabled *** 433 | Site stg.erpnext.host has been restored with files 434 | ... restored 435 | 436 | - Restoring database views 437 | started ... 438 | ... restored 439 | 440 | - Restarting ERPNext 441 | electronic_vouchers-service-Logichem: started 442 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-cache: started 443 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-queue: started 444 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-socketio: started 445 | frappe-bench-SERPHT-web:frappe-bench-SERPHT-frappe-web: started 446 | frappe-bench-SERPHT-web:frappe-bench-SERPHT-node-socketio: started 447 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-schedule: started 448 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-default-worker-0: started 449 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-short-worker-0: started 450 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-long-worker-0: started 451 | restarted 452 | 453 | 454 | Restore completed. Elapsed time, 0h 1m 44s seconds 455 | - Configuring MariaDB Slave for replication 456 | - Patching '50-server.cnf' with '/dev/shm/S_work/master_50-server.cnf.patch' 457 | - Restarting MariaDB 458 | - Enabling Slave connection to Master 459 | - Purging temporary files from Slave. *** SKIPPED *** 460 | 461 | Completed remote job : '/dev/shm/S_work/slaveTasks.sh'. 462 | 463 | 464 | - Finished with Slave. 465 | 466 | - Restarting MariaDB for user 'admin' on Master host 'loso.erpnext.host' 467 | Active: active (running) since Mon 2022-12-19 01:17:44 CET; 51ms ago 468 | - Restarting MariaDB for user 'adm' on Slave host 'stg.erpnext.host' 469 | Active: active (running) since Mon 2022-12-19 01:17:51 CET; 22ms ago 470 | 471 | 472 | Sleeping for 75 seconds, before checking slave status. 473 | Found slave status to be ... 474 | Master_Log_File: mariadb-bin.000021 475 | Read_Master_Log_Pos: 344 476 | Slave_IO_Running: Yes 477 | Slave_SQL_Running: Yes 478 | Last_IO_Error: 479 | Slave_SQL_Running_State: Slave has read all relay log; waiting for more updates 480 | 481 | Restarting ERPNext on Master ... 
482 | electronic_vouchers-service-Logichem: started 483 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-cache: started 484 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-queue: started 485 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-socketio: started 486 | frappe-bench-LENH-web:frappe-bench-LENH-frappe-web: started 487 | frappe-bench-LENH-web:frappe-bench-LENH-node-socketio: started 488 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-schedule: started 489 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-default-worker-0: started 490 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-short-worker-0: started 491 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-long-worker-0: started 492 | ------------------------------ Finished ---------------------------------- 493 | you@xub22:~/projects/ERPNextFailOver$ 494 | 495 | ``` 496 | 497 | -------------------------------------------------------------------------------- /config.sample: -------------------------------------------------------------------------------- 1 | ##### For configuring loso.erpnext.host ( Digital Ocean :: 24854gd2 ) 2 | # ----------------------------------------------------------------------- 3 | # Alias configuration: 'lenh' «begins» 4 | # Alias 'lenh' binds to remote user 'admin@loso.erpnext.host' 5 | Host lenh 6 | User admin 7 | HostName loso.erpnext.host 8 | ServerAliveInterval 120 9 | ServerAliveCountMax 20 10 | IdentityFile /home/you/.ssh/admin_loso_erpnext_host 11 | # Alias configuration: 'lenh' «ends» 12 | 13 | ##### For configuring stg.erpnext.host ( Contabo :: 24854gd2 ) 14 | # ----------------------------------------------------------------------- 15 | # Alias configuration: 'serpht' «begins» 16 | # Alias 'serpht' binds to remote user 'adm@stg.erpnext.host' 17 | Host serpht 18 | User adm 19 | HostName stg.erpnext.host 20 | ServerAliveInterval 120 21 | ServerAliveCountMax 20 22 | IdentityFile /home/you/.ssh/adm_stg_erpnext_host 23 | # Alias configuration: 'serpht' «ends» 24 | 25 | -------------------------------------------------------------------------------- /docs/ExecutionLog.txt: -------------------------------------------------------------------------------- 1 | 2 | you@xub22:~/projects/ERPNextFailOver$ ./prepareMasterAndSlave.sh; 3 | 4 | 5 | ----------------------------- Starting ----------------------------------- 6 | 7 | 8 | Checking presence of 'xmlstarlet' tool. 9 | dpkg-query: no packages found matching xmlstarlet 10 | 11 | * * * Do you accept to install 'xmlstarlet' ( https://en.wikipedia.org/wiki/XMLStarlet ) * * * 12 | Type 'y' to approve, or any other key to quit : y 13 | Ok. 14 | Reading package lists... Done 15 | Building dependency tree... Done 16 | Reading state information... Done 17 | The following packages were automatically installed and are no longer required: 18 | libjq1 libonig5 19 | Use 'sudo apt autoremove' to remove them. 20 | The following NEW packages will be installed: 21 | xmlstarlet 22 | 0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded. 23 | Need to get 265 kB of archives. 24 | After this operation, 631 kB of additional disk space will be used. 25 | Get:1 http://ca.archive.ubuntu.com/ubuntu jammy/universe amd64 xmlstarlet amd64 1.6.1-2.1 [265 kB] 26 | Fetched 265 kB in 1s (430 kB/s) 27 | Selecting previously unselected package xmlstarlet. 28 | (Reading database ... 178079 files and directories currently installed.) 29 | Preparing to unpack .../xmlstarlet_1.6.1-2.1_amd64.deb ... 30 | Unpacking xmlstarlet (1.6.1-2.1) ... 
31 | Setting up xmlstarlet (1.6.1-2.1) ... 32 | Processing triggers for man-db (2.10.2-1) ... 33 | Processing triggers for doc-base (0.11.1) ... 34 | Processing 1 added doc-base file... 35 | 36 | - Installed 'xmlstarlet' 37 | 38 | 39 | Checking presence of 'jq' tool. 40 | dpkg-query: no packages found matching jq 41 | 42 | * * * Do you accept to install 'jq' * * * 43 | Type 'y' to approve, or any other key to quit : y 44 | Ok. 45 | Reading package lists... Done 46 | Building dependency tree... Done 47 | Reading state information... Done 48 | The following NEW packages will be installed: 49 | jq 50 | 0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded. 51 | Need to get 52.5 kB of archives. 52 | After this operation, 102 kB of additional disk space will be used. 53 | Get:1 http://ca.archive.ubuntu.com/ubuntu jammy/main amd64 jq amd64 1.6-2.1ubuntu3 [52.5 kB] 54 | Fetched 52.5 kB in 0s (179 kB/s) 55 | Selecting previously unselected package jq. 56 | (Reading database ... 178326 files and directories currently installed.) 57 | Preparing to unpack .../jq_1.6-2.1ubuntu3_amd64.deb ... 58 | Unpacking jq (1.6-2.1ubuntu3) ... 59 | Setting up jq (1.6-2.1ubuntu3) ... 60 | Processing triggers for man-db (2.10.2-1) ... 61 | 62 | - Installed 'jq' 63 | 64 | 65 | Loading dependencies ... 66 | - Sourced 'makeMasterTasks.sh' from 'prepareMasterAndSlave.sh' 67 | - Sourced 'makeMasterMariaDBconfPatch.sh' from 'prepareMasterAndSlave.sh' 68 | - Sourced 'makeMasterMariaDBScript.sh' from 'prepareMasterAndSlave.sh' 69 | - Sourced 'prepareMaster.sh' from 'prepareMasterAndSlave.sh' 70 | - Sourced 'makeSlaveTasks.sh' from 'prepareMasterAndSlave.sh' 71 | - Sourced 'makeSlaveMariaDBconfPatch.sh' from 'prepareMasterAndSlave.sh' 72 | - Sourced 'makeSlaveMariaDBScript.sh' from 'prepareMasterAndSlave.sh' 73 | - Sourced 'makeSlaveMariaDBScript.sh' from 'prepareMasterAndSlave.sh' 74 | - Sourced 'makeMariaDBRestartScript.sh' from 'prepareMasterAndSlave.sh' 75 | - Sourced 'makeAskPassEmitter.sh' from 'prepareMasterAndSlave.sh' 76 | - Sourced 'makeEnvarsFile.sh' from 'prepareMasterAndSlave.sh' 77 | 78 | Was host alias use specified? 79 | - Found 'ssh-agent' already running. 80 | 81 | Adding Master host PKI key to agent 82 | Enter passphrase for /home/you/.ssh/admin_loso_erpnext_host: 83 | Identity added: /home/you/.ssh/admin_loso_erpnext_host (water.iridium.blue@gmail.com) 84 | 85 | Adding Slave host PKI key to agent 86 | Enter passphrase for /home/you/.ssh/adm_stg_erpnext_host: 87 | Identity added: /home/you/.ssh/adm_stg_erpnext_host (X22_VM) 88 | 89 | Testing connectivity ...' 
90 | - testing with command : 'ssh admin@loso.erpnext.host "whoami"' 91 | - testing with command : 'ssh adm@stg.erpnext.host "whoami"' 92 | 93 | No initial configuration errors found 94 | -- o 0 o -- 95 | 96 | Ready to prepare Master/Slave replication: 97 | - Master: 98 | - User: admin 99 | - Host: loso.erpnext.host has address 185.34.136.36 100 | - Slave: 101 | - User: adm 102 | - Host: stg.erpnext.host has address 85.239.234.6 103 | 104 | Press any key to proceed : 105 | | 106 | | 107 | V 108 | Making generic host-specific scripts 109 | - For Master 110 | - Making MariaDB restart script :: /dev/shm/M_work/restartMariaDB.sh 111 | - Making password emitter script :: /dev/shm/M_work/.supwd.sh 112 | - Making environment variables file for backup and restore functions (/dev/shm/M_work/BaRe/Master_envars.sh) 113 | - For Slave 114 | - Making MariaDB restart script :: /dev/shm/S_work/restartMariaDB.sh 115 | - Making password emitter script :: /dev/shm/S_work/.supwd.sh 116 | - Making environment variables file for backup and restore functions (/dev/shm/S_work/BaRe/Slave_envars.sh) 117 | 118 | 119 | Preparing master ... 120 | Moving backup and restore handlers 'handleBackup.sh' to transfer directory '/dev/shm/M_work' 121 | - Making Master Tasks script :: /dev/shm/M_work/masterTasks.sh. 122 | - Making MariaDB script :: '/dev/shm/M_work/setUpSlave.sql'. 123 | - Making MariaDB config patch :: '/dev/shm/M_work/master_50-server.cnf.patch'. 124 | 125 | Uploading Master tasks files 'M_work.tgz' to 'admin@loso.erpnext.host:/dev/shm'. 126 | Extracting content from uploaded file 'M_work.tgz' on Master. 127 | Executing script 'masterTasks.sh' on Master. 128 | - Testing 'SUDO_ASKPASS' capability. ( SUDO_ASKPASS = >< ) 129 | - Configuration allows ASKPASS creation. 130 | - Found password in configuration file. Trying uploaded ASK_PASS emmitter. 131 | - 'SUDO_ASKPASS' environment variable is correct. 132 | 133 | 134 | - Installing dependencies. 135 | dpkg-query: no packages found matching xmlstarlet 136 | Scanning processes... 137 | Scanning linux images... 138 | 139 | Running kernel seems to be up-to-date. 140 | 141 | No services need to be restarted. 142 | 143 | No containers need to be restarted. 144 | 145 | No user sessions are running outdated binaries. 146 | 147 | No VM guests are running outdated hypervisor (qemu) binaries on this host. 148 | 149 | - Installed xmlstarlet 150 | - Found jq already installed 151 | - Making ERPNext supervisor restart script :: '/home/admin/restartERPNextSupervisor.sh' 152 | - Checking Frappe Bench directory location :: '/home/admin/frappe-bench-LENH' 153 | - Moving Backup and Restore handlers from '/dev/shm/M_work/BaRe' to Frappe Bench directory 154 | - Stopping ERPNext on Master ... 155 | 156 | electronic_vouchers-service-Logichem: stopped 157 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-schedule: stopped 158 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-cache: stopped 159 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-queue: stopped 160 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-socketio: stopped 161 | frappe-bench-LENH-web:frappe-bench-LENH-node-socketio: stopped 162 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-default-worker-0: stopped 163 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-short-worker-0: stopped 164 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-long-worker-0: stopped 165 | frappe-bench-LENH-web:frappe-bench-LENH-frappe-web: stopped 166 | 167 | Stopped 168 | 169 | - Configuring MariaDB Master for replication. 
(/etc/mysql/mariadb.conf.d//50-server.cnf) 170 | - Getting database name for site 'loso.erpnext.host' from '/home/admin/frappe-bench-LENH/sites/loso.erpnext.host/site_config.json'. 171 | - Providing 'binlog-do-db' with its value (_091b776d72ba8e16), in patch file '/dev/shm/M_work/master_50-server.cnf.patch'. 172 | - Patching '50-server.cnf' with '/dev/shm/M_work/master_50-server.cnf.patch'. 173 | 174 | patching file 50-server.cnf 175 | 176 | Patched 177 | 178 | - Restarting MariaDB 179 | - Taking backup of Master database ... 180 | 181 | 182 | Loading environment variables from '/home/admin/frappe-bench-LENH/BaRe/envars.sh ' 183 | 184 | - Backing up "Pre-replication baseline" for site loso.erpnext.host (in /home/admin/frappe-bench-LENH/BKP). 185 | - Saving database views constructors to site private files directory. (db: _091b776d72ba8e16) 186 | - Backup command is: 187 | ==> bench --site loso.erpnext.host backup --with-files > /dev/shm/backup_report.txt; 188 | - Will archive database (_091b776d72ba8e16) and files to /home/admin/frappe-bench-LENH/sites/loso.erpnext.host/private/backups 189 | - Will write log result to /dev/shm/backup_report.txt 190 | started ... 191 | ... done 192 | 193 | - Re-packaging database backup. 194 | - Comment :: "Pre-replication baseline" 195 | - Source : /home/admin/frappe-bench-LENH/sites/loso.erpnext.host/private/backups 196 | - Dest : /home/admin/frappe-bench-LENH/BKP 197 | - Name : 20221218_191404-loso_erpnext_host 198 | - Compression command is: 199 | ==> tar zcvf /home/admin/frappe-bench-LENH/BKP/20221218_191404-loso_erpnext_host.tgz ./20221218_191404-loso_erpnext_host* 200 | started ... 201 | ... done 202 | 203 | - The 5 most recent logged repackaging results in '/home/admin/frappe-bench-LENH/BKP/NotesForBackups.txt' are : 204 | Friday morning. :: 20221209_091609-loso_erpnext_host.tgz 205 | Saturday morning. :: 20221210_070824-loso_erpnext_host.tgz 206 | Wednesday morning :: 20221214_082704-loso_erpnext_host.tgz 207 | Thursday morning :: 20221215_063242-loso_erpnext_host.tgz 208 | Pre-replication baseline :: 20221218_191404-loso_erpnext_host.tgz 209 | 210 | Backup process completed! Elapsed time, 0h 0m 38s seconds 211 | - Backup name is : '20221218_191404-loso_erpnext_host.tgz' 212 | - Enabling Slave user access and reading status of Master 213 | - Log FILE :: mariadb-bin.000020 214 | - Log file POSITION :: 344 215 | - Restrict to DATABASE :: _091b776d72ba8e16 216 | - Open MySql port 3306 for remote host :: 85.239.234.6 217 | Rule added 218 | - Stopping MariaDB so that the backup can be restored on the Slave. 219 | - Packaging results into :: '/dev/shm/M_rslt.tgz' 220 | Purging temporary files from Master. 221 | 222 | Completed remote job : '/dev/shm/M_work/masterTasks.sh'. 223 | 224 | 225 | Connection to loso.erpnext.host closed. 226 | Downloading Master status file 'M_rslt.tgz' to '/dev/shm'. 227 | 228 | Ready to 'prepareSlave' 229 | Preparing slave ... 230 | - Extracting Master status values 231 | - Log FILE :: mariadb-bin.000020 232 | - Log file POSITION :: 344 233 | - Moving backup and restore handlers 'handleBackup.sh' to transfer directory '/dev/shm/M_work' 234 | - Making Slave Tasks script :: /dev/shm/S_work/slaveTasks.sh 235 | - Copy backup of Master ('20221218_191404-loso_erpnext_host.tgz') to Slave work directory. 236 | - Making MariaDB script :: /dev/shm/S_work/setUpSlave.sql 237 | - Making MariaDB config patch :: '/dev/shm/S_work/50-server.cnf.patch'. 
238 | 239 | - Packaging Slave work files ('S_work.tgz') from '/dev/shm/S_work' in '/dev/shm' ... 240 | - Purging existing Slave work files from 'adm@stg.erpnext.host:/dev/shm' 241 | - Uploading Slave work files 'S_work.tgz' to 'adm@stg.erpnext.host:/dev/shm' 242 | - Extracting content from uploaded file 'S_work.tgz' on Slave ... 243 | - Executing script 'slaveTasks.sh' on Slave 244 | - Testing 'SUDO_ASKPASS' capability. ( SUDO_ASKPASS = >< ) 245 | - Configuration allows ASKPASS creation. 246 | - Found password in configuration file. Trying uploaded ASK_PASS emmitter. 247 | - 'SUDO_ASKPASS' environment variable is correct. 248 | 249 | 250 | - Installing dependencies. 251 | dpkg-query: no packages found matching xmlstarlet 252 | Reading package lists... 253 | Building dependency tree... 254 | Reading state information... 255 | The following NEW packages will be installed: 256 | xmlstarlet 257 | 0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded. 258 | Need to get 265 kB of archives. 259 | After this operation, 631 kB of additional disk space will be used. 260 | Get:1 http://archive.ubuntu.com/ubuntu jammy/universe amd64 xmlstarlet amd64 1.6.1-2.1 [265 kB] 261 | Fetched 265 kB in 1s (284 kB/s) 262 | Selecting previously unselected package xmlstarlet. 263 | (Reading database ... 125258 files and directories currently installed.) 264 | Preparing to unpack .../xmlstarlet_1.6.1-2.1_amd64.deb ... 265 | Unpacking xmlstarlet (1.6.1-2.1) ... 266 | Setting up xmlstarlet (1.6.1-2.1) ... 267 | Processing triggers for man-db (2.10.2-1) ... 268 | 269 | Running kernel seems to be up-to-date. 270 | 271 | No services need to be restarted. 272 | 273 | No containers need to be restarted. 274 | 275 | No user sessions are running outdated binaries. 276 | 277 | No VM guests are running outdated hypervisor (qemu) binaries on this host. 278 | 279 | - Installed xmlstarlet 280 | - Found jq already installed 281 | - Checking Frappe Bench directory location :: '/home/adm/frappe-bench-SERPHT' 282 | - Moving Backup and Restore handlers from '/dev/shm/S_work/BaRe' to Frappe Bench directory 283 | - Stopping ERPNext on Slave ... 284 | electronic_vouchers-service-Logichem: stopped 285 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-cache: stopped 286 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-socketio: stopped 287 | frappe-bench-SERPHT-web:frappe-bench-SERPHT-node-socketio: stopped 288 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-schedule: stopped 289 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-queue: stopped 290 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-short-worker-0: stopped 291 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-default-worker-0: stopped 292 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-long-worker-0: stopped 293 | frappe-bench-SERPHT-web:frappe-bench-SERPHT-frappe-web: stopped 294 | 295 | Stopped 296 | 297 | - Move backup files from '/dev/shm/S_work' to backup directory '/home/adm/frappe-bench-SERPHT/BKP' 298 | Moving ... 299 | - 'BACKUP.txt' 300 | - '20221218_191404-loso_erpnext_host.tgz' 301 | 302 | - Ensuring MariaDB is running 303 | SCRIPT_DIR /home/adm/frappe-bench-SERPHT/BaRe 304 | CURR_SCRIPT_DIR /home/adm/frappe-bench-SERPHT/BaRe 305 | SCRIPT_NAME handleRestore.sh 306 | THIS_SCRIPT handleRestore.sh 307 | 308 | 309 | Loading environment variables from '/home/adm/frappe-bench-SERPHT/BaRe/envars.sh ' 310 | 311 | - Restoring backup ... 
312 | - File locations used: 313 | - SITE_PATH = sites/stg.erpnext.host 314 | - PRIVATE_PATH = sites/stg.erpnext.host/sites/stg.erpnext.host/private 315 | - BACKUPS_PATH = sites/stg.erpnext.host/sites/stg.erpnext.host/private/backups 316 | - FILES_PATH = sites/stg.erpnext.host/sites/stg.erpnext.host/private/files 317 | 318 | - SITE_ALIAS = stg_erpnext_host 319 | - TMP_BACKUP_DIR = /dev/shm/BKP 320 | - BACKUP_DIR = /home/adm/frappe-bench-SERPHT/BKP 321 | - BACKUP_FILE_NAME_HOLDER = /home/adm/frappe-bench-SERPHT/BKP/BACKUP.txt 322 | 323 | Got MariaDB password from '/home/adm/frappe-bench-SERPHT/BaRe/../sites/stg.erpnext.host/site_config.json'. 324 | - Ensuring work directory exists 325 | - Getting backup file name from name holder file: '/home/adm/frappe-bench-SERPHT/BKP/BACKUP.txt' 326 | - Process archive file: '20221218_191404-loso_erpnext_host.tgz' into '/dev/shm/BKP'. 327 | - Does site name, 'loso_erpnext_host', extracted from backup file full name, match this site 'stg_erpnext_host' ?? 328 | The backup is from a different ERPNext site. 329 | Will rename all backup files ... 330 | - '20221218_191404-loso_erpnext_host-database.sql' becomes '20221218_191404-stg_erpnext_host-database.sql'. 331 | - '20221218_191404-loso_erpnext_host-files.tar' becomes '20221218_191404-stg_erpnext_host-files.tar'. 332 | - '20221218_191404-loso_erpnext_host-private-files.tar' becomes '20221218_191404-stg_erpnext_host-private-files.tar'. 333 | - '20221218_191404-loso_erpnext_host-site_config_backup.json' becomes '20221218_191404-stg_erpnext_host-site_config_backup.json'. 334 | - patch site name with sed. --> '20221218_191404-stg_erpnext_host-site_config_backup.json' from 'loso.erpnext.host' to 'stg.erpnext.host' 335 | - Creating new package from repackaged contents of '20221218_191404-loso_erpnext_host.tgz'. 336 | Resulting file is - 337 | - /home/adm/frappe-bench-SERPHT/BKP/20221218_191404-stg_erpnext_host.tgz 338 | - Writing new package file name into file name holder : '/home/adm/frappe-bench-SERPHT/BKP/BACKUP.txt'. 339 | - Commencing decompression. Command is: 340 | tar zxvf /home/adm/frappe-bench-SERPHT/BKP/20221218_191404-loso_erpnext_host.tgz 341 | ./20221218_191404-loso_erpnext_host-database.sql.gz 342 | ./20221218_191404-loso_erpnext_host-files.tar 343 | ./20221218_191404-loso_erpnext_host-private-files.tar 344 | ./20221218_191404-loso_erpnext_host-site_config_backup.json 345 | 346 | - Backup to be restored: /dev/shm/BKP/20221218_191404-stg_erpnext_host* 347 | - Should 'site_config.json' of 'stg.erpnext.host' be overwritten? 348 | Restore parameters file = 'yes' 349 | - Creating dated safety copy of 'site_config.json' :: site_config_2022-12-19_01.16.json. 350 | - Should 'db_password' of site 'stg.erpnext.host' be overwritten? 351 | Keep current database password = 'yes' 352 | Writing current database password into new site configuration '/dev/shm/BKP/20221218_191404-stg_erpnext_host-site_config_backup.json'. 353 | - Overwriting './sites/stg.erpnext.host/site_config.json' with site_config.json from backup. 354 | 355 | - Restoring database _091b776d72ba8e16. Command is: 356 | ==> bench --site stg.erpnext.host --force restore --mariadb-root-password ******** \ 357 | --with-public-files /dev/shm/BKP/20221218_191404-stg_erpnext_host-files.tar \ 358 | --with-private-files /dev/shm/BKP/20221218_191404-stg_erpnext_host-private-files.tar \ 359 | /dev/shm/BKP/20221218_191404-stg_erpnext_host-database.sql.gz 360 | started ... 
361 | *** Scheduler is disabled *** 362 | Site stg.erpnext.host has been restored with files 363 | ... restored 364 | 365 | - Restoring database views 366 | started ... 367 | ... restored 368 | 369 | - Restarting ERPNext 370 | electronic_vouchers-service-Logichem: started 371 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-cache: started 372 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-queue: started 373 | frappe-bench-SERPHT-redis:frappe-bench-SERPHT-redis-socketio: started 374 | frappe-bench-SERPHT-web:frappe-bench-SERPHT-frappe-web: started 375 | frappe-bench-SERPHT-web:frappe-bench-SERPHT-node-socketio: started 376 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-schedule: started 377 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-default-worker-0: started 378 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-short-worker-0: started 379 | frappe-bench-SERPHT-workers:frappe-bench-SERPHT-frappe-long-worker-0: started 380 | restarted 381 | 382 | 383 | Restore completed. Elapsed time, 0h 1m 44s seconds 384 | - Configuring MariaDB Slave for replication 385 | - Patching '50-server.cnf' with '/dev/shm/S_work/master_50-server.cnf.patch' 386 | - Restarting MariaDB 387 | - Enabling Slave connection to Master 388 | - Purging temporary files from Slave. *** SKIPPED *** 389 | 390 | Completed remote job : '/dev/shm/S_work/slaveTasks.sh'. 391 | 392 | 393 | - Finished with Slave. 394 | 395 | - Restarting MariaDB for user 'admin' on Master host 'loso.erpnext.host' 396 | Active: active (running) since Mon 2022-12-19 01:17:44 CET; 51ms ago 397 | - Restarting MariaDB for user 'adm' on Slave host 'stg.erpnext.host' 398 | Active: active (running) since Mon 2022-12-19 01:17:51 CET; 22ms ago 399 | 400 | 401 | Sleeping for 75 seconds, before checking slave status. 402 | Found slave status to be ... 403 | Master_Log_File: mariadb-bin.000021 404 | Read_Master_Log_Pos: 344 405 | Slave_IO_Running: Yes 406 | Slave_SQL_Running: Yes 407 | Last_IO_Error: 408 | Slave_SQL_Running_State: Slave has read all relay log; waiting for more updates 409 | 410 | Restarting ERPNext on Master ... 
411 | electronic_vouchers-service-Logichem: started 412 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-cache: started 413 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-queue: started 414 | frappe-bench-LENH-redis:frappe-bench-LENH-redis-socketio: started 415 | frappe-bench-LENH-web:frappe-bench-LENH-frappe-web: started 416 | frappe-bench-LENH-web:frappe-bench-LENH-node-socketio: started 417 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-schedule: started 418 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-default-worker-0: started 419 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-short-worker-0: started 420 | frappe-bench-LENH-workers:frappe-bench-LENH-frappe-long-worker-0: started 421 | ------------------------------ Finished ---------------------------------- 422 | you@xub22:~/projects/ERPNextFailOver$ 423 | -------------------------------------------------------------------------------- /docs/ExecutionLog_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/martinhbramwell/ERPNextFailOver/4fd39c009d45b3c3e702292823fb341dfea5da31/docs/ExecutionLog_1.png -------------------------------------------------------------------------------- /docs/ExecutionLog_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/martinhbramwell/ERPNextFailOver/4fd39c009d45b3c3e702292823fb341dfea5da31/docs/ExecutionLog_2.png -------------------------------------------------------------------------------- /docs/ExecutionLog_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/martinhbramwell/ERPNextFailOver/4fd39c009d45b3c3e702292823fb341dfea5da31/docs/ExecutionLog_3.png -------------------------------------------------------------------------------- /docs/ExecutionLog_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/martinhbramwell/ERPNextFailOver/4fd39c009d45b3c3e702292823fb341dfea5da31/docs/ExecutionLog_4.png -------------------------------------------------------------------------------- /docs/ExecutionLog_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/martinhbramwell/ERPNextFailOver/4fd39c009d45b3c3e702292823fb341dfea5da31/docs/ExecutionLog_5.png -------------------------------------------------------------------------------- /docs/ExecutionLog_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/martinhbramwell/ERPNextFailOver/4fd39c009d45b3c3e702292823fb341dfea5da31/docs/ExecutionLog_6.png -------------------------------------------------------------------------------- /envars.example.sh: -------------------------------------------------------------------------------- 1 | ######################################### 2 | ### ### 3 | ### Replication Parameters ### 4 | ### ### 5 | ######################################### 6 | 7 | ############################### 8 | ### ### 9 | ### Common ### 10 | ### ### 11 | ############################### 12 | 13 | export USE_HOST_ALIAS="no"; # If "yes", use *_HOST_ALIAS instead of *_HOST_URL, *_HOST_USR & *_HOST_KEY 14 | export ALLOW_SUDO_ASKPASS_CREATION="yes"; # If "yes", a temporary SUDO_ASKPASS environment variable will be created in '/dev/shm' 15 | # which avoids typing passwords every time 16 | 17 | 18 | 
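# Illustration only (a sketch inferred from 'config.sample' and 'prepareMasterAndSlave.sh',
# not a setting): USE_HOST_ALIAS simply changes how the scripts address each host, roughly
#   USE_HOST_ALIAS="yes"  ->  ssh lenh "whoami"                      (alias defined in config.sample)
#   USE_HOST_ALIAS="no"   ->  ssh admin@loso.erpnext.host "whoami"   (key loaded into ssh-agent)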
############################### 19 | ### ### 20 | ### Master ### 21 | ### ### 22 | ############################### 23 | 24 | export MASTER_HOST_URL="loso.erpnext.host"; # Domain name of host 25 | export MASTER_HOST_USR="admin"; # ERPNext user name 26 | export MASTER_HOST_PWD="password#1"; # ERPNext user password 27 | export MASTER_HOST_KEY="admin_loso_erpnext_host"; # ERPNext user SSH key registered in authorized_keys of user 'MASTER_HOST_USR' 28 | export MASTER_HOST_ALIAS="lenh"; # SSH host alias name 29 | export MASTER_BENCH_HOME=/home/${MASTER_HOST_USR}; # Directory where the Frappe Bench is installed 30 | export MASTER_BENCH_NAME=frappe-bench-LENH; # The name given to the Frappe Bench directory 31 | export MASTER_BENCH_PATH=${MASTER_BENCH_HOME}/${MASTER_BENCH_NAME}; # Full path to Bench directory 32 | 33 | export MASTER_DB_ROOT_PWD="password#2"; # Root password for MariaDb of master 34 | 35 | 36 | ############################### 37 | ### ### 38 | ### Slave ### 39 | ### ### 40 | ############################### 41 | 42 | export SLAVE_HOST_URL="stg.erpnext.host"; # Domain name of host 43 | export SLAVE_HOST_USR="adm"; # ERPNext user name 44 | export SLAVE_HOST_PWD="password#3"; # ERPNext user password 45 | export SLAVE_HOST_KEY="adm_stg_erpnext_host"; # ERPNext user SSH key registered in authorized_keys of user 'SLAVE_HOST_USR' 46 | export SLAVE_HOST_ALIAS="serpht"; # SSH host alias name 47 | export SLAVE_BENCH_HOME=/home/${SLAVE_HOST_USR}; # Directory where the Frappe Bench is installed 48 | export SLAVE_BENCH_NAME=frappe-bench-SERPHT; # The name given to the Frappe Bench directory 49 | export SLAVE_BENCH_PATH=${SLAVE_BENCH_HOME}/${SLAVE_BENCH_NAME}; # Full path to Bench directory 50 | 51 | export SLAVE_DB_ROOT_PWD="password#4"; # Root password for MariaDb of slave 52 | export SLAVE_DB_PWD="password#5"; # Replicator slave password 53 | 54 | export RESTORE_SITE_CONFIG="yes"; 55 | export KEEP_SITE_PASSWORD="yes"; 56 | 57 | 58 | ############################### 59 | ### ### 60 | ### For Testing ### 61 | ### ### 62 | ############################### 63 | 64 | # export DRY_RUN_ONLY="yes"; not yet implemented # If "yes", run checks but make no permanent changes 65 | export TEST_CONNECTIVITY="yes" # If "yes", the ability to log on and execute a command will be tested for each host. 66 | export ALLOW_ALTERING_MASTER="no"; # If "no", assume master was prepared previously. Collect necessary data for fixed/new slave. 67 | export REPEAT_SLAVE_WITHOUT_MASTER="no"; # If "yes", skips uploads to slave and all calls to and downloads from master. (Fast test) 68 | export UPLOAD_MASTER_BACKUP="yes"; # If "yes", upload master files to slave (ignored if REPEAT_SLAVE_WITHOUT_MASTER="no") 69 | 70 | 71 | if [[ 0 == 1 ]]; then 72 | 73 | echo -e " 74 | Some useful commands ...... 
75 | "; 76 | 77 | # Reset Master 78 | 79 | declare SLAVE_IP=$(ssh ${SLAVE_HOST_USR}@${SLAVE_HOST_URL} "dig +short myip.opendns.com @resolver1.opendns.com"); 80 | # declare SLAVE_IP=$(dig ${SLAVE_HOST_URL} A +short); 81 | declare SLAVE_USR=${SLAVE_HOST_URL//./_}; 82 | 83 | echo -e " 84 | 85 | sudo -A cp ${HOME}/${MASTER_BENCH_NAME}/BaRe/misc/50-server.cnf /etc/mysql/mariadb.conf.d; 86 | 87 | sudo -A systemctl restart mariadb; 88 | 89 | mysql mysql; 90 | 91 | # CREATE USER '${SLAVE_USR}'@'${SLAVE_IP}' IDENTIFIED BY '${SLAVE_DB_PWD}'; 92 | # GRANT REPLICATION SLAVE ON *.* TO '${SLAVE_USR}'@'${SLAVE_IP}'; 93 | 94 | SELECT Host, User, Repl_slave_priv, Delete_priv FROM user; 95 | SELECT Host, Db, User FROM db; 96 | 97 | DROP USER ${SLAVE_USR}; 98 | FLUSH PRIVILEGES; 99 | SHOW MASTER STATUS; 100 | "; 101 | 102 | # Reset Slave 103 | 104 | echo -e " 105 | 106 | sudo -A cp ${HOME}/${SLAVE_BENCH_NAME}/BaRe/misc/50-server.cnf /etc/mysql/mariadb.conf.d; 107 | 108 | sudo -A systemctl restart mariadb; 109 | 110 | mysql mysql; 111 | 112 | FLUSH PRIVILEGES; 113 | 114 | RESET MASTER; 115 | 116 | STOP SLAVE; 117 | 118 | SHOW SLAVE STATUS; 119 | "; 120 | fi; 121 | 122 | 123 | 124 | -------------------------------------------------------------------------------- /makeAskPassEmitter.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export SCRIPT_NAME=$( basename ${0#-} ); 6 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 7 | 8 | function makeAskPassEmitter () { 9 | echo -e " - Making password emitter script :: ${DIR}/${ASK_PASS_EMITTER}" 10 | 11 | cat << EOFMRS > ${DIR}/${ASK_PASS_EMITTER} 12 | #!/usr/bin/env bash 13 | echo '${APD}'; 14 | EOFMRS 15 | sudo -A chmod +x ${DIR}/${ASK_PASS_EMITTER}; 16 | 17 | # cat ${DIR}/${ASK_PASS_EMITTER}; 18 | 19 | } 20 | 21 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 22 | makeAskPassEmitter; 23 | 24 | ls -la ${TMP_DIR} 25 | else 26 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 27 | fi; 28 | -------------------------------------------------------------------------------- /makeEnvarsFile.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export SCRIPT_NAME=$( basename ${0#-} ); 6 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 7 | 8 | function makeEnvarsFile () { 9 | echo -e " - Making environment variables file for backup and restore functions (${DIR}/${BACKUP_RESTORE_DIR}/${ROL}_${ENVARS})"; 10 | 11 | mkdir -p ${DIR}/${BACKUP_RESTORE_DIR}; 12 | 13 | cat << EOFMRS > ${DIR}/${BACKUP_RESTORE_DIR}/${ROL}_${ENVARS} 14 | export TARGET_BENCH=${FBP}; # Full path to Bench directory 15 | export ERPNEXT_SITE_URL=${HST}; # URL of site to back up 16 | export RESTORE_SITE_CONFIG="yes"; # URL of site to back up 17 | export KEEP_SITE_PASSWORD="yes"; # URL of site to back up 18 | EOFMRS 19 | } 20 | 21 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 22 | makeEnvarsFile; 23 | 24 | ls -la ${DIR} 25 | else 26 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 27 | fi; 28 | -------------------------------------------------------------------------------- /makeMariaDBRestartScript.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P 
)"; 5 | export SCRIPT_NAME=$( basename ${0#-} ); 6 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 7 | 8 | function makeMariaDBRestartScript () { 9 | echo -e " - Making MariaDB restart script :: ${DIR}/${MARIA_RST_SCRIPT}" 10 | 11 | cat << EOFMRS > ${DIR}/${MARIA_RST_SCRIPT} 12 | #!/usr/bin/env bash 13 | # 14 | echo -e "${pYELLOW} - Restarting MariaDB for user '${USR}' on ${ROL} host '${HST}' ${pDFLT}"; 15 | export SUDO_ASKPASS=${DIR}/.supwd.sh; 16 | 17 | sudo -A systemctl restart mariadb; 18 | sudo -A systemctl status mariadb | grep "Active: "; 19 | EOFMRS 20 | sudo -A chmod +x ${DIR}/${MARIA_RST_SCRIPT}; 21 | 22 | # cat ${DIR}/${MARIA_RST_SCRIPT}; 23 | 24 | } 25 | 26 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 27 | makeMariaDBRestartScript; 28 | 29 | ls -la ${TMP_DIR} 30 | else 31 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 32 | fi; 33 | -------------------------------------------------------------------------------- /master/makeMasterMariaDBScript.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export SCRIPT_NAME=$( basename ${0#-} ); 6 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 7 | 8 | function makeMasterMariaDBScript () { 9 | echo -e " - Making MariaDB script :: '${MSTR_WRK_DIR}/${MARIADB_SCRIPT}'."; 10 | 11 | declare SLAVE_IP=$(ssh ${SLAVE_HOST_USR}@${SLAVE_HOST_URL} "dig +short myip.opendns.com @resolver1.opendns.com"); 12 | declare SLAVE_USR=${SLAVE_HOST_URL//./_}; 13 | 14 | cat << EOFMDB > ${MSTR_WRK_DIR}/${MARIADB_SCRIPT} 15 | DROP USER IF EXISTS '${SLAVE_USR}'@'${SLAVE_IP}'; 16 | 17 | STOP SLAVE; 18 | 19 | CREATE USER '${SLAVE_USR}'@'${SLAVE_IP}' IDENTIFIED BY '${SLAVE_DB_PWD}'; 20 | GRANT REPLICATION SLAVE ON *.* TO '${SLAVE_USR}'@'${SLAVE_IP}'; 21 | 22 | FLUSH PRIVILEGES; 23 | 24 | SHOW MASTER STATUS; 25 | 26 | EOFMDB 27 | } 28 | 29 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 30 | makeMasterMariaDBScript; 31 | 32 | ls -la ${TMP_DIR} 33 | else 34 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 35 | fi; 36 | -------------------------------------------------------------------------------- /master/makeMasterMariaDBconfPatch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export SCRIPT_NAME=$( basename ${0#-} ); 6 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 7 | 8 | function makeMasterMariaDBconfPatch () { 9 | 10 | declare HOST_ALIAS=${MASTER_HOST_URL//./_} 11 | 12 | echo -e " - Making MariaDB config patch :: '${MSTR_WRK_DIR}/${MSTR_PATCH_NAME}'. 13 | "; 14 | 15 | cat << EOFMCP > ${MSTR_WRK_DIR}/${MSTR_PATCH_NAME} 16 | --- 50-server.cnf 2022-10-28 21:58:04.584379268 +0200 17 | +++ 50-server_new.cnf 2022-10-28 21:57:05.182855389 +0200 18 | @@ -24,7 +24,7 @@ 19 | 20 | # Instead of skip-networking the default is now to listen only on 21 | # localhost which is more compatible and is not less secure. 22 | -bind-address = 127.0.0.1 23 | +bind-address = 0.0.0.0 24 | 25 | # 26 | # * Fine Tuning 27 | @@ -43,6 +43,11 @@ 28 | # 29 | # * Logging and Replication 30 | # 31 | +log-basename=${HOST_ALIAS}; 32 | +log-bin=/var/log/mysql/mariadb-bin.log 33 | +server_id=1 34 | +binlog-do-db=REPLACE_WITH_DATABASE_NAME 35 | +gtid-domain-id=1 36 | 37 | # Both location gets rotated by the cronjob. 
38 | # Be aware that this log type is a performance killer. 39 | EOFMCP 40 | } 41 | 42 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 43 | makeMasterMariaDBconfPatch; 44 | 45 | ls -la ${TMP_DIR} 46 | else 47 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 48 | fi; 49 | -------------------------------------------------------------------------------- /master/makeMasterTasks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export SCRIPT_NAME=$( basename ${0#-} ); 6 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 7 | 8 | 9 | 10 | function makeMasterTasks () { 11 | 12 | echo -e " - Making Master Tasks script :: ${MSTR_WRK_DIR}/${MSTR_JOB}." 13 | declare SLAVE_IP=$(ssh ${SLAVE_HOST_USR}@${SLAVE_HOST_URL} "dig +short myip.opendns.com @resolver1.opendns.com"); 14 | echo -e " Expecting slave IP :: ${SLAVE_IP}." 15 | 16 | cat << EOFCT > ${MSTR_WRK_DIR}/${MSTR_JOB} 17 | #!/usr/bin/env bash 18 | # 19 | 20 | export SCRIPT_DIR="\$( cd -- "\$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 21 | 22 | function ensurePkgIsInstalled () { 23 | if dpkg-query -l \${PKG} >/dev/null; then 24 | echo -e " - Found \${PKG} already installed"; 25 | else 26 | sudo -A apt-get install \${PKG} >/dev/null; 27 | echo -e "\n - Installed \${PKG}" 28 | fi; 29 | } 30 | 31 | function ensure_SUDO_ASKPASS () { 32 | echo -e " - Testing 'SUDO_ASKPASS' capability. ( SUDO_ASKPASS = >\${SUDO_ASKPASS}< )"; 33 | if [[ "${ALLOW_SUDO_ASKPASS_CREATION}" == "yes" ]]; then 34 | echo -e " - Configuration allows ASKPASS creation."; 35 | if [ "$([ -z ${MASTER_HOST_PWD} ] && echo N)" == "N" ]; then 36 | echo -e " - Configuration provides no password."; 37 | return 1; 38 | else 39 | echo -e " - Found password in configuration file."; 40 | export SUDO_ASKPASS=${MSTR_WRK_DIR}/.supwd.sh; 41 | fi; 42 | else 43 | echo -e " - SUDO_ASKPASS creation denied in configuration"; 44 | return 1; 45 | fi; 46 | 47 | echo -e " - Trying uploaded ASK_PASS emmitter."; 48 | # declare TEST_RSLT=\$(sudo -A touch /etc/hostname); 49 | sudo -A touch /etc/hostname; 50 | if [ \$? -ne 0 ]; then 51 | echo -e "SUDO_ASKPASS ==> \${SUDO_ASKPASS}"; 52 | if [ ! -f \${SUDO_ASKPASS} ]; then 53 | echo -e "${pRED}\n\n* * * There is no file: '\${SUDO_ASKPASS}' * * * ${pDFLT}"; 54 | else 55 | echo -e "${pRED}\n\n* * * Are you sure the pwd from '\${SUDO_ASKPASS}' is correct? * * * ${pDFLT}"; 56 | fi 57 | return 1; 58 | fi; 59 | 60 | } 61 | 62 | function configureDBforReplication () { 63 | echo -e " - Configuring MariaDB Master for replication. (${MARIADB_CONFIG_DIR}/${MARIADB_CONFIG})"; 64 | echo -e " - Getting database name for site '${MASTER_HOST_URL}' from '${MASTER_BENCH_PATH}/sites/${MASTER_HOST_URL}/${SITE_CONFIG}'."; 65 | 66 | # jq -r . 
${MASTER_BENCH_PATH}/sites/${MASTER_HOST_URL}/${SITE_CONFIG}; 67 | 68 | declare MASTER_DATABASE_NAME=\$(jq -r .db_name ${MASTER_BENCH_PATH}/sites/${MASTER_HOST_URL}/${SITE_CONFIG}); 69 | 70 | pushd ${MARIADB_CONFIG_DIR} >/dev/null; 71 | echo -e " - Providing 'binlog-do-db' with its value ("\${MASTER_DATABASE_NAME}"), in patch file '${MSTR_WRK_DIR}/${MSTR_PATCH_NAME}'."; 72 | sed -i "s/.*REPLACE_WITH_DATABASE_NAME.*/+binlog-do-db=\${MASTER_DATABASE_NAME}/" ${MSTR_WRK_DIR}/${MSTR_PATCH_NAME}; 73 | # cat "${MSTR_WRK_DIR}/${MSTR_PATCH_NAME}"; 74 | 75 | echo -e " - Patching '${MARIADB_CONFIG}' with '${MSTR_WRK_DIR}/${MSTR_PATCH_NAME}'.\n${pFAINT_BLUE}"; 76 | 77 | sudo -A patch --forward ${MARIADB_CONFIG} ${MSTR_WRK_DIR}/${MSTR_PATCH_NAME}; 78 | # sudo -A patch --forward --dry-run ${MARIADB_CONFIG} ${MSTR_WRK_DIR}/${MSTR_PATCH_NAME}; 79 | echo -e "\n${pDFLT} Patched\n"; 80 | 81 | echo -e "${pYELLOW} - Restarting MariaDB ${pDFLT}"; 82 | sudo -A systemctl restart mariadb; 83 | # sudo -A systemctl status mariadb; 84 | 85 | popd >/dev/null; 86 | } 87 | 88 | function backupDatabase () { 89 | echo -e " - Taking backup of Master database ..."; 90 | 91 | pushd ${MASTER_BENCH_PATH} >/dev/null; 92 | pushd ${BACKUP_RESTORE_DIR} >/dev/null; 93 | ./handleBackup.sh "Pre-replication baseline"; 94 | popd >/dev/null; 95 | 96 | pushd ./BKP >/dev/null; 97 | BACKUP_NAME="\$(cat BACKUP.txt)"; 98 | echo -e " - Backup name is : '\${BACKUP_NAME}'"; 99 | rm -f ${MSTR_RSLT_DIR}/20*.tgz; 100 | cp BACKUP.txt ${MSTR_RSLT_DIR}; 101 | cp \${BACKUP_NAME} ${MSTR_RSLT_DIR}; 102 | popd >/dev/null; 103 | # pwd; 104 | # echo -e "Purging temporary files from Master."; 105 | # rm -fr /dev/shm/M_*; 106 | # echo -e "${pYELLOW}----------------- Master Tasks Curtailed --------------------------${pDFLT}"; 107 | # exit; 108 | popd >/dev/null; 109 | } 110 | 111 | function installBackupAndRestoreTools () { 112 | echo -e " - Checking Frappe Bench directory location :: '${MASTER_BENCH_PATH}'"; 113 | if [ -f ${MASTER_BENCH_PATH}/Procfile ]; then 114 | echo -e " - Moving Backup and Restore handlers from '${MSTR_WRK_DIR}/${BACKUP_RESTORE_DIR}' to Frappe Bench directory"; 115 | pushd ${MSTR_WRK_DIR}/${BACKUP_RESTORE_DIR} >/dev/null; 116 | ln -fs Master_${ENVARS} ${ENVARS}; 117 | popd >/dev/null; 118 | cp -r ${MSTR_WRK_DIR}/${BACKUP_RESTORE_DIR} ${MASTER_BENCH_PATH} 119 | else 120 | echo -e "\n${pRED}* * * Specified Frappe Bench directory location, '${MASTER_BENCH_PATH}', is NOT correct. Cannot continue .... * * * ${pDFLT}" 121 | exit 1; 122 | fi; 123 | } 124 | 125 | function makeRestartERPNextSupervisorScript () { 126 | echo -e " - Making ERPNext supervisor restart script :: '\${HOME}/restartERPNextSupervisor.sh'"; 127 | echo -e " 128 | #!/usr/bin/env bash 129 | # 130 | source \${HOME}/.profile; 131 | sudo -A supervisorctl restart all; 132 | " > \${HOME}/restartERPNextSupervisor.sh; 133 | chmod +x \${HOME}/restartERPNextSupervisor.sh; 134 | } 135 | 136 | function stopERPNext () { 137 | echo -e "${pYELLOW} - Stopping ERPNext on Master ... ${pFAINT_BLUE}\n"; 138 | sudo -A supervisorctl stop all; 139 | echo -e "\n${pDFLT} Stopped\n"; 140 | } 141 | 142 | mkdir -p ${MSTR_RSLT_DIR}; 143 | 144 | export BACKUP_NAME=""; 145 | 146 | ensure_SUDO_ASKPASS; 147 | if [ \$? -eq 0 ]; then 148 | echo -e " - 'SUDO_ASKPASS' environment variable is correct."; 149 | else 150 | echo -e "\n${pRED}* * * 'SUDO_ASKPASS' environment variable or emitter is NOT correct. Cannot continue .... 
* * * ${pDFLT}" 151 | exit 1; 152 | fi; 153 | 154 | # echo -e "${ALLOW_ALTERING_MASTER}\n${pYELLOW}----------------- Master Tasks Curtailed --------------------------${pDFLT}"; 155 | # exit; 156 | 157 | if [[ ${ALLOW_ALTERING_MASTER} == "yes" ]]; then 158 | echo -e "\n\n - Installing dependencies."; 159 | declare PKG="xmlstarlet"; 160 | ensurePkgIsInstalled; 161 | 162 | declare PKG="jq"; 163 | ensurePkgIsInstalled; 164 | 165 | makeRestartERPNextSupervisorScript; 166 | 167 | installBackupAndRestoreTools; 168 | 169 | stopERPNext; 170 | 171 | configureDBforReplication; 172 | else 173 | echo -e "\n\n${pYELLOW} - Won't install dependencies. (ALLOW_ALTERING_MASTER != 'yes')${pDFLT}"; 174 | fi; 175 | 176 | backupDatabase; 177 | 178 | # # tree /dev/shm; 179 | # # echo -e "Purging temporary files from Master."; 180 | # # rm -fr /dev/shm/M_*; 181 | # echo -e "${pYELLOW}----------------- Master Tasks Curtailed --------------------------${pDFLT}"; 182 | # # ls -la; 183 | # # hostname; 184 | # # pwd; 185 | # exit; 186 | 187 | if [[ ${ALLOW_ALTERING_MASTER} == "yes" ]]; then 188 | echo -e " - Enabling Slave user access and reading status of Master"; 189 | pushd ${MSTR_WRK_DIR} >/dev/null; 190 | # ls -la 191 | mysql -AX < ${MARIADB_SCRIPT} > ${MSTR_RSLT_DIR}/${MSTR_STATUS_RSLT}; 192 | popd >/dev/null; 193 | else 194 | echo -e "\n\n${pYELLOW} - Won't change slave user access. (ALLOW_ALTERING_MASTER != 'yes')${pDFLT}"; 195 | mysql -Xe "show master status" mysql > ${MSTR_RSLT_DIR}/${MSTR_STATUS_RSLT}; 196 | fi; 197 | 198 | export STATUS_FILE=\$(xmlstarlet sel -t -v "//resultset/row/field[@name='File']" ${MSTR_RSLT_DIR}/${MSTR_STATUS_RSLT}); 199 | echo -e " - Log FILE :: \${STATUS_FILE}"; 200 | 201 | export STATUS_POS=\$(xmlstarlet sel -t -v "//resultset/row/field[@name='Position']" ${MSTR_RSLT_DIR}/${MSTR_STATUS_RSLT}); 202 | echo -e " - Log file POSITION :: \${STATUS_POS}"; 203 | 204 | export STATUS_DB=\$(xmlstarlet sel -t -v "//resultset/row/field[@name='Binlog_Do_DB']" ${MSTR_RSLT_DIR}/${MSTR_STATUS_RSLT}); 205 | echo -e " - Restrict to DATABASE :: \${STATUS_DB}"; 206 | 207 | if [[ ${ALLOW_ALTERING_MASTER} == "yes" ]]; then 208 | echo -e " - Open MySql port 3306 for remote host :: ${SLAVE_IP}"; 209 | sudo ufw allow from ${SLAVE_IP} to any port 3306; 210 | else 211 | echo -e "\n\n${pYELLOW} - Won't alter firewall. (ALLOW_ALTERING_MASTER != 'yes')${pDFLT}"; 212 | fi; 213 | 214 | if [[ ${ALLOW_ALTERING_MASTER} == "yes" ]]; then 215 | echo -e "${pYELLOW} - Stopping MariaDB so that the backup can be restored on the Slave. ${pDFLT}"; 216 | sudo -A systemctl stop mariadb; 217 | # sudo -A systemctl status mariadb; 218 | else 219 | echo -e "\n\n${pYELLOW} - Won't pause MariaDB. (ALLOW_ALTERING_MASTER != 'yes')${pDFLT}"; 220 | sudo -A systemctl status mariadb; 221 | fi; 222 | 223 | 224 | echo -e " - Packaging results into :: '${TMP_DIR}/${MSTR_RSLT_PKG}'"; 225 | pushd ${TMP_DIR} >/dev/null; 226 | tar zcvf ${MSTR_RSLT_PKG} ${MSTR_RSLT} >/dev/null; 227 | popd >/dev/null; 228 | 229 | echo -e "Purging temporary files from Master."; 230 | # ls -la /dev/shm/; 231 | # rm -fr /dev/shm/M_w*; 232 | # rm -fr /dev/shm/M_rslt; 233 | 234 | 235 | echo -e "\nCompleted remote job : '${MSTR_WRK_DIR}/${MSTR_JOB}'.\n\n"; 236 | exit; 237 | EOFCT 238 | chmod +x ${MSTR_WRK_DIR}/${MSTR_JOB}; 239 | } 240 | 241 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 242 | makeMasterTasks; 243 | echo -e "???" 
244 | 245 | ls -la ${TMP_DIR} 246 | else 247 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 248 | fi; 249 | -------------------------------------------------------------------------------- /master/prepareMaster.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | 6 | declare MAKE_MSTR_JOB="./master/makeMasterTasks.sh"; 7 | source ${MAKE_MSTR_JOB}; 8 | 9 | declare MAKE_MSTR_PATCH="./master/makeMasterMariaDBconfPatch.sh"; 10 | source ${MAKE_MSTR_PATCH}; 11 | 12 | declare MAKE_MSTR_MARIA_SQL="./master/makeMasterMariaDBScript.sh"; 13 | source ${MAKE_MSTR_MARIA_SQL}; 14 | 15 | declare MSTR_PATCH_NAME="master_${MARIADB_CONFIG}.patch"; 16 | declare MSTR_JOB="masterTasks.sh"; 17 | declare MARIADB_SCRIPT="setUpMaster.sql"; 18 | 19 | function prepareMaster() { 20 | echo -e "\n\nPreparing master ..."; 21 | 22 | echo -e "Moving backup and restore handlers '${BACKUP_HANDLER}' to transfer directory '${TMP_DIR}/${MSTR_WRK}'"; 23 | cp -r ${BACKUP_RESTORE_PATH} ${TMP_DIR}/${MSTR_WRK}; 24 | 25 | makeMasterTasks; 26 | makeMasterMariaDBScript; 27 | makeMasterMariaDBconfPatch; 28 | 29 | pushd ${MSTR_WRK_DIR}/BaRe >/dev/null; 30 | echo "export MYPWD=\"${MASTER_DB_ROOT_PWD}\";" >> Master_envars.sh; 31 | popd >/dev/null; 32 | 33 | pushd ${TMP_DIR} >/dev/null 34 | tar zcvf ${MSTR_WRK_FILES} ${MSTR_WRK} >/dev/null; 35 | popd >/dev/null 36 | 37 | echo -e "Uploading Master tasks files '${MSTR_WRK_FILES}' to '${THE_MASTER}:${TMP_DIR}'." 38 | scp ${TMP_DIR}/${MSTR_WRK_FILES} ${THE_MASTER}:${TMP_DIR} >/dev/null; 39 | 40 | echo -e "Extracting content from uploaded file '${MSTR_WRK_FILES}' on Master." 41 | ssh ${THE_MASTER} tar zxvf ${TMP_DIR}/${MSTR_WRK_FILES} -C /dev/shm >/dev/null; 42 | 43 | # # ls -la; 44 | # echo -e "${pYELLOW}------------- prepareMaster Curtailed ---------------------${pDFLT}"; 45 | # exit; 46 | 47 | echo -e "Executing script '${MSTR_JOB}' on Master." 
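# Note: '-t' forces pseudo-terminal allocation for the remote session, presumably so the
# colour escapes and any prompts in masterTasks.sh behave as they would in an interactive login.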
48 | ssh -t ${THE_MASTER} ${MSTR_WRK_DIR}/${MSTR_JOB}; 49 | 50 | } 51 | 52 | export SCRIPT_NAME=$( basename ${0#-} ); 53 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 54 | 55 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 56 | 57 | prepareMaster; 58 | 59 | ls -la ${TMP_DIR} 60 | else 61 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 62 | fi 63 | -------------------------------------------------------------------------------- /prepareMasterAndSlave.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export start_time="$(date -u +%s)"; 5 | 6 | export DEBUGGING="debugging"; 7 | export pRED="\033[1;40;31m"; 8 | export pYELLOW="\033[1;40;33m"; 9 | export pGOLD="\033[0;40;33m"; 10 | export pFAINT_BLUE="\033[0;49;34m"; 11 | export pGREEN="\033[1;40;32m"; 12 | export pDFLT="\033[0m"; 13 | export pBG_YLO="\033[1;43;33m"; 14 | 15 | echo -e " 16 | 17 | ${pGREEN}----------------------------- Starting -----------------------------------${pDFLT}"; 18 | 19 | function ensurePkgIsInstalled () { 20 | echo -e "\nChecking presence of '${PKG}' tool."; 21 | if dpkg-query -l ${PKG} >/dev/null; then 22 | echo -e " - Found '${PKG}' already installed.\n"; 23 | else 24 | echo -e "\n* * * Do you accept to install '${PKG}' ${NOTE} * * * "; 25 | read -n 1 -p "Type 'y' to approve, or any other key to quit : " installOk 26 | if [ "${installOk}" == "y" ]; then 27 | echo -e "\nOk."; 28 | sudo -A apt-get install ${PKG}; 29 | echo -e "\n - Installed '${PKG}'\n"; 30 | else 31 | echo -e "\n\nOk. Cannot proceed.\n ${pRED}Quitting now. ${pDFLT}"; exit 1; 32 | fi 33 | fi; 34 | } 35 | 36 | echo -e ""; 37 | 38 | export SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 39 | 40 | declare ENVARS="envars.sh"; 41 | declare ENVARS_PATH="${SCRIPT_DIR}/${ENVARS}"; 42 | 43 | declare PREP_MSTR="${SCRIPT_DIR}/master/prepareMaster.sh"; 44 | declare PREP_SLV="${SCRIPT_DIR}/slave/prepareSlave.sh"; 45 | declare MAKE_MARIADB_RESTART_SCRIPT="${SCRIPT_DIR}/makeMariaDBRestartScript.sh"; 46 | declare MAKE_ASK_PASS_EMITTER="${SCRIPT_DIR}/makeAskPassEmitter.sh"; 47 | declare MAKE_ENVARS_FILE="${SCRIPT_DIR}/makeEnvarsFile.sh"; 48 | 49 | declare BACKUP_RESTORE_DIR="BaRe"; 50 | declare BACKUP_RESTORE_PATH="${SCRIPT_DIR}/${BACKUP_RESTORE_DIR}"; 51 | declare BACKUP_HANDLER="handleBackup.sh"; 52 | declare RESTORE_HANDLER="handleRestore.sh"; 53 | 54 | declare TMP_DIR="/dev/shm"; 55 | # declare CE_SRI_UTILS="apps/ce_sri/development/initialization"; 56 | declare MARIADB_CONFIG_DIR="/etc/mysql/mariadb.conf.d/"; 57 | declare MARIADB_CONFIG="50-server.cnf"; 58 | 59 | declare MSTR_WRK="M_work"; 60 | declare MSTR_RSLT="M_rslt"; 61 | declare MSTR_WRK_FILES="${MSTR_WRK}.tgz"; 62 | declare MSTR_WRK_DIR="${TMP_DIR}/${MSTR_WRK}"; 63 | declare MSTR_RSLT_DIR="${TMP_DIR}/${MSTR_RSLT}"; 64 | declare MSTR_RSLT_PKG="${MSTR_RSLT}.tgz"; 65 | declare MSTR_STATUS_RSLT="masterStatus.xml"; 66 | 67 | declare SLAV_WRK="S_work"; 68 | declare SLAV_RSLT="S_rslt"; 69 | declare SLAV_WRK_FILES="${SLAV_WRK}.tgz"; 70 | declare SLAV_WRK_DIR="${TMP_DIR}/${SLAV_WRK}"; 71 | declare SLAV_RSLT_DIR="${TMP_DIR}/${SLAV_RSLT}"; 72 | declare SLAV_RSLT_PKG="${SLAV_RSLT}.tgz"; 73 | declare SLAV_STATUS_RSLT="slaveStatus.xml"; 74 | 75 | declare MARIA_RST_SCRIPT="restartMariaDB.sh"; 76 | declare ASK_PASS_EMITTER=".supwd.sh"; 77 | declare SITE_CONFIG="site_config.json"; 78 | 79 | if [ !
-f ${ENVARS_PATH} ]; then 80 | echo -e "${pRED}Error:${pDFLT} 81 | The environment variables configuration file '${ENVARS_PATH}' was not found. 82 | Copy 'envars.sh.sample' to 'envars.sh' and edit as needed."; 83 | echo -e "\n${pRED}Cannot continue.\n~~~~~~~~~~~~~~~${pDFLT}"; 84 | exit 1; 85 | fi; 86 | 87 | 88 | declare NOTE=""; 89 | declare PKG=""; 90 | 91 | PKG="xmlstarlet"; 92 | NOTE=" ( https://en.wikipedia.org/wiki/XMLStarlet )"; 93 | ensurePkgIsInstalled; 94 | 95 | PKG="jq"; 96 | NOTE=""; 97 | ensurePkgIsInstalled; 98 | 99 | mkdir -p ${MSTR_WRK_DIR}; 100 | rm -fr ${MSTR_WRK_DIR}/*; 101 | 102 | mkdir -p ${SLAV_WRK_DIR}; 103 | rm -fr ${SLAV_WRK_DIR}/*; 104 | 105 | echo -e "\nLoading dependencies ..." 106 | source ${ENVARS_PATH} >/dev/null; 107 | source ${PREP_MSTR}; 108 | source ${PREP_SLV}; 109 | source ${MAKE_MARIADB_RESTART_SCRIPT}; 110 | source ${MAKE_ASK_PASS_EMITTER}; 111 | source ${MAKE_ENVARS_FILE}; 112 | 113 | export MASTER_IP_ADDR=${STR#${THST} has address } 114 | 115 | [ ! -z ${MASTER_HOST_URL} ] || ERRORS="${ERRORS}\n - Master host URL was not specified in '${ENVARS_PATH}'"; 116 | [ -f ${HOME}/.ssh/${MASTER_HOST_KEY} ] || ERRORS="${ERRORS}\n - Master host PKI file '${HOME}/.ssh/${MASTER_HOST_KEY}' was not found"; 117 | 118 | [ ! -z ${SLAVE_HOST_URL} ] || ERRORS="${ERRORS}\n - Slave host URL was not specified in '${ENVARS_PATH}'"; 119 | [ -f ${HOME}/.ssh/${SLAVE_HOST_KEY} ] || ERRORS="${ERRORS}\n - Slave host PKI file '${HOME}/.ssh/${SLAVE_HOST_KEY}' was not found"; 120 | 121 | 122 | echo -e "\nWas host alias use specified?"; 123 | if [[ "${USE_HOST_ALIAS}" = "yes" ]]; then 124 | echo -e " Yes! Will use host aliases"; 125 | export THE_MASTER="${MASTER_HOST_ALIAS}"; 126 | export THE_SLAVE="${SLAVE_HOST_ALIAS}"; 127 | else 128 | if [[ -z ${SSH_AGENT_PID} ]]; then 129 | echo -e " - No 'ssh-agent' found. Starting it ..."; 130 | eval "$(ssh-agent -s)"; 131 | else 132 | if ps -p ${SSH_AGENT_PID} > /dev/null 133 | then 134 | echo -e " - Found 'ssh-agent' already running." 135 | # Do something knowing the pid exists, i.e. the process with $PID is running 136 | else 137 | echo -e " - No 'ssh-agent' found. 
Starting it ..."; 138 | eval "$(ssh-agent -s)"; 139 | fi 140 | fi; 141 | 142 | # echo -e "${SSH_AGENT_PID}\n${pYELLOW} ${KEY_CNT} ------------- prepareMasterAndSlave Curtailed ---------------------${pDFLT}"; 143 | # exit; 144 | 145 | declare KEY_CNT="0"; 146 | declare CURRENTLY_ADDED_KEYS=$(ssh-add -l); 147 | declare MASTER_HOST_KEY_FINGERPRINT=$(ssh-keygen -lf ${HOME}/.ssh/${MASTER_HOST_KEY}); 148 | KEY_CNT=$(echo -e ${CURRENTLY_ADDED_KEYS} | grep -c "${MASTER_HOST_KEY_FINGERPRINT}"); 149 | if [[ "${KEY_CNT}" < "1" ]]; then 150 | echo -e "\n Adding Master host PKI key to agent"; 151 | ssh-add "${HOME}/.ssh/${MASTER_HOST_KEY}"; 152 | else 153 | echo -e "\n Master host PKI key was added to agent previously"; 154 | fi; 155 | 156 | declare SLAVE_HOST_KEY_FINGERPRINT=$(ssh-keygen -lf ${HOME}/.ssh/${SLAVE_HOST_KEY}); 157 | KEY_CNT=$(echo -e ${CURRENTLY_ADDED_KEYS} | grep -c "${SLAVE_HOST_KEY_FINGERPRINT}"); 158 | if [[ "${KEY_CNT}" < "1" ]]; then 159 | echo -e "\n Adding Slave host PKI key to agent"; 160 | ssh-add "${HOME}/.ssh/${SLAVE_HOST_KEY}"; 161 | else 162 | echo -e "\n Slave host PKI key was added to agent previously"; 163 | fi; 164 | 165 | export THE_MASTER=${MASTER_HOST_USR}@${MASTER_HOST_URL}; 166 | export THE_SLAVE=${SLAVE_HOST_USR}@${SLAVE_HOST_URL}; 167 | fi; 168 | 169 | 170 | if [[ "${TEST_CONNECTIVITY}" == "yes" ]]; then 171 | echo -e "\nTesting connectivity ..."; 172 | echo -e " - testing with command : 'ssh ${THE_MASTER} \"whoami\"'"; 173 | 174 | # ssh ${THE_MASTER} whoami; 175 | 176 | [ "$(ssh ${THE_MASTER} \"whoami\")" == "${MASTER_HOST_USR}" ] || ERRORS="${ERRORS}\n - Unable to verify user '${MASTER_HOST_USR}' on remote host '${THE_MASTER}'."; 177 | 178 | echo -e " - testing with command : 'ssh ${THE_SLAVE} \"whoami\"'"; 179 | [ "$(ssh ${THE_SLAVE} \"whoami\")" == "${SLAVE_HOST_USR}" ] || ERRORS="${ERRORS}\n - Unable to verify user '${SLAVE_HOST_USR}' on remote host '${THE_SLAVE}'."; 180 | else 181 | echo -e "${pGOLD}Skipping testing connectivity. 
(TEST_CONNECTIVITY =='${TEST_CONNECTIVITY}').${pDFLT}"; 182 | fi; 183 | 184 | # echo -e "${SSH_AGENT_PID}\n${pYELLOW} ${KEY_CNT} ------------- prepareMasterAndSlave Curtailed ---------------------${pDFLT}"; 185 | # exit; 186 | 187 | 188 | if [[ "${ERRORS}" != "${ERROR_MSG}" ]]; then 189 | echo -e "${pRED}\n\nThere are errors:\n${pDFLT}\n${ERRORS}\n\n${pRED}Cannot continue.\n~~~~~~~~~~~~~~~${pDFLT}"; 190 | exit; 191 | else 192 | echo -e "\n${pGREEN} No initial configuration errors found"; 193 | echo -e " -- o 0 o --${pDFLT}"; 194 | echo -e "\n\nReady to prepare Master/Slave replication: " 195 | echo -e " - Master: " 196 | echo -e " - User : ${MASTER_HOST_USR}" 197 | echo -e " - Host : $(host ${MASTER_HOST_URL})" 198 | echo -e " - Slave: " 199 | echo -e " - User : ${SLAVE_HOST_USR}" 200 | 201 | declare SLAVE_IP=$(ssh ${SLAVE_HOST_USR}@${SLAVE_HOST_URL} "dig +short myip.opendns.com @resolver1.opendns.com"); 202 | echo -e " - Host (local) : $(host ${SLAVE_HOST_URL})" 203 | echo -e " - Host (public) : ${SLAVE_HOST_URL} has public address ${SLAVE_IP}" 204 | 205 | echo -e "" 206 | if [[ -z ${1} ]]; then 207 | read -p "Press any key to proceed : " -n1 -s; 208 | echo -e "" 209 | echo -e "|" 210 | echo -e "|" 211 | echo -e "V" 212 | fi; 213 | fi; 214 | 215 | declare -a HOSTS 216 | 217 | HOSTS[0]="Master|${MASTER_HOST_USR}|${MSTR_WRK_DIR}|${MASTER_HOST_URL}|${MASTER_HOST_PWD}|${MASTER_BENCH_PATH}"; 218 | HOSTS[1]="Slave|${SLAVE_HOST_USR}|${SLAV_WRK_DIR}|${SLAVE_HOST_URL}|${SLAVE_HOST_PWD}|${SLAVE_BENCH_PATH}"; 219 | 220 | echo -e "Making generic host-specific scripts"; 221 | for HOST in "${HOSTS[@]}" 222 | do 223 | IFS="|" read -r -a arr <<< "${HOST}" 224 | 225 | ROL="${arr[0]}" 226 | 227 | echo -e " - For ${ROL}"; 228 | USR="${arr[1]}" 229 | DIR="${arr[2]}" 230 | HST="${arr[3]}" 231 | APD="${arr[4]}" 232 | FBP="${arr[5]}" 233 | 234 | makeMariaDBRestartScript; 235 | makeAskPassEmitter; 236 | makeEnvarsFile; 237 | done 238 | 239 | if [[ "${REPEAT_SLAVE_WITHOUT_MASTER}" == "yes" ]]; then 240 | echo -e "\n${pGOLD}Skipping processing master. (REPEAT_SLAVE_WITHOUT_MASTER =='${REPEAT_SLAVE_WITHOUT_MASTER}').${pDFLT}" 241 | else 242 | prepareMaster; 243 | fi; 244 | 245 | # echo -e "${pYELLOW}------------- prepareMasterAndSlave Curtailed ---------------------${pDFLT}"; 246 | # exit; 247 | 248 | declare MASTER_OK="no"; 249 | 250 | pushd ${TMP_DIR} >/dev/null; 251 | if [[ "${REPEAT_SLAVE_WITHOUT_MASTER}" == "yes" ]]; then 252 | echo -e "${pGOLD}Skipping downloading from master. (REPEAT_SLAVE_WITHOUT_MASTER =='${REPEAT_SLAVE_WITHOUT_MASTER}').${pDFLT}\n" 253 | else 254 | echo -e "Downloading Master status file '${MSTR_RSLT_PKG}' to '$(pwd)'." 255 | # echo -e "scp ${THE_MASTER}:${TMP_DIR}/${MSTR_RSLT_PKG}"; 256 | scp ${THE_MASTER}:${TMP_DIR}/${MSTR_RSLT_PKG} . &>/dev/null; 257 | fi; 258 | 259 | if [ $? -eq 0 ]; then 260 | tar zxvf ${MSTR_RSLT_PKG} >/dev/null; 261 | pushd ${MSTR_RSLT} >/dev/null; 262 | if [ $? -eq 0 ]; then 263 | xmlstarlet sel -t -v "//resultset/row" ${MSTR_STATUS_RSLT} >/dev/null; 264 | if [ $? 
-eq 0 ]; then 265 | MASTER_OK="yes" 266 | echo -e "\nReady to 'prepareSlave'"; 267 | 268 | prepareSlave; 269 | 270 | # echo -e "${pYELLOW}------------- prepareMasterAndSlave Curtailed ---------------------${pDFLT}"; 271 | # exit; 272 | 273 | # echo -e "Purging temporary files from workstation."; 274 | # # tree /dev/shm; 275 | # rm -fr /dev/shm/M_*; 276 | # rm -fr /dev/shm/S_*; 277 | # # ls -la; 278 | 279 | 280 | ssh ${THE_MASTER} ${MSTR_WRK_DIR}/${MARIA_RST_SCRIPT}; 281 | 282 | declare WAIT=90; 283 | echo -e "\n\nSleeping for ${WAIT} seconds, before starting slave."; 284 | sleep ${WAIT}; 285 | 286 | ssh ${THE_SLAVE} ${SLAV_WRK_DIR}/${MARIA_RST_SCRIPT}; 287 | 288 | declare WAIT=60; 289 | echo -e "\n\nSleeping for ${WAIT} seconds, before checking slave status."; 290 | sleep ${WAIT}; 291 | 292 | echo -e "Found slave status to be ..."; 293 | ssh ${THE_SLAVE} 'mysql mysql -e "SHOW SLAVE STATUS\G" | grep -e "Slave_IO_Running" -e "Slave_SQL_Running" -e " Master_Log_File" -e "Read_Master_Log_Pos" -e "Last_IO_Error" -e "Slave_SQL_Running_State"'; 294 | 295 | echo -e "\nRestarting ERPNext on Master ..."; 296 | ssh ${THE_MASTER} "\${HOME}/restartERPNextSupervisor.sh"; 297 | 298 | else 299 | echo -e "\n\n${pRED}* * * Expected Master status data was not found .... * * * ${pDFLT}" 300 | cat ${TMP_DIR}/${MSTR_STATUS_RSLT}; 301 | fi; 302 | else 303 | echo -e "\n\n${pRED}* * * Unable to decompress Master results package file .... * * * ${pDFLT}" 304 | fi; 305 | popd >/dev/null; 306 | else 307 | echo -e "\n\n${pRED}* * * Expected result package could not be retrieved from Master * * * ${pDFLT}" 308 | fi; 309 | popd >/dev/null; 310 | 311 | 312 | if [ "${MASTER_OK}" == "yes" ]; then 313 | echo -e "${pGREEN}------------------------------ Finished ----------------------------------${pDFLT}"; 314 | else 315 | echo -e "\n\n${pRED}------------ Could not configure Slave. Bad result from Master. -------------------------${pDFLT}"; 316 | fi 317 | 318 | # ls -la ${TMP_DIR} 319 | # cat ${TMP_DIR}/50-server.cnf.patch; 320 | 321 | exit; 322 | 323 | for clbg in {40..47} {100..107} 49 ; do 324 | #Foreground 325 | for clfg in {30..37} {90..97} 39 ; do 326 | #Formatting 327 | for attr in 0 1 2 4 5 7 ; do 328 | #Print the result 329 | echo -en "\e[${attr};${clbg};${clfg}m ^[${attr};${clbg};${clfg}m \e[0m" 330 | done 331 | echo #Newline 332 | done 333 | done 334 | -------------------------------------------------------------------------------- /ros.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # set -e; 4 | 5 | WATCH_DIRECTORY=$1; 6 | shift; 7 | 8 | EVENT_TASK=$1; 9 | shift; 10 | 11 | IGNORE_PATHS="$@"; 12 | 13 | function listVariables() { 14 | echo -e "Variables :: 15 | WATCH_DIRECTORY = ${WATCH_DIRECTORY}; 16 | EVENT_TASK = ${EVENT_TASK}; 17 | "; 18 | } 19 | 20 | function doIt() { 21 | sleep 1; 22 | ${EVENT_TASK}; 23 | }; 24 | 25 | echo -e "\nros.sh -- Run On Save : Executes the indicated command when any file is changed in the indicated directory. 26 | Usage: ./ros.sh . \"ls -la\";\n\n"; 27 | 28 | declare PKG="inotify-tools"; 29 | 30 | if ! dpkg-query -l ${PKG} &>/dev/null; then 31 | echo "Attempting to install '${PKG}'" 32 | if sudo apt-get -y install ${PKG} &>/dev/null; then 33 | echo -e "Hmmm."; 34 | else 35 | echo -e "Required repositories are not available. 
Running 'apt-get update'"; 36 | sudo apt-get update; 37 | echo "\n\nAgain attempting to install '${PKG}'\n" 38 | sudo apt-get -y install ${PKG}; 39 | echo "\nInstalled '${PKG}'\n" 40 | fi; 41 | fi; 42 | declare PKG="tree"; 43 | dpkg-query -l ${PKG} &>/dev/null || sudo apt-get -y install ${PKG}; 44 | 45 | echo "Run-On-Save will execute : '${EVENT_TASK}'"; 46 | listVariables; 47 | 48 | doIt; 49 | while true #run indefinitely 50 | do 51 | inotifywait -qqr -e close_write,move,create,delete ${IGNORE_PATHS} ${WATCH_DIRECTORY} && doIt; 52 | done 53 | -------------------------------------------------------------------------------- /slave/makeSlaveMariaDBScript.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export SCRIPT_NAME=$( basename ${0#-} ); 6 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 7 | 8 | function makeSlaveMariaDBScript () { 9 | echo -e " - Making MariaDB script :: ${SLAV_WRK_DIR}/${MARIADB_SCRIPT}" 10 | 11 | cat << EOFMDB > ${SLAV_WRK_DIR}/${MARIADB_SCRIPT} 12 | STOP SLAVE; 13 | CHANGE MASTER TO MASTER_HOST='${MASTER_HOST_URL}', MASTER_USER='${SLAVE_NAME}', MASTER_PASSWORD='${SLAVE_DB_PWD}', MASTER_LOG_FILE='${STATUS_FILE}', MASTER_LOG_POS=${STATUS_POS}; 14 | START SLAVE; 15 | SHOW SLAVE STATUS\G; 16 | EOFMDB 17 | } 18 | 19 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 20 | makeSlaveMariaDBScript; 21 | 22 | ls -la ${TMP_DIR} 23 | else 24 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 25 | fi; 26 | -------------------------------------------------------------------------------- /slave/makeSlaveMariaDBconfPatch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export SCRIPT_NAME=$( basename ${0#-} ); 6 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 7 | 8 | function makeSlaveMariaDBconfPatch () { 9 | echo -e " - Making MariaDB config patch :: '${SLAV_WRK_DIR}/${SLAV_PATCH_NAME}'. 10 | "; 11 | 12 | cat << EOFMCP > ${SLAV_WRK_DIR}/${SLAV_PATCH_NAME} 13 | --- /dev/shm/50-server.cnf 2022-10-27 10:33:19.504245491 -0400 14 | +++ /dev/shm/50-server_new.cnf 2022-11-03 16:20:47.086226155 -0400 15 | @@ -44,6 +44,10 @@ 16 | # * Logging and Replication 17 | # 18 | 19 | +log-basename=${SLAVE_NAME} 20 | +log-bin 21 | +server_id=2 22 | + 23 | # Both location gets rotated by the cronjob. 24 | # Be aware that this log type is a performance killer. 25 | # Recommend only changing this at runtime for short testing periods if needed! 
26 | 27 | EOFMCP 28 | # cat ${SLAV_WRK_DIR}/${SLAV_PATCH_NAME}; 29 | } 30 | 31 | 32 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 33 | makeSlaveMariaDBconfPatch; 34 | 35 | ls -la ${TMP_DIR} 36 | else 37 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 38 | fi; 39 | -------------------------------------------------------------------------------- /slave/makeSlaveTasks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export SCRIPT_NAME=$( basename ${0#-} ); 6 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 7 | 8 | function makeSlaveTasks () { 9 | echo -e " - Making Slave Tasks script :: ${SLAV_WRK_DIR}/${SLAV_JOB}" 10 | cat << EOFCTS > ${SLAV_WRK_DIR}/${SLAV_JOB} 11 | #!/usr/bin/env bash 12 | # 13 | 14 | function ensure_SUDO_ASKPASS () { 15 | echo -e " - Testing 'SUDO_ASKPASS' capability. ( SUDO_ASKPASS = >\${SUDO_ASKPASS}< )"; 16 | if [[ "${ALLOW_SUDO_ASKPASS_CREATION}" == "yes" ]]; then 17 | echo -e " - Configuration allows ASKPASS creation."; 18 | if [ "$([ -z ${SLAVE_HOST_PWD} ] && echo N)" == "N" ]; then 19 | echo -e " - Configuration provides no password."; 20 | return 1; 21 | else 22 | echo -e " - Found password in configuration file."; 23 | export SUDO_ASKPASS=${SLAV_WRK_DIR}/.supwd.sh; 24 | fi; 25 | else 26 | echo -e " - SUDO_ASKPASS creation denied in configuration"; 27 | return 1; 28 | fi; 29 | 30 | 31 | echo -e " - Trying uploaded ASK_PASS emitter."; 32 | sudo -A touch /etc/hostname; 33 | if [ \$? -ne 0 ]; then 34 | # echo -e "SUDO_ASKPASS ==> \${SUDO_ASKPASS}"; 35 | if [ ! -f \${SUDO_ASKPASS} ]; then 36 | echo -e "${pRED}\n\n* * * There is no file: '\${SUDO_ASKPASS}' * * * ${pDFLT}"; 37 | else 38 | echo -e "${pRED}\n\n* * * Are you sure the pwd from '\${SUDO_ASKPASS}' is correct? * * * ${pDFLT}"; 39 | fi 40 | return 1; 41 | fi; 42 | } 43 | 44 | function ensurePkgIsInstalled () { 45 | if dpkg-query -l \${PKG} >/dev/null; then 46 | echo -e " - Found \${PKG} already installed"; 47 | else 48 | sudo -A apt-get install \${PKG}; 49 | echo -e "\n - Installed \${PKG}" 50 | fi; 51 | } 52 | 53 | function stopERPNext () { 54 | echo -e "${pYELLOW} - Stopping ERPNext on Slave ... 
\n${pFAINT_BLUE}"; 55 | sudo -A supervisorctl stop all; 56 | echo -e "\n Stopped${pDFLT}\n"; 57 | } 58 | 59 | function relocateBackupFiles () { 60 | echo -e " - Copy backup files from '${SLAV_WRK_DIR}' to backup directory '\${DIR_BKP}'"; 61 | pushd ${SLAV_WRK_DIR} >/dev/null; 62 | BACKUP_NAME="\$(cat BACKUP.txt)"; 63 | echo -e " Copying ..."; 64 | echo -e " - 'BACKUP.txt'"; 65 | echo -e " - '\${BACKUP_NAME}'"; 66 | cp BACKUP.txt \${DIR_BKP} >/dev/null; 67 | cp \${BACKUP_NAME} \${DIR_BKP} >/dev/null; 68 | popd >/dev/null; 69 | } 70 | 71 | function restoreDatabase () { 72 | echo -e "\n - Ensuring MariaDB is running"; 73 | sudo -A systemctl restart mariadb; 74 | 75 | # echo -e " - Restoring backup ...\n\n"; 76 | pushd ${SLAVE_BENCH_PATH} >/dev/null; 77 | pushd ${BACKUP_RESTORE_DIR} >/dev/null; 78 | ./handleRestore.sh; 79 | # echo -e "${pYELLOW}----------------- Slave Tasks Curtailed --------------------------${pDFLT}"; 80 | # exit; 81 | popd >/dev/null; 82 | 83 | popd >/dev/null; 84 | } 85 | 86 | function configureDBforReplication () { 87 | echo -e " - Configuring MariaDB Slave for replication"; 88 | pushd ${MARIADB_CONFIG_DIR} >/dev/null; 89 | 90 | echo -e " - Patching '${MARIADB_CONFIG}' with '${SLAV_WRK_DIR}/${SLAV_PATCH_NAME}'"; 91 | sudo -A patch ${MARIADB_CONFIG} ${SLAV_WRK_DIR}/${SLAV_PATCH_NAME} >/dev/null; 92 | # sudo -A patch --dry-run ${MARIADB_CONFIG} ${SLAV_WRK_DIR}/${SLAV_PATCH_NAME}; 93 | 94 | echo -e "${pYELLOW} - Restarting MariaDB ${pDFLT}"; 95 | sudo -A systemctl restart mariadb; 96 | # sudo -A systemctl status mariadb; 97 | 98 | popd >/dev/null; 99 | } 100 | 101 | function installBackupAndRestoreTools () { 102 | echo -e " - Checking Frappe Bench directory location :: '${SLAVE_BENCH_PATH}'"; 103 | if [ -f ${SLAVE_BENCH_PATH}/Procfile ]; then 104 | echo -e " - Copying Backup and Restore handlers from '${SLAV_WRK_DIR}/${BACKUP_RESTORE_DIR}' to Frappe Bench directory"; 105 | pushd ${SLAV_WRK_DIR}/${BACKUP_RESTORE_DIR} >/dev/null; 106 | ln -fs Slave_${ENVARS} ${ENVARS}; 107 | popd >/dev/null; 108 | cp -r ${SLAV_WRK_DIR}/${BACKUP_RESTORE_DIR} ${SLAVE_BENCH_PATH} 109 | else 110 | echo -e "\n${pRED}* * * Specified Frappe Bench directory location, '${SLAVE_BENCH_PATH}', is NOT correct. Cannot continue .... * * * ${pDFLT}" 111 | exit 1; 112 | fi; 113 | } 114 | 115 | 116 | ensure_SUDO_ASKPASS; 117 | if [ \$? -eq 0 ]; then 118 | echo -e " - 'SUDO_ASKPASS' environment variable is correct."; 119 | else 120 | echo -e "\n${pRED}* * * 'SUDO_ASKPASS' environment variable or emitter is NOT correct. Cannot continue .... * * * ${pDFLT}" 121 | exit 1; 122 | fi; 123 | 124 | declare DIR_BKP="${SLAVE_BENCH_PATH}/BKP"; 125 | 126 | mkdir -p ${SLAV_RSLT_DIR}; 127 | 128 | # echo -e "${pYELLOW}----------------- Slave Tasks Curtailed Before Restore Database --------------------------${pDFLT} 129 | # ${SLAV_RSLT_DIR}"; 130 | # exit; 131 | 132 | 133 | 134 | echo -e "\n\n - Installing dependencies."; 135 | 136 | declare PKG="xmlstarlet"; 137 | ensurePkgIsInstalled; 138 | 139 | declare PKG="jq"; 140 | ensurePkgIsInstalled; 141 | 142 | 143 | installBackupAndRestoreTools; 144 | 145 | stopERPNext; 146 | 147 | relocateBackupFiles; 148 | 149 | restoreDatabase; 150 | 151 | configureDBforReplication; 152 | 153 | echo -e " - Enabling Slave connection to Master"; 154 | pushd ${SLAV_WRK_DIR} >/dev/null; 155 | # ls -la 156 | mysql -AX < ${MARIADB_SCRIPT} > ${SLAV_RSLT_DIR}/${SLAV_STATUS_RSLT}; 157 | popd >/dev/null; 158 | 159 | echo -e " - Purging temporary files from Slave. 
${pRED}*** SKIPPED ***${pDFLT}"; 160 | # rm -fr /dev/shm/S_*; 161 | 162 | 163 | echo -e "\nCompleted remote job : '${SLAV_WRK_DIR}/${SLAV_JOB}'.\n\n"; 164 | exit; 165 | 166 | EOFCTS 167 | 168 | chmod +x ${SLAV_WRK_DIR}/${SLAV_JOB}; 169 | } 170 | 171 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 172 | makeSlaveTasks; 173 | echo -e "???" 174 | 175 | ls -la ${TMP_DIR} 176 | else 177 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 178 | fi; 179 | -------------------------------------------------------------------------------- /slave/prepareSlave.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | 4 | export SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"; 5 | export SCRIPT_NAME=$( basename ${0#-} ); 6 | export THIS_SCRIPT=$( basename ${BASH_SOURCE} ) 7 | 8 | 9 | declare MAKE_SLAV_JOB="./slave/makeSlaveTasks.sh"; 10 | source ${MAKE_SLAV_JOB}; 11 | 12 | declare MAKE_SLAV_PATCH="./slave/makeSlaveMariaDBconfPatch.sh"; 13 | source ${MAKE_SLAV_PATCH}; 14 | 15 | declare MAKE_SLAV_MARIA_SQL="./slave/makeSlaveMariaDBScript.sh"; 16 | source ${MAKE_SLAV_MARIA_SQL}; 17 | 18 | declare SLAV_PATCH_NAME="50-server.cnf.patch"; 19 | declare SLAV_JOB="slaveTasks.sh"; 20 | declare MARIADB_SCRIPT="setUpSlave.sql"; 21 | 22 | function prepareSlave() { 23 | echo -e "Preparing slave ..."; 24 | 25 | declare SLAVE_NAME=${SLAVE_HOST_URL//./_} 26 | 27 | if [[ "${SKIP_UPLOADS_TO_SLAVE}" == "yes" ]]; then 28 | echo -e "\n${pGOLD}Skipping uploads to slave. (SKIP_UPLOADS_TO_SLAVE =='${SKIP_UPLOADS_TO_SLAVE}').${pDFLT}\n" 29 | else 30 | echo -e " - Extracting Master status values"; 31 | declare STATUS_FILE=$(xmlstarlet sel -t -v "//resultset/row/field[@name='File']" ${MSTR_RSLT_DIR}/${MSTR_STATUS_RSLT}); 32 | echo -e " - Log FILE :: ${STATUS_FILE}"; 33 | 34 | declare STATUS_POS=$(xmlstarlet sel -t -v "//resultset/row/field[@name='Position']" ${MSTR_RSLT_DIR}/${MSTR_STATUS_RSLT}); 35 | echo -e " - Log file POSITION :: ${STATUS_POS}"; 36 | 37 | # declare STATUS_DB=$(xmlstarlet sel -t -v "//resultset/row/field[@name='Binlog_Do_DB']" ${MSTR_RSLT_DIR}/${MSTR_STATUS_RSLT}); 38 | # echo -e " - Restrict to DATABASE :: ${STATUS_DB}"; 39 | 40 | 41 | echo -e " - Copying backup and restore handlers '${BACKUP_HANDLER}' to transfer directory '${TMP_DIR}/${SLAV_WRK}'"; 42 | cp -r ${BACKUP_RESTORE_PATH} ${TMP_DIR}/${SLAV_WRK}; 43 | 44 | makeSlaveTasks; 45 | 46 | pushd ${MSTR_RSLT_DIR} >/dev/null; 47 | declare MASTER_BACKUP=$(cat BACKUP.txt) 48 | echo -e " - Copy backup of Master ('${MASTER_BACKUP}') to Slave work directory."; 49 | if [[ "${UPLOAD_MASTER_BACKUP}" == "yes" ]]; then 50 | cp BACKUP.txt ${SLAV_WRK_DIR} &>/dev/null; 51 | cp ${MASTER_BACKUP} ${SLAV_WRK_DIR} &>/dev/null; 52 | else 53 | echo -e "\n${pGOLD}Skipping uploading Master backup file to slave! 
(UPLOAD_MASTER_BACKUP =='${UPLOAD_MASTER_BACKUP}').${pDFLT}\n" 54 | fi; 55 | popd >/dev/null; 56 | 57 | makeSlaveMariaDBScript; 58 | makeSlaveMariaDBconfPatch; 59 | 60 | pushd ${SLAV_WRK_DIR}/BaRe >/dev/null; 61 | echo "export MYPWD=\"${SLAVE_DB_ROOT_PWD}\";" >> Slave_envars.sh; 62 | popd >/dev/null; 63 | 64 | pushd ${TMP_DIR} >/dev/null 65 | echo -e " - Packaging Slave work files ('${SLAV_WRK_FILES}') from '${SLAV_WRK_DIR}' in '${TMP_DIR}' ..."; 66 | 67 | # echo -e "${pYELLOW}------------------------------ Curtailed ----------------------------------${pDFLT}"; 68 | # exit; 69 | 70 | tar zcvf ${SLAV_WRK_FILES} ${SLAV_WRK} >/dev/null; 71 | popd >/dev/null 72 | 73 | 74 | echo -e " - Purging existing Slave work files from '${THE_SLAVE}:${TMP_DIR}'" 75 | ssh ${THE_SLAVE} "rm -fr /dev/shm/S_rslt; rm -fr /dev/shm/S_work*; rm -fr /dev/shm/BKP;" >/dev/null; 76 | 77 | echo -e " - Uploading Slave work files '${SLAV_WRK_FILES}' to '${THE_SLAVE}:${TMP_DIR}'" 78 | scp ${TMP_DIR}/${SLAV_WRK_FILES} ${THE_SLAVE}:${TMP_DIR} >/dev/null; 79 | fi; 80 | 81 | echo -e " - Extracting content from uploaded file '${SLAV_WRK_FILES}' on Slave ..." 82 | ssh ${THE_SLAVE} tar zxvf ${TMP_DIR}/${SLAV_WRK_FILES} -C /dev/shm >/dev/null; 83 | 84 | echo -e " - Executing script '${SLAV_JOB}' on Slave" 85 | ssh ${THE_SLAVE} ${SLAV_WRK_DIR}/${SLAV_JOB}; 86 | echo -e " - Finished with Slave.\n" 87 | 88 | # echo -e "Downloading Master status file '${MASTER_STATUS_RSLT}' to '${TMP_DIR}'" 89 | # scp ${THE_MASTER}:${SLAV_WRK_DIR}/${MASTER_STATUS_RSLT} ${TMP_DIR} >/dev/null; 90 | # # cat ${TMP_DIR}/${MASTER_STATUS_RSLT}; 91 | } 92 | 93 | if [[ ${SCRIPT_NAME} = ${THIS_SCRIPT} ]] ; then 94 | prepareSlave; 95 | 96 | ls -la ${TMP_DIR} 97 | else 98 | echo " - Sourced '${THIS_SCRIPT}' from '${SCRIPT_NAME}'" 99 | fi; 100 | 101 | --------------------------------------------------------------------------------