├── tcp-closer
├── wiki-list-to-main
├── pipelines-stop-gracefully
├── snscrape-vk-user
├── transfer.notkiska.pw-upload
├── fos-ftp-upload
├── warc-size
├── wpull1-progress-monitor
├── ix.io-upload
├── run-every-five-minutes
├── snscrape-instagram-user
├── wiki-sections-sort
├── archivebot-high-memory
├── killcx-all-https
├── pipelines-monitor-tmux-wget-outcomes
├── tar-many-files-progress
├── pipelines-launch-in-tmux-windows
├── wpull2-extract-remaining
├── archivebot-youtube
├── archivebot-blogspot
├── uniqify
├── snscrape-twitter-hashtag
├── snscrape-twitter-user
├── wpull1-parallel-progress-monitor
├── wpull2-url-origin
├── iasha1check
├── curl-ua
├── bing-scrape
├── snscrape-extract
├── wget-spider-estimate-size
├── snscrape-facebook-user
├── ia-derive
├── snscrape-wiki-transfer-merge
├── snscrape-twitter-filter
├── archivebot-list-stuck-requests
├── youtube-filter-autogen-channels
├── mastodon-outdated
├── wiki-website-extract-social-media
├── snscrape-prepare-commands
├── archivebot-monitor-job-queue
├── README.md
├── format-size
├── ia-upload-progress
├── get-crx4chrome-urls
├── snscrape-upload
├── foolfuuka-search
├── snscrape-tmux
├── mastodon-enumerate-users
├── gofile.io-dl
├── reddit-pushshift-search
├── europarl-meps-collect
├── archivebot-jobid-calculation
├── social-media-extract-profile-link
├── transfer.notkiska.pw-check-ia
├── wiki-recursive-extract-normalise
├── website-extract-social-media
├── warc-peek
├── kill-wpull-connections
├── s3-bucket-list
├── wpull2-log-extract-errors
├── deb-repo-urls
├── url-normalise
├── archivebot-jobs
├── warc-tiny
└── LICENSE
/tcp-closer:
--------------------------------------------------------------------------------
1 | sudo tcp-closer --dport 443 --idle_time 21601000 --last_recv_limit 43200000 --interval 300
2 | 
--------------------------------------------------------------------------------
/wiki-list-to-main:
--------------------------------------------------------------------------------
1 | grep '^==' | awk '{print; print ""; print "";}' | sed 's,bot:=\+ ,bot:,; s,=\+ -->,-->,'
2 | 
--------------------------------------------------------------------------------
/pipelines-stop-gracefully:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Stop all pipelines running on the current machine gracefully
3 | # DO NOT USE FOR ARCHIVEBOT
4 | pkill -INT pipeline
5 | 
--------------------------------------------------------------------------------
/snscrape-vk-user:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | user="${1,,}"
3 | shift
4 | {
5 | echo "https://vk.com/${user}"
6 | snscrape "$@" vkontakte-user "${user}"
7 | } > "vk-@${user}"
8 | 
--------------------------------------------------------------------------------
/transfer.notkiska.pw-upload:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | curl -sv --upload-file "$1" https://transfer.notkiska.pw/ 2> >(grep -Po -m 1 '< X-Url-\KDelete: .*' >&2) | sed 's,%40,@,g'; echo
3 | 
--------------------------------------------------------------------------------
/fos-ftp-upload:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Upload something to FOS via FTP
3 | { echo 'user username password'; echo 'cd directory'; echo 'mput something-00???.warc.gz'; } | ftp -ni fos.textfiles.com
4 | 
--------------------------------------------------------------------------------
/warc-size:
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Total size of all WARCs in the current directory (or subdirectories) 3 | find -name '*.warc.gz' -printf '%s\n' | "$(cd "$(dirname "$0")"; pwd -P)/format-size" 4 | -------------------------------------------------------------------------------- /wpull1-progress-monitor: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sqlite3 *.db 'SELECT status, COUNT(id) FROM urls GROUP BY status' 3 | sqlite3 *.db 'SELECT status_code, COUNT(id) FROM urls GROUP BY status_code' 4 | -------------------------------------------------------------------------------- /ix.io-upload: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | localfile="$1" 3 | remotefile="${2:-${1}}" 4 | remotefile="${remotefile##*/}" 5 | echo "$(curl -sF "f:1=@${localfile}" http://ix.io/)+/${remotefile}" | sed 's,#,%23,g' 6 | -------------------------------------------------------------------------------- /run-every-five-minutes: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Run a command every full five minutes in a terminal window 3 | while :; do date; "$@"; echo; sleep $(echo "(5 - $(date '+%M') % 5) * 60 - $(date +'%S.%N')" | bc); done 4 | -------------------------------------------------------------------------------- /snscrape-instagram-user: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | user="${1,,}" 3 | shift 4 | { 5 | echo "https://www.instagram.com/${user}/" 6 | snscrape "$@" --format '{dirtyUrl}' instagram-user "${user}" 7 | } > "instagram-@${user}" 8 | -------------------------------------------------------------------------------- /wiki-sections-sort: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Sort == level-two wiki sections == alphabetically while keeping all section contents (including subsections) in the given order 3 | tr '\n' '\r' | sed 's,\r== ,\n== ,g' | sort | tr '\r' '\n' 4 | -------------------------------------------------------------------------------- /archivebot-high-memory: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Find high memory usage ArchiveBot jobs 3 | { echo "PID RSS JOBID"; ps -C wpull --format 'pid,rss,cmd' --no-headers | sed 's,^\s*,,; s,^\([0-9]\+ [0-9]\+ \).*/data/[^/]\+/\([0-9a-z]\+\)/wpull\.log.*$,\1\2,'; } | column -t 4 | -------------------------------------------------------------------------------- /killcx-all-https: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Call killcx for all HTTPS connections established by a process 3 | lsof -np $pid | grep TCP | grep -Po -- '->\K[^:]+:https?(?= )' | sort | sed 's,:https,:443,; s,:http,:80,' | while read -r con; do echo "=========== ${con}"; ./killcx "${con}"; done 4 | -------------------------------------------------------------------------------- /pipelines-monitor-tmux-wget-outcomes: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Monitor how the wget processes went recently from pipelines running in tmux windows 3 | { for i in {1..6}; do tmux capture-pane -t session:${i} -pS -1000; done; } | grep -P 'WgetDownload' | grep -Po '^(Finished|Failed|Retrying)' | sort | uniq -c 4 | 
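A worked example of the sleep arithmetic in run-every-five-minutes above (an illustration, not a file in this repository): if the loop finishes at 12:03:20.5, the minute field is 3 and the seconds field is 20.5, so (5 - 3 % 5) * 60 - 20.5 = 99.5, and sleeping 99.5 seconds wakes the loop at exactly 12:05:00 for the next run. The same value can be computed on its own with, for instance:

date '+%M %S.%N' | awk '{ print (5 - $1 % 5) * 60 - $2 }'   # seconds until the next full five-minute mark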
-------------------------------------------------------------------------------- /tar-many-files-progress: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Tar a directory with many files with a progress report 3 | time stdbuf -oL tar -cvzf foo.tar.gz directory | awk 'BEGIN{count=0;total=1000000}{count+=1;if (count % 100 == 0) { printf "\r%d of %d done", count, total; fflush(); }}END{printf "\r%d of %d done\n", count, total}' 4 | -------------------------------------------------------------------------------- /pipelines-launch-in-tmux-windows: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Launch pipeline instances, each using a different IP and a separate directory, in windows in tmux session 'instances' 3 | for ip in ...; do tmux new-window -t instances "bash -c 'cd ~/whatever-grab.${ip}; run-pipeline3 pipeline.py --disable-web-server --concurrent 1 --context-value bind_address=${ip} YOURNICKHERE; exec bash'"; done 4 | -------------------------------------------------------------------------------- /wpull2-extract-remaining: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: wpull2-extract-remaining FILENAME 3 | # FILENAME points to a wpull 2.x SQLite DB 4 | # Prints all remaining URLs from the DB on stdout 5 | for status in in_progress todo error 6 | do 7 | sqlite3 "$1" 'SELECT url_strings.url FROM queued_urls JOIN url_strings ON url_string_id = url_strings.id WHERE status = "'$status'"' 8 | done 9 | -------------------------------------------------------------------------------- /archivebot-youtube: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | for url in "$@" 3 | do 4 | echo "!ao ${url}" 5 | echo "!ao ${url}/videos" 6 | echo "!ao ${url}?disable_polymer=1" 7 | echo "!ao ${url}/videos?disable_polymer=1" 8 | echo "chromebot: a ${url}" 9 | echo "chromebot: a ${url}/videos" 10 | echo "chromebot: a ${url}?disable_polymer=1" 11 | echo "chromebot: a ${url}/videos?disable_polymer=1" 12 | done 13 | -------------------------------------------------------------------------------- /archivebot-blogspot: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | scriptpath="$(cd "$(dirname "$0")"; pwd -P)" 3 | 4 | if [[ "$1" =~ ^http ]] 5 | then 6 | url="$1" 7 | url="${url/http:/https:}" 8 | pattern="${url//./\\.}" 9 | pattern="${pattern/https:/https?:}" 10 | else 11 | url="https://${1}.blogspot.com/" 12 | pattern="https?://${url//./\\.}/" 13 | fi 14 | 15 | echo "!a ${url}" 16 | echo "!ig $("${scriptpath}/archivebot-jobid-calculation" "${url}") ^${pattern}search\?(.*&)?reverse-paginate=true(&|$)" 17 | -------------------------------------------------------------------------------- /uniqify: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [[ "$1" != "-c" ]] 3 | then 4 | # Without count (preserving order, printing the first appearance) 5 | # In this case, Perl is *much* faster than AWK. 6 | perl -ne 'print if ! $a{$_}++' 7 | #awk '!seen[$0]++' 8 | else 9 | # With count (order undefined) 10 | # Here, AWK is significantly faster. 
11 | #perl -e 'while (<>) { $a{$_}++; } foreach $key (keys %a) { print "$a{$key} $key"; }' 12 | awk '{ tot[$0]++ } END { for (i in tot) print tot[i],i }' 13 | fi 14 | -------------------------------------------------------------------------------- /snscrape-twitter-hashtag: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | hashtag="$1" 3 | shift 4 | { 5 | echo "https://twitter.com/hashtag/${hashtag}" 6 | echo "https://twitter.com/hashtag/${hashtag}?src=hash" 7 | echo "https://twitter.com/hashtag/${hashtag}?f=tweets&vertical=default" 8 | echo "https://twitter.com/hashtag/${hashtag}?f=tweets&vertical=default&src=hash" 9 | snscrape --format '{url} {tcooutlinksss} {outlinksss}' "$@" twitter-hashtag "${hashtag}" | tr ' ' '\n' | grep -v '^$' | awk '!seen[$0]++' 10 | } > "twitter-#${hashtag}" 11 | -------------------------------------------------------------------------------- /snscrape-twitter-user: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | origUser="$1" 3 | shift 4 | user="$(snscrape --max-results 1 twitter-user "${origUser}" | grep -Po '^https?://twitter\.com/\K[^/]+')" 5 | if [[ "${user}" ]] 6 | then 7 | if [[ "${user}" != "${origUser}" ]]; then echo "Username fix: ${origUser} -> ${user}" >&2; fi 8 | { 9 | echo "https://twitter.com/${user}" 10 | snscrape --format '{url} {tcooutlinksss} {outlinksss}' "$@" twitter-user "${user}" 11 | } | tr ' ' '\n' | grep -v '^$' | awk '!seen[$0]++' > "twitter-@${user}" 12 | fi 13 | -------------------------------------------------------------------------------- /wpull1-parallel-progress-monitor: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Monitor a grab split up over multiple wpull 1.x processes (e.g. a forum where you split everything up by thread ID) 3 | sqlite3 < <(declare -i i=0; for f in /paths/to/wpull.db; do echo "ATTACH DATABASE '${f}' AS db${i};"; i+=1; done; declare -i n=i; echo -n "SELECT status, SUM(count) FROM ("; i=0; while [[ ${i} -lt ${n} ]]; do if [[ ${i} -ne 0 ]]; then echo -n "UNION ALL "; fi; echo "SELECT status, COUNT(id) AS count FROM db${i}.urls GROUP BY status "; i+=1; done; echo ") GROUP BY status;") 4 | -------------------------------------------------------------------------------- /wpull2-url-origin: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Trace back where a URL was discovered, all the way back to the root 3 | url="$1"; curId=$(sqlite3 wpull.db 'SELECT id FROM url_strings WHERE url = "'"${url}"'"'); while :; do sqlite3 wpull.db 'SELECT queued_urls.*, url_strings.* FROM queued_urls JOIN url_strings ON queued_urls.url_string_id = url_strings.id WHERE url_strings.id = '$curId; IFS='|' read -r curId level < <(sqlite3 wpull.db 'SELECT parent_url_string_id, level FROM queued_urls WHERE url_string_id = '$curId); if [[ ${level} -eq 0 ]]; then break; fi done 4 | -------------------------------------------------------------------------------- /iasha1check: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Fetch the SHA-1 hashes from an IA item and ensure that they match the local files (i.e. 
that the upload was successful) 3 | identifier="$1"; escapedIdentifier="$(sed 's/[.[\*^$()+?{|]/\\&/g' <<<"${identifier}")"; sha1sum -c <(curl -sL "https://archive.org/download/${identifier}/${identifier}_files.xml" | tr -d '\n' | grep -Po '' | grep 'source="original".*' | sed 's,^.*name=",,; s,".*, ,; s,.*$,,' | grep -Pv "^${escapedIdentifier}"'(\.cdx\.(gz|idx)|_meta\.(sqlite|xml)) ' | awk '{ print $2 " " $1 }'); 4 | -------------------------------------------------------------------------------- /curl-ua: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | declare -A uas 3 | uas["archivebot"]='ArchiveTeam ArchiveBot/20190427.01 (wpull 2.0.3) and not Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36' 4 | uas["firefox"]='Mozilla/5.0 (Windows NT 6.1; rv:60.0) Gecko/20100101 Firefox/60.0' 5 | 6 | if [[ $# -eq 0 || "$1" == '-h' || "$1" == '--help' || -z "${uas[$1]+exists}" ]] 7 | then 8 | echo "Usage: curl-ua NAME ARGS" >&2 9 | echo "" >&2 10 | echo " NAME: name of a UA alias" >&2 11 | echo " ARGS: args for curl" >&2 12 | exit 1 13 | fi 14 | 15 | ua="${uas[$1]}" 16 | shift 17 | 18 | curl -A "${ua}" "$@" 19 | -------------------------------------------------------------------------------- /bing-scrape: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | q="$1" 3 | declare -i max=10000 4 | if [[ $# -eq 2 ]]; then max=$2; fi 5 | { 6 | declare -i first=1 7 | queryStr="q=${q}" 8 | while [[ ${first} -lt ${max} ]] 9 | do 10 | echo "http://www.bing.com/search?${queryStr}" >&2 11 | curl -s -A 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0' "http://www.bing.com/search?${queryStr}" 12 | first+=10 13 | queryStr="q=${q}&go=Search&qs=ds&first=${first}&FORM=PORE" 14 | sleep 2 15 | done 16 | } | grep -Po '
  • .*?
  • ' | grep -Po 'href="\Khttps?://(?!www\.microsofttranslator\.com/|view\.officeapps\.live\.com/)[^"]+' | awk '!seen[$0]++' 17 | -------------------------------------------------------------------------------- /snscrape-extract: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Extract from stdin social media usernames suitable for snscrape, grouped by service 3 | grep -Po '(https?://www\.\Kfacebook\.com/(?!pages/)\S+(?=/)|https?://www\.\Kinstagram\.com/\S+(?=/)|https?://\Ktwitter\.com/\S+)' | 4 | sed 's,\.com/, ,; s,^twitter hashtag/,twitter-hashtag ,; s,twitter ,twitter-user ,' | 5 | sort | 6 | awk ' 7 | BEGIN { 8 | prev1=""; 9 | } 10 | 11 | ($1 != prev1) { 12 | if (prev1 != "") { 13 | print ""; 14 | } 15 | printf "%s:", $1; 16 | prev1 = $1; 17 | } 18 | 19 | ($1 == prev1) { 20 | printf " %s", $2; 21 | } 22 | 23 | END { 24 | print ""; 25 | }' 26 | -------------------------------------------------------------------------------- /wget-spider-estimate-size: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Estimate size of a website through wget spider 3 | # Note: this will miss anything where the server doesn't advertise the size. 4 | wget --recursive --level inf --spider --no-directories --output-file=wget.log --no-parent --reject-regex '/\?C=[NMSD];O=[AD]$' "$1" 5 | grep -Po ' \Khttps?://.*$|Length: \K\d+(?= )' wget.log | sed 's,^\(.*https\?://.*$\),url \1,; s,^\([0-9]\+\)$,length \1,' | awk 'BEGIN {url = ""; len = 0; totalsize = 0; } { if ($1 == "url") { if ($2 != url) { totalsize += len; url = $2; len = 0; } } else { if ($1 == "length") { len = $2; } } } END { totalsize += len; printf "%.0f\n", totalsize; }' 6 | -------------------------------------------------------------------------------- /snscrape-facebook-user: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | origUser="$1" 3 | shift 4 | user="$(curl -s -A 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36' -H 'Accept-Language: en-US,en;q=0.5' "https://www.facebook.com/${origUser}/" | grep -Po ']*(?<=\s)data-key\s*=\s*"tab_home".*?' | grep -Po ']*(?<=\s)href="/\K[^/]+')" 5 | if [[ "${user}" ]] 6 | then 7 | if [[ "${user}" != "${origUser}" ]]; then echo "Username fix: ${origUser} -> ${user}" >&2; fi 8 | { 9 | echo "https://www.facebook.com/${user}/" 10 | snscrape "$@" facebook-user "${user}" 11 | } > "facebook-@${user}" 12 | fi 13 | -------------------------------------------------------------------------------- /ia-derive: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | identifier="$1" 3 | if [[ -z "${identifier}" ]]; then exit 1; fi 4 | 5 | cookies="$(grep '^logged-in-' ~/.config/ia.ini | sed 's, = ,=,' | tr '\n' ';' | sed 's,;$,,; s,;,; ,')" 6 | 7 | result="$(curl -s -H "Cookie: ${cookies}" "https://archive.org/manage/${identifier}" --data "identifier=${identifier}&admin=derive")" 8 | 9 | main="$(tr -d '\n' <<<"${result}" | grep -Po '')" 10 | catalogueUrl="$(grep -Po 'task ID: ]\+>,,g' <<< "${main}")" 16 | fi 17 | -------------------------------------------------------------------------------- /snscrape-wiki-transfer-merge: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [[ ! -e wiki || ! 
-e transfer ]] 3 | then 4 | echo "Error: ./wiki or ./transfer missing" 5 | exit 1 6 | fi 7 | 8 | while read -r line 9 | do 10 | if [[ "${line}" == *facebook* ]] 11 | then 12 | l="${line%/}" 13 | search="facebook.*${l##*/}" 14 | elif [[ "${line}" == *twitter* ]] 15 | then 16 | search="twitter.*${line##*/}" 17 | elif [[ "${line}" == *instagram* ]] 18 | then 19 | l="${line%/}" 20 | search="instagram.*${l##*/}" 21 | else 22 | search= 23 | fi 24 | 25 | if [[ "${search}" ]] 26 | then 27 | if grep -q "${search}" transfer 28 | then 29 | echo "$(grep "${search}" transfer) | note = ${line}" 30 | continue 31 | fi 32 | fi 33 | echo "${line}" 34 | done "${f}-fixed"; done 5 | for f in *-fixed; do { grep -vF '/status/' $f; grep -F '/status/' $f | sort -t'/' -k6,6n | tac; } > "${f}-sorted"; done 6 | for f in *-fixed-sorted; do mv $f ${f/-fixed-sorted/-filtered}; done 7 | 8 | # sort -r should work, but for some reason it doesn't, hence the tac... 9 | # There's certainly a cleaner way which doesn't involve sorting and then restoring the inverse chronological order. 10 | -------------------------------------------------------------------------------- /archivebot-list-stuck-requests: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # For each ArchiveBot job running on the machine, list requests that are stuck, i.e. older than 6 hours 3 | ps -C wpull --format 'pid,cmd' --no-headers | sed 's,^\s*,,; s,\s*/usr/bin/python3.*/data/[^/]\+/\([0-9a-z]\+\)/wpull\.log.*$, \1,' | while read -r pid jobid; do echo "$jobid (PID $pid)"; find /proc/${pid}/fd -lname '*/tmp-wpull-warcsesreq-*' -printf "%l\0" | xargs -0 -r bash -c 'find "$@" -mmin +360 -printf "%TY-%Tm-%Td %TH:%TM:%TS %TZ\0%p\0"' bash 2> >(grep -v ': No such file or directory$' >&2) | while IFS= read -r -d '' mtime; IFS= read -r -d '' filename; do grep ^Host "${filename}" | tr -d '\r' | while read -r outline; do printf '%s %q %s\n' "${mtime}" "${filename}" "${outline}"; done; done | sort | sed 's,\.[0-9]\+,,' | grep ^ || echo 'None'; echo; done 4 | -------------------------------------------------------------------------------- /youtube-filter-autogen-channels: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Takes a list of YouTube URLs on stdin. URLs for autogenerated channels are written to FD 3, all other URLs go to stdout. 
3 | while read -r url 4 | do 5 | if [[ "${url}" == */channel/* ]] 6 | then 7 | header="$(curl -4sL -A 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36' -H 'Accept-Language: en-US,en;q=0.5' "${url}?disable_polymer=1" | \ 8 | tr -d '\n' | \ 9 | grep -Po ']*\s)?class="([^"]* )?channel-header( [^"]*)?".*?]*\s)?id="channel-subheader"')" 10 | if grep -qP ']*\s)?class="([^"]+ )?yt-channel-title-autogenerated[ "]' <<<"${header}" 11 | then 12 | echo "${url}" >&3 13 | continue 14 | fi 15 | fi 16 | echo "${url}" 17 | done 18 | -------------------------------------------------------------------------------- /mastodon-outdated: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | beforeversion="$1" 3 | curl -s 'https://instances.social/list.json?q%5Busers%5D=&strict=true' | python3 -c 'import json,sys'$'\n''d = json.loads(sys.stdin.read())'$'\n''for i in d["instances"]:'$'\n'' print("{} {} {} {}".format(i["name"], i["version"] or "None", i["users"], i["statuses"]))' | grep -v ' None ' | sort -k2,2 | awk '$2 < "'"${beforeversion}"'"' | awk '{print $1 " | data-sort-value=\"" $1 "\" | [http://" $1 "/ " $1 "] || " $2 " || " $3 " || " $4 }' | while read -r domain line; do content="$(curl -sL "http://${domain}/about")"; if grep -qF '//github.com/tootsuite/mastodon' <<<"${content}"; then mastodon="{{green|Yes}}"; elif grep -qF 'mastodon' <<<"${content}"; then mastodon="{{orange|Possibly}}"; else mastodon="{{red|No?}}"; fi; echo "${line} || ${mastodon}"; echo '|-'; done 4 | -------------------------------------------------------------------------------- /wiki-website-extract-social-media: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Reads a wiki page in the new-style viewer format from stdin, runs everything that looks like a website through website-extract-social-media, and formats the output accordingly 3 | 4 | scriptpath="$(cd "$(dirname "$0")"; pwd -P)" 5 | while read -r line 6 | do 7 | echo "${line}" 8 | if [[ "${line}" == '* http://'* || "${line}" == '* https://'* ]] 9 | then 10 | url="${line:2}" 11 | if [[ "${url}" == *' | '* ]] 12 | then 13 | url="${url%% | *}" 14 | fi 15 | if ! 
grep -Pq '//(www\.)?(facebook\.com|flickr\.com|instagram\.com|twitter\.com|vk\.com|youtube\.com|youtu\.be)/' <<<"${url}"
16 | then
17 | "${scriptpath}/website-extract-social-media" "${url}" < <(:) | sed 's,^\(https\?://\(www\.\)\?\(.*\)\)$,\3 \1,' | sort | awk '{ print $2 }' | sed 's,^,* ,'
18 | fi
19 | fi
20 | done
21 | 
--------------------------------------------------------------------------------
/snscrape-prepare-commands:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | scriptpath="$(cd "$(dirname "$0")"; pwd -P)"
3 | 
4 | "${scriptpath}/snscrape-extract" | while read -r service line
5 | do
6 | if [[ "${service}" == "facebook:" ]]
7 | then
8 | echo "printf '%s\n' ${line} | parallel -n 1 -k -j 4 $(printf "%q" "${scriptpath}")/snscrape-facebook-user"
9 | elif [[ "${service}" == "twitter-hashtag:" ]]
10 | then
11 | echo "printf '%s\n' ${line} | parallel -n 1 -k -j 3 $(printf "%q" "${scriptpath}")/snscrape-twitter-hashtag"
12 | elif [[ "${service}" == "twitter-user:" ]]
13 | then
14 | echo "printf '%s\n' ${line} | parallel -n 1 -k -j 3 $(printf "%q" "${scriptpath}")/snscrape-twitter-user"
15 | elif [[ "${service}" == "instagram:" ]]
16 | then
17 | echo "for user in ${line}; do $(printf "%q" "${scriptpath}")/snscrape-${service:0:-1}-user "'"${user}"; done'
18 | fi
19 | done
20 | 
--------------------------------------------------------------------------------
/archivebot-monitor-job-queue:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Micro-optimisation FTW
3 | # Sometimes, sites have ridiculous rate limits, but there are also a lot of other URLs in the job's queue.
4 | # This command helps figure out when the delay needs to be adjusted so that off-site stuff can be processed quickly while the on-site things are retrieved slowly in accordance with the rate limit.
5 | { echo 'DIFF POS ID ID PARENT ROOT STATUS TRY LEVEL ILEVEL TYPE PRIO POST SCODE FN ID URL'; sqlite3 wpull.db 'SELECT queued_urls.*, url_strings.* FROM queued_urls JOIN url_strings ON queued_urls.url_string_id = url_strings.id WHERE queued_urls.status = "todo"' | grep -nF '//ridiculouslyratelimitedsite.example.net/' | grep -v -e '/most' -e '\?important' -e '&ignore' -e 'patterns$' | awk -F':' 'BEGIN{prev=0} {print ($1 - prev) " " $0; prev=$1}' | sed 's,:, ,; s,|, ,g'; } | head -1000 | column -nt | less -S
6 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Over the past few years, I've written and accumulated a number of useful little things to help with archival-related tasks. This repository collects them. I hope someone finds some of them useful.
2 | 
3 | ## License (applies to all programs in this repository)
4 | This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
5 | 
6 | This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
7 | 
8 | You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
9 | -------------------------------------------------------------------------------- /format-size: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Format size in bytes into a readable string of the form "1.23 KiB"; reads from stdin and arguments and can take multiple numbers separated by any amount of whitespace including newlines 3 | { 4 | if [ ! -t 0 ]; then cat; fi 5 | echo "$@" 6 | } | 7 | tr '\t ' '\n' | 8 | awk ' 9 | BEGIN { 10 | units[0] = "B"; 11 | units[1] = "KiB"; 12 | units[2] = "MiB"; 13 | units[3] = "GiB"; 14 | units[4] = "TiB"; 15 | units[5] = "PiB"; 16 | } 17 | 18 | { 19 | size += $1; 20 | } 21 | 22 | END { 23 | if (size > 0) { 24 | magnitude = int(log(size) / log(1024)); 25 | if (magnitude > 5) { 26 | magnitude = 5; 27 | } 28 | } else { 29 | magnitude = 0; 30 | } 31 | if (magnitude > 0) { 32 | sizeformat = "%.2f"; 33 | } else { 34 | sizeformat = "%d"; 35 | } 36 | printf sizeformat " %s\n", size / (1024 ^ magnitude), units[magnitude]; 37 | }' 38 | -------------------------------------------------------------------------------- /ia-upload-progress: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Check how much of an upload made it into the item yet (vs. how much is stuck in the S3 queue if you also watch the upload process) 3 | # Usage: ia-upload-progress IA_IDENTIFIER [FILES] 4 | # If no files are specified, all WARCs in the current directory are used. 5 | identifier=$1 6 | shift 7 | files=("$@") 8 | if [[ ${#files[@]} -eq 0 ]] 9 | then 10 | files=(*.warc.gz) 11 | fi 12 | scriptpath="$(cd "$(dirname "$0")"; pwd -P)" 13 | 14 | uploaded=$(ia metadata "${identifier}" | grep -Po '("source"\s*:\s*"original",[^}]*"size"\s*:\s*"\K\d+|"size"\s*:\s*"\K\d+(?="\s*,[^}]*"source"\s*:\s*"original"))' | awk '{sum+=$1} END {printf "%.0f\n", sum}') 15 | localSize=$(du -bc "${files[@]}" | tail -1 | cut -f1) 16 | echo "Uploaded $("${scriptpath}/format-size" <<<"${uploaded}") of $("${scriptpath}/format-size" <<<"${localSize}") ($(awk '{ printf "%.1f", 100 * ($1 / $2); }' <<<"${uploaded} ${localSize}") %)" 17 | -------------------------------------------------------------------------------- /get-crx4chrome-urls: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Generate a list of relevant crx4chrome.com URLs for an extension (e.g. for feeding it into ArchiveBot) 3 | # Call passing the URL to an extension page, e.g. https://www.crx4chrome.com/extensions/eebpioaailbjojmdbmlpomfgijnlcemk/ 4 | url="$1"; echo "${url}"; historyUrl="https://www.crx4chrome.com$(curl -s "${url}" | grep -Po 'href="\K/history/[^"]+' | uniq)"; if [[ $(wc -l <<<"${historyUrl}") -ne 1 ]]; then echo "Not exactly one history URL" >&2; return; fi; echo "${historyUrl}"; curl -s "${historyUrl}" | tr -d '\n' | grep -Po '
      .*?
    ' | grep -Po 'href="\K/crx/[^"]+' | while read -r versionUrl; do versionUrl="https://www.crx4chrome.com${versionUrl}"; echo "${versionUrl}"; curl -s "${versionUrl}" | grep -Po 'href="\Khttps://www.crx4chrome.com/go.php\?[^"]+' | while read -r downloadUrl; do echo "${downloadUrl}"; grep -Po '[?&]l=\K[^&]+' <<< "${downloadUrl}" | perl -pe 's/\%(\w\w)/chr hex $1/ge'; done; done; 5 | -------------------------------------------------------------------------------- /snscrape-upload: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | scriptpath="$(cd "$(dirname "$0")"; pwd -P)" 3 | 4 | if [[ -e transfer ]] 5 | then 6 | echo "Error: ./transfer exists" >&2 7 | exit 1 8 | fi 9 | 10 | if ! python3 --version &>/dev/null 11 | then 12 | echo "Error: python3 not found" >&2 13 | exit 1 14 | fi 15 | 16 | # ArchiveBot 17 | for f in "$@" 18 | do 19 | upurl="$("${scriptpath}/transfer.notkiska.pw-upload" "${f}")" 20 | echo "${upurl}" >&3 21 | if [[ "${f}" == facebook-* ]] 22 | then 23 | echo "!ao < ${upurl}" 24 | elif [[ "${f}" == twitter-* ]] 25 | then 26 | echo "!ao < ${upurl} --concurrency 6 --delay 0" 27 | elif [[ "${f}" == instagram-* ]] 28 | then 29 | echo "!a < ${upurl}" 30 | echo "!ig $("${scriptpath}/archivebot-jobid-calculation" "${upurl}") ^https?://www.instagram.com/.*[?&]hl=" 31 | else 32 | echo "!ao < ${upurl}" 33 | fi 34 | done 3>transfer 35 | 36 | # chromebot 37 | for f in "$@" 38 | do 39 | if [[ "${f}" == facebook-@* || "${f}" == twitter-@* ]] 40 | then 41 | head -1 "${f}" 42 | elif [[ "${f}" == twitter-#* ]] 43 | then 44 | head -4 "${f}" 45 | fi 46 | done | sed 's,^,chromebot: a ,' 47 | 48 | echo "Wrote ./transfer, you can run snscrape-wiki-transfer-merge now if ./wiki exists." >&2 49 | -------------------------------------------------------------------------------- /foolfuuka-search: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Search 4chan archives based on FoolFuuka 3 | # Output: one post per line in HTML, prefixed with the post ID 4 | # Note that posts can appear multiple times in the output in some cases. You're encouraged to filter based on the post ID. 5 | domain="$1" 6 | q="$2" 7 | end= 8 | nextend=2038-01-19 9 | while : 10 | do 11 | end="${nextend}" 12 | content=$(curl -s "https://${domain}/_/search/text/${q}/end/${end}/page/${page}/") 13 | 14 | declare -i page=1 15 | while [[ ${page} -lt 201 ]] 16 | do 17 | echo "Grabbing https://${domain}/_/search/text/${q}/end/${end}/page/${page}/" >&2 18 | content=$(curl -s "https://${domain}/_/search/text/${q}/end/${end}/page/${page}/") 19 | tr -d '\n' <<<"${content}" | grep -Po '
    ' | grep -q 'No results found' 27 | then 28 | echo "Error" >&2 29 | break 30 | else 31 | break 2 32 | fi 33 | fi 34 | page+=1 35 | done 36 | done 37 | -------------------------------------------------------------------------------- /snscrape-tmux: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | mkdir -p /tmp/snscrape 3 | echo 'snscrape-dev' > /tmp/snscrape/.python-version 4 | scriptpath="$(cd "$(dirname "$0")"; pwd -P)" 5 | export PATH="${scriptpath}:${PATH}" 6 | cd /tmp/snscrape 7 | tmux new -s snscrape \ 8 | -n "normalise" 'printf "\033]2;%s\033\\" "normalise"; bash' \; \ 9 | send-keys -t 'snscrape:normalise' 'pyenv_setup' Enter '# xclip -selection c -o | snscrape-normalise >wiki' Enter '# xclip -selection c = consecutive404threshold: 32 | break 33 | if int(response.getheader('X-RateLimit-Remaining')) < 10: 34 | time.sleep(60) #TODO sleep until X-RateLimit-Reset 35 | finally: 36 | connection.close() 37 | -------------------------------------------------------------------------------- /gofile.io-dl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -f # No globbing 3 | set -C # No clobbering 4 | 5 | if [[ $# -ne 1 || ( ! "$1" =~ ^https://gofile\.io/d/[0-9a-zA-Z]+$ && ! "$1" =~ ^https://gofile\.io/\?c=[0-9a-zA-Z]+$ ) ]] 6 | then 7 | echo 'Usage: gofile.io-dl URL' >&2 8 | exit 1 9 | fi 10 | 11 | url="$1" 12 | if [[ "${url}" == *'?c='* ]] 13 | then 14 | code="${url##*=}" 15 | else 16 | code="${url##*/}" 17 | fi 18 | 19 | server="$(curl -s "https://apiv2.gofile.io/getServer?c=${code}" | python3 -c 'import json,sys; print(json.loads(sys.stdin.read().strip())["data"]["server"])')" 20 | if [[ ! "${server}" =~ ^srv-file[0-9]+$ ]] 21 | then 22 | echo "Unexpected server value: ${server}" >&2 23 | exit 1 24 | fi 25 | 26 | curl -s "https://${server}.gofile.io/getUpload?c=${code}" | python3 -c 'import json,sys; obj = json.loads(sys.stdin.read().strip())'$'\n''for f in obj["data"]["files"].values():'$'\n'' print(f["size"], f["md5"], f["name"], f["link"])' | \ 27 | while read -r size md5 name link 28 | do 29 | if [[ "${name}" == *'/'* || "${link}" == *' '* || "${link}" != "https://${server}.gofile.io/download/"* ]] 30 | then 31 | echo 'Cannot download file:' >&2 32 | echo "name: ${name}" >&2 33 | echo "link: ${link}" >&2 34 | echo "size: ${size}" >&2 35 | echo "md5: ${md5}" >&2 36 | exit 1 37 | fi 38 | 39 | if [[ -e "./${name}" ]] 40 | then 41 | echo "./${name} already exists" >&2 42 | exit 1 43 | fi 44 | 45 | echo "Downloading ${link} to ./${name}..." 
>&2 46 | curl "${link}" >"./${name}" 47 | 48 | declare -i actualSize=$(stat -c %s "./${name}") 49 | if [[ ${actualSize} -ne ${size} ]] 50 | then 51 | echo "Size mismatch: expected ${size}, got ${actualSize}" >&2 52 | exit 1 53 | fi 54 | 55 | md5sum -c <<<"${md5} ./${name}" 56 | done 57 | -------------------------------------------------------------------------------- /reddit-pushshift-search: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Search all submissions or comments on Reddit for a search term 3 | # Usage: $0 (submission|comment) QUERY 4 | # Output: 5 | # For submissions: post date timestamp, permalink, url or None if it's a selfpost, body in Python-repr format or None if it's a title-only selfpost 6 | # For comments: post date timestamp, permalink, content in Python-repr format 7 | # For comments before 2017-10-24, the Pushshift API doesn't provide a permalink, so that field is filled with "comment_id/parent_id/username" instead. 8 | # Unfortunately, that means that it may be hard to find those comments on Reddit (unless the parent is a thread, i.e. it's a top-level comment). 9 | mode="$1" 10 | q="$2" 11 | before=2147483647 12 | pipe=$(mktemp -u); mkfifo "${pipe}"; exec 3<>"${pipe}"; rm "${pipe}"; unset pipe 13 | while : 14 | do 15 | { 16 | if [[ "${mode}" == "comment" ]] 17 | then 18 | curl -s "https://api.pushshift.io/reddit/search/comment/?q=${q}&size=500&fields=author,body,created_utc,link_id,parent_id,permalink&before=${before}" | python3 -c 'import json,sys'$'\n''for d in json.loads(sys.stdin.read())["data"]:'$'\n'' print("%d %s %r" % (d["created_utc"], d["permalink"] if "permalink" in d else d["parent_id"] + "/" + d["link_id"] + "/" + d["author"], d["body"]))' 19 | else 20 | curl -s "https://api.pushshift.io/reddit/search/submission/?q=${q}&size=500&fields=author,created_utc,id,is_self,permalink,selftext,url&before=${before}" | python3 -c 'import json,sys'$'\n''for d in json.loads(sys.stdin.read())["data"]:'$'\n'' print("%d %s %s %s" % (d["created_utc"], d["permalink"], d["url"] if not d["is_self"] else "None", repr(d["selftext"]) if "selftext" in d else "None"))' 21 | fi 22 | } | awk 'BEGIN { timestamp = 0; } { timestamp=$1; print; } END { print timestamp >"/dev/fd/3" }' 23 | before=$(head -1 <&3) 24 | if [[ ${before} -eq 0 ]] # No data returned by Pushshift 25 | then 26 | break 27 | fi 28 | done 29 | -------------------------------------------------------------------------------- /europarl-meps-collect: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Collect all websites and social media for MEPs based on https://www.europarl.europa.eu/meps/en/full-list/all 3 | # Writes to several file descriptors: 4 | # - Info about what it's doing to stderr 5 | # - Extracted URLs to FD 3 6 | # - Warnings about EP Newshub links to FD 4 7 | 8 | # https://unix.stackexchange.com/a/206848 9 | if ! { >&3; } 2>/dev/null 10 | then 11 | echo "Error: FD 3 not open" >&1 12 | exit 1 13 | fi 14 | if ! { >&4; } 2>/dev/null 15 | then 16 | echo "Error: FD 4 not open" >&1 17 | exit 1 18 | fi 19 | 20 | scriptpath="$(cd "$(dirname "$0")"; pwd -P)" 21 | export PATH="${scriptpath}:${PATH}" 22 | echo "Fetching MEP list" >&1 23 | curl-archivebot-ua -s "https://www.europarl.europa.eu/meps/en/full-list/all" | \ 24 | grep -Po '&1 29 | profilePage="$(curl-archivebot-ua -sL "${profileUrl}")" 30 | mapfile -t urls < <(tr -d '\r\n' <<< "${profilePage}" | \ 31 | grep -Po '
    .*?' | \ 32 | grep -Po ']*\s+)?href="\K(?!mailto:)[^"]+') 33 | 34 | # Classification 35 | for url in "${urls[@]}" 36 | do 37 | if [[ "${url}" =~ //((www|[a-z][a-z]-[a-z][a-z])\.)?facebook\.com/ ]] 38 | then 39 | echo "Facebook: ${url}" 40 | elif [[ "${url}" =~ //(www\.)?instagram\.com/ ]] 41 | then 42 | echo "Instagram: ${url}" 43 | elif [[ "${url}" =~ //(www\.)?twitter\.com/ ]] 44 | then 45 | echo "Twitter: ${url}" 46 | elif [[ "${url}" =~ //([^/]+\.)?youtube\.com/ || "${url}" =~ //youtu\.be/ ]] 47 | then 48 | echo "YouTube: ${url}" 49 | else 50 | echo "Other: ${url}" 51 | fi 52 | done >&3 53 | 54 | # Check if there's a newshub mention and print a warning about that if necessary 55 | if grep -q 'container_header_newshub' <<< "${profilePage}" 56 | then 57 | echo "Has EP Newshub link: ${profileUrl}" >&4 58 | fi 59 | done 60 | -------------------------------------------------------------------------------- /archivebot-jobid-calculation: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # The SHA1 UUID stuff in Ruby is actually more complicated. Everything's right until the `head -c32`, but then Ruby transforms it into an integer in a quite peculiar way: https://github.com/sporkmonger/uuidtools/blob/a10724236cefd922ee5cd3de7695fb6e5fd703f5/lib/uuidtools.rb#L480-L494 4 | # Ruby code: ArchiveBot lib/job.rb + https://github.com/sporkmonger/uuidtools/blob/a10724236cefd922ee5cd3de7695fb6e5fd703f5/lib/uuidtools.rb#L688-L691 5 | # Takes the SHA-1 hash of the namespace (as raw bytes) and the name, truncates it to 32 hex chars, creates a new UUID from it, transforms two fields, converts it to a bigint, and formats it in base-36 6 | 7 | # sed/sha1sum/head/bash-based version missing the time_hi_and_version and clock_seq_hi_and_reserved modification 8 | #{ echo -n '82244de1-c354-4c89-bf2b-f153ce23af43' | sed 's,-,,g' | xxd -r -p; echo -n 'https://transfer.notkiska.pw/sDu6C/marwilliamson-twitter.txt'; } | sha1sum | head -c32 | { read -r hash; BASE36=($(echo {0..9} {a..z})); for i in $(bc <<< "obase=32; ibase=16; ${hash^^}" | tr -d '\\\n'); do echo -n ${BASE36[$((10#$i))]}; done; }; echo 9 | 10 | 11 | import hashlib 12 | import sys 13 | import uuid 14 | 15 | url = sys.argv[1] # Assume that it's normalised already 16 | 17 | # Calculate hash 18 | h = hashlib.sha1() 19 | h.update(bytes.fromhex('82244de1-c354-4c89-bf2b-f153ce23af43'.replace('-', ''))) 20 | h.update(url.encode('ascii')) 21 | h = h.hexdigest() 22 | 23 | # Create and transform UUID object 24 | u = uuid.UUID(h[:32]) 25 | f = list(u.fields) 26 | f[2] &= 0x0FFF 27 | f[2] |= (5 << 12) 28 | f[3] &= 0x3F; 29 | f[3] |= 0x80; 30 | 31 | # Turn it into an int 32 | #i = (f[0] << 96) + (f[1] << 80) + (f[2] << 64) + (f[3] << 56) + (f[4] << 48) + f[5] 33 | i = uuid.UUID(fields = f).int 34 | 35 | # Convert to base-36 36 | def int_to_base36(num): 37 | # https://stackoverflow.com/a/31746873 38 | assert num >= 0 39 | digits = '0123456789abcdefghijklmnopqrstuvwxyz' 40 | res = '' 41 | while not res or num > 0: 42 | num, i = divmod(num, 36) 43 | res = digits[i] + res 44 | return res 45 | print(int_to_base36(i)) 46 | -------------------------------------------------------------------------------- /social-media-extract-profile-link: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Given social media links on stdin or as args, this extracts the link in the profile description, if any. 
3 | 4 | function verbose_echo { 5 | if [[ "${verbose}" ]] 6 | then 7 | echo "$@" 8 | fi 9 | } 10 | 11 | function fetch { 12 | verbose_echo "Fetching $1" >&2 13 | curl -sL --max-time 10 -A 'Mozilla/5.0 (Windows NT 6.1; rv:60.0) Gecko/20100101 Firefox/60.0' "$1" 14 | } 15 | 16 | function fetch_n_extract { 17 | url="$1" 18 | if [[ "${url}" == *'facebook.com/'* ]] 19 | then 20 | page="$(fetch "${url}")" 21 | if grep -qF '"tab_home"' <<<"${page}" 22 | then 23 | # Publicly accessible profile 24 | grep -Po '"website_url":\K"[^"]+"' <<<"${page}" | python3 -c 'import json, sys'$'\n''for line in sys.stdin:'$'\n'' print(json.loads(line))' | awk '!seen[$0]++' 25 | elif grep -qF 'id="pagelet_loggedout_sign_up"' <<< "${page}" 26 | then 27 | # Profile overview only 28 | grep -Po ']*\s)?class\s*=\s*"([^"]*\s)?ProfileHeaderCard-url(\s[^"]*)?">.*?
    ' | grep -Po ']*\s)?class\s*=\s*"([^"]*\s)?u-textUserColor(\s[^"]*)?")([^>]*\s)?title="\K[^"]+' 37 | elif [[ "${url}" == *'youtube.com/'* ]] 38 | then 39 | if [[ "${url}" == *'?'* ]]; then u="${url}&disable_polymer=1"; else u="${url}?disable_polymer=1"; fi 40 | fetch "${u}" | tr -d '\n' | grep -Po ']*\s)?id\s*=\s*"header-links".*?' | grep -Po 'href="/redirect\?([^"]*&(amp;)?)?q=\K[^&"]+' | python3 -c 'import sys, urllib.parse; sys.stdout.write(urllib.parse.unquote(sys.stdin.read()))' 41 | fi 42 | } 43 | 44 | verbose= 45 | for arg in "$@" 46 | do 47 | if [[ "${arg}" == '--verbose' || "${arg}" == '-v' ]] 48 | then 49 | verbose=1 50 | shift 51 | elif [[ "${arg}" == '--' ]] 52 | then 53 | shift 54 | else 55 | # Assume end of options 56 | break 57 | fi 58 | done 59 | 60 | { 61 | for arg in "$@" 62 | do 63 | echo "${arg}" 64 | done 65 | 66 | if [ ! -t 0 ] 67 | then 68 | cat 69 | fi 70 | } | while read -r url 71 | do 72 | if [[ "${url}" == '* '* ]] 73 | then 74 | url="${url:2}" 75 | fi 76 | fetch_n_extract "${url}" 77 | done 78 | -------------------------------------------------------------------------------- /transfer.notkiska.pw-check-ia: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [[ $# -ne 1 || "$1" == '-h' || "$1" == '--help' ]] 5 | then 6 | echo 'Usage: transfer.notkiska.pw-check-ia TKPPATH' 7 | echo 'TKPPATH is the path of a file on transfer.notkiska.pw, e.g. "123F3V/twitter-#qanon"' 8 | echo 'Checks that the file is archived correctly on IA by downloading both copies and comparing the SHA-1.' 9 | echo 'If the TKPCHECK_CACHE_DIR environment variable is set, it is used as a cache for the CDXs to avoid redownloading them from IA on every check.' 10 | exit 1 11 | fi 12 | 13 | if [[ "${TKPCHECK_CACHE_DIR}" ]] 14 | then 15 | if [[ -e "${TKPCHECK_CACHE_DIR}" && ! -d "${TKPCHECK_CACHE_DIR}" ]] 16 | then 17 | echo "Error: ${TKPCHECK_CACHE_DIR} is not a directory." >&2 18 | exit 1 19 | fi 20 | fi 21 | 22 | file="$1" 23 | fileid="${file%/*}" 24 | filename="${file#*/}" 25 | 26 | echo "Downloading from transfer.notkiska.pw" >&2 27 | tkphash="$(curl "https://transfer.notkiska.pw/${file}" | sha1sum | tee /dev/fd/2)" 28 | 29 | echo "Retrieving WARC list from viewer" >&2 30 | mapfile -t warcs < <(curl -s "https://archive.fart.website/archivebot/viewer/api/v1/search.json?q=urls-transfer.notkiska.pw-transfer.notkiska.pw-" | python3 -c 'import json,sys; [print(x["job_id"]) for x in json.loads(sys.stdin.read())["results"]]' | sed 's,^,https://archive.fart.website/archivebot/viewer/job/,' | xargs curl -s | grep -Po 'href="\Khttps://archive.org/download/[^/"]+/[^/"]+-\d\d\d\d\d\.warc\.gz(?=")') 31 | 32 | cdxLines=() 33 | for warc in "${warcs[@]}" 34 | do 35 | cdx="${warc::-3}.os.cdx.gz" 36 | mapfile -t -O ${#cdxLines[@]} cdxLines < <( 37 | { 38 | if [[ "${TKPCHECK_CACHE_DIR}" ]] 39 | then 40 | cdxfn="${cdx:29}" 41 | mkdir -p "${TKPCHECK_CACHE_DIR}/${cdxfn%/*}" 42 | if [[ ! 
-e "${TKPCHECK_CACHE_DIR}/${cdxfn}" ]] 43 | then 44 | echo "Fetching ${cdx} into local cache" >&2 45 | curl -L "${cdx}" >"${TKPCHECK_CACHE_DIR}/${cdxfn}" 46 | fi 47 | cat "${TKPCHECK_CACHE_DIR}/${cdxfn}" 48 | else 49 | echo "Fetching ${cdx}" >&2 50 | curl -L "${cdx}" 51 | fi 52 | } | zgrep -F "/${fileid}/" 2>/dev/null | tee /dev/fd/2 53 | ) 54 | done 55 | 56 | if [[ ${#cdxLines[@]} -ne 1 ]] 57 | then 58 | echo "Not exactly one matching CDX line found, cannot continue" >&2 59 | exit 1 60 | fi 61 | 62 | read -r length offset iapath < <(awk '{print $9 " " $10 " " $11}' <<<"${cdxLines[0]}") 63 | echo "Fetching ${offset}-$((${offset}+${length})) from IA ${iapath}" >&2 64 | iahash="$(curl -L --range "${offset}-$((${offset}+${length}))" "https://archive.org/download/${iapath}" | zcat 2>/dev/null | awk '/^\r$/ {empty+=1; next} (empty >= 2)' | sha1sum | tee /dev/fd/2)" 65 | 66 | if [[ "${tkphash}" == "${iahash}" ]] 67 | then 68 | echo OK >&2 69 | exit 0 70 | else 71 | echo "Hash mismatch!" >&2 72 | exit 1 73 | fi 74 | -------------------------------------------------------------------------------- /wiki-recursive-extract-normalise: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Takes a wiki page in new-style viewer format on stdin. 3 | # Everything that looks like a social media link (including YouTube) is run through social-media-extract-profile-link. 4 | # Everything else is run through website-extract-social-media. 5 | # This is done recursively until no new links are discovered anymore. 6 | # The output is further fed through url-normalise before and during processing to avoid equivalent but slightly different duplicates, and the output is deduplicated within each section at the end. 7 | 8 | verbose= 9 | while [[ $# -gt 0 ]] 10 | do 11 | if [[ "$1" == '--verbose' || "$1" == '-v' ]] 12 | then 13 | verbose='--verbose' 14 | else 15 | echo "Unknown option: $1" >&2 16 | exit 1 17 | fi 18 | shift 19 | done 20 | 21 | function verbose_echo { 22 | if [[ "${verbose}" ]] 23 | then 24 | echo "$@" 25 | fi 26 | } 27 | 28 | function stderr_annotate { 29 | name="$1" 30 | shift 31 | if [[ "${name}" == '' ]]; then name="${1##*/}"; fi 32 | "$@" 2> >(while read -r line; do echo "[${name}] ${line}"; done >&2) 33 | } 34 | 35 | scriptpath="$(cd "$(dirname "$0")"; pwd -P)" 36 | declare -A sectionUrls 37 | stderr_annotate 'url-normalise/before' "${scriptpath}/url-normalise" ${verbose} | while read -r line 38 | do 39 | echo "${line}" 40 | if [[ "${line}" == '=='* ]] 41 | then 42 | verbose_echo "${line}" >&2 43 | unset sectionUrls 44 | declare -A sectionUrls 45 | fi 46 | if [[ "${line}" == '* http://'* || "${line}" == '* https://'* ]] 47 | then 48 | url="${line:2}" 49 | if [[ "${url}" == *' | '* ]] 50 | then 51 | url="${url%% | *}" 52 | fi 53 | 54 | if [[ "${sectionUrls[${url}]}" ]] 55 | then 56 | # Processed already, skip 57 | continue 58 | fi 59 | sectionUrls["${url}"]=1 60 | toProcess=("${url}") 61 | while [[ ${#toProcess[@]} -gt 0 ]] 62 | do 63 | curUrl="${toProcess[0]}" 64 | toProcess=("${toProcess[@]:1}") 65 | 66 | if grep -Pq '//([^/]+\.)?(facebook\.com|flickr\.com|instagram\.com|twitter\.com|vk\.com|youtube\.com|youtu\.be)/' <<<"${curUrl}" 67 | then 68 | mapfile -t outUrls < <(stderr_annotate '' "${scriptpath}/social-media-extract-profile-link" ${verbose} "${curUrl}" < <(:) | stderr_annotate 'url-normalise/post-social' "${scriptpath}/url-normalise" ${verbose}) 69 | else 70 | mapfile -t outUrls < <(stderr_annotate '' 
"${scriptpath}/website-extract-social-media" ${verbose} "${curUrl}" < <(:) | stderr_annotate 'url-normalise/post-web' "${scriptpath}/url-normalise" ${verbose}) 71 | fi 72 | 73 | for outUrl in "${outUrls[@]}" 74 | do 75 | if [[ "${sectionUrls[${outUrl}]}" ]] 76 | then 77 | # The discovered URL was processed already, skip it entirely 78 | continue 79 | else 80 | # Not-yet-known URL, add to the list of URLs to process, mark as seen, and print 81 | toProcess+=("${outUrl}") 82 | sectionUrls["${outUrl}"]=1 83 | echo "* ${outUrl}" 84 | fi 85 | done 86 | done 87 | fi 88 | done | mawk -W interactive '! /^\*/ { print; } /^\*/ && !seen[$0]++ { print; } /^==/ { delete seen; }' 89 | -------------------------------------------------------------------------------- /website-extract-social-media: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | function verbose_echo { if [[ "${verbose}" ]]; then echo "$@"; fi; } 3 | 4 | function fetch_n_extract { 5 | local url="$1" 6 | verbose_echo "Fetching ${url}" >&2 7 | { 8 | curl -sSL --max-time 10 -A 'Mozilla/5.0 (Windows NT 6.1; rv:60.0) Gecko/20100101 Firefox/60.0' "${url}" | \ 9 | grep -Fi -e 'facebook' -e 'flickr' -e 'instagram' -e 'twitter' -e 't.me' -e 'youtube' -e 'youtu.be' -e 'vk.com' | \ 10 | tee \ 11 | >( 12 | # Facebook 13 | grep -Poi 'facebook\.com/((pages(/category)?|people)/((?!")[^/ <"'"'"'])+/|groups/|pg/)?((?!")[^/ <"'"'"'])+' | \ 14 | sed 's,^,https://www.,' | \ 15 | grep -vi -e '^https://www\.facebook\.com/2008$' -e '^https://www\.facebook\.com/tr\?' -e '^https://www\.facebook\.com/plugins$' -e '^https://www\.facebook\.com/l\.php\?' | \ 16 | grep -Pvi '^https://www\.facebook\.com/share(r(\.php)?)?(\?|$)' 17 | ) \ 18 | >( 19 | # Flickr 20 | grep -Poi 'flickr\.com/photos/((?!")[^/ <"'"'"'])+' | \ 21 | sed 's,^,https://www.,' 22 | ) \ 23 | >( 24 | # Instagram 25 | grep -Poi 'instagram\.com/(p/)?((?!")[^/ <"'"'"'])+' | \ 26 | sed 's,^,https://www.,' | \ 27 | grep -Pvi -e '^https://www\.instagram\.com/v?p$' 28 | ) \ 29 | >( 30 | # Telegram 31 | grep -Poi '//(www\.)?t\.me/((?!")[^/ <"'"'"'])+' | \ 32 | sed 's,^//,,; s,^www\.,,; s,^,https://,' 33 | ) \ 34 | >( 35 | # Twitter 36 | grep -Poi 'twitter\.com/(#!/)?(hashtag/)?((?!")[^/ <"'"'"'])+' | \ 37 | sed 's,^twitter\.com/#!/,twitter.com/,; s,^,https://,' | \ 38 | grep -vi -e '^https://twitter\.com/home\?' -e '^https://twitter\.com/widgets\.js$' -e '^https://twitter\.com/share\?' -e '^https://twitter\.com/intent$' | \ 39 | sed 's,\([?&]\)ref_src=[^&]\+&\?,\1,; s,?$,,' 40 | ) \ 41 | >( 42 | # VKontakte 43 | grep -Poi 'vk\.com/((?!")[^/ <"'"'"'])+' | \ 44 | sed 's,^,https://,' 45 | ) \ 46 | >( 47 | # YouTube 48 | grep -Poi '(youtube\.com/((user|channel|c|embed)/)?((?!")[^/ <"'"'"'])+|youtu\.be/((?!")[^/ <"'"'"'])+)' | \ 49 | awk '/^youtube/ { print "https://www." 
$0 } /^youtu\.be/ { print "https://" $0 }' | \ 50 | grep -vi -e '^https://www\.youtube\.com/vi$' 51 | ) \ 52 | >/dev/null 53 | } | awk '!seen[$0]++' 54 | } 55 | 56 | # Parse options 57 | printInputUrl= 58 | verbose= 59 | while [[ $# -gt 0 ]] 60 | do 61 | if [[ "$1" == '--print-input-urls' || "$1" == '--print-input-url' ]] 62 | then 63 | printInputUrl=true 64 | shift 65 | elif [[ "$1" == '--verbose' || "$1" == 'v' ]] 66 | then 67 | verbose=1 68 | shift 69 | elif [[ "$1" == '--' ]] 70 | then 71 | # End of options 72 | shift 73 | break 74 | elif [[ "$1" == '--'* ]] 75 | then 76 | echo "Unknown option: $1" >&2 77 | exit 1 78 | else 79 | # Assume end of options 80 | break 81 | fi 82 | done 83 | 84 | { 85 | for arg in "$@" 86 | do 87 | echo "${arg}" 88 | done 89 | 90 | if [ ! -t 0 ] 91 | then 92 | cat 93 | fi 94 | } | while read -r url 95 | do 96 | if [[ "${printInputUrl}" ]] 97 | then 98 | echo "${url}" 99 | fi 100 | fetch_n_extract "${url}" 101 | done 102 | -------------------------------------------------------------------------------- /warc-peek: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # `warc-peek.py` is a small script to help looking into gzipped WARC files without decompressing the entire file. 4 | # It searches a window in the file for gzip's magic bytes `1F 8B`, attempts decompression, compares the result to the expected beginning of a WARC record, and prints all valid offsets. 5 | # These can then be used with e.g. `tail` and `zless` to actually look at the records. 6 | # 7 | # Usage: warc-peek.py WARCFILE OFFSET LENGTH 8 | # Opens `WARCFILE`, reads `LENGTH` bytes starting at `OFFSET` (zero-based), and prints valid WARC record offsets to stdout (one integer per line). 9 | # 10 | # Caveats 11 | # - This script only works with WARCs in which each record is compressed individually. 12 | # This is what the specification recommends and what most tools should generate by default, but there definitely exist valid compressed WARCs which can't be processed in this way. 13 | # - When you want to use `tail -c+OFFSET WARCFILE | zless` to look at the records, keep in mind that `tail` uses one-based indices, i.e. you will have to add one to the indices returned by `warc-peek.py`. 14 | # - `warc-peek.py` will miss valid record offsets in the last 512 bytes of the window. 15 | # This is because a certain length of the compressed data is necessary to be able to decompress it. `warc-peek.py` uses 512 bytes for this and will therefore 16 | # not attempt decompression when `1F 8B` is found in the last 512 bytes of the window. You can increase `LENGTH` to compensate for this if necessary. 17 | 18 | import argparse 19 | import logging 20 | import zlib 21 | 22 | 23 | logger = logging.getLogger('warc-peek') 24 | 25 | 26 | def finditer(b, sub): 27 | pos = 0 28 | while True: 29 | pos = b.find(sub, pos) 30 | if pos < 0: 31 | break 32 | yield pos 33 | pos += 1 34 | 35 | 36 | def find_offsets(warcfile, offset, length): 37 | with open(warcfile, 'rb') as fp: 38 | fp.seek(offset) 39 | buffer = fp.read(length) 40 | 41 | logger.debug('Buffer length: {:d}'.format(len(buffer))) 42 | for pos in finditer(buffer, b'\x1f\x8b'): 43 | logger.debug('Trying relative offset {:d}'.format(pos)) 44 | if pos > len(buffer) - 512: # 512 bytes might be a bit too much, but at least it ensures that the decompression will work. 
45 | break 46 | try: 47 | dec = zlib.decompressobj(zlib.MAX_WBITS | 32).decompress(buffer[pos:pos+512]) 48 | except: 49 | continue 50 | logger.debug('First 100 bytes of decompressed data: {!r}'.format(dec[:100])) 51 | if dec.startswith(b'WARC/1.0\r\n') or dec.startswith(b'WARC/1.1\r\n'): 52 | yield offset + pos 53 | 54 | 55 | if __name__ == '__main__': 56 | parser = argparse.ArgumentParser() 57 | parser.add_argument('--debug', action = 'store_true', help = 'Enable debug output') 58 | parser.add_argument('warcfile', help = 'A .warc.gz file') 59 | parser.add_argument('offset', type = int, help = 'Zero-based byte offset of the window') 60 | parser.add_argument('length', type = int, help = 'Length in bytes of the window') 61 | args = parser.parse_args() 62 | 63 | if args.debug: 64 | logging.basicConfig( 65 | format = '{asctime} {levelname} {name} {message}', 66 | style = '{', 67 | level = logging.DEBUG, 68 | ) 69 | for offset in find_offsets(args.warcfile, args.offset, args.length): 70 | print(offset) 71 | -------------------------------------------------------------------------------- /kill-wpull-connections: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # kill-wpull-connections is a workaround for wpull's bug of HTTPS connections getting stuck, slowing down or entirely stopping progress (https://github.com/ArchiveTeam/wpull/issues/407). 4 | # It works by attaching to the process and shutting down the TCP connections directly using the `shutdown` syscall. Although written for use with wpull, it can actually be used for any process. 5 | # 6 | # Usage: 7 | # - To kill all TCP connections except those to 127.0.0.1 of a process: `kill-wpull-connections -p $PID` 8 | # - As a convenience option for ArchiveBot pipeline maintainers, to kill the connections of a running ArchiveBot job: `kill-wpull-connections -j $JOBID` 9 | # - If you get an error of `gdb.error: 'shutdown' has unknown return type; cast the call to its declared return type` then add the `-c` option *before* `-p` or `-j`, e.g. `kill-wpull-connections -c -p $PID`. 10 | # - If you're running wpull inside a Docker container without ptrace capabilities, you need to run kill-wpull-connections outside of the container (where you do have ptrace cap) but inside the container's network namespace. 
11 | # One way to do this is: `nsenter --net=$(docker inspect $CONTAINERID | jq -r .[].NetworkSettings.SandboxKey) kill-wpull-connections -p $PID` 12 | 13 | function usage_exit { 14 | # usage E -- print usage; exit with status code E 15 | echo 'Usage: kill-wpull-connections (-h | [-c] (-p PID | -j JOBID))' 16 | echo 17 | echo ' -h: Display this message and exit' 18 | echo ' -c: Cast return value of shutdown(2) to int explicitly (only necessary on broken machines)' 19 | echo ' -p PID: Kill connections of wpull process PID' 20 | echo ' -j JOBID: Kill connections of the wpull process for ArchiveBot job JOBID' 21 | exit $1 22 | } 23 | 24 | if [[ $# -eq 0 || $# -gt 3 ]]; then usage_exit 1; fi 25 | if [[ $# -eq 1 && "$1" == '-h' ]]; then usage_exit 0; fi 26 | if [[ $# -ne 2 && $# -ne 3 ]]; then usage_exit 1; fi 27 | 28 | cast= 29 | if [[ "$1" == '-c' ]]; then cast='(int)'; shift; fi 30 | 31 | if [[ "$1" != -[pj] ]]; then usage_exit 1; fi 32 | if [[ $# -ne 2 ]]; then usage_exit 1; fi 33 | 34 | if [[ "$1" == '-p' ]] 35 | then 36 | wpullPid=$2 37 | if [[ "${wpullPid}" == *[^0-9]* ]] 38 | then 39 | echo "Error: '${wpullPid}' is not a valid PID" 40 | exit 1 41 | fi 42 | elif [[ "$1" == '-j' ]] 43 | then 44 | pids=($(pgrep --full "wpull.*/$2/")) 45 | if [[ ${#pids[@]} -ne 1 ]] 46 | then 47 | echo "Error: not exactly one process found for '$2'" 48 | exit 1 49 | fi 50 | wpullPid=${pids[0]} 51 | fi 52 | 53 | if ! command -v lsof >/dev/null 2>&1 54 | then 55 | echo "Error: could not find lsof" 56 | exit 1 57 | fi 58 | 59 | if ! command -v gdb >/dev/null 2>&1 60 | then 61 | echo "Error: could not find gdb" 62 | exit 1 63 | fi 64 | 65 | if ! ps -p ${wpullPid} >/dev/null 2>&1 66 | then 67 | echo "Error: no process with PID ${wpullPid}" 68 | exit 1 69 | fi 70 | 71 | if [[ -e /proc/sys/kernel/yama/ptrace_scope && "$(< /proc/sys/kernel/yama/ptrace_scope)" != "0" && $EUID -ne 0 ]] 72 | then 73 | echo "Warning: /proc/sys/kernel/yama/ptrace_scope is not zero. You likely need to run this script as root." 
74 | fi 75 | 76 | gdb -batch -batch-silent \ 77 | -ex "attach ${wpullPid}" \ 78 | -ex 'python import subprocess' \ 79 | -ex 'python def call(s): return subprocess.call(s, shell = True) == 0' \ 80 | -ex 'python call("echo '\''FDs before forced shutdown:'\''") and call("lsof -an -p '${wpullPid}' -i TCP | grep -v 127\.0\.0\.1") and ([gdb.execute("p '${cast}'shutdown(" + fd + ", 2)") for fd in subprocess.check_output("lsof -an -p '${wpullPid}' -i TCP -F pfn | awk '\''NR%2==0{fd=substr($0,2)}NR%2==1&&NR>1&&!/127\.0\.0\.1/{print fd}'\''", shell = True).decode("ascii").strip().split("\n")] or True) and call("echo '\''FDs after forced shutdown:'\''") and call("lsof -an -p '${wpullPid}' -i TCP | grep -v 127\.0\.0\.1")' \ 81 | -ex detach \ 82 | -ex quit 83 | -------------------------------------------------------------------------------- /s3-bucket-list: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import html 3 | import http.client 4 | import os 5 | import shlex 6 | import sys 7 | import urllib.parse 8 | 9 | 10 | # Arguments 11 | i = 1 12 | withListUrls = False 13 | listUrlsFD = None 14 | startMarker = None 15 | format = '{url}' 16 | args = [] 17 | while i < len(sys.argv): 18 | arg = sys.argv[i] 19 | if arg == '--help': 20 | print('s3-bucket-list [options] BUCKETURL', file = sys.stderr) 21 | print('', file = sys.stderr) 22 | print('Options:', file = sys.stderr) 23 | print(f' --format FORMAT Modify the output format; FORMAT defaults to {format!r}; available fields: url, key, size, and all fields returned by S3 (e.g. LastModified)', file = sys.stderr) 24 | print( ' --marker KEY Start after a particular key instead of from the beginning', file = sys.stderr) 25 | print( ' --with-list-urls Enables printing the list URLs retrieved to FD 3', file = sys.stderr) 26 | sys.exit(1) 27 | elif arg == '--with-list-urls': 28 | withListUrls = True 29 | try: 30 | listUrlsFD = os.fdopen(3, 'w') 31 | except OSError: 32 | print('Error: FD 3 not open', file = sys.stderr) 33 | sys.exit(1) 34 | elif arg == '--marker': 35 | startMarker = sys.argv[i + 1] 36 | i += 1 37 | elif arg == '--format': 38 | format = sys.argv[i + 1] 39 | i += 1 40 | else: 41 | args.append(arg) 42 | i += 1 43 | assert len(args) == 1, 'Need one argument: bucket URL' 44 | baseUrl = args[0] 45 | assert baseUrl.startswith('http://') or baseUrl.startswith('https://'), 'Argument does not look like an HTTP URL' 46 | if '/' not in baseUrl.split('://', 1)[1] or not baseUrl.endswith('/'): 47 | baseUrl = f'{baseUrl}/' 48 | hostname = baseUrl.split('://', 1)[1].split('/', 1)[0] 49 | 50 | 51 | conn = http.client.HTTPSConnection(hostname) 52 | params = {} 53 | if startMarker is not None: 54 | params['marker'] = startMarker 55 | attempt = 1 56 | while True: 57 | queryString = urllib.parse.urlencode(params) 58 | url = f'{baseUrl}{"?" + queryString if queryString else ""}' 59 | if withListUrls: 60 | print(f'{url}', file = listUrlsFD) 61 | conn.request('GET', url[url.index('/', 8):]) 62 | resp = conn.getresponse() 63 | body = resp.read() 64 | if b'InternalErrorWe encountered an internal error. Please try again.' 
in body: 65 | print(f'Got internal error on {url} on attempt {attempt}; {"retrying" if attempt < 10 else "aborting"}', file = sys.stderr) 66 | if attempt >= 10: 67 | if 'marker' in params: 68 | print(f'To retry, use --marker {shlex.quote(params["marker"])}', file = sys.stderr) 69 | break 70 | attempt += 1 71 | continue 72 | if not body.startswith(b'\n'): 73 | raise RuntimeError(f'Invalid body: {body[:200]}...') 74 | 75 | if b'' in body[:200] and 'marker' in params: 76 | raise RuntimeError('Marker loop (empty marker in response despite providing one)') 77 | 78 | # No risk, no fun! 79 | contents = body.split(b'') 80 | assert all(content.startswith(b'') for content in contents[1:]) 81 | assert all(content.endswith(b'') for content in contents[1:-1]) 82 | assert contents[-1].endswith(b'') 83 | contents[-1] = contents[-1][:-len('')] 84 | for content in contents[1:]: 85 | key = html.unescape(content[5 : content.index(b'')].decode('utf-8')) # 5 = len(b'') 86 | url = f'{baseUrl}{urllib.parse.quote(key)}' 87 | 88 | tags = content.split(b'>') 89 | assert len(tags) % 2 == 0 90 | assert tags[-1] == b'' 91 | assert tags[-2] == b''.join(openTags).decode('utf-8')] = html.unescape(tag[:-(len(openTags[-1]) + 2)].decode('utf-8')) 101 | openTags.pop() 102 | continue 103 | assert False 104 | 105 | size = int(fields['Size']) if 'Size' in fields else None 106 | 107 | try: 108 | print(format.format(**fields, key = key, url = url, size = size)) 109 | except BrokenPipeError: 110 | sys.exit(0) 111 | lastKey = key 112 | 113 | truncated = True if b'true' in body else (False if b'false' in body else None) 114 | assert truncated in (True, False) 115 | 116 | if not truncated: 117 | break 118 | if 'marker' in params and params['marker'] == lastKey: 119 | raise RuntimeError('Marker loop (same last key as previous marker)') 120 | params['marker'] = lastKey 121 | attempt = 1 122 | -------------------------------------------------------------------------------- /wpull2-log-extract-errors: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$1" == '--test' ]] 4 | then 5 | ## Self-test 6 | 7 | # Notes: 8 | # - Only the response lines are included here; the requests would be skipped anyway. 9 | # - I didn't bother adjusting the status code message since it's not used for parsing 10 | # - Only one example of an ERROR is included because the error message doesn't matter anyway 11 | 12 | diff -q <("$0" <<-EOF 13 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/success-200’: 200 OK. Length: 1234 [text/html; charset=utf-8]. 14 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/success-204’: 204 OK. Length: 1234 [text/html; charset=utf-8]. 15 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/success-304’: 304 OK. Length: 1234 [text/html; charset=utf-8]. 16 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/ok-401’: 401 OK. Length: 1234 [text/html; charset=utf-8]. 17 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/ok-403’: 403 OK. Length: 1234 [text/html; charset=utf-8]. 18 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/ok-404’: 404 OK. Length: 1234 [text/html; charset=utf-8]. 19 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/ok-405’: 405 OK. Length: 1234 [text/html; charset=utf-8]. 
20 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/ok-410’: 410 OK. Length: 1234 [text/html; charset=utf-8]. 21 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/error-429’: 429 OK. Length: 1234 [text/html; charset=utf-8]. 22 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/error-418’: 418 OK. Length: 1234 [text/html; charset=utf-8]. 23 | 2020-09-10 23:54:25,000 - wpull.processor.base - ERROR - Fetching ‘https://example.org/error-dns’ encountered an error: DNS resolution failed: [Errno -2] Name or service not known 24 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/error-429-successful-retry’: 429 OK. Length: 1234 [text/html; charset=utf-8]. 25 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/error-429-successful-retry’: 200 OK. Length: 1234 [text/html; charset=utf-8]. 26 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/error-429-successful-second-retry’: 429 OK. Length: 1234 [text/html; charset=utf-8]. 27 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/error-429-successful-second-retry’: 429 OK. Length: 1234 [text/html; charset=utf-8]. 28 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/error-429-successful-second-retry’: 200 OK. Length: 1234 [text/html; charset=utf-8]. 29 | 2020-09-10 23:54:25,000 - wpull.processor.base - ERROR - Fetching ‘https://example.org/error-dns-successful-retry’ encountered an error: DNS resolution failed: [Errno -2] Name or service not known 30 | 2020-09-10 23:54:25,000 - wpull.processor.web - INFO - Fetched ‘https://example.org/error-dns-successful-retry’: 200 OK. Length: 1234 [text/html; charset=utf-8]. 31 | EOF 32 | ) <(cat <<-EOF 33 | https://example.org/error-429 34 | https://example.org/error-418 35 | https://example.org/error-dns 36 | EOF 37 | ) >/dev/null 38 | if [[ $? -eq 0 ]] 39 | then 40 | echo 'Success!' 41 | exit 0 42 | else 43 | echo 'Fail!' 44 | exit 1 45 | fi 46 | fi 47 | 48 | if [[ -t 0 || "$1" == '--help' ]] 49 | then 50 | echo 'Usage: pipe a wpull log (or meta WARC, decompressed) to this script' >&2 51 | echo 'Produces a list of URLs that were attempted but not retrieved successfully. They are output in the order of the first failure.' >&2 52 | exit 1 53 | fi 54 | 55 | # Logic: extract all lines of interest, process them such that they only contain a + or - indicating success or error plus the URL, filter the errors with the successes in awk. 56 | # The output order is as each URL appears for the first time in the log. Since awk doesn't preserve the insertion order on iteration, keep the line number and sort the output on that. 57 | grep -F -e ' - ERROR - Fetching ‘' -e ' - INFO - Fetched ‘' | sed 's,^.*‘\(.*\)’: \(200\|204\|304\|401\|403\|404\|405\|410\) .*$,+ \1,; s,^.*‘\(.*\)’.*$,- \1,' | awk '/^\+ / { successes[$2] = 1; } /^- / && ! ($2 in successes) { errors[$2] = NR; } END { for (url in errors) { if (! (url in successes)) { print errors[url] " " url; } } }' | sort -n | cut -d' ' -f2- 58 | 59 | # Faster version without preserving order: grep -F -e ' - ERROR - Fetching ‘' -e ' - INFO - Fetched ‘' | sed 's,^.*‘\(.*\)’: \(200\|204\|304\|401\|403\|404\|405\|410\) .*$,+ \1,; s,^.*‘\(.*\)’.*$,- \1,' | awk '/^\+ / { successes[$2] = 1; } /^- / && ! ($2 in successes) { errors[$2] = 1; } END { for (url in errors) { if (! 
(url in successes)) { print url; } } }' 60 | -------------------------------------------------------------------------------- /deb-repo-urls: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [[ $# -ne 2 || "$1" == '-h' || "$1" == '--help' ]] 5 | then 6 | echo 'Lists all relevant files in a Debian-style repository' >&2 7 | echo >&2 8 | echo 'Usage: deb-repo-ls ARCHIVEROOT DISTRIBUTION' >&2 9 | exit 1 10 | fi 11 | 12 | archiveRoot="$1" 13 | distribution="$2" 14 | 15 | while [[ "${archiveRoot}" == */ ]]; do archiveRoot="${archiveRoot%/}"; done # Strip trailing slashes since they're added again manually below 16 | if [[ "${distribution}" == */ || "${distribution}" == /* ]]; then echo "Invalid distribution" >&2; exit 1; fi 17 | 18 | declare -i fetchedSize 19 | declare -i totalSize=0 20 | 21 | pipe=$(mktemp -u); mkfifo "${pipe}"; exec 3<>"${pipe}"; rm "${pipe}"; unset pipe # For fetch size 22 | 23 | function maybe_decompress { 24 | url="$1" 25 | if [[ "${url}" == *'.gz' ]] 26 | then 27 | gunzip 28 | elif [[ "${url}" == *'.bz2' ]] 29 | then 30 | bunzip2 31 | elif [[ "${url}" == *'.xz' ]] 32 | then 33 | unxz 34 | else 35 | # Peek at the data to see if it's compressed 36 | beginning="$(head -c 10 | xxd -p)" 37 | { 38 | xxd -p -r <<<"${beginning}" 39 | cat 40 | } | { 41 | if [[ "${beginning}" == '1f8b'* ]] 42 | then 43 | gunzip 44 | elif [[ "${beginning}" == '425a68'??'314159265359'* ]] 45 | then 46 | bunzip2 47 | elif [[ "${beginning}" == 'fd377a585a00'* ]] 48 | then 49 | unxz 50 | else 51 | cat 52 | fi 53 | } 54 | fi 55 | } 56 | 57 | function fetch_and_print { 58 | url="$1" 59 | # Fetch url, print it to stdout; as side effects, the HTTP body is stored in fetchedBody and its size is added to totalSize 60 | # If the url ends with .gz, it's gunzipped; if it ends in .bz2, it gets bunzip2'd. The totalSize reflects the compressed size. 
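# (How the size is measured: `tee >(wc -c >&3)` writes the byte count of the raw download into FD 3, the fifo set up above, and `head -1 <&3` below reads it back into totalSize, so the count reflects the wire size even though fetchedBody holds the decompressed text.)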
61 | echo "Fetching ${url}" >&2 62 | fetchedBody="$(curl -A 'Debian APT-HTTP/1.3 (1.8.2)' -s "${url}" | tee >(wc -c >&3) | maybe_decompress "${url}")" 63 | echo "${url}" 64 | #totalSize+=$(LANG=C; LC_ALL=C; echo ${#fetchedBody}) 65 | totalSize+=$(head -1 <&3) 66 | } 67 | 68 | function head_and_print { 69 | url="$1" 70 | # Issue a HEAD request on url, print it to stdout, and add its content length to totalSize 71 | echo "Heading ${url}" >&2 72 | declare -i contentLength="$(curl -A 'Debian APT-HTTP/1.3 (1.8.2)' -s --head "${url}" | grep -i '^Content-Length: ' | awk '{print $2}' | tr -d '\r')" 73 | echo "${url}" 74 | totalSize+=${contentLength} 75 | } 76 | 77 | # Retrieve Release and accompanying files, parse it and extract the next level 78 | fetch_and_print "${archiveRoot}/dists/${distribution}/Release" 79 | inHashSection= 80 | declare -A files # filename -> size 81 | while IFS='' read -r line 82 | do 83 | if [[ "${line}" == 'MD5Sum:' || "${line}" == 'SHA1:' || "${line}" == 'SHA256:' || "${line}" == 'SHA512:' ]] 84 | then 85 | inHashSection=1 86 | continue 87 | fi 88 | if [[ "${line}" != ' '* ]] 89 | then 90 | inHashSection= 91 | continue 92 | fi 93 | if [[ "${inHashSection}" ]] 94 | then 95 | files["$(awk '{print $3}' <<<"${line}")"]="$(awk '{print $2}' <<<"${line}")" 96 | fi 97 | done <<<"${fetchedBody}" 98 | 99 | head_and_print "${archiveRoot}/dists/${distribution}/Release.gpg" 100 | head_and_print "${archiveRoot}/dists/${distribution}/InRelease" 101 | 102 | # Process files 103 | filename= 104 | declare -i size=-1 105 | declare -A debfiles # filename -> size 106 | for fn in "${!files[@]}" 107 | do 108 | # Legacy releases 109 | if [[ "${fn}" =~ /Release$ ]] 110 | then 111 | echo "${archiveRoot}/dists/${distribution}/${fn}" 112 | totalSize+="${files[$fn]}" 113 | 114 | # Package indices 115 | elif [[ "${fn}" =~ /Packages(\.[^/]*)?$ ]] 116 | then 117 | echo "Processing package: ${fn}" >&2 118 | fetch_and_print "${archiveRoot}/dists/${distribution}/${fn}" 119 | filename= 120 | size=-1 121 | while IFS= read -r line 122 | do 123 | if [[ "${line}" == '' ]] 124 | then 125 | filename= 126 | size=-1 127 | elif [[ "${line}" == 'Filename: '* ]] 128 | then 129 | filename="${line:10}" 130 | elif [[ "${line}" == 'Size: '* ]] 131 | then 132 | size="${line:6}" 133 | fi 134 | if [[ "${filename}" && ${size} -ge 0 ]] 135 | then 136 | debfiles["${archiveRoot}/${filename}"]=${size} 137 | filename= 138 | size=-1 139 | fi 140 | done <<<"${fetchedBody}" 141 | 142 | # Contents indices 143 | elif [[ "${fn}" =~ /Contents-[^/]*$ ]] 144 | then 145 | # Nothing really to do here but take note of its existence 146 | echo "${archiveRoot}/dists/${distribution}/${fn}" 147 | totalSize+="${files[$fn]}" 148 | 149 | # Anything else 150 | else 151 | echo "${archiveRoot}/dists/${distribution}/${fn}" 152 | totalSize+="${files[$fn]}" 153 | echo "Skipping unknown file: ${fn}" >&2 154 | fi 155 | done 156 | 157 | # Print the debfiles and add them to the total size 158 | for debfile in "${!debfiles[@]}" 159 | do 160 | echo "${debfile}" 161 | totalSize+="${debfiles[$debfile]}" 162 | done 163 | 164 | # Report total size to stderr 165 | echo "Total size: ${totalSize} bytes" >&2 166 | -------------------------------------------------------------------------------- /url-normalise: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Taking a list of URLs from stdin (optionally in new-viewer style wiki format), every URL is normalised as follows: 3 | # - For social media URLs, the correct 
capitalisation is extracted and extraneous parameters are removed. 4 | # - For YouTube user or channel URLs, the canonical base URL is extracted. 5 | # - For anything else, retrieval is attempted and the final, post-redirect URL is used. (To not follow redirects, use --other-no-redirects.) 6 | 7 | otherCurlRedirectOpt='-L' 8 | verbose= 9 | while [[ $# -gt 0 ]] 10 | do 11 | if [[ "$1" == '--other-no-redirects' ]] 12 | then 13 | otherCurlRedirectOpt= 14 | elif [[ "$1" == '--verbose' || "$1" == '-v' ]] 15 | then 16 | verbose=1 17 | else 18 | echo "Unknown option: $1" >&2 19 | exit 1 20 | fi 21 | shift 22 | done 23 | 24 | function verbose_echo { 25 | if [[ "${verbose}" ]] 26 | then 27 | echo "$@" 28 | fi 29 | } 30 | 31 | userAgent='Mozilla/5.0 (Windows NT 6.1; rv:60.0) Gecko/20100101 Firefox/60.0' 32 | 33 | while read -r line 34 | do 35 | if [[ "${line}" != 'http://'* && "${line}" != 'https://'* && "${line}" != '* http://'* && "${line}" != '* https://'* ]] 36 | then 37 | echo "${line}" 38 | continue 39 | fi 40 | 41 | if [[ "${line}" == '* '* ]] 42 | then 43 | prefix="${line::2}" 44 | url="${line:2}" 45 | else 46 | prefix="" 47 | url="${line}" 48 | fi 49 | 50 | if [[ "${url}" == *' | '* ]] 51 | then 52 | suffix=" | ${url#* | }" 53 | url="${url%% | *}" 54 | else 55 | suffix="" 56 | fi 57 | 58 | # Normalise domain 59 | if [[ "${url}" =~ ^https?://.*/ ]] 60 | then 61 | domain="${url#*://}" 62 | domain="${domain%%/*}" 63 | url="${url%%://*}://${domain,,}/${url#*://*/}" 64 | fi 65 | 66 | if [[ "${url}" =~ ^https?://((www|m|[a-z][a-z]-[a-z][a-z]).)?facebook.com/login/.*[?\&]next=https?%3A%2F%2F((www|m|[a-z][a-z]-[a-z][a-z]).)?facebook.com%2F && "${url}" != *'%0A'* && "${url}" != *'%00'* ]] 67 | then 68 | url="${url##*\?next=}" 69 | url="${url##*&next=}" 70 | url="${url%%&*}" 71 | url="$(printf '%b' "${url//%/\\x}")" 72 | fi 73 | if [[ "${url}" =~ ^https?://((www|m|[a-z][a-z]-[a-z][a-z]).)?facebook.com/([^/]+/?(\?|$)|pages/[^/]+/[0-9]+/?(\?|$)|pages/category/[^/]+/[^/]+/?(\?|$)|pg/[^/]+([/?]|$)|profile\.php\?id=[0-9]+(&|$)) ]] 74 | then 75 | verbose_echo "Normalising Facebook URL: ${url}" >&2 76 | if [[ "${url}" == *profile.php* ]] 77 | then 78 | url="${url%%&*}" 79 | else 80 | url="${url%%\?*}" 81 | fi 82 | page="$(curl -sL --max-time 10 -A "${userAgent}" -H 'Accept-Language: en-US,en;q=0.5' "https://www.facebook.com/${url#*facebook.com/}")" 83 | user="$(grep -Po ']*(?<=\s)data-key\s*=\s*"tab_home".*?' <<< "${page}" | grep -Po ']*(?<=\s)href="/\K[^/]+')" 84 | if [[ "${user}" ]] 85 | then 86 | echo "${prefix}https://www.facebook.com/${user}/${suffix}" 87 | continue 88 | elif grep -q 'id="pagelet_loggedout_sign_up"' <<< "${page}" 89 | then 90 | # Profile page which is only visible when logged in 91 | # Extract canonical URL 92 | user="$(grep -Po '&2 100 | echo "${line}" 101 | elif [[ "${url}" =~ ^https?://(www\.)?twitter\.com/[^/]+/?(\?.*)?$ ]] 102 | then 103 | if [[ "${url}" =~ ^https?://(www\.)?twitter\.com/(i|web|search|hashtag)[/?] ]] 104 | then 105 | verbose_echo "Leaving Twitter URL alone: ${url}" >&2 106 | echo "${line}" 107 | continue 108 | fi 109 | verbose_echo "Normalising Twitter URL: ${url}" >&2 110 | url="${url%%\?*}" 111 | url="${url%/}" 112 | unnormalisedUser="${url##*/}" 113 | user="$(curl -sL --max-time 10 -A "Opera/9.80 (Windows NT 6.1; WOW64) Presto/2.12.388 Version/12.18" "https://twitter.com/${unnormalisedUser}" | grep -Po '
    &2 119 | echo "${line}" 120 | fi 121 | elif [[ "${url}" =~ ^https?://(www\.)?instagram\.com/[^/]+/?$ ]] 122 | then 123 | if [[ "${url}" =~ ^https?://(www\.)?instagram\.com/(p|explore)/ ]] 124 | then 125 | verbose_echo "Leaving Instagram URL alone: ${url}" >&2 126 | echo "${line}" 127 | continue 128 | fi 129 | verbose_echo "Normalising Instagram URL: ${url}" >&2 130 | user="${url%/}" 131 | user="${user##*/}" 132 | echo "${prefix}https://www.instagram.com/${user,,}/${suffix}" 133 | elif [[ "${url}" =~ ^https?://(www\.)?youtube\.com/ ]] 134 | then 135 | verbose_echo "Normalising YouTube URL: ${url}" >&2 136 | if [[ "${url}" == *'?'* ]] 137 | then 138 | rurl="${url}&disable_polymer=1" 139 | else 140 | rurl="${url}?disable_polymer=1" 141 | fi 142 | page="$(curl -4sL --max-time 10 -A "${userAgent}" -H 'Accept-Language: en-US,en;q=0.5' "${rurl}")" 143 | canonical="$(grep -Po '&2 154 | echo "${line}" 155 | fi 156 | fi 157 | else 158 | verbose_echo "Normalising other URL: ${url}" >&2 159 | canonical="$(curl -sS ${otherCurlRedirectOpt} --max-time 10 -A "${userAgent}" -o /dev/null -w '%{url_effective}' "${url}")" 160 | if [[ "${canonical}" ]] 161 | then 162 | echo "${prefix}${canonical}${suffix}" 163 | else 164 | echo "Failed to normalise other URL: ${url}" >&2 165 | echo "${line}" 166 | fi 167 | fi 168 | done 169 | -------------------------------------------------------------------------------- /archivebot-jobs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import datetime 4 | import itertools 5 | import json 6 | import math 7 | import os 8 | import re 9 | import sys 10 | import time 11 | import urllib.request 12 | 13 | # Column definitions 14 | columns = { 15 | 'jobid': (lambda job, pipelines: job["job_data"]["ident"], ()), 16 | 'url': (lambda job, pipelines: job["job_data"]["url"], ('truncatable',)), 17 | 'user': (lambda job, pipelines: job["job_data"]["started_by"], ()), 18 | 'pipenick': (lambda job, pipelines: pipelines[job["job_data"]["pipeline_id"]] if job["job_data"]["pipeline_id"] in pipelines else "unknown", ()), 19 | 'queued': (lambda job, pipelines: job["job_data"]["queued_at"], ('date', 'numeric')), 20 | 'started': (lambda job, pipelines: job["job_data"]["started_at"], ('date', 'numeric')), 21 | 'last active': (lambda job, pipelines: int(job["ts"]), ('date', 'coloured', 'numeric')), 22 | 'dl urls': (lambda job, pipelines: job["job_data"]["items_downloaded"], ('numeric',)), 23 | 'dl size': (lambda job, pipelines: job["job_data"]["bytes_downloaded"], ('size', 'numeric')), 24 | 'queue': (lambda job, pipelines: job["job_data"]["items_queued"] - job["job_data"]["items_downloaded"], ('numeric',)), 25 | 'con': (lambda job, pipelines: job["job_data"]["concurrency"], ('numeric',)), 26 | 'delay min': (lambda job, pipelines: int(job["job_data"]["delay_min"]), ('hidden', 'numeric')), 27 | 'delay max': (lambda job, pipelines: int(job["job_data"]["delay_max"]), ('hidden', 'numeric')), 28 | 'delay': (lambda job, pipelines: str(int(job["job_data"]["delay_min"])) + '-' + str(int(job["job_data"]["delay_max"])) if job["job_data"]["delay_min"] != job["job_data"]["delay_max"] else str(int(job["job_data"]["delay_min"])), ()), 29 | } 30 | defaultSort = 'jobid' 31 | 32 | # Validate 33 | if any('truncatable' in colDef[1] and any(x in colDef[1] for x in ('date', 'coloured', 'size')) for colDef in columns.values()): 34 | # Truncation code can't handle renderers 35 | raise RuntimeError('Invalid column definitions: cannot 
combine date/coloured/size with truncatable') 36 | 37 | # Filter function 38 | def make_field_filter(column, op, value, caseSensitive = True): 39 | compFunc = { 40 | "=": lambda a, b: a == b, 41 | "<": lambda a, b: a < b, 42 | ">": lambda a, b: a > b, 43 | "^": lambda a, b: a.startswith(b), 44 | "*": lambda a, b: b in a, 45 | "$": lambda a, b: a.endswith(b), 46 | "~": lambda a, b: re.search(b, a) is not None, 47 | }[op] 48 | transform = { 49 | True: (lambda x: x), 50 | False: (lambda x: x.lower() if isinstance(x, str) else x) 51 | }[caseSensitive] 52 | return (lambda job: compFunc(transform(job[column]), transform(value))) 53 | 54 | 55 | # Parse arguments 56 | class FilterAction(argparse.Action): 57 | def __call__(self, parser, namespace, values, optionString = None): 58 | if optionString == '--pyfilter': 59 | try: 60 | func = compile(values[0], '', 'eval') 61 | except Exception as e: 62 | parser.error(f'Could not compile filter expression: {type(e).__module__}.{type(e).__name__}: {e!s}') 63 | setattr(namespace, self.dest, lambda job: eval(func, {}, {'job': job})) 64 | return 65 | global columns 66 | match = re.match(r"^(?P<column>[A-Za-z ]+)(?P<op>[=<>^*$~])(?P<value>.*)$", values[0]) 67 | if not match: 68 | parser.error('Invalid filter') 69 | filterDict = match.groupdict() 70 | filterDict["column"] = filterDict["column"].lower() 71 | assert filterDict["column"] in columns 72 | if 'numeric' in columns[filterDict['column']][1]: 73 | filterDict['value'] = float(filterDict['value']) 74 | if 'date' in columns[filterDict['column']][1] and filterDict['value'] < 0: 75 | filterDict['value'] = time.time() + filterDict['value'] 76 | setattr(namespace, self.dest, make_field_filter(filterDict['column'], filterDict['op'], filterDict['value'], caseSensitive = (optionString in ('--filter', '-f')))) 77 | 78 | def parse_sort(value): 79 | global columns 80 | sortDesc = value.startswith('-') 81 | if sortDesc: 82 | value = value[1:] 83 | value = value.lower() 84 | if value not in columns: 85 | parser.error('Invalid column name') 86 | return (value, sortDesc) 87 | 88 | class SortAction(argparse.Action): 89 | def __call__(self, parser, namespace, values, optionString = None): 90 | result = parse_sort(values[0]) 91 | if getattr(namespace, self.dest, None) is None: 92 | setattr(namespace, self.dest, []) 93 | getattr(namespace, self.dest).append(result) 94 | 95 | parser = argparse.ArgumentParser(formatter_class = argparse.RawTextHelpFormatter) 96 | parser.add_argument('--filter', '-f', nargs = 1, type = str, action = FilterAction, help = '\n'.join([ 97 | 'Filter the table for rows where a COLUMN has a certain VALUE. If specified multiple times, only the last value is used.', 98 | 'FILTER has the format COLUMN{=|<|>|^|*|$|~}VALUE', 99 | ' = means the value must be exactly as specified.', 100 | ' < and > mean it must be less/greater than the specified.', 101 | ' ^ and $ mean it must start/end with the specified.', 102 | ' * means it must contain the specified.', 103 | ' ~ means it must match the specified regex.', 104 | ])) 105 | parser.add_argument('--ifilter', '-i', nargs = 1, type = str, action = FilterAction, dest = 'filter', help = 'Like --filter but case-insensitive') 106 | parser.add_argument('--pyfilter', nargs = 1, type = str, action = FilterAction, dest = 'filter', help = 'A Python expression for filtering using the local variable `job`') 107 | parser.add_argument('--sort', '-s', nargs = 1, type = str, action = SortAction, help = "Sort the table by a COLUMN (descending if preceded by '-'). 
This can be used multiple times to refine the sorting.") 108 | parser.add_argument('--mode', choices = ('table', 'dashboard-regex', 'con-d-commands', 'format'), default = 'table', help = '\n'.join([ 109 | 'Output modes:', 110 | ' table: print a table of the matched jobs', 111 | ' dashboard-regex: compose a regular expression that can be used on the dashboard to actively watch the jobs matched by the filter', 112 | ' con-d-commands: print !con and !d commands for the current settings', 113 | ' format: print some output for each job, separated by newlines; this requires the --format option', 114 | ])) 115 | parser.add_argument('--no-colours', '--no-colors', action = 'store_true', help = "Don't colourise the last activity column if it's been a while. (Table mode only)") 116 | parser.add_argument('--no-table', action = 'store_true', help = 'Raw output without feeding through column(1); columns are separated by tabs. (Table mode only)') 117 | parser.add_argument('--no-truncate', action = 'store_true', help = 'Disable truncating long values if the terminal width would be exceeded. (Table mode without --no-table only)') 118 | parser.add_argument('--dates', action = 'store_true', help = 'Print dates instead of elapsed times for queued/started/last active columns. (Table mode only)') 119 | parser.add_argument('--replace-concurrency', nargs = 1, metavar = 'CON', type = int, help = 'Replace the concurrency value with the specified one. (con-d-commands mode only)') 120 | parser.add_argument('--replace-delay', nargs = 2, metavar = ('MIN', 'MAX'), type = int, help = 'Replace the delay values with the specified ones. (con-d-commands mode only)') 121 | parser.add_argument('--format', help = 'Output format for the format mode; this must be a Python format string and can use any column name in lower-case with spaces replaced by underscores; e.g. "{url} {last_active}". 
(Format mode only)') 122 | args = parser.parse_args() 123 | 124 | if args.mode == 'format' and not args.format: 125 | print('Error: when using format mode, --format is required.', file = sys.stderr) 126 | sys.exit(1) 127 | 128 | if not args.sort: 129 | args.sort = [parse_sort(defaultSort)] 130 | 131 | if args.mode == 'con-d-commands': 132 | args.mode = 'format' 133 | args.format = '!con {jobid} {con}\n!d {jobid} {delay_min} {delay_max}' 134 | else: 135 | args.replace_concurrency = None 136 | args.replace_delay = None 137 | 138 | # Retrieve 139 | def fetch(url): 140 | req = urllib.request.Request(url) 141 | req.add_header('Accept', 'application/json') 142 | with urllib.request.urlopen(req) as f: 143 | if f.getcode() != 200: 144 | raise RuntimeError('Could not fetch job data') 145 | return json.load(f) 146 | 147 | jobdata = fetch('http://dashboard.at.ninjawedding.org/logs/recent?count=1') 148 | pipelinedata = fetch('http://dashboard.at.ninjawedding.org/pipelines') 149 | currentTime = time.time() 150 | 151 | # Process 152 | pipelines = {p["id"]: p["nickname"] for p in pipelinedata["pipelines"]} 153 | 154 | jobs = [] 155 | for job in jobdata: 156 | jobs.append({column: columnFunc(job, pipelines) for column, (columnFunc, _) in columns.items()}) 157 | 158 | if not jobs: 159 | # Nothing to do 160 | sys.exit(0) 161 | 162 | # Filter 163 | if args.filter: 164 | jobs = [job for job in jobs if args.filter(job)] 165 | 166 | if not jobs: 167 | sys.exit(0) 168 | 169 | # Sort 170 | class reversor: # https://stackoverflow.com/a/56842689 171 | def __init__(self, obj): 172 | self.obj = obj 173 | 174 | def __eq__(self, other): 175 | return other.obj == self.obj 176 | 177 | def __lt__(self, other): 178 | return other.obj < self.obj 179 | 180 | sortColumns = tuple((column, descending, columns[column]) for column, descending in args.sort) 181 | if not args.dates: 182 | # Reverse sorting order for columns which have a date attribute since the column will have elapsed time 183 | sortColumns = tuple((column, not descending if 'date' in columnInfo[1] else descending, columnInfo) for column, descending, columnInfo in sortColumns) 184 | jobs = sorted(jobs, key = lambda job: tuple(job[column] if not descending else reversor(job[column]) for column, descending, _ in sortColumns)) 185 | 186 | # Concurrency and delay overrides if specified and relevant 187 | if args.replace_concurrency is not None or args.replace_delay is not None: 188 | for job in jobs: 189 | if args.replace_concurrency is not None: 190 | job['con'] = args.replace_concurrency[0] 191 | if args.replace_delay is not None: 192 | job['delay min'] = args.replace_delay[0] 193 | job['delay max'] = args.replace_delay[1] 194 | 195 | # Non-table output modes 196 | if args.mode == 'dashboard-regex': 197 | print('^(' + '|'.join(re.escape(job['url']) for job in jobs) + ')$') 198 | sys.exit(0) 199 | elif args.mode == 'format': 200 | for job in jobs: 201 | print(args.format.format(**{key.replace(' ', '_'): value for key, value in job.items()})) 202 | sys.exit(0) 203 | 204 | # Renderers 205 | def render_date(ts, coloured = False): 206 | global args, currentTime 207 | diff = currentTime - ts 208 | colourStr = f"\x1b[{0 if diff < 6 * 3600 else 7};31m" if coloured and diff >= 300 else "" 209 | colourEndStr = "\x1b[0m" if colourStr else "" 210 | if args.dates: 211 | return (colourStr, datetime.datetime.fromtimestamp(ts).isoformat(sep = " "), colourEndStr) 212 | if diff <= 0: 213 | return "now" 214 | elif diff < 60: 215 | return "<1 min ago" 216 | elif diff < 86400: 217 | 
return (colourStr, (f"{diff // 3600:.0f}h " if diff >= 3600 else "") + f"{(diff % 3600) // 60:.0f}mn ago", colourEndStr) 218 | else: 219 | return (colourStr, f"{diff // 86400:.0f}d {(diff % 86400) // 3600:.0f}h ago", colourEndStr) 220 | 221 | def render_size(size): 222 | units = ('B', 'KiB', 'MiB', 'GiB', 'TiB') 223 | unitIdx = min(int(math.log(size, 1024)), len(units) - 1) if size >= 1 else 0 224 | if unitIdx == 0: 225 | return f'{size} B' # No decimal places 226 | return f'{size / 1024 ** unitIdx:.1f} {units[unitIdx]}' 227 | 228 | renderers = {} 229 | for column, (_, columnAttr) in columns.items(): 230 | if "date" in columnAttr: 231 | if "coloured" in columnAttr: 232 | renderers[column] = lambda x: render_date(x, coloured = not args.no_colours) 233 | else: 234 | renderers[column] = render_date 235 | elif "size" in columnAttr: 236 | renderers[column] = render_size 237 | elif isinstance(jobs[0][column], (int, float)): 238 | renderers[column] = str 239 | 240 | for job in jobs: 241 | for column in renderers: 242 | job[column] = renderers[column](job[column]) 243 | 244 | # Truncate if applicable 245 | printableColumns = {column: colDef for column, colDef in columns.items() if 'hidden' not in colDef[1]} 246 | if not args.no_table and not args.no_truncate: 247 | widthsD = {column: max(itertools.chain((len(column),), (len(job[column]) if isinstance(job[column], str) else len(job[column][1]) for job in jobs))) for column in printableColumns} 248 | minWidthsD = {column: len(column) for column in printableColumns} 249 | try: 250 | termWidth = os.get_terminal_size().columns 251 | except OSError as e: 252 | if e.errno == 25: 253 | # Inappropriate ioctl for device (stdout not a terminal, happens e.g. when redirecting or piping) 254 | # Silently ignore this and don't truncate 255 | termWidth = float('Inf') 256 | else: 257 | raise 258 | overage = sum(x + 2 for x in widthsD.values()) - 2 - termWidth 259 | if overage > 0: 260 | if sum((widthsD[column] if 'truncatable' not in colDef[1] else minWidthsD[column]) + 2 for column, colDef in printableColumns.items()) - 2 > termWidth: 261 | # Even truncating all truncatable columns to the minimum width is not sufficient, i.e. can't match this terminal width. 
Print a warning and proceed normally 262 | print('Sorry, cannot truncate columns to terminal width', file = sys.stderr) 263 | else: 264 | # Distribute overage to truncatable columns proportionally to each column's length over the minimum 265 | truncatableColumns = {column: colDef for column, colDef in columns.items() if 'truncatable' in colDef[1]} 266 | totalOverMin = sum(widthsD[column] - minWidthsD[column] for column in truncatableColumns) 267 | trWidthsD = {column: math.floor(widthsD[column] - (widthsD[column] - minWidthsD[column]) / totalOverMin * overage) for column in truncatableColumns} 268 | if sum(widthsD[column] - trWidthsD[column] for column in truncatableColumns) - overage == 1: 269 | # Truncated one more character than necessary due to the flooring; add it again to the shortest column 270 | trWidthsD[min(trWidthsD, key = trWidthsD.get)] += 1 271 | for job in jobs: 272 | for column in truncatableColumns: 273 | if len(job[column]) > trWidthsD[column]: 274 | job[column] = job[column][:trWidthsD[column] - 1] + '…' 275 | 276 | # Print 277 | output = [] 278 | output.append(tuple(column.upper() for column in columns if "hidden" not in columns[column][1])) 279 | for job in jobs: 280 | output.append(tuple(job[column] for column in columns if "hidden" not in columns[column][1])) 281 | 282 | if not args.no_table: 283 | widths = tuple(max(len(field) if isinstance(field, str) else len(field[1]) for field in column) for column in zip(*output)) 284 | for row in output: 285 | print(' '.join((value.ljust(width) if isinstance(value, str) else ''.join((value[0], value[1], value[2], ' ' * (width - len(value[1]))))) for value, width in zip(row, widths))) 286 | else: 287 | for row in output: 288 | print('\t'.join(field if isinstance(field, str) else ''.join(field) for field in row)) 289 | -------------------------------------------------------------------------------- /warc-tiny: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Tiny tool for WARC stuff. 4 | # Operating modes: 5 | # warc-tiny colour FILES -- coloured output of the WARCs for easier reading 6 | # warc-tiny dump-responses [-m|--meta] FILES -- dump the HTTP response bodies to stdout 7 | # With --meta, prefix every line with the filename, record offset, record ID, and target URI; e.g. 'file.warc.gz:123::: foobar' 8 | # The record offset may be -1 if it is not known. 9 | # The filename is wrapped in angled brackets if it contains a colon; the target URI is always wrapped in angled brackets (since it virtually always contains a colon). 
10 | # warc-tiny verify FILES -- verify the integrity of a WARC by comparing the digests 11 | 12 | import base64 13 | import gzip 14 | import hashlib 15 | import sys 16 | import zlib 17 | 18 | 19 | def GzipDecompressor(): 20 | return zlib.decompressobj(16 + zlib.MAX_WBITS) 21 | 22 | 23 | class DummyDecompressor: 24 | def decompress(self, data): 25 | return data 26 | 27 | 28 | class Event: 29 | pass 30 | 31 | 32 | class NewFile(Event): 33 | def __init__(self, filename): 34 | self._filename = filename 35 | 36 | @property 37 | def filename(self): 38 | return self._filename 39 | 40 | 41 | class BeginOfRecord(Event): 42 | def __init__(self, warcHeaders, rawData): 43 | self._warcHeaders = warcHeaders 44 | self._rawData = rawData 45 | 46 | @property 47 | def warcHeaders(self): 48 | return self._warcHeaders 49 | 50 | @property 51 | def rawData(self): 52 | return self._rawData 53 | 54 | 55 | class _DataChunk(Event): 56 | def __init__(self, data): 57 | self._data = data 58 | 59 | @property 60 | def data(self): 61 | return self._data 62 | 63 | def __repr__(self): 64 | return '{}({!r}{})'.format(type(self).__name__, self._data[:50], '...' if len(self._data) > 50 else '') 65 | 66 | 67 | class WARCBlockChunk(_DataChunk): 68 | def __init__(self, data, isHttpHeader = None): 69 | super().__init__(data) 70 | self._isHttpHeader = isHttpHeader 71 | 72 | @property 73 | def isHttpHeader(self): 74 | # True: the chunk represents (part of) the HTTP header; False: the chunk represents (part of) the HTTP body; None: the chunk is not part of an HTTP record 75 | return self._isHttpHeader 76 | 77 | 78 | class RawHTTPBodyChunk(_DataChunk): 79 | ''' 80 | Because many tools misunderstood the WARC specifications, the Payload-Digest was often implemented without stripping transfer encoding. 81 | This is like HTTPBodyChunk but without transfer encoding stripping. 82 | ''' 83 | 84 | 85 | class HTTPBodyChunk(_DataChunk): 86 | ''' 87 | Representing a part of the HTTP body with transfer encoding stripped. 88 | ''' 89 | 90 | 91 | class EndOfRecord(Event): 92 | pass 93 | 94 | 95 | def iter_warc(f): 96 | # Yields Events 97 | # BeginOfRecord's rawData does not include the CRLF CRLF at the end of the headers, and WARCBlockChunk does not contain the CRLF CRLF after the block either. 
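# (Note: gzip.open reads the concatenated gzip members of a record-at-a-time compressed WARC as one continuous stream, which is why the per-record compression recommended by the spec needs no special handling here.)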
98 | 99 | with gzip.open(f, 'rb') as fp: 100 | buf = b'' 101 | while True: 102 | # Read WARC header 103 | while b'\r\n\r\n' not in buf: 104 | try: 105 | buf = buf + fp.read(4096) 106 | except EOFError: 107 | break 108 | if not buf: 109 | break 110 | if not buf: 111 | break 112 | warcHeaderBuf, buf = buf.split(b'\r\n\r\n', 1) 113 | assert warcHeaderBuf.startswith(b'WARC/1.0\r\n') or warcHeaderBuf.startswith(b'WARC/1.1\r\n') 114 | assert b'\r\nContent-Length:' in warcHeaderBuf 115 | warcHeaders = tuple(tuple(map(bytes.strip, x.split(b':', 1))) for x in warcHeaderBuf.split(b'\r\n')) 116 | warcContentType = next(x[1] for x in warcHeaders if x[0] == b'Content-Type') 117 | warcContentLength = int(next(x[1] for x in warcHeaders if x[0] == b'Content-Length')) 118 | warcType = next(x[1] for x in warcHeaders if x[0] == b'WARC-Type') 119 | yield BeginOfRecord(warcHeaders, warcHeaderBuf) 120 | recordID = next(x[1] for x in warcHeaders if x[0] == b'WARC-Record-ID') 121 | 122 | # Read WARC block (and skip CRLFCRLF at the end of the record) 123 | if len(buf) < warcContentLength + 4: 124 | try: 125 | buf = buf + fp.read(warcContentLength + 4 - len(buf)) 126 | except EOFError: 127 | pass 128 | if len(buf) < warcContentLength + 4: 129 | print('Error: truncated WARC', file = sys.stderr) 130 | break 131 | warcContent = buf[:warcContentLength] 132 | buf = buf[warcContentLength + 4:] 133 | 134 | # Decode HTTP body if appropriate 135 | if warcContentType in (b'application/http;msgtype=request', b'application/http; msgtype=request') and warcType == b'request': 136 | httpType = 'request' 137 | elif warcContentType in (b'application/http;msgtype=response', b'application/http; msgtype=response') and warcType == b'response': 138 | httpType = 'response' 139 | else: 140 | httpType = None 141 | if httpType is not None: 142 | if b'\r\n\r\n' in warcContent: 143 | httpHeaders, httpBody = warcContent.split(b'\r\n\r\n', 1) 144 | 145 | # Parse headers and extract transfer encoding 146 | httpHeaderLines = [tuple(map(bytes.strip, x.split(b':', 1))) for x in httpHeaders.split(b'\r\n')] 147 | chunked = False 148 | gzipped = False 149 | if b'\r\ntransfer-encoding' in httpHeaders.lower(): 150 | transferEncoding = next(x[1] for x in httpHeaderLines if x[0].lower() == b'transfer-encoding') 151 | transferEncodings = set(map(bytes.strip, transferEncoding.split(b','))) 152 | chunked = b'chunked' in transferEncodings 153 | gzipped = b'gzip' in transferEncodings 154 | 155 | yield WARCBlockChunk(httpHeaders + b'\r\n\r\n', isHttpHeader = True) 156 | yield WARCBlockChunk(httpBody, isHttpHeader = False) 157 | yield RawHTTPBodyChunk(httpBody) 158 | 159 | # Decode body 160 | if gzipped: 161 | httpDecompressor = GzipDecompressor() 162 | else: 163 | httpDecompressor = DummyDecompressor() 164 | if chunked: 165 | while True: 166 | try: 167 | chunkLineEnd = httpBody.index(b'\r\n') 168 | except ValueError: 169 | print('Error: could not find chunk line end in record {}, skipping'.format(recordID), file = sys.stderr) 170 | break 171 | chunkLine = httpBody[:chunkLineEnd] 172 | if b';' in chunkLine: 173 | chunkLength = chunkLine[:chunkLine.index(b';')].strip() 174 | else: 175 | chunkLength = chunkLine.strip() 176 | if chunkLength.lstrip(b'0123456789abcdefABCDEF') != b'': 177 | print('Error: malformed chunk length {!r} in record {}, skipping'.format(chunkLength, recordID), file = sys.stderr) 178 | break 179 | chunkLength = int(chunkLength, base = 16) 180 | if chunkLength == 0: 181 | break 182 | chunk = httpDecompressor.decompress(httpBody[chunkLineEnd + 2 
: chunkLineEnd + 2 + chunkLength]) 183 | yield HTTPBodyChunk(chunk) 184 | httpBody = httpBody[chunkLineEnd + 2 + chunkLength + 2:] 185 | else: 186 | yield HTTPBodyChunk(httpDecompressor.decompress(httpBody)) 187 | else: 188 | print('Warning: malformed HTTP request or response in record {}, skipping'.format(recordID), file = sys.stderr) 189 | yield WARCBlockChunk(warcContent) 190 | else: 191 | yield WARCBlockChunk(warcContent) 192 | yield EndOfRecord() 193 | 194 | 195 | class ProcessMode: 196 | @classmethod 197 | def split_args(cls, args): 198 | '''Split args into arguments to be passed into __init__ and filenames''' 199 | return (), args 200 | 201 | def process_event(self, event): 202 | raise NotImplementedError 203 | 204 | 205 | class Digest: 206 | def __init__(self, digest): 207 | self._digest = digest 208 | 209 | def format(self, digest = None): 210 | raise NotImplementedError 211 | 212 | def equals(self, digest): 213 | return self._digest == digest 214 | 215 | 216 | class Base32Digest(Digest): 217 | def format(self, digest = None): 218 | return base64.b32encode(digest if digest else self._digest) 219 | 220 | 221 | class HexDigest(Digest): 222 | def format(self, digest = None): 223 | return (digest if digest else self._digest).hex() 224 | 225 | 226 | class VerifyMode(ProcessMode): 227 | def __init__(self): 228 | self._blockDigester = None 229 | self._recordedBlockDigest = None 230 | self._payloadDigester = None 231 | self._brokenPayloadDigester = None 232 | self._recordedPayloadDigest = None 233 | self._printedBrokenPayloadWarning = False 234 | 235 | def parse_digest(self, digest): 236 | if not digest.startswith(b'sha1:'): 237 | print('Warning: don\'t understand hash format: {!r}'.format(digest), file = sys.stderr) 238 | return None 239 | if len(digest) == 37 and digest.rstrip(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567') == b'sha1:': # 5 for 'sha1:' + 32 for base-32 hash 240 | return Base32Digest(base64.b32decode(digest[5:])) 241 | if len(digest) == 45 and digest.rstrip(b'0123456789abcdef') == b'sha1:': 242 | return HexDigest(bytes.fromhex(digest[5:].decode('ascii'))) 243 | return None 244 | 245 | def process_event(self, event): 246 | if type(event) is NewFile: 247 | self._printedBrokenPayloadWarning = False 248 | elif type(event) is BeginOfRecord: 249 | if any(x[0] == b'WARC-Block-Digest' for x in event.warcHeaders): 250 | self._blockDigester = hashlib.sha1() 251 | self._recordedBlockDigest = self.parse_digest(next(x[1] for x in event.warcHeaders if x[0] == b'WARC-Block-Digest')) 252 | else: 253 | self._blockDigester = None 254 | self._recordedBlockDigest = None 255 | if any(x[0] == b'WARC-Payload-Digest' for x in event.warcHeaders): 256 | self._payloadDigester = hashlib.sha1() 257 | self._brokenPayloadDigester = hashlib.sha1() 258 | self._recordedPayloadDigest = self.parse_digest(next(x[1] for x in event.warcHeaders if x[0] == b'WARC-Payload-Digest')) 259 | else: 260 | self._payloadDigester = None 261 | self._brokenPayloadDigester = None 262 | self._recordedPayloadDigest = None 263 | self._recordID = next(x[1] for x in event.warcHeaders if x[0] == b'WARC-Record-ID') 264 | self._recordType = next(x[1] for x in event.warcHeaders if x[0] == b'WARC-Type') 265 | elif type(event) is WARCBlockChunk: 266 | if self._blockDigester: 267 | self._blockDigester.update(event.data) 268 | elif type(event) is HTTPBodyChunk: 269 | if self._payloadDigester: 270 | self._payloadDigester.update(event.data) 271 | elif type(event) is RawHTTPBodyChunk: 272 | if self._brokenPayloadDigester: 273 | 
self._brokenPayloadDigester.update(event.data) 274 | elif type(event) is EndOfRecord: 275 | if self._blockDigester and self._recordedBlockDigest: 276 | if not self._recordedBlockDigest.equals(self._blockDigester.digest()): 277 | print('Block digest mismatch for record {}: recorded {} v calculated {}'.format(self._recordID, self._recordedBlockDigest.format(), self._recordedBlockDigest.format(self._blockDigester.digest())), file = sys.stderr) 278 | if self._payloadDigester and self._recordType in (b'request', b'response'): #TODO: Support revisit 279 | if not self._recordedPayloadDigest.equals(self._payloadDigester.digest()): 280 | if self._recordedPayloadDigest.equals(self._brokenPayloadDigester.digest()): 281 | if not self._printedBrokenPayloadWarning: 282 | print('Warning: WARC uses incorrect payload digests without stripping the transfer encoding', file = sys.stderr) 283 | self._printedBrokenPayloadWarning = True 284 | else: 285 | print('Payload digest mismatch for record {}: recorded {} vs. calculated {} (calculated broken {})'.format(self._recordID, self._recordedPayloadDigest.format(), self._recordedPayloadDigest.format(self._payloadDigester.digest()), self._recordedPayloadDigest.format(self._brokenPayloadDigester.digest())), file = sys.stderr) 286 | 287 | 288 | class DumpResponsesMode(ProcessMode): 289 | @classmethod 290 | def split_args(cls, args): 291 | if args[0] == '-m' or args[0] == '--meta': 292 | return (True,), args[1:] 293 | return (False,), args 294 | 295 | def __init__(self, withMeta): 296 | self._printEOR = False 297 | self._isResponse = False 298 | self._withMeta = withMeta 299 | if withMeta: 300 | self._recordID = None 301 | self._targetURI = None 302 | self._buffer = b'' 303 | 304 | def _write(self, data): 305 | if not self._withMeta: 306 | sys.stdout.buffer.write(data) 307 | return 308 | 309 | buf = self._buffer + data 310 | lines = buf.split(b'\n') 311 | self._buffer = lines.pop() # Since there's an explicit `_write(b'\r\n')` at the end of the record, this implicitly resets the buffer as well 312 | for line in lines: 313 | sys.stdout.buffer.write(':'.join((self._filename, '-1', self._recordID, '<' + self._targetURI + '>', '')).encode('utf-8')) 314 | sys.stdout.buffer.write(line) 315 | sys.stdout.buffer.write(b'\n') 316 | 317 | def process_event(self, event): 318 | if type(event) is NewFile: 319 | self._filename = event.filename 320 | if ':' in self._filename: 321 | self._filename = '<' + self._filename + '>' 322 | elif type(event) is BeginOfRecord: 323 | warcContentType = next(x[1] for x in event.warcHeaders if x[0] == b'Content-Type') 324 | warcType = next(x[1] for x in event.warcHeaders if x[0] == b'WARC-Type') 325 | self._isResponse = warcContentType in (b'application/http;msgtype=response', b'application/http; msgtype=response') and warcType == b'response' 326 | self._printEOR = False 327 | if self._withMeta: 328 | # Both of these are URIs, and per RFC 3986, those can only contain ASCII characters. 
329 | self._recordID = next(x[1] for x in event.warcHeaders if x[0] == b'WARC-Record-ID').decode('ascii') 330 | self._targetURI = next((x[1] for x in event.warcHeaders if x[0] == b'WARC-Target-URI'), b'').decode('ascii') 331 | self._buffer = b'' 332 | elif type(event) is HTTPBodyChunk: 333 | if self._isResponse: 334 | self._printEOR = True 335 | self._write(event.data) 336 | elif type(event) is EndOfRecord: 337 | if self._printEOR: 338 | self._write(b'\r\n') 339 | 340 | 341 | class COLOURS: 342 | RESET = b'\x1b[0m' 343 | GREEN = b'\x1b[0;32m' 344 | LIGHTGREEN = b'\x1b[1;32m' 345 | PURPLE = b'\x1b[0;35m' 346 | LIGHTPURPLE = b'\x1b[1;35m' 347 | RED = b'\x1b[0;31m' 348 | INVERTED = b'\x1b[7m' 349 | 350 | 351 | class ColourMode(ProcessMode): 352 | def __init__(self): 353 | self._hadHttpStatusLine = False 354 | 355 | def _replace_esc(self, data): 356 | return data.replace(b'\x1b', COLOURS.INVERTED + b'ESC' + COLOURS.RESET) 357 | 358 | def _print_line(self, line, colour, withLF = True, colourOnlyBeforeColon = False): 359 | if colourOnlyBeforeColon: 360 | if b':' in line: 361 | offset = line.index(b':') 362 | else: 363 | offset = 0 364 | else: 365 | offset = len(line) 366 | if offset > 0: 367 | sys.stdout.buffer.write(colour) 368 | sys.stdout.buffer.write(self._replace_esc(line[:offset])) 369 | sys.stdout.buffer.write(COLOURS.RESET) 370 | sys.stdout.buffer.write(line[offset:]) 371 | if withLF: 372 | sys.stdout.buffer.write(b'\n') 373 | 374 | def _print_data(self, data, colour, colourOnlyBeforeColon): 375 | later = False 376 | for line in data.split(b'\r\n'): 377 | if later: 378 | sys.stdout.buffer.write(b'\n') 379 | self._print_line(line, colour, withLF = False, colourOnlyBeforeColon = colourOnlyBeforeColon) 380 | later = True 381 | 382 | def process_event(self, event): 383 | if type(event) is BeginOfRecord: 384 | firstNewline = event.rawData.index(b'\r\n') 385 | self._print_line(event.rawData[:firstNewline], COLOURS.LIGHTGREEN) 386 | self._print_data(event.rawData[firstNewline + 2:], COLOURS.GREEN, True) 387 | sys.stdout.buffer.write(b'\n\n') # separator between header and block 388 | self._hadHttpStatusLine = False 389 | elif type(event) is WARCBlockChunk: 390 | if event.isHttpHeader is True: 391 | if not self._hadHttpStatusLine: 392 | firstNewline = event.data.index(b'\r\n') 393 | self._print_line(event.data[:firstNewline], COLOURS.LIGHTPURPLE) 394 | offset = firstNewline + 2 395 | self._hadHttpStatusLine = True 396 | else: 397 | offset = 0 398 | self._print_data(event.data[offset:], COLOURS.PURPLE, True) 399 | elif event.isHttpHeader is False: 400 | self._print_data(event.data, COLOURS.RED, False) 401 | elif event.isHttpHeader is None: 402 | sys.stdout.buffer.write(self._replace_esc(event.data)) 403 | elif type(event) is EndOfRecord: 404 | sys.stdout.buffer.write(b'\n\n') 405 | 406 | def main(): 407 | processorMap = {'verify': VerifyMode, 'dump-responses': DumpResponsesMode, 'colour': ColourMode} 408 | 409 | assert len(sys.argv) - 1 >= 2 410 | mode = sys.argv[1] 411 | assert mode in processorMap 412 | processorArgs, files = processorMap[mode].split_args(sys.argv[2:]) 413 | assert files 414 | 415 | processor = processorMap[mode](*processorArgs) 416 | 417 | try: 418 | for f in files: 419 | print('Info: processing {}'.format(f), file = sys.stderr) 420 | processor.process_event(NewFile(f)) 421 | for event in iter_warc(f): 422 | processor.process_event(event) 423 | except BrokenPipeError: 424 | return 425 | 426 | 427 | if __name__ == '__main__': 428 | main() 429 | 
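# Example (a hedged sketch, not part of warc-tiny itself): the events yielded by iter_warc can also be consumed
# programmatically. Assuming this script has been saved as an importable module named `warc_tiny.py`, something
# like the following would print the WARC-Target-URI of every response record in the given files:
#
#   import sys
#   from warc_tiny import iter_warc, BeginOfRecord
#
#   for filename in sys.argv[1:]:
#       for event in iter_warc(filename):
#           if type(event) is BeginOfRecord:
#               headers = dict(event.warcHeaders)  # warcHeaders is a tuple of (name, value) pairs of bytes
#               if headers.get(b'WARC-Type') == b'response':
#                   sys.stdout.buffer.write(headers.get(b'WARC-Target-URI', b'') + b'\n')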
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 
62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. 
A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 
186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. 
A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | --------------------------------------------------------------------------------