4 | # Distributed under the GNU General Public License, version 2.0.
5 | #
6 | # This script allows you to see repository status in your prompt.
7 | #
8 | # To enable:
9 | #
10 | # 1) Copy this file to somewhere (e.g. ~/.git-prompt.sh).
11 | # 2) Add the following line to your .bashrc/.zshrc:
12 | # source ~/.git-prompt.sh
13 | # 3a) Change your PS1 to call __git_ps1 as
14 | # command-substitution:
15 | # Bash: PS1='[\u@\h \W$(__git_ps1 " (%s)")]\$ '
16 | # ZSH: setopt PROMPT_SUBST ; PS1='[%n@%m %c$(__git_ps1 " (%s)")]\$ '
17 | # the optional argument will be used as format string.
18 | # 3b) Alternatively, for a slightly faster prompt, __git_ps1 can
19 | # be used for PROMPT_COMMAND in Bash or for precmd() in Zsh
20 | # with two parameters, <pre> and <post>, which are strings
21 | # you would put in $PS1 before and after the status string
22 | # generated by the git-prompt machinery. e.g.
23 | # Bash: PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'
24 | # will show username, at-sign, host, colon, cwd, then
25 | # various status string, followed by dollar and SP, as
26 | # your prompt.
27 | # ZSH: precmd () { __git_ps1 "%n" ":%~$ " "|%s" }
28 | # will show username, pipe, then various status string,
29 | # followed by colon, cwd, dollar and SP, as your prompt.
30 | # Optionally, you can supply a third argument with a printf
31 | # format string to finetune the output of the branch status
32 | #
33 | # The repository status will be displayed only if you are currently in a
34 | # git repository. The %s token is the placeholder for the shown status.
35 | #
36 | # The prompt status always includes the current branch name.
37 | #
38 | # In addition, if you set GIT_PS1_SHOWDIRTYSTATE to a nonempty value,
39 | # unstaged (*) and staged (+) changes will be shown next to the branch
40 | # name. You can configure this per-repository with the
41 | # bash.showDirtyState variable, which defaults to true once
42 | # GIT_PS1_SHOWDIRTYSTATE is enabled.
43 | #
44 | # You can also see if currently something is stashed, by setting
45 | # GIT_PS1_SHOWSTASHSTATE to a nonempty value. If something is stashed,
46 | # then a '$' will be shown next to the branch name.
47 | #
48 | # If you would like to see if there're untracked files, then you can set
49 | # GIT_PS1_SHOWUNTRACKEDFILES to a nonempty value. If there're untracked
50 | # files, then a '%' will be shown next to the branch name. You can
51 | # configure this per-repository with the bash.showUntrackedFiles
52 | # variable, which defaults to true once GIT_PS1_SHOWUNTRACKEDFILES is
53 | # enabled.
54 | #
55 | # If you would like to see the difference between HEAD and its upstream,
56 | # set GIT_PS1_SHOWUPSTREAM="auto". A "<" indicates you are behind, ">"
57 | # indicates you are ahead, "<>" indicates you have diverged and "="
58 | # indicates that there is no difference. You can further control
59 | # behaviour by setting GIT_PS1_SHOWUPSTREAM to a space-separated list
60 | # of values:
61 | #
62 | # verbose show number of commits ahead/behind (+/-) upstream
63 | # legacy don't use the '--count' option available in recent
64 | # versions of git-rev-list
65 | # git always compare HEAD to @{upstream}
66 | # svn always compare HEAD to your SVN upstream
67 | #
68 | # By default, __git_ps1 will compare HEAD to your SVN upstream if it can
69 | # find one, or @{upstream} otherwise. Once you have set
70 | # GIT_PS1_SHOWUPSTREAM, you can override it on a per-repository basis by
71 | # setting the bash.showUpstream config variable.
72 | #
73 | # If you would like to see more information about the identity of
74 | # commits checked out as a detached HEAD, set GIT_PS1_DESCRIBE_STYLE
75 | # to one of these values:
76 | #
77 | # contains relative to newer annotated tag (v1.6.3.2~35)
78 | # branch relative to newer tag or branch (master~4)
79 | # describe relative to older annotated tag (v1.6.3.1-13-gdd42c2f)
80 | # default exactly matching tag
81 | #
82 | # If you would like a colored hint about the current dirty state, set
83 | # GIT_PS1_SHOWCOLORHINTS to a nonempty value. The colors are based on
84 | # the colored output of "git status -sb" and are available only when
85 | # using __git_ps1 for PROMPT_COMMAND or precmd.
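#
# Example: a minimal .bashrc sketch combining the options documented above
# (toggle values are illustrative; the PROMPT_COMMAND line is the Bash
# example from step 3b):
#
#     source ~/.git-prompt.sh
#     GIT_PS1_SHOWDIRTYSTATE=1
#     GIT_PS1_SHOWSTASHSTATE=1
#     GIT_PS1_SHOWUNTRACKEDFILES=1
#     GIT_PS1_SHOWUPSTREAM="auto"
#     GIT_PS1_SHOWCOLORHINTS=1
#     PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'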
86 |
87 | # stores the divergence from upstream in $p
88 | # used by GIT_PS1_SHOWUPSTREAM
89 | __git_ps1_show_upstream ()
90 | {
91 | local key value
92 | local svn_remote svn_url_pattern count n
93 | local upstream=git legacy="" verbose=""
94 |
95 | svn_remote=()
96 | # get some config options from git-config
97 | local output="$(git config -z --get-regexp '^(svn-remote\..*\.url|bash\.showupstream)$' 2>/dev/null | tr '\0\n' '\n ')"
98 | while read -r key value; do
99 | case "$key" in
100 | bash.showupstream)
101 | GIT_PS1_SHOWUPSTREAM="$value"
102 | if [[ -z "${GIT_PS1_SHOWUPSTREAM}" ]]; then
103 | p=""
104 | return
105 | fi
106 | ;;
107 | svn-remote.*.url)
108 | svn_remote[$((${#svn_remote[@]} + 1))]="$value"
109 | svn_url_pattern+="\\|$value"
110 | upstream=svn+git # default upstream is SVN if available, else git
111 | ;;
112 | esac
113 | done <<< "$output"
114 |
115 | # parse configuration values
116 | for option in ${GIT_PS1_SHOWUPSTREAM}; do
117 | case "$option" in
118 | git|svn) upstream="$option" ;;
119 | verbose) verbose=1 ;;
120 | legacy) legacy=1 ;;
121 | esac
122 | done
123 |
124 | # Find our upstream
125 | case "$upstream" in
126 | git) upstream="@{upstream}" ;;
127 | svn*)
128 | # get the upstream from the "git-svn-id: ..." in a commit message
129 | # (git-svn uses essentially the same procedure internally)
130 | local -a svn_upstream
131 | svn_upstream=($(git log --first-parent -1 \
132 | --grep="^git-svn-id: \(${svn_url_pattern#??}\)" 2>/dev/null))
133 | if [[ 0 -ne ${#svn_upstream[@]} ]]; then
134 | svn_upstream=${svn_upstream[${#svn_upstream[@]} - 2]}
135 | svn_upstream=${svn_upstream%@*}
136 | local n_stop="${#svn_remote[@]}"
137 | for ((n=1; n <= n_stop; n++)); do
138 | svn_upstream=${svn_upstream#${svn_remote[$n]}}
139 | done
140 |
141 | if [[ -z "$svn_upstream" ]]; then
142 | # default branch name for checkouts with no layout:
143 | upstream=${GIT_SVN_ID:-git-svn}
144 | else
145 | upstream=${svn_upstream#/}
146 | fi
147 | elif [[ "svn+git" = "$upstream" ]]; then
148 | upstream="@{upstream}"
149 | fi
150 | ;;
151 | esac
152 |
153 | # Find how many commits we are ahead/behind our upstream
154 | if [[ -z "$legacy" ]]; then
155 | count="$(git rev-list --count --left-right \
156 | "$upstream"...HEAD 2>/dev/null)"
157 | else
158 | # produce equivalent output to --count for older versions of git
159 | local commits
160 | if commits="$(git rev-list --left-right "$upstream"...HEAD 2>/dev/null)"
161 | then
162 | local commit behind=0 ahead=0
163 | for commit in $commits
164 | do
165 | case "$commit" in
166 | "<"*) ((behind++)) ;;
167 | *) ((ahead++)) ;;
168 | esac
169 | done
170 | count="$behind $ahead"
171 | else
172 | count=""
173 | fi
174 | fi
175 |
176 | # calculate the result
177 | if [[ -z "$verbose" ]]; then
178 | case "$count" in
179 | "") # no upstream
180 | p="" ;;
181 | "0 0") # equal to upstream
182 | p="=" ;;
183 | "0 "*) # ahead of upstream
184 | p=">" ;;
185 | *" 0") # behind upstream
186 | p="<" ;;
187 | *) # diverged from upstream
188 | p="<>" ;;
189 | esac
190 | else
191 | case "$count" in
192 | "") # no upstream
193 | p="" ;;
194 | "0 0") # equal to upstream
195 | p=" u=" ;;
196 | "0 "*) # ahead of upstream
197 | p=" u+${count#0 }" ;;
198 | *" 0") # behind upstream
199 | p=" u-${count% 0}" ;;
200 | *) # diverged from upstream
201 | p=" u+${count#* }-${count% *}" ;;
202 | esac
203 | fi
204 |
205 | }
206 |
207 | # Helper function that is meant to be called from __git_ps1. It
208 | # injects color codes into the appropriate gitstring variables used
209 | # to build a gitstring.
210 | __git_ps1_colorize_gitstring ()
211 | {
212 | if [[ -n ${ZSH_VERSION-} ]]; then
213 | local c_red='%F{red}'
214 | local c_green='%F{green}'
215 | local c_lblue='%F{blue}'
216 | local c_clear='%f'
217 | else
218 | # Using \[ and \] around colors is necessary to prevent
219 | # issues with command line editing/browsing/completion!
220 | local c_red='\[\e[31m\]'
221 | local c_green='\[\e[32m\]'
222 | local c_lblue='\[\e[1;34m\]'
223 | local c_clear='\[\e[0m\]'
224 | fi
225 | local bad_color=$c_red
226 | local ok_color=$c_green
227 | local flags_color="$c_lblue"
228 |
229 | local branch_color=""
230 | if [ $detached = no ]; then
231 | branch_color="$ok_color"
232 | else
233 | branch_color="$bad_color"
234 | fi
235 | c="$branch_color$c"
236 |
237 | z="$c_clear$z"
238 | if [ "$w" = "*" ]; then
239 | w="$bad_color$w"
240 | fi
241 | if [ -n "$i" ]; then
242 | i="$ok_color$i"
243 | fi
244 | if [ -n "$s" ]; then
245 | s="$flags_color$s"
246 | fi
247 | if [ -n "$u" ]; then
248 | u="$bad_color$u"
249 | fi
250 | r="$c_clear$r"
251 | }
252 |
253 | # __git_ps1 accepts 0 or 1 arguments (i.e., format string)
254 | # when called from PS1 using command substitution
255 | # in this mode it prints text to add to bash PS1 prompt (includes branch name)
256 | #
257 | # __git_ps1 requires 2 or 3 arguments when called from PROMPT_COMMAND (pc)
258 | # in that case it _sets_ PS1. The arguments are parts of a PS1 string.
259 | # when two arguments are given, the first is prepended and the second appended
260 | # to the state string when assigned to PS1.
261 | # The optional third parameter will be used as printf format string to further
262 | # customize the output of the git-status string.
263 | # In this mode you can request colored hints using GIT_PS1_SHOWCOLORHINTS=true
264 | __git_ps1 ()
265 | {
266 | local pcmode=no
267 | local detached=no
268 | local ps1pc_start='\u@\h:\w '
269 | local ps1pc_end='\$ '
270 | local printf_format=' (%s)'
271 |
272 | case "$#" in
273 | 2|3) pcmode=yes
274 | ps1pc_start="$1"
275 | ps1pc_end="$2"
276 | printf_format="${3:-$printf_format}"
277 | ;;
278 | 0|1) printf_format="${1:-$printf_format}"
279 | ;;
280 | *) return
281 | ;;
282 | esac
283 |
284 | local repo_info rev_parse_exit_code
285 | repo_info="$(git rev-parse --git-dir --is-inside-git-dir \
286 | --is-bare-repository --is-inside-work-tree \
287 | --short HEAD 2>/dev/null)"
288 | rev_parse_exit_code="$?"
289 |
290 | if [ -z "$repo_info" ]; then
291 | if [ $pcmode = yes ]; then
292 | #In PC mode PS1 always needs to be set
293 | PS1="$ps1pc_start$ps1pc_end"
294 | fi
295 | return
296 | fi
297 |
298 | local short_sha
299 | if [ "$rev_parse_exit_code" = "0" ]; then
300 | short_sha="${repo_info##*$'\n'}"
301 | repo_info="${repo_info%$'\n'*}"
302 | fi
303 | local inside_worktree="${repo_info##*$'\n'}"
304 | repo_info="${repo_info%$'\n'*}"
305 | local bare_repo="${repo_info##*$'\n'}"
306 | repo_info="${repo_info%$'\n'*}"
307 | local inside_gitdir="${repo_info##*$'\n'}"
308 | local g="${repo_info%$'\n'*}"
309 |
310 | local r=""
311 | local b=""
312 | local step=""
313 | local total=""
314 | if [ -d "$g/rebase-merge" ]; then
315 | read b 2>/dev/null <"$g/rebase-merge/head-name"
316 | read step 2>/dev/null <"$g/rebase-merge/msgnum"
317 | read total 2>/dev/null <"$g/rebase-merge/end"
318 | if [ -f "$g/rebase-merge/interactive" ]; then
319 | r="|REBASE-i"
320 | else
321 | r="|REBASE-m"
322 | fi
323 | else
324 | if [ -d "$g/rebase-apply" ]; then
325 | read step 2>/dev/null <"$g/rebase-apply/next"
326 | read total 2>/dev/null <"$g/rebase-apply/last"
327 | if [ -f "$g/rebase-apply/rebasing" ]; then
328 | read b 2>/dev/null <"$g/rebase-apply/head-name"
329 | r="|REBASE"
330 | elif [ -f "$g/rebase-apply/applying" ]; then
331 | r="|AM"
332 | else
333 | r="|AM/REBASE"
334 | fi
335 | elif [ -f "$g/MERGE_HEAD" ]; then
336 | r="|MERGING"
337 | elif [ -f "$g/CHERRY_PICK_HEAD" ]; then
338 | r="|CHERRY-PICKING"
339 | elif [ -f "$g/REVERT_HEAD" ]; then
340 | r="|REVERTING"
341 | elif [ -f "$g/BISECT_LOG" ]; then
342 | r="|BISECTING"
343 | fi
344 |
345 | if [ -n "$b" ]; then
346 | :
347 | elif [ -h "$g/HEAD" ]; then
348 | # symlink symbolic ref
349 | b="$(git symbolic-ref HEAD 2>/dev/null)"
350 | else
351 | local head=""
352 | if ! read head 2>/dev/null <"$g/HEAD"; then
353 | if [ $pcmode = yes ]; then
354 | PS1="$ps1pc_start$ps1pc_end"
355 | fi
356 | return
357 | fi
358 | # is it a symbolic ref?
359 | b="${head#ref: }"
360 | if [ "$head" = "$b" ]; then
361 | detached=yes
362 | b="$(
363 | case "${GIT_PS1_DESCRIBE_STYLE-}" in
364 | (contains)
365 | git describe --contains HEAD ;;
366 | (branch)
367 | git describe --contains --all HEAD ;;
368 | (describe)
369 | git describe HEAD ;;
370 | (* | default)
371 | git describe --tags --exact-match HEAD ;;
372 | esac 2>/dev/null)" ||
373 |
374 | b="$short_sha..."
375 | b="($b)"
376 | fi
377 | fi
378 | fi
379 |
380 | if [ -n "$step" ] && [ -n "$total" ]; then
381 | r="$r $step/$total"
382 | fi
383 |
384 | local w=""
385 | local i=""
386 | local s=""
387 | local u=""
388 | local c=""
389 | local p=""
390 |
391 | if [ "true" = "$inside_gitdir" ]; then
392 | if [ "true" = "$bare_repo" ]; then
393 | c="BARE:"
394 | else
395 | b="GIT_DIR!"
396 | fi
397 | elif [ "true" = "$inside_worktree" ]; then
398 | if [ -n "${GIT_PS1_SHOWDIRTYSTATE-}" ] &&
399 | [ "$(git config --bool bash.showDirtyState)" != "false" ]
400 | then
401 | git diff --no-ext-diff --quiet --exit-code || w="*"
402 | if [ -n "$short_sha" ]; then
403 | git diff-index --cached --quiet HEAD -- || i="+"
404 | else
405 | i="#"
406 | fi
407 | fi
408 | if [ -n "${GIT_PS1_SHOWSTASHSTATE-}" ] &&
409 | [ -r "$g/refs/stash" ]; then
410 | s="$"
411 | fi
412 |
413 | if [ -n "${GIT_PS1_SHOWUNTRACKEDFILES-}" ] &&
414 | [ "$(git config --bool bash.showUntrackedFiles)" != "false" ] &&
415 | git ls-files --others --exclude-standard --error-unmatch -- '*' >/dev/null 2>/dev/null
416 | then
417 | u="%${ZSH_VERSION+%}"
418 | fi
419 |
420 | if [ -n "${GIT_PS1_SHOWUPSTREAM-}" ]; then
421 | __git_ps1_show_upstream
422 | fi
423 | fi
424 |
425 | local z="${GIT_PS1_STATESEPARATOR-" "}"
426 |
427 | # NO color option unless in PROMPT_COMMAND mode
428 | if [ $pcmode = yes ] && [ -n "${GIT_PS1_SHOWCOLORHINTS-}" ]; then
429 | __git_ps1_colorize_gitstring
430 | fi
431 |
432 | local f="$w$i$s$u"
433 | local gitstring="$c${b##refs/heads/}${f:+$z$f}$r$p"
434 |
435 | if [ $pcmode = yes ]; then
436 | if [[ -n ${ZSH_VERSION-} ]]; then
437 | gitstring=$(printf -- "$printf_format" "$gitstring")
438 | else
439 | printf -v gitstring -- "$printf_format" "$gitstring"
440 | fi
441 | PS1="$ps1pc_start$gitstring$ps1pc_end"
442 | else
443 | printf -- "$printf_format" "$gitstring"
444 | fi
445 | }
446 |
--------------------------------------------------------------------------------
/old/docker/scripts/z.sh:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2009 rupa deadwyler under the WTFPL license
2 |
3 | # maintains a jump-list of the directories you actually use
4 | #
5 | # INSTALL:
6 | # * put something like this in your .bashrc/.zshrc:
7 | # . /path/to/z.sh
8 | # * cd around for a while to build up the db
9 | # * PROFIT!!
10 | # * optionally:
11 | # set $_Z_CMD in .bashrc/.zshrc to change the command (default z).
12 | # set $_Z_DATA in .bashrc/.zshrc to change the datafile (default ~/.z).
13 | # set $_Z_NO_RESOLVE_SYMLINKS to prevent symlink resolution.
14 | # set $_Z_NO_PROMPT_COMMAND if you're handling PROMPT_COMMAND yourself.
15 | # set $_Z_EXCLUDE_DIRS to an array of directories to exclude.
16 | #
17 | # USE:
18 | # * z foo # cd to most frecent dir matching foo
19 | # * z foo bar # cd to most frecent dir matching foo and bar
20 | # * z -r foo # cd to highest ranked dir matching foo
21 | # * z -t foo # cd to most recently accessed dir matching foo
22 | # * z -l foo # list matches instead of cd
23 | # * z -c foo # restrict matches to subdirs of $PWD
24 |
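# EXAMPLE:
# * a .bashrc/.zshrc sketch using the optional settings above (the command
#   name and datafile path are illustrative):
#     export _Z_CMD=j
#     export _Z_DATA="$HOME/.z-data"
#     . /path/to/z.sh
#     # then: j foo, j -l foo, j -r foo, ...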
25 | [ -d "${_Z_DATA:-$HOME/.z}" ] && {
26 | echo "ERROR: z.sh's datafile (${_Z_DATA:-$HOME/.z}) is a directory."
27 | }
28 |
29 | _z() {
30 |
31 | local datafile="${_Z_DATA:-$HOME/.z}"
32 |
33 | # bail if we don't own ~/.z (we're another user but our ENV is still set)
34 | [ -f "$datafile" -a ! -O "$datafile" ] && return
35 |
36 | # add entries
37 | if [ "$1" = "--add" ]; then
38 | shift
39 |
40 | # $HOME isn't worth matching
41 | [ "$*" = "$HOME" ] && return
42 |
43 | # don't track excluded dirs
44 | local exclude
45 | for exclude in "${_Z_EXCLUDE_DIRS[@]}"; do
46 | [ "$*" = "$exclude" ] && return
47 | done
48 |
49 | # maintain the data file
50 | local tempfile="$datafile.$RANDOM"
51 | while read line; do
52 | # only count directories
53 | [ -d "${line%%\|*}" ] && echo $line
54 | done < "$datafile" | awk -v path="$*" -v now="$(date +%s)" -F"|" '
55 | BEGIN {
56 | rank[path] = 1
57 | time[path] = now
58 | }
59 | $2 >= 1 {
60 | # drop ranks below 1
61 | if( $1 == path ) {
62 | rank[$1] = $2 + 1
63 | time[$1] = now
64 | } else {
65 | rank[$1] = $2
66 | time[$1] = $3
67 | }
68 | count += $2
69 | }
70 | END {
71 | if( count > 6000 ) {
72 | # aging
73 | for( x in rank ) print x "|" 0.99*rank[x] "|" time[x]
74 | } else for( x in rank ) print x "|" rank[x] "|" time[x]
75 | }
76 | ' 2>/dev/null >| "$tempfile"
77 | # do our best to avoid clobbering the datafile in a race condition
78 | if [ $? -ne 0 -a -f "$datafile" ]; then
79 | env rm -f "$tempfile"
80 | else
81 | env mv -f "$tempfile" "$datafile" || env rm -f "$tempfile"
82 | fi
83 |
84 | # tab completion
85 | elif [ "$1" = "--complete" ]; then
86 | while read line; do
87 | [ -d "${line%%\|*}" ] && echo $line
88 | done < "$datafile" | awk -v q="$2" -F"|" '
89 | BEGIN {
90 | if( q == tolower(q) ) imatch = 1
91 | split(substr(q, 3), fnd, " ")
92 | }
93 | {
94 | if( imatch ) {
95 | for( x in fnd ) tolower($1) !~ tolower(fnd[x]) && $1 = ""
96 | } else {
97 | for( x in fnd ) $1 !~ fnd[x] && $1 = ""
98 | }
99 | if( $1 ) print $1
100 | }
101 | ' 2>/dev/null
102 |
103 | else
104 | # list/go
105 | while [ "$1" ]; do case "$1" in
106 | --) while [ "$1" ]; do shift; local fnd="$fnd${fnd:+ }$1";done;;
107 | -*) local opt=${1:1}; while [ "$opt" ]; do case ${opt:0:1} in
108 | c) local fnd="^$PWD $fnd";;
109 | h) echo "${_Z_CMD:-z} [-chlrtx] args" >&2; return;;
110 | x) sed -i "\:^${PWD}|.*:d" "$datafile";;
111 | l) local list=1;;
112 | r) local typ="rank";;
113 | t) local typ="recent";;
114 | esac; opt=${opt:1}; done;;
115 | *) local fnd="$fnd${fnd:+ }$1";;
116 | esac; local last=$1; shift; done
117 | [ "$fnd" -a "$fnd" != "^$PWD " ] || local list=1
118 |
119 | # if we hit enter on a completion just go there
120 | case "$last" in
121 | # completions will always start with /
122 | /*) [ -z "$list" -a -d "$last" ] && cd "$last" && return;;
123 | esac
124 |
125 | # no file yet
126 | [ -f "$datafile" ] || return
127 |
128 | local cd
129 | cd="$(while read line; do
130 | [ -d "${line%%\|*}" ] && echo $line
131 | done < "$datafile" | awk -v t="$(date +%s)" -v list="$list" -v typ="$typ" -v q="$fnd" -F"|" '
132 | function frecent(rank, time) {
133 | # relate frequency and time
134 | dx = t - time
135 | if( dx < 3600 ) return rank * 4
136 | if( dx < 86400 ) return rank * 2
137 | if( dx < 604800 ) return rank / 2
138 | return rank / 4
139 | }
140 | function output(files, out, common) {
141 | # list or return the desired directory
142 | if( list ) {
143 | cmd = "sort -n >&2"
144 | for( x in files ) {
145 | if( files[x] ) printf "%-10s %s\n", files[x], x | cmd
146 | }
147 | if( common ) {
148 | printf "%-10s %s\n", "common:", common > "/dev/stderr"
149 | }
150 | } else {
151 | if( common ) out = common
152 | print out
153 | }
154 | }
155 | function common(matches) {
156 | # find the common root of a list of matches, if it exists
157 | for( x in matches ) {
158 | if( matches[x] && (!short || length(x) < length(short)) ) {
159 | short = x
160 | }
161 | }
162 | if( short == "/" ) return
163 | # use a copy to escape special characters, as we want to return
164 | # the original. yeah, this escaping is awful.
165 | clean_short = short
166 | gsub(/[\(\)\[\]\|]/, "\\\\&", clean_short)
167 | for( x in matches ) if( matches[x] && x !~ clean_short ) return
168 | return short
169 | }
170 | BEGIN { split(q, words, " "); hi_rank = ihi_rank = -9999999999 }
171 | {
172 | if( typ == "rank" ) {
173 | rank = $2
174 | } else if( typ == "recent" ) {
175 | rank = $3 - t
176 | } else rank = frecent($2, $3)
177 | matches[$1] = imatches[$1] = rank
178 | for( x in words ) {
179 | if( $1 !~ words[x] ) delete matches[$1]
180 | if( tolower($1) !~ tolower(words[x]) ) delete imatches[$1]
181 | }
182 | if( matches[$1] && matches[$1] > hi_rank ) {
183 | best_match = $1
184 | hi_rank = matches[$1]
185 | } else if( imatches[$1] && imatches[$1] > ihi_rank ) {
186 | ibest_match = $1
187 | ihi_rank = imatches[$1]
188 | }
189 | }
190 | END {
191 | # prefer case sensitive
192 | if( best_match ) {
193 | output(matches, best_match, common(matches))
194 | } else if( ibest_match ) {
195 | output(imatches, ibest_match, common(imatches))
196 | }
197 | }
198 | ')"
199 | [ $? -gt 0 ] && return
200 | [ "$cd" ] && cd "$cd"
201 | fi
202 | }
203 |
204 | alias ${_Z_CMD:-z}='_z 2>&1'
205 |
206 | [ "$_Z_NO_RESOLVE_SYMLINKS" ] || _Z_RESOLVE_SYMLINKS="-P"
207 |
208 | if compctl >/dev/null 2>&1; then
209 | # zsh
210 | [ "$_Z_NO_PROMPT_COMMAND" ] || {
211 | # populate directory list, avoid clobbering any other precmds.
212 | if [ "$_Z_NO_RESOLVE_SYMLINKS" ]; then
213 | _z_precmd() {
214 | _z --add "${PWD:a}"
215 | }
216 | else
217 | _z_precmd() {
218 | _z --add "${PWD:A}"
219 | }
220 | fi
221 | [[ -n "${precmd_functions[(r)_z_precmd]}" ]] || {
222 | precmd_functions[$(($#precmd_functions+1))]=_z_precmd
223 | }
224 | }
225 | _z_zsh_tab_completion() {
226 | # tab completion
227 | local compl
228 | read -l compl
229 | reply=(${(f)"$(_z --complete "$compl")"})
230 | }
231 | compctl -U -K _z_zsh_tab_completion _z
232 | elif complete >/dev/null 2>&1; then
233 | # bash
234 | # tab completion
235 | complete -o filenames -C '_z --complete "$COMP_LINE"' ${_Z_CMD:-z}
236 | [ "$_Z_NO_PROMPT_COMMAND" ] || {
237 | # populate directory list. avoid clobbering other PROMPT_COMMANDs.
238 | grep "_z --add" <<< "$PROMPT_COMMAND" >/dev/null || {
239 | PROMPT_COMMAND="$PROMPT_COMMAND"$'\n''_z --add "$(pwd '$_Z_RESOLVE_SYMLINKS' 2>/dev/null)" 2>/dev/null;'
240 | }
241 | }
242 | fi
243 |
--------------------------------------------------------------------------------
/old/docker/serf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Michieljoris/node-haproxy/54d2033461051eca7c445e4599f69db72f9ee004/old/docker/serf
--------------------------------------------------------------------------------
/old/docker/snapshot:
--------------------------------------------------------------------------------
1 | alive: haproxy 172.17.0.125:7946
2 | alive: agent-two 172.17.42.1:7946
3 | clock: 1
4 | clock: 2
5 | not-alive: agent-two
6 | alive: agent-two 172.17.42.1:7946
7 | clock: 3
8 | leave
9 | clock: 4
10 | alive: haproxy 172.17.0.125:7946
11 | clock: 5
12 | alive: agent-two 172.17.42.1:7946
13 | clock: 6
14 | not-alive: agent-two
15 | alive: agent-two 172.17.42.1:7946
16 | clock: 7
17 | query-clock: 7
18 | query-clock: 8
19 | query-clock: 9
20 | query-clock: 10
21 | query-clock: 11
22 | query-clock: 14
23 | query-clock: 15
24 | query-clock: 16
25 | query-clock: 17
26 | query-clock: 18
27 | query-clock: 19
28 | query-clock: 20
29 | query-clock: 21
30 | query-clock: 22
31 | query-clock: 23
32 | query-clock: 24
33 | query-clock: 25
34 | query-clock: 26
35 | query-clock: 27
36 | query-clock: 28
37 | query-clock: 29
38 | query-clock: 30
39 | event-clock: 1
40 | event-clock: 2
41 | query-clock: 31
42 | query-clock: 32
43 | event-clock: 3
44 | clock: 8
45 | not-alive: agent-two
46 | leave
47 |
--------------------------------------------------------------------------------
/old/etc-default-haproxy:
--------------------------------------------------------------------------------
1 | # Set ENABLED to 1 if you want the init script to start haproxy.
2 | ENABLED=1
3 | # Add extra flags here.
4 | #EXTRAOPTS="-de -m 16"
5 |
--------------------------------------------------------------------------------
/old/etc-init.d-haproxy:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ### BEGIN INIT INFO
3 | # Provides: haproxy
4 | # Required-Start: $local_fs $network $remote_fs
5 | # Required-Stop: $local_fs $remote_fs
6 | # Default-Start: 2 3 4 5
7 | # Default-Stop: 0 1 6
8 | # Short-Description: fast and reliable load balancing reverse proxy
9 | # Description: This file should be used to start and stop haproxy.
10 | ### END INIT INFO
11 |
12 | # Author: Arnaud Cornet
13 |
14 | PATH=/sbin:/usr/sbin:/bin:/usr/bin
15 | PIDFILE=/var/run/haproxy.pid
16 | CONFIG=/etc/haproxy/haproxy.cfg
17 | # PIDFILE=/home/michieljoris/mysrc/javascript/node-haproxy/haproxy.pid
18 | # CONFIG=/home/michieljoris/mysrc/javascript/node-haproxy/haproxy.cfg
19 | HAPROXY=/usr/local/sbin/haproxy
20 | EXTRAOPTS=
21 | ENABLED=1
22 |
23 | test -x $HAPROXY || exit 0
24 | test -f "$CONFIG" || exit 0
25 |
26 | if [ -e /etc/default/haproxy ]; then
27 | . /etc/default/haproxy
28 | fi
29 |
30 | test "$ENABLED" != "0" || exit 0
31 |
32 | [ -f /etc/default/rcS ] && . /etc/default/rcS
33 | . /lib/lsb/init-functions
34 |
35 |
36 | haproxy_start()
37 | {
38 | start-stop-daemon --start --pidfile "$PIDFILE" \
39 | --exec $HAPROXY -- -f "$CONFIG" -D -p "$PIDFILE" \
40 | $EXTRAOPTS || return 2
41 | return 0
42 | }
43 |
44 | haproxy_stop()
45 | {
46 | if [ ! -f $PIDFILE ] ; then
47 | # This is a success according to LSB
48 | return 0
49 | fi
50 | for pid in $(cat $PIDFILE) ; do
51 | /bin/kill $pid || return 4
52 | done
53 | rm -f $PIDFILE
54 | return 0
55 | }
56 |
57 | haproxy_reload()
58 | {
59 | $HAPROXY -f "$CONFIG" -p $PIDFILE -D $EXTRAOPTS -sf $(cat $PIDFILE) \
60 | || return 2
61 | return 0
62 | }
63 |
64 | haproxy_status()
65 | {
66 | if [ ! -f $PIDFILE ] ; then
67 | # program not running
68 | return 3
69 | fi
70 |
71 | for pid in $(cat $PIDFILE) ; do
72 | if ! ps --no-headers p "$pid" | grep haproxy > /dev/null ; then
73 | # program running, bogus pidfile
74 | return 1
75 | fi
76 | done
77 |
78 | return 0
79 | }
80 |
81 |
82 | case "$1" in
83 | start)
84 | log_daemon_msg "Starting haproxy" "haproxy"
85 | haproxy_start
86 | ret=$?
87 | case "$ret" in
88 | 0)
89 | log_end_msg 0
90 | ;;
91 | 1)
92 | log_end_msg 1
93 | echo "pid file '$PIDFILE' found, haproxy not started."
94 | ;;
95 | 2)
96 | log_end_msg 1
97 | ;;
98 | esac
99 | exit $ret
100 | ;;
101 | stop)
102 | log_daemon_msg "Stopping haproxy" "haproxy"
103 | haproxy_stop
104 | ret=$?
105 | case "$ret" in
106 | 0|1)
107 | log_end_msg 0
108 | ;;
109 | 2)
110 | log_end_msg 1
111 | ;;
112 | esac
113 | exit $ret
114 | ;;
115 | reload|force-reload)
116 | log_daemon_msg "Reloading haproxy" "haproxy"
117 | haproxy_reload
118 | case "$?" in
119 | 0|1)
120 | log_end_msg 0
121 | ;;
122 | 2)
123 | log_end_msg 1
124 | ;;
125 | esac
126 | ;;
127 | restart)
128 | log_daemon_msg "Restarting haproxy" "haproxy"
129 | haproxy_stop
130 | haproxy_start
131 | case "$?" in
132 | 0)
133 | log_end_msg 0
134 | ;;
135 | 1)
136 | log_end_msg 1
137 | ;;
138 | 2)
139 | log_end_msg 1
140 | ;;
141 | esac
142 | ;;
143 | status)
144 | haproxy_status
145 | ret=$?
146 | case "$ret" in
147 | 0)
148 | echo "haproxy is running."
149 | ;;
150 | 1)
151 | echo "haproxy dead, but $PIDFILE exists."
152 | ;;
153 | *)
154 | echo "haproxy not running."
155 | ;;
156 | esac
157 | exit $ret
158 | ;;
159 | *)
160 | echo "Usage: /etc/init.d/haproxy {start|stop|reload|restart|status}"
161 | exit 2
162 | ;;
163 | esac
164 |
165 | :
166 |
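# Example usage (illustrative; assumes the script is installed as
# /etc/init.d/haproxy and is run as root):
#   /etc/init.d/haproxy start
#   /etc/init.d/haproxy reload    # re-execs haproxy with -sf against the old pids
#   /etc/init.d/haproxy status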
--------------------------------------------------------------------------------
/old/server.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | var Aqueduct = require('..')
3 | , Hapi = require('hapi')
4 | , shoe = require('shoe')
5 | , util = require('util')
6 | ;
7 |
8 | // require('nodetime').profile({
9 | // accountKey: '1765a180c09b73ea0a7d7262ff6dc60d776bf395',
10 | // appName: 'Aqueuct'
11 | // });
12 |
13 | var optimist = require('optimist')
14 | .options({
15 | host: {
16 | default : '0.0.0.0',
17 | describe: 'host to bind to'
18 | },
19 | port: {
20 | default : 10000,
21 | describe: 'port to bind to'
22 | },
23 | label: {
24 | describe: 'logical label for this aqueduct'
25 | },
26 | thalassaHost: {
27 | default : '127.0.0.1',
28 | describe: 'host of the Thalassa server'
29 | },
30 | thalassaPort: {
31 | default : 5001,
32 | describe: 'socket port of the Thalassa server'
33 | },
34 | thalassaApiPort: {
35 | default : 9000,
36 | describe: 'http API port of the Thalassa server'
37 | },
38 | haproxySocketPath: {
39 | default: '/tmp/haproxy.status.sock',
40 | describe: 'path to Haproxy socket file'
41 | },
42 | haproxyPidPath: {
43 | default: '/var/run/haproxy.pid',
44 | describe: 'path to Haproxy pid file'
45 | },
46 | haproxyCfgPath: {
47 | default: '/etc/haproxy/haproxy.cfg',
48 | describe: 'generated Haproxy config location'
49 | },
50 | templateFile: {
51 | default: __dirname + '/../default.haproxycfg.tmpl',
52 | describe: 'template used to generate Haproxy config'
53 | },
54 | persistence: {
55 | describe: 'directory to save configuration'
56 | },
57 | dbPath: {
58 | default : __dirname + '/db',
59 | describe: 'filesystem path for leveldb'
60 | },
61 | sudo: {
62 | describe: 'use sudo when starting haproxy'
63 | },
64 | debug: {
65 | boolean: true,
65 | describe: 'enable debug logging'
67 | },
68 | help: {
69 | alias: 'h'
70 | }
71 | });
72 |
73 | var argv = optimist.argv;
74 | if (argv.h) {
75 | optimist.showHelp();
76 | process.exit(0);
77 | }
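// Example invocation (a sketch using the options declared above; any flag
// omitted falls back to its default):
//
//   node server.js --host 0.0.0.0 --port 10000 \
//     --haproxyCfgPath /etc/haproxy/haproxy.cfg \
//     --templateFile ./default.haproxycfg.tmpl --debug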
78 |
79 | var log = argv.log = require('../lib/defaultLogger')( (argv.debug == true) ? 'debug' : 'error' );
80 | var aqueduct = new Aqueduct(argv);
81 | var server = new Hapi.Server({
82 | port: argv.port,
83 | host: argv.host
84 | });
85 | server.route(aqueduct.apiRoutes());
86 |
87 | // anything at the top level goes to index.html
88 | server.route({ method: 'GET', path: '/{p}', handler: { file: { path: __dirname+'/../public/index.html' }}});
89 |
90 | server.route({
91 | method: 'GET',
92 | path: '/{path*}',
93 | handler: {
94 | directory: { path: __dirname + '/../public', listing: false, index: true }
95 | }
96 | });
97 |
98 | //Setup websocket to the client/browser
99 | var sock = shoe();
100 | sock.install(server.listener, '/aqueductStreams');
101 | sock.on('connection', function (stream) {
102 | var s = aqueduct.createMuxStream();
103 | stream.pipe(s).pipe(stream);
104 |
105 | stream.on('end', function () {
106 | s.destroy();
107 | })
108 |
109 | });
110 |
111 | sock.on('log', function (severity, msg) {
112 | log(severity, msg);
113 | })
114 |
115 | server.start(function () {
116 | log('info', util.format("Thalassa Aqueduct listening on %s:%s", argv.host, argv.port));
117 | });
118 |
119 | aqueduct.haproxyManager.on('configChanged', function() { log('debug', 'Config changed') });
120 | aqueduct.haproxyManager.on('reloaded', function() { log('debug', 'Haproxy reloaded') });
121 | aqueduct.data.stats.on('changes', function (it) { log('debug', it.state.id, it.state.status )})
122 |
123 | // var memwatch = require('memwatch');
124 | // memwatch.on('leak', function(info) { log('debug', 'leak', info); });
125 | // memwatch.on('stats', function(stats) { log('debug', 'stats', stats); });
126 | // var hd = new memwatch.HeapDiff();
127 |
128 | // setInterval(function () {
129 | // log('debug', 'diff', hd.end().after.size);
130 | // hd = new memwatch.HeapDiff();
131 | // }, 10000);
132 |
133 |
--------------------------------------------------------------------------------
/old/test-api.js:
--------------------------------------------------------------------------------
1 |
2 | // api.validatepostbackendsubscription = function () {
3 | // return {
4 | // payload: {
5 | // key : hapi.types.string().optional(),
6 | // name : hapi.types.string().optional(),
7 | // version : hapi.types.string()
8 | // }
9 | // };
10 | // };
11 |
12 |
13 |
14 | // api.validateputbackend = function () {
15 | // return {
16 | // payload: {
17 | // _type : hapi.types.string().optional(),
18 | // key : hapi.types.string(),
19 | // type : hapi.types.string().valid(['dynamic', 'static']),
20 | // name : hapi.types.string().optional(),
21 | // version : hapi.types.string().optional(),
22 | // balance : hapi.types.string().optional(),
23 | // host : hapi.types.string().optional(),
24 | // mod : hapi.types.string().valid(['http', 'tcp']).optional(),
25 | // members : hapi.types.array().optional(),
26 | // natives : hapi.types.array().optional(),
27 | // health : hapi.types.object({
28 | // method: hapi.types.string().valid(['get','post']).optional(),
29 | // uri: hapi.types.string().optional(),
30 | // httpversion: hapi.types.string().valid(['http/1.0', 'http/1.1']).optional(),
31 | // interval: hapi.types.number().min(1).optional()
32 | // }).optional()
33 | // }
34 | // };
35 | // };
36 |
37 |
38 | // api.validateputfrontend = function () {
39 | // return {
40 | // payload: {
41 | // _type : hapi.types.string().optional(),
42 | // key : hapi.types.string(),
43 | // bind : hapi.types.string(),
44 | // backend : hapi.types.string(),
45 | // mode : hapi.types.string().valid(['http', 'tcp']).optional(),
46 | // keepalive : hapi.types.string().valid(['default','close','server-close']).optional(),
47 | // rules : hapi.types.array().optional(),
48 | // natives : hapi.types.array().optional()
49 | // }
50 | // };
51 | // };
52 |
53 | var haproxy = module.exports({ ipc: true });
54 |
55 | // setTimeout(function() {
56 | // haproxy.putBackend('backend1', {
57 | // // "type" : "dynamic|static"
58 | // "type" : "static"
59 | // // , "name" : "foo" // only required if type = dynamic
60 | // // , "version" : "1.0.0" // only required if type = dynamic
61 | // // , "balance" : "roundrobin|source" // defaults to roundrobin
62 | // // , "host" : "myapp.com" // default: undefined, if specified request to member will contain this host header
63 | // // , "health" : { // optional health check
64 | // // "method": "GET" // HTTP method
65 | // // , "uri": "/checkity-check" // URI to call
66 | // // , "httpVersion": "HTTP/1.0" // HTTP/1.0 or HTTP/1.1 `host` required if HTTP/1.1
67 | // // , "interval": 5000 // period to check, milliseconds
68 | // // }
69 | // // , "mode" : "http|tcp" // default: http
70 | // , "natives": [] // array of strings of raw config USE SPARINGLY!!
71 | // , "members" : [
72 | // {
73 | // // "name": "myapp",
74 | // // "version": "1.0.0",
75 | // "host": "192.168.1.184",
76 | // "port": 3000
77 | // // "lastKnown": 1378762056885,
78 | // // "meta": {
79 | // // "hostname": "dev-use1b-pr-01-myapp-01x00x00-01",
80 | // // "pid": 17941,
81 | // // "registered": 1378740834616
82 | // // },
83 | // // "id": "/myapp/1.0.0/10.10.240.121/8080"
84 | // },
85 | // // {
86 | // // // "name": "myapp",
87 | // // // "version": "1.0.0",
88 | // // "host": "192.168.1.184",
89 | // // "port": 8002
90 | // // // "lastKnown": 1378762060226,
91 | // // // "meta": {
92 | // // // "hostname": "dev-use1b-pr-01-myapp-01x00x00-02",
93 | // // // "pid": 18020,
94 | // // // "registered": 1378762079883
95 | // // // },
96 | // // // "id": "/myapp/1.0.0/10.10.240.80/8080"
97 | // // }
98 |
99 | // ] // if type = dynamic this is dynamically populated based on role/version subscription
100 | // // otherwise expects { host: '10.10.10.10', port: 8080}
101 | // });
102 |
103 | // haproxy.putFrontend('www1', {
104 | // "bind": "*:10000" // IP and ports to bind to, comma separated, host may be *
105 | // , "backend": "backend1" // the default backend to route to, it must be defined already
106 | // , "mode": "http" // default: http, expects tcp|http
107 | // , "keepalive": "close" // default: "default", expects default|close|server-close
108 | // , "rules": [] // array of rules, see next section
109 | // , "natives": [] // array of strings of raw config USE SPARINGLY!!
110 | // });
111 |
112 |
113 |
114 | // var r = haproxy.getBackends();
115 | // var f = haproxy.getFrontends();
116 |
117 | // log('BACKENDS-=-------------------:\n', util.inspect(r, { colors: true, depth:10 }));
118 | // log('FRONTENDS-=-------------------:\n', util.inspect(f, { colors: true, depth:10 }));
119 |
120 | // }, 5000);
121 | // test
122 | // setInterval(function() {
123 |
124 | // log('CONFIG------------------------:\n', haproxy.getHaproxyConfig());
125 | // },3000);
126 |
--------------------------------------------------------------------------------
/old/test-serf.js:
--------------------------------------------------------------------------------
1 | var SerfRPC = require("serf-rpc");
2 | var serf = new SerfRPC();
3 |
4 | serf.connect(function(err){
5 | if(err)
6 | throw err;
7 |
8 | serf.event({"Name": "deploy", "Payload": "4f33de567283e4a456539b8dc493ae8a853a93f6", "Coalesce": false}, function(err, response){
9 | if(err)
10 | throw err;
11 | else
12 | console.log("Triggered the event!");
13 | });
14 | serf.join({"Existing": ["172.17.0.125"], "Replay": false}, function(err, res) {
15 | if(err)
16 | throw err;
17 | else
18 | console.log("joined");
19 |
20 |
21 | serf["members-filtered"]({ Name: "hap.*" }, function(err, res) {
22 | if(err)
23 | throw err;
24 | else
25 | console.log("Members\n", res);
26 |
27 | });
28 |
29 | });
30 | });
31 |
--------------------------------------------------------------------------------
/old/testhap.js:
--------------------------------------------------------------------------------
1 | require('logthis').config({ _on: true,
2 | 'Data': 'debug' ,
3 | 'HaproxyManager': 'debug' ,
4 | 'HaproxyStats': 'debug',
5 | 'Db': 'debug',
6 | 'haproxy': 'debug'
7 | });
8 |
9 | var log = require('logthis').logger._create('haproxy');
10 |
11 | var Haproxy = require('haproxy');
12 | var resolve = require('path').resolve;
13 |
14 | var defaults = {
15 | host: '0.0.0.0',
16 | port: 10000,
17 | // haproxySocketPath: '/tmp/haproxy.status.sock',
18 | // haproxyPidPath: '/var/run/haproxy.pid',
19 | // haproxyCfgPath: '/etc/haproxy/haproxy.cfg',
20 | haproxySocketPath: __dirname + '/haproxy.status.sock',
21 | haproxyPidPath: __dirname + '/haproxy.pid',
22 | haproxyCfgPath: __dirname + '/haproxy.cfg',
23 | templateFile: __dirname + '/default.haproxycfg.tmpl',
24 | persistence: __dirname + '/persisted',
25 | dbPath: __dirname + '/db',
26 | // sudo: 'use sudo when starting haproxy'
27 | sudo: 'use sudo when starting haproxy'
28 | };
29 |
30 | log('hello', defaults);
31 |
32 |
33 | var opts = defaults;
34 | var haproxy = new Haproxy(opts.haproxySocketPath, {
35 | config: resolve(opts.haproxyCfgPath),
36 | pidFile: resolve(opts.haproxyPidPath),
37 | prefix: (opts.sudo) ? 'sudo' : undefined,
38 | which: __dirname + '/bin/haproxy'
39 | });
40 |
41 | haproxy.stop(function(err) {
42 | haproxy.start(function(err) {
43 | log(err);
44 | haproxy.running(function(err, running) {
45 | log('running:', err, running);
46 | });
47 |
48 | haproxy.stat('-1', '-1', '-1', function (err, stats) {
49 |
50 |
51 | });
52 | });
53 | });
54 |
55 |
--------------------------------------------------------------------------------
/package.js:
--------------------------------------------------------------------------------
1 | // This file is the source for constructing a `package.json` file.
2 | // JSON is a wonderful interchange format, but due to the fact that the
3 | // [JSON Specification](http://json.org) does not allow for comments, I find
4 | // it horrid for self documenting examples.
5 | //
6 | // JavaScript allows for comments and inherently allows JSON. This file will
7 | // act as the source for building a `package.json` file that also manages this
8 | // package.
9 | //
10 | // It is the closest I can get to a self-documenting `package.json` file.
11 |
12 |
13 |
14 | // The `package.json` file always consists of one top level object, which is
15 | // what we export here in a [Node.js](http://nodejs.org) friendly way that
16 | // will allow us to build our `package.json` file. A real `package.json` file
17 | // will not contain the `exports = ` definition, nor any of these comments.
18 | module.exports = {
19 | // Many of the following `package.json` parameters are optional depending
20 | // on whether or not this package will ever be published, and depending
21 | // on how much management we want to delegate to npm. I did not mark
22 | // optional vs. not-optional for the parameters, as a `package.json` file
23 | // is by its nature always optional.
24 |
25 | // Our npm package name needs to be unique only if we are going to publish
26 | // our package into an npm registry. If we aren't going to publish the
27 | // package the name can be anything we want.
28 | //
29 | // Leave off redundant affixes like `node-package` or `package-js`.
30 | // We know it is JavaScript for Node.
31 | "name": "node-haproxy",
32 | // A single line, or sometimes slightly longer, description of our package.
33 | "description": "",
34 | // [npm](http://npmjs.org) enforces the X.Y.Z semantic version
35 | // scheme that is described at [http://semver.org/](http://semver.org/)
36 | // and we should follow this versioning for our package.
37 | //Comment out to auto increase version on execution of this file
38 | // "version": "0.1.0",
39 | // URL to the homepage for this package.
40 | "homepage": "https://github.com/michieljoris/node-haproxy",
41 | // An array of keywords used to describe this package to search engines,
42 | // mainly for people searching within the npm universe.
43 | "keywords": [
44 |
45 | ],
46 | // Where is the source of truth for this code, and what type of repo is it?
47 | "repository": {
48 | "type": "git",
49 | "url": "https://github.com/michieljoris/node-haproxy.git"
50 | },
51 | // Every package should have at least one author. There are a couple of
52 | // formats for the author. I prefer the explicit object format as follows:
53 | "author": {
54 | "name": "Michiel van Oosten",
55 | "email": "mail@axion5.net",
56 | "url": "http://www.axion5.net/"
57 | },
58 | // What licenses govern this code, and where is the license associated
59 | // with this code?
60 | // The complex form, "licenses", is an array of objects.
61 | // The simplest form is "license", and may point to just a string that
62 | // represents the standard name of the license, like "MIT".
63 | "licenses": [
64 | {
65 | "type": "MIT",
66 | "url": "http://github.com/michieljoris/node-haproxy/blob/master/LICENSE.txt"
67 | }
68 | ],
69 | // If there is a file that should be loaded when require()ing this
70 | // folder-as-a-package, declare this file here, relative to our package
71 | // structure.
72 | "main": "src/api.js",
73 | // Essentially, which Node.js platforms do we support? These are glob
74 | // like expressions supported by the
75 | // [npm semantic version parser](https://npmjs.org/doc/semver.html),
76 | // and the version given in the "engines" block below means what it looks like.
77 | //
78 |
79 | //Installs a binary script called node-haproxy which is linked to
80 | //./bin/node-haproxy in the local package.
81 |
82 | //If we have installed this package globally using npm install node-haproxy
83 | //-g we will be able to call this new command node-haproxy from anywhere on
84 | //our system.
85 | "bin": {
86 | "node-haproxy": "bin/node-haproxy.sh"
87 | },
88 |
89 | // require a Node.js installation that is greater than or equal to version 0.6.0
90 | "engines": {
91 | "node": ">= 0.6.x"
92 | },
93 | // What other modules/libraries do we require for our own module?
94 | // The beauty of this dependencies block is that these modules will
95 | // be downloaded magically when we run npm install from within our
96 | // directory. npm itself will sort out any dependency conflicts within
97 | // our own dependencies and we can be pretty much assured that the
98 | // modules we need will be ready to run.
99 | //
100 | // **NOTE:** We don't have any dependencies for this module. See the
101 | // `devDependencies` block for the way to include dependencies.
102 | "dependencies": {
103 | "dougs_vow": "*",
104 | "fs-extra": "0.18.x",
105 | "logthis": "*",
106 |
107 | // ,"hapi": "~1.20.0",
108 | "crdt": "~3.5.1",
109 | "handlebars": "~1.0.12",
110 | "debounce": "0.0.2",
111 | "deep-equal": "0.0.0",
112 | // "shoe": "~0.0.11",
113 | // "thalassa": "~0.4.0",
114 | "haproxy": "0.0.3", //up to 0.0.4 as of July/14
115 | // "cli-color": "~0.2.2",
116 | "changeset": "0.0.5",
117 | "optimist": "~0.6.0",
118 | // "websocket-stream": "~0.2.0",
119 | // "ws": "~0.4.27",
120 | "xtend": "~2.0.6",
121 | // "mux-demux": "~3.7.8",
122 | // "through": "~2.3.4",
123 | "extend": "~1.2.0",
124 | "level": "~0.18.0",
125 | "mkdirp": "~0.3.5",
126 | "flic": "^1.1.2",
127 | "node-ipc": "^1.1.13"
128 | // "redis": "^0.12.1"
129 | // "split": "~0.2.10",
130 | // "browserify": "~2.25.1",
131 | // "CBuffer": "~0.1.4"
132 |
133 | // "colors": "*",
134 | },
135 | // What dependencies are useful only for developers?
136 | // Installed when we `npm install` in our working directory, but not
137 | // when people require our package in their own package.json. This is the
138 | // usual and accepted place to put test frameworks and documentation
139 | // tools.
140 | //
141 | // The packages we depend on for development:
142 | //
143 | // * **fs-extra**: Mixin for the fs (filesystem) module.
144 | // * **doccoh**: Documentation utility for this code.
145 | "devDependencies": {
146 | // "doccoh": "*"
147 | "docco": "*"
148 | },
149 | // Should this package be prevented from accidental publishing by npm?
150 | // The default is false (not hidden), but I include this here for doc
151 | // purposes.
152 | "private": false,
153 | // npm can manage a set of standard and non-standard scripts. The
154 | // standard set of scripts can be run with:
155 | //
156 | // npm standard-script-name
157 | //
158 | // The non-standard scripts can be run with:
159 | //
160 | // npm run-script script-name
161 | //
162 | // `docs` is a non-standard script, and can be run with:
163 | //
164 | // npm run-script docs
165 | "scripts": {
166 | // "docs": "node node_modules/.bin/doccoh package.js"
167 | "docs": "node_modules/.bin/docco src/api.js"
168 | }
169 | };
170 |
171 |
172 | // Small script used to write the package.json file out from the package.js
173 | // file.
174 |
175 | var fs = require("fs-extra");
176 | var packagejs = require("./package.js");
177 | var v = '0.1.0';
178 | if (!packagejs.version) {
179 |
180 | try {
181 | v = require('./package.json').version;
182 | } catch(e) {
183 | console.log('Created new package.json. You\'re at version 0.0.0.');
184 | }
185 | var s = v.split('.');
186 | v = [s[0],s[1],parseInt(s[2]) + 1].join('.');
187 | packagejs.version = v;
188 | }
189 |
190 | console.log("Writing the package.json file out from package.js...");
191 | fs.writeJSONFile("package.json", packagejs, function(err){
192 | if (err) {
193 | console.log("Error writing package.json");
194 | console.log(err);
195 | console.log("");
196 | }
197 | else {
198 | console.log(packagejs);
199 | console.log("package.json written successfully.");
200 | console.log("");
201 | }
202 | });
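// Example: regenerating package.json from this file (because "version" is
// commented out above, the patch version read from the existing
// package.json is bumped on each run):
//
//   node package.js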
203 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "node-haproxy",
3 | "description": "",
4 | "homepage": "https://github.com/michieljoris/node-haproxy",
5 | "keywords": [],
6 | "repository": {
7 | "type": "git",
8 | "url": "https://github.com/michieljoris/node-haproxy.git"
9 | },
10 | "author": {
11 | "name": "Michiel van Oosten",
12 | "email": "mail@axion5.net",
13 | "url": "http://www.axion5.net/"
14 | },
15 | "licenses": [
16 | {
17 | "type": "MIT",
18 | "url": "http://github.com/michieljoris/node-haproxy/blob/master/LICENSE.txt"
19 | }
20 | ],
21 | "main": "src/api.js",
22 | "bin": {
23 | "node-haproxy": "bin/node-haproxy.js"
24 | },
25 | "engines": {
26 | "node": ">= 0.6.x"
27 | },
28 | "dependencies": {
29 | "dougs_vow": "*",
30 | "fs-extra": "0.18.x",
31 | "logthis": "*",
32 | "crdt": "~3.5.1",
33 | "handlebars": "~1.0.12",
34 | "debounce": "0.0.2",
35 | "deep-equal": "0.0.0",
36 | "haproxy": "0.0.3",
37 | "changeset": "0.0.5",
38 | "optimist": "~0.6.0",
39 | "xtend": "~2.0.6",
40 | "extend": "~1.2.0",
41 | "level": "~0.18.0",
42 | "mkdirp": "~0.3.5",
43 | "flic": "^1.1.2",
44 | "node-ipc": "^1.1.13"
45 | },
46 | "devDependencies": {
47 | "docco": "*"
48 | },
49 | "private": false,
50 | "scripts": {
51 | "docs": "node_modules/.bin/docco src/api.js"
52 | },
53 | "version": "0.1.26"
54 | }
55 |
--------------------------------------------------------------------------------
/src/Data.js:
--------------------------------------------------------------------------------
1 | var log = require('logthis').logger._create('Data');
2 |
3 | var crdt = require('crdt')
4 | , assert = require('assert')
5 | , deepEqual = require('deep-equal')
6 | , diff = require('changeset')
7 | , fs = require('fs')
8 | , extend = require('xtend')
9 | ;
10 |
11 |
12 | var Data = module.exports = function Data (opts) {
13 | if (!opts) opts = {};
14 |
15 | this.doc = new crdt.Doc();
16 |
17 | this.frontends = this.doc.createSet('_type', 'frontend');
18 | this.backends = this.doc.createSet('_type', 'backend');
19 |
20 | if (opts.persistence) {
21 | //this._bootstrapLevelDB(opts.persistence);
22 | this._bootstrapFileSystemPersistence(opts.persistence);
23 | }
24 |
25 | // Stats are kept separate from the frontends and backends because
26 | // change events on those trigger possible reloading of Haproxy
27 | // and we don't want to reload Haproxy every time we retrieve stats :)
28 | // IDEA: reconsider separate stats storage and conditionally reload haproxy
29 | this.stats = this.doc.createSet('_type', 'stat');
30 |
31 | this.log = log;
32 | };
33 |
34 |
35 | Data.prototype.createStream = function() {
36 | return this.doc.createStream();
37 | };
38 |
39 | Data.prototype.createReadableStream = function() {
40 | return this.doc.createStream({writable: false, sendClock: true});
41 | };
42 |
43 | Data.prototype.setFrontend = function(obj) {
44 | assert(typeof obj.key === 'string' && obj.key.length > 0);
45 | assert(typeof obj.bind === 'string');
46 | var id = this.frontendId(obj.key);
47 | if (obj.id) assert.equal(obj.id, id, 'key must correspond with id');
48 |
49 | var frontend = {
50 | _type : 'frontend'
51 | , key : obj.key
52 | , bind : obj.bind // TODO validate bind comma separated list of host || * : port
53 | , backend : obj.backend // TODO validate, make sure the backend is defined ?
54 | , mode : obj.mode || 'http'
55 | , keepalive : obj.keepalive || 'default' // default|close|server-close, default default
56 | , rules : obj.rules || [] // TODO validate each rule
57 | , natives : obj.natives || []
58 | , uuid : obj.uuid || ''
59 | };
60 |
61 | stripUndefinedProps(frontend);
62 | this._updateDifferences(id, this.frontends.get(id), frontend);
63 | };
64 |
65 | Data.prototype.setBackend = function(obj) {
66 | assert(typeof obj.key === 'string' && obj.key.length > 0);
67 | obj.type = obj.type || 'static';
68 | assert(obj.type === 'dynamic' || obj.type === 'static');
69 | var id = this.backendId(obj.key);
70 | if (obj.id) assert.equal(obj.id, id, 'key must correspond with id');
71 | var existing = this.backends.get(id);
72 |
73 | var backend = {
74 | _type : 'backend'
75 | , key : obj.key
76 | , type : obj.type
77 | , name : obj.name // TODO validate
78 | , version : obj.version // TODO validate
79 | , balance : obj.balance || 'roundrobin' // TODO validate
80 | , host : obj.host || null // for host header override
81 | , mode : obj.mode || 'http'
82 | , members : (Array.isArray(obj.members)) ? obj.members : []
83 | , natives : obj.natives || []
84 | , uuid : obj.uuid || ''
85 | };
86 |
87 | stripUndefinedProps(backend);
88 |
89 | // custom health checks, only for http
90 | if (backend.mode === 'http' && obj.health) {
91 | backend.health = {
92 | method: obj.health.method || 'GET'
93 | , uri: obj.health.uri || '/'
94 | , httpVersion: obj.health.httpVersion || 'HTTP/1.0'
95 | , interval: obj.health.interval || 2000
96 | };
97 | // validation - host header required for HTTP/1.1
98 | assert(!(backend.health.httpVersion === 'HTTP/1.1' && !backend.host),
99 | 'host required with health.httpVersion == HTTP/1.1');
100 | }
101 |
102 | this._updateDifferences(id, existing, backend);
103 | };
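// Illustrative usage sketch (field names follow the setters above; the
// member shape matches the examples in old/test-api.js):
//
//   var data = new Data({ persistence: '/tmp/aqueduct-config.json' });
//   data.setBackend({ key: 'backend1', type: 'static',
//                     members: [ { host: '192.168.1.184', port: 3000 } ] });
//   data.setFrontend({ key: 'www1', bind: '*:10000', backend: 'backend1' });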
104 |
105 |
106 | Data.prototype.setBackendMembers = function(key, members) {
107 | var backend = this.backends.get(this.backendId(key));
108 | if (backend) backend.set({ 'members': members });
109 | };
110 |
111 | Data.prototype.getFrontends = function() {
112 | return this.frontends.toJSON();
113 | };
114 |
115 | Data.prototype.getBackends = function() {
116 | return this.backends.toJSON();
117 | };
118 |
119 | Data.prototype.deleteFrontend = function(key) {
120 | var id = this.frontendId(key);
121 | this.doc.rm(id);
122 | };
123 |
124 | Data.prototype.deleteBackend = function(key) {
125 | var id = this.backendId(key);
126 | this.doc.rm(id);
127 | };
128 |
129 | Data.prototype.frontendId = function(key) {
130 | return "frontend/"+key;
131 | };
132 |
133 | Data.prototype.backendId = function(key) {
134 | return "backend/"+key;
135 | };
136 |
137 | Data.prototype.setFrontendStat = function(stat) {
138 | // expect { key: 'frontendName', status: 'UP/DOWN or like UP 2/3' }
139 | var statId = stat.id;
140 | var statObj = this._createStatObj(statId, stat.key, 'frontend', stat);
141 | statObj.frontend = this.frontendId(stat.key);
142 | this._setStat(statId, statObj);
143 | };
144 |
145 | Data.prototype.setBackendStat = function(stat) {
146 | // expect { key: 'key', status: 'UP/DOWN or like UP 2/3' }
147 | var statId = stat.id;
148 | var statObj = this._createStatObj(statId, stat.key, 'backend', stat);
149 | statObj.backend = this.backendId(stat.key);
150 | this._setStat(statId, statObj);
151 | };
152 |
153 | Data.prototype.setBackendMemberStat = function(stat) {
154 | // expect { key: 'key', status: 'UP/DOWN or like UP 2/3' }
155 | var statId = stat.id;
156 | var statObj = this._createStatObj(statId, stat.key, 'backendMember', stat);
157 | statObj.backend = this.backendId(stat.backendName);
158 | this._setStat(statId, statObj);
159 | };
160 |
161 | Data.prototype.rmBackendMemberStatsAllBut = function(key, memberNames) {
162 | var self = this;
163 | this.stats.toJSON()
164 | .forEach(function (stat) {
165 | if (stat.type === 'backendMember' &&
166 | stat.key === key &&
167 | memberNames.indexOf(stat.key) === -1) {
168 | self.doc.rm(stat.id);
169 | }
170 | });
171 | };
172 |
173 | Data.prototype._setStat = function (statId, statObj) {
174 | var hasChanged = !deepEqual(this.doc.get(statId).toJSON(), statObj);
175 | if (hasChanged) this.doc.set(statId, statObj);
176 | };
177 |
178 | Data.prototype._createStatObj = function(id, key, type, stat) {
179 | // set just the status and no other stat
180 | return { id: id, _type: 'stat', type: type, key: key, status: stat.status };
181 | //return extend(stat, { id: id, _type: 'stat', type: type, key: key});
182 | };
183 |
184 | Data.prototype._updateDifferences = function (id, existingRow, updatedObj) {
185 | if (!existingRow) return this.doc.set(id, updatedObj);
186 | var diffObj = {};
187 | diff(existingRow.toJSON(), updatedObj).forEach(function (change) {
188 |
189 | var key = change.key[0];
190 | if (key === 'id') return;
191 | if (!diffObj[key]) {
192 | if (change.type === 'put') diffObj[key] = updatedObj[key];
193 | else if (change.type === 'del') {
194 | if (Array.isArray(updatedObj[key]))
195 | diffObj[key] = updatedObj[key];
196 | else diffObj[key] = undefined;
197 | }
198 | }
199 | });
200 |
201 | existingRow.set(diffObj);
202 | };
203 |
204 |
205 | // Data.prototype.closeDb = function(cb) {
206 | // if (this.db) this.db.close(cb);
207 | // else cb(null);
208 | // };
209 |
210 |
211 | // This leveldb-backed storage is not working: sometimes it fails either
212 | // to store some data or to read it back. I had to revert to constantly
213 | // serializing the contents into a flat file.
214 | //
215 | // Data.prototype._bootstrapLevelDB = function(dbLocation) {
216 | // var self = this;
217 | // var doc = self.doc;
218 |
219 | // var levelup = require("levelup");
220 | // var level_scuttlebutt = require("level-scuttlebutt");
221 | // var SubLevel = require('level-sublevel');
222 | // var db = this.db = SubLevel(levelup(dbLocation));
223 | // var udid = require('udid')('thalassa-aqueduct');
224 | // var sbDb = db.sublevel('scuttlebutt');
225 |
226 | // level_scuttlebutt(sbDb, udid, function (name) {
227 | // return doc;
228 | // });
229 |
230 | // sbDb.open(udid, function (err, model) {
231 | // self.log('debug', 'leveldb initialized, storing data at ' + dbLocation);
232 | // //model.on('change:key', console.log);
233 | // });
234 | // };
235 |
236 | Data.prototype._bootstrapFileSystemPersistence = function (fileLocation) {
237 | var self = this;
238 |
239 | var writing = false, queued = false;
240 | function _syncDown() {
241 | writing = true;
242 | queued = false;
243 | var contents = JSON.stringify({ version: 1, frontends: self.frontends, backends: self.backends });
244 | fs.writeFile(fileLocation, contents, function (err) {
245 | if (err)
246 | self.log('error', 'failed writing serialized configuration ' + fileLocation +', ' + err.message);
247 | writing = false;
248 | if (queued) _syncDown();
249 | });
250 | }
251 |
252 | var syncDown = function () {
253 | if (writing) queued = true;
254 | else _syncDown();
255 | };
256 |
257 | fs.exists(fileLocation, function (exists) {
258 | if (exists) {
259 | var contents = fs.readFileSync(fileLocation);
260 | try {
261 | var data = JSON.parse(contents);
262 | data.frontends.forEach(function (frontend) {
263 | self.setFrontend(frontend);
264 | });
265 | data.backends.forEach(function (backend) {
266 | self.setBackend(backend);
267 | });
268 | }
269 | catch (err) {
270 | self.log('error', 'failed parsing serialized configuration JSON ' + fileLocation +', ' + err.message);
271 | }
272 |
273 | }
274 | self.frontends.on('changes', syncDown);
275 | self.backends.on('changes', syncDown);
276 | });
277 |
278 | };
279 |
280 | function stripUndefinedProps(obj) {
281 | Object.keys(obj).forEach(function(key) {
282 | if (obj[key] === undefined ) delete obj[key];
283 | });
284 | }
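// Editorial sketch (not part of the original source): stripUndefinedProps
// mutates the object in place, e.g.
//   var o = { a: 1, b: undefined };
//   stripUndefinedProps(o); // o is now { a: 1 }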
285 |
--------------------------------------------------------------------------------
/src/Db.js:
--------------------------------------------------------------------------------
1 | var log = require('logthis').logger._create('Db');
2 |
3 | var level = require('level')
4 | , assert = require('assert')
5 | , util = require('util')
6 | , path = require('path')
7 | , mkdirp = require('mkdirp')
8 | ;
9 |
10 | // opts:
11 | // - dbPath
12 | // - secondsToRetainStats
13 | //
14 | var Db = module.exports = function(opts, cb) {
15 | var self = this;
16 |
17 | this.log = log;
18 | if (typeof opts !== 'object') opts = {};
19 |
20 | assert(opts.dbPath, 'Db.js: opts.dbPath, dbPath to leveldb database, must be passed!');
21 |
22 | var dbPath = self.DBPATH = opts.dbPath;
23 | self.SECONDS_TO_RETAIN_STATS = opts.secondsToRetainStats || 300;
24 |
25 | mkdirp(dbPath, function (err) {
26 | if (err) {
27 | self.log('error', 'mkdirp ' + dbPath, String(err));
28 | throw err;
29 | }
30 |
31 | var statsDbPath = path.join(dbPath, 'statsDb');
32 | self.statsDb = level(statsDbPath, { valueEncoding : 'json' });
33 |
34 | var activityDbPath = path.join(dbPath, 'activityDb');
35 | self.activityDb = level(activityDbPath, { valueEncoding : 'json' });
36 | if (typeof cb === 'function') cb();
37 |
38 | });
39 |
40 | };
41 |
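// Editorial usage sketch (not part of the original source); the path below is
// hypothetical:
//
//   var Db = require('./Db');
//   var db = new Db({ dbPath: '/tmp/node-haproxy/db', secondsToRetainStats: 300 }, function () {
//     db.writeStat({ hostId: 'host1', id: 'stat/frontend/www', time: Date.now(), status: 'UP' });
//   });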
42 | Db.prototype.writeStat = function writeStat(statObj) {
43 | // log(statObj);
44 | var key = [statObj.hostId, statObj.id, statObj.time].join('~');
45 | this.statsDb.put(key, statObj);
46 | this.trimStats();
47 | };
48 |
49 | var prev = {};
50 | Db.prototype.writeActivity = function writeActivity(activityObj) {
51 | var key = [activityObj.time, activityObj.object].join('~');
52 | this.activityDb.put(key, activityObj);
53 |
54 | };
55 |
56 | Db.prototype.trimStats = function () {
57 | var self = this;
58 |
59 | // if we're already trimming, return
60 | if (self.isTrimming) return;
61 |
62 | self.isTrimming = true;
63 | // self.log('trimStats starting');
64 |
65 | var ws = self.statsDb.createWriteStream();
66 | var numKeysDeleted = 0;
67 | var numKeysConsidered = 0;
68 | var startTime = Date.now();
69 | var timeToExpire = Date.now() - (self.SECONDS_TO_RETAIN_STATS * 1000);
70 |
71 | var rs = self.statsDb.createReadStream({ keys: true, values: false })
72 | .on('data', function (key) {
73 | numKeysConsidered++;
74 | var parts = key.split('~');
75 | var epoch = parseInt(parts[2], 10) || 0; // if the key doesn't contain the time, aggressively delete it
76 | if (epoch < timeToExpire) {
77 | //self.log('trimStats deleting (' + (epoch - timeToExpire) + ') ' + key);
78 | ws.write({ type: 'del', key: key });
79 | numKeysDeleted++;
80 | }
81 | })
82 | .on('end', function () {
83 | ws.end();
84 | var duration = Date.now()-startTime;
85 | // self.log(util.format('trimStats trimmed %s of %s in %sms (%s)', numKeysDeleted, numKeysConsidered, duration, (numKeysConsidered/duration)));
86 | self.allowTrimmingIn(6000);
87 | })
88 | .on('error', function (err) {
89 | self.log('error', 'trimStats reading keystream from statsDb', String(err));
90 | ws.end();
91 | });
92 |
93 | ws.on('error', function (err) {
94 | self.log('error', 'trimStats write stream to statsDb', String(err));
95 | rs.destroy();
96 | });
97 | };
98 |
99 | Db.prototype.statsValueStream = function(hostId) {
100 | var opts = (hostId) ? { start: hostId + '~', end: hostId + '~~' } : undefined;
101 | return this.statsDb.createValueStream(opts);
102 | };
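// Editorial note (not part of the original source): stat keys are written as
// 'hostId~id~time' (see writeStat above), so { start: hostId + '~', end: hostId + '~~' }
// makes the value stream a prefix scan over that host's keys, because '~~' sorts
// after every 'hostId~...' key in leveldb's lexicographic key order, e.g.
//
//   db.statsValueStream('host1').on('data', function (stat) { console.log(stat); });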
103 |
104 | Db.prototype.activityValueStream = function(opts) {
105 | if (!opts) opts = {};
106 | if (!opts.start) opts.start = Date.now();
107 | if (!opts.limit) opts.limit = 50;
108 | opts.reverse = true;
109 | return this.activityDb.createValueStream(opts);
110 | };
111 |
112 | Db.prototype.allowTrimmingIn = function (t) {
113 | var self = this;
114 | setTimeout(function () {
115 | self.isTrimming = false;
116 | }, t);
117 | };
118 |
--------------------------------------------------------------------------------
/src/HaproxyManager.js:
--------------------------------------------------------------------------------
1 | var log = require('logthis').logger._create('HaproxyManager');
2 |
3 | var handlebars = require('handlebars')
4 | // , HAProxy = require('haproxy')
5 | , fs = require('fs')
6 | , resolve = require('path').resolve
7 | , util = require('util')
8 | , f = util.format
9 | , assert = require('assert')
10 | , EventEmitter = require('events').EventEmitter
11 | , debounce = require('debounce')
12 | , deepEqual = require('deep-equal')
13 | ;
14 |
15 |
16 | // log('hello');
17 |
18 | var HAProxyManager = module.exports = function HAProxyManager (opts) {
19 | if (typeof opts !== 'object') opts = {};
20 |
21 | assert(opts.data, 'opts.data required');
22 | assert(opts.haproxy, 'opts.haproxy required');
23 |
24 | this.config = {};
25 | this.config.templateFile = resolve(opts.templateFile || __dirname + '/../default.haproxycfg.tmpl');
26 | this.config.haproxyCfgPath = resolve(opts.haproxyCfgPath || '/etc/haproxy/haproxy.cfg');
27 | this.config.watchConfigFile = (opts.watchConfigFile !== undefined) ? opts.watchConfigFile : true;
28 | this.config.debounceRate = opts.debounceRate || 2000;
29 | this.log = (typeof opts.log === 'function') ? opts.log : function (){};
30 |
31 | this.latestConfig = "";
32 |
33 | if (!fs.existsSync(this.config.templateFile)) {
34 | this.log('error', f("template file %s doesn't exist!", this.config.templateFile));
35 | }
36 | this.template = handlebars.compile(fs.readFileSync(this.config.templateFile, 'utf-8'));
37 | this.writeConfigDebounced = debounce(this.writeConfig.bind(this), this.config.debounceRate, false);
38 |
39 | this.data = opts.data;
40 | this.haproxy = opts.haproxy;
41 | this.data.frontends.on( 'changes', this._changeFrontEnd.bind(this) );
42 | this.data.backends.on ( 'changes', this._changeBackEnd.bind(this) );
43 |
44 | this.writeConfigDebounced();
45 |
46 | };
47 |
48 | util.inherits(HAProxyManager, EventEmitter);
49 |
50 | HAProxyManager.prototype.writeConfig = function() {
51 | var data = {
52 | frontends: this.data.frontends.toJSON(),
53 | backends: this.data.backends.toJSON(),
54 | haproxySocketPath: this.haproxy.socket
55 | };
56 |
57 | var previousConfig = this.latestConfig;
58 | this.latestConfig = this.template(data);
59 |
60 | // only write the config and reload if it actually changed
61 | if (!deepEqual(previousConfig, this.latestConfig)) {
62 | log('writing config\n', this.latestConfig);
63 | fs.writeFileSync(this.config.haproxyCfgPath, this.latestConfig , 'utf-8');
64 | this.emit('configChanged');
65 | this.reload();
66 | }
67 | else {
68 | this.emit('configNotChanged');
69 | }
70 | };
71 |
72 | HAProxyManager.prototype.reload = function () {
73 | var self = this;
74 | self.haproxy.running(function (err, running) {
75 | if (err) {
76 | self.emit('haproxy-error', String(err));
77 | return self.log('error', 'HaproxyManager.reload', { error: String(err) });
78 | }
79 |
80 | function handleRestart (err) {
81 | if (err) {
82 | self.emit('haproxy-error', String(err));
83 | self.log('error', 'HaproxyManager.reload', { error: String(err) });
84 | }
85 | self.emit('reloaded');
86 | }
87 | if (running) self.haproxy.reload(handleRestart);
88 | else self.haproxy.start(handleRestart);
89 | });
90 | };
91 |
92 | HAProxyManager.prototype._changeFrontEnd = function(row, changed) {
93 | this.log('debug', 'HaproxyManager._changeFrontEnd', changed);
94 | this.writeConfigDebounced();
95 | };
96 |
97 | HAProxyManager.prototype._changeBackEnd = function(row, changed) {
98 | this.log('debug', 'HaproxyManager._changeBackEnd', changed);
99 | this.writeConfigDebounced();
100 | };
101 |
102 | //
103 | //
104 | //
105 | //
106 | //
107 | //
108 | //
109 | // TODO: refactor all these helpers; reconsider the business logic
110 | //
111 |
112 | // template helper for outputting frontend acl rules
113 | handlebars.registerHelper('aclRule', function (rule) {
114 | var rand = Math.random().toString(36).substring(3);
115 | var name = rule.type + '_' + rand;
116 |
117 | if (rule.type === 'path' || rule.type === 'url') {
118 | return util.format("acl %s %s %s\nuse_backend %s if %s\n", name, rule.operation, rule.value, rule.backend, name);
119 | }
120 | else if (rule.type === 'header') {
121 | return util.format("acl %s %s(%s) %s\nuse_backend %s if %s\n", name, rule.operation, rule.header, rule.value, rule.backend, name);
122 | }
123 | });
124 |
125 | handlebars.registerHelper('frontendHelper', function (frontend) {
126 | var output = [];
127 | var hasRules = frontend.rules && frontend.rules.length > 0;
128 | var hasNatives = frontend.natives && frontend.natives.length > 0;
129 |
130 | output.push("bind " + frontend.bind);
131 | output.push(" mode " + frontend.mode);
132 | output.push(" default_backend " + frontend.backend);
133 |
134 | // http only default options
135 | if (frontend.mode === 'http') {
136 | output.push(" option httplog");
137 |
138 | // The default keep-alive behavior is to use keep-alive when both the client
139 | // and the backend support it. However, haproxy only evaluates rules when a
140 | // connection is first established, so if any rules are used then at least
141 | // http-server-close should be specified; haproxy then lets clients keep their
142 | // connection to haproxy alive but closes the backend connection after each request.
143 | //
144 | // If there are any rules, the default behavior is to use http-server-close
145 | // and http-pretend-keepalive.
146 | if (frontend.keepalive === 'server-close') {
147 | output.push(" option http-server-close");
148 | output.push(" option http-pretend-keepalive");
149 | }
150 | else if (frontend.keepalive === 'close'){
151 | output.push(" option forceclose");
152 | }
153 | // the default if there are rules is to use server close
154 | else if (hasRules) {
155 | output.push(" option http-server-close");
156 | output.push(" option http-pretend-keepalive");
157 | }
158 | }
159 |
160 | if (hasRules) {
161 | frontend.rules.forEach(function (rule) {
162 | var rand = Math.random().toString(36).substring(3); // substring(13) could yield an empty suffix and collide acl names
163 | var name = rule.type + '_' + rand;
164 |
165 | if (rule.type === 'path' || rule.type === 'url') {
166 | output.push(util.format("acl %s %s %s\nuse_backend %s if %s",
167 | name, rule.operation, rule.value, rule.backend, name));
168 | }
169 | else if (rule.type === 'header') {
170 | output.push(util.format("acl %s %s(%s) %s\nuse_backend %s if %s",
171 | name, rule.operation, rule.header, rule.value, rule.backend, name));
172 | }
173 | });
174 | }
175 |
176 | if (hasNatives) {
177 | frontend.natives.forEach(function (native) {
178 | output.push(native);
179 | });
180 | }
181 |
182 | return output.join('\n');
183 | });
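// Editorial sketch (not part of the original source): for a hypothetical frontend
//   { bind: '*:80', mode: 'http', backend: 'app', keepalive: 'server-close' }
// the frontendHelper above would emit roughly:
//   bind *:80
//     mode http
//     default_backend app
//     option httplog
//     option http-server-close
//     option http-pretend-keepalive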
184 |
185 |
186 | // helper to output the http check and server lines for a backend
187 | handlebars.registerHelper('backendHelper', function (backend) {
188 | var host = backend.host;
189 | var health = backend.health;
190 | var members = backend.members;
191 | var output = [];
192 | var hasNatives = backend.natives && backend.natives.length > 0;
193 |
194 | // output mode and balance options
195 | output.push("mode " + backend.mode);
196 | output.push(" balance " + backend.balance);
197 |
198 | // host header propagation
199 | if (backend.host) {
200 | output.push(" reqirep ^Host:\\ .* Host:\\ " + backend.host);
201 | }
202 |
203 | // option httpchk
204 | if (backend.mode === 'http' && health) {
205 | var httpVersion = (health.httpVersion === 'HTTP/1.1') ?
206 | ('HTTP/1.1\\r\\nHost:\\ ' + backend.host) :
207 | health.httpVersion;
208 | output.push(util.format(" option httpchk %s %s %s", health.method, health.uri, httpVersion));
209 | }
210 |
211 | if (hasNatives) {
212 | backend.natives.forEach(function (native) {
213 | output.push(native);
214 | });
215 | }
216 |
217 | if (members) {
218 | // server lines for each member
219 | members.forEach(function (member) {
220 | var name = util.format("%s_%s:%s", backend.key, member.host, member.port);
221 | var interval = (health) ? health.interval : 2000;
222 | output.push(util.format(" server %s %s:%s check inter %s", name, member.host, member.port, interval));
223 | });
224 | }
225 |
226 | return output.join('\n');
227 | });
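// Editorial sketch (not part of the original source): for a hypothetical backend
//   { key: 'app', mode: 'http', balance: 'roundrobin', host: 'example.com',
//     health: { method: 'GET', uri: '/ping', httpVersion: 'HTTP/1.1', interval: 5000 },
//     members: [{ host: '10.0.0.1', port: 8080 }] }
// the backendHelper above would emit roughly:
//   mode http
//     balance roundrobin
//     reqirep ^Host:\ .* Host:\ example.com
//     option httpchk GET /ping HTTP/1.1\r\nHost:\ example.com
//     server app_10.0.0.1:8080 10.0.0.1:8080 check inter 5000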
228 |
--------------------------------------------------------------------------------
/src/HaproxyStats.js:
--------------------------------------------------------------------------------
1 | var log = require('logthis').logger._create('HaproxyStats');
2 |
3 | var handlebars = require('handlebars')
4 | // , HAProxy = require('haproxy')
5 | , fs = require('fs')
6 | , norm = require('path').normalize
7 | , util = require('util')
8 | , assert = require('assert')
9 | , EventEmitter = require('events').EventEmitter
10 | , deepEqual = require('deep-equal')
11 | ;
12 |
13 | var HaproxyStats = module.exports = function HaproxyStats (opts) {
14 | if (typeof opts !== 'object') opts = {};
15 |
16 | assert(opts.data, 'opts.data required');
17 | assert(opts.haproxy, 'opts.haproxy required');
18 |
19 | // TODO normalize paths
20 |
21 | this.config = {};
22 | this.data = opts.data;
23 | this.haproxy = opts.haproxy;
24 | this.config.haproxySocketPath = norm(opts.haproxySocketPath || '/tmp/haproxy.sock');
25 | this.config.statsIntervalRate = opts.statsIntervalRate || 6000;
26 | this.log = (typeof opts.log === 'function') ? opts.log : function (){};
27 |
28 | this.createStatsInterval(this.config.statsIntervalRate);
29 | };
30 |
31 | util.inherits(HaproxyStats, EventEmitter);
32 |
33 |
34 | HaproxyStats.prototype.createStatsInterval = function(period) {
35 | var self = this;
36 | self.statsTimer = setTimeout(function() {
37 | self.haproxy.stat('-1', '-1', '-1', function (err, stats) {
38 | if (err) {
39 | self.log('error', 'HaproxyStats: ' + err.message);
40 | }
41 | else if (!stats) {
42 | self.log('error', 'HaproxyStats: connected but received no stats');
43 | }
44 | else {
45 | //console.log(stats);
46 |
47 | // frontend stats
48 | stats.filter(isFrontend).forEach(function (it) {
49 | var statsObj = {
50 | id: 'stat/frontend/' + it.pxname,
51 | key: it.pxname,
52 | type: 'frontend',
53 | time: Date.now(),
54 | status: it.status,
55 | connections: {
56 | current: it.scur,
57 | max: it.smax
58 | },
59 | weight: it.weight
60 | // responses: {
61 | // '100': it.hrsp_1xx,
62 | // '200': it.hrsp_2xx,
63 | // '300': it.hrsp_3xx,
64 | // '400': it.hrsp_4xx,
65 | // '500': it.hrsp_5xx,
66 | // total: it.req_tot
67 | // }
68 | };
69 | self.emit('stat', statsObj);
70 | });
71 |
72 | // backend stats
73 | stats.filter(isBackend).forEach(function (it) {
74 | var statsObj = {
75 | id: 'stat/backend/' + it.pxname,
76 | key: it.pxname,
77 | type: 'backend',
78 | time: Date.now(),
79 | status: it.status,
80 | connections: {
81 | current: it.scur,
82 | max: it.smax
83 | }
84 | };
85 | self.emit('stat', statsObj);
86 |
87 | // backend members stats
88 | var backendName = it.pxname;
89 | var backendStats = stats.filter(isBackendMember(it.pxname));
90 | backendStats.forEach(function (it) {
91 | var statsObj = {
92 | id: 'stat/backend/' + backendName + '/' + it.svname,
93 | key: it.svname,
94 | type: 'backendMember',
95 | time: Date.now(),
96 | backendName: it.pxname,
97 | status: it.status
98 | };
99 | self.emit('stat', statsObj);
100 | });
101 |
102 | // TODO clean up members and frontends and backend stats that don't exist anymore?
103 | //self.data.rmBackendMemberStatsAllBut(backendName, backendStats.map(function (it) { return it.svname; }));
104 |
105 | });
106 | }
107 | self.createStatsInterval(period);
108 | });
109 | }, period);
110 | };
111 |
112 | function isFrontend (it) { return it.svname === 'FRONTEND' && it.pxname !== 'stats'; }
113 | function isBackend (it) { return it.svname === 'BACKEND' && it.pxname !== 'stats'; }
114 | function isBackendMember (backendName) { return function (it) {
115 | return it.pxname === backendName && it.svname.indexOf(backendName) === 0;
116 | };
117 | }
118 |
--------------------------------------------------------------------------------
/src/api.js:
--------------------------------------------------------------------------------
1 | var logLevel = 'info';
2 |
3 | require('logthis').config({ _on: true,
4 | 'Data': logLevel ,
5 | 'HaproxyManager': logLevel ,
6 | 'HaproxyStats': logLevel,
7 | 'Db': logLevel,
8 | 'api': logLevel
9 | });
10 |
11 | var log = require('logthis').logger._create('api');
12 |
13 | var assert = require('assert')
14 | , resolve = require('path').resolve
15 | , Haproxy = require('haproxy')
16 | , Data = require('../src/Data')
17 | , Db = require('../src/Db')
18 | , HaproxyManager = require('../src/HaproxyManager')
19 | , HaproxyStats = require('../src/HaproxyStats')
20 | , extend = require('extend')
21 | , Path = require('path')
22 | , fs = require('fs-extra')
23 | , util = require('util');
24 |
25 | var tempDir = Path.join(__dirname, '../temp');
26 |
27 | fs.ensureDirSync(tempDir);
28 |
29 | var firstStart = true;
30 |
31 | var defaults = {
32 | //Alternative configuration:
33 |
34 | // haproxySocketPath: '/tmp/haproxy.status.sock',
35 | // haproxyPidPath: '/var/run/haproxy.pid',
36 | // haproxyCfgPath: '/etc/haproxy/haproxy.cfg',
37 | // sudo: 'use sudo when starting haproxy',
38 |
39 | haproxySocketPath: Path.join(tempDir, 'haproxy.status.sock'),
40 | haproxyPidPath: Path.join(tempDir, 'haproxy.pid'),
41 | haproxyCfgPath: Path.join(tempDir, 'haproxy.cfg'),
42 |
43 | templateFile: Path.join(__dirname, '../haproxycfg.tmpl'),
44 | persistence: Path.join(tempDir, 'persisted'),
45 | dbPath: Path.join(tempDir, 'db'),
46 | which: Path.join(__dirname, '../haproxy'), //if undefined tries to find haproxy on system
47 | ipc: false //whether to enable the ipc server
48 | };
49 |
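// Editorial usage sketch (not part of the original source): a caller would
// typically override only a few of these defaults, e.g. (hypothetical paths)
//
//   var api = require('./api')({
//     haproxyCfgPath: '/etc/haproxy/haproxy.cfg',
//     sudo: true,
//     ipc: true
//   });
//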
50 | var PACKAGEJSON = Path.resolve(__dirname, '../package.json');
51 |
52 | function getUuid() {
53 | return 'xxxx-xxxx-xxxx-xxxx-xxxx-xxxx-xxxx-xxxx'.replace(/[x]/g, function(c) {
54 | return (Math.random()*16|0).toString(16);
55 | });
56 | }
57 |
58 | var response;
59 | var infoFunctions = ['getHaproxyConfig', 'getFrontend', 'getBackend', 'getBackendMembers', 'getFrontends', 'getBackends'];
60 |
61 | function version() {
62 | var packageJson = fs.readJsonSync(PACKAGEJSON);
63 | return packageJson.version;
64 | }
65 |
66 | function ipc(api) {
67 |
68 | var ipc=require('node-ipc');
69 |
70 | ipc.config.id = 'haproxy';
71 | ipc.config.retry= 1500;
72 | ipc.config.silent = true;
73 | var timeoutId;
74 | ipc.serve(
75 | function(){
76 | ipc.server.on(
77 | 'api',
78 | function(data,socket){
79 | console.log(data.call);
80 | var error, result;
81 | if (response) {
82 | ipc.server.emit(
83 | socket,
84 | data.uuid,
85 | {
86 | id : ipc.config.id,
87 | error: "Call in progress.."
88 | }
89 | );
90 | }
91 | else {
92 | response = function(error) {
93 | ipc.server.emit(
94 | socket,
95 | data.uuid,
96 | {
97 | id : ipc.config.id,
98 | data : result,
99 | error: error
100 | }
101 | );
102 | response = null;
103 | clearTimeout(timeoutId);
104 | };
105 | if (!api[data.call]) {
106 | error = "No such function: " + data.call;
107 | response(error);
108 | }
109 | else {
110 | data.args = data.args || [];
111 | data.args = Array.isArray(data.args) ? data.args : [data.args];
112 | result = api[data.call].apply(null, data.args || []);
113 | }
114 | if (infoFunctions.indexOf(data.call) !== -1) {
115 | response();
116 | }
117 | else {
118 | timeoutId = setTimeout(function() {
119 | if (response) response('timeout');
120 | }, 10000);
121 |
122 | }
123 | }
124 | }
125 | );
126 | }
127 | );
128 |
129 | ipc.server.start();
130 | console.log('ipc server started');
131 | }
132 |
133 |
134 | module.exports = function(opts) {
135 | console.log('Version:', version());
136 | var data, haproxyManager;
137 | opts = extend(defaults, opts);
138 | if (opts.which === 'system') delete opts.which;
139 |
140 | data = new Data( {
141 | persistence: opts.persistence, //file location
142 | log: log
143 | });
144 |
145 | var haproxy = new Haproxy(opts.haproxySocketPath, {
146 | config: resolve(opts.haproxyCfgPath),
147 | pidFile: resolve(opts.haproxyPidPath),
148 | prefix: (opts.sudo) ? 'sudo' : undefined,
149 | which: opts.which
150 | });
151 |
152 | haproxyManager = new HaproxyManager({
153 | haproxy: haproxy,
154 | data: data,
155 | haproxyCfgPath: opts.haproxyCfgPath,
156 |
157 | templateFile: opts.templateFile,
158 | sudo: opts.sudo,
159 | log: log
160 | });
161 |
162 | var haproxyStats = new HaproxyStats({
163 | haproxy: haproxy,
164 | data: data,
165 | log: log
166 | });
167 |
168 | // Stream stats into a leveldb
169 | var db = new Db(opts, function () {
170 | db.writeActivity({ type: 'activity', time: Date.now(), verb: 'started'});
171 | });
172 |
173 | // Wire up stats to write to stats db
174 | haproxyStats.on('stat', function (statObj) {
175 | db.writeStat(statObj);
176 |
177 | if (statObj.type === 'frontend') {
178 | data.setFrontendStat(statObj);
179 | }
180 | else if (statObj.type === 'backend') {
181 | data.setBackendStat(statObj);
182 | }
183 | else if (statObj.type === 'backendMember') {
184 | data.setBackendMemberStat(statObj);
185 | }
186 | });
187 |
188 | haproxyManager.on('haproxy-error', function (error) {
189 | log._e('Haproxy error:\n', error);
190 | if (response) response(error);
191 | });
192 |
193 | haproxyManager.on('configNotChanged', function (statObj) {
194 | log._i('Config not changed');
195 | if (response) response();
196 | });
197 |
198 | // Wire up haproxy changes to write to activity db
199 | haproxyManager.on('configChanged', function (statObj) {
200 | var activityObj = { type: 'activity', time: Date.now(), verb: 'haproxyConfigChanged'};
201 | log('configChanged\n', activityObj);
202 | db.writeActivity(activityObj);
203 | });
204 |
205 | haproxyManager.on('reloaded', function (statObj) {
206 | var activityObj = { type: 'activity', time: Date.now(), verb: 'haproxyRestarted' };
207 | log('reloaded\n', activityObj);
208 | if (firstStart) {
209 | console.log('Running..');
210 | firstStart = false;
211 | if (opts.ipc) ipc(api);
212 | }
213 | db.writeActivity(activityObj);
214 | if (response) response();
215 | });
216 |
217 | var api = {};
218 | api.getFrontend = function (key) {
219 | var id = data.frontendId(key);
220 | var row = data.frontends.get(id);
221 | return row ? row.toJSON() : null;
222 | };
223 |
224 | api.getBackend = function (key) {
225 | var id = data.backendId(key);
226 | var row = data.backends.get(id);
227 | return row ? row.toJSON() : null;
228 | };
229 |
230 | api.getBackendMembers = function (key) {
231 | var id = data.backendId(key);
232 | var row = data.backends.get(id);
233 | return row ? row.toJSON().members : null;
234 | };
235 |
236 | api.getFrontends = function () {
237 | return data.frontends.toJSON();
238 | };
239 |
240 | api.getBackends = function () {
241 | return data.backends.toJSON();
242 | };
243 |
244 | api.putFrontend = function (key, obj) {
245 | // var id = data.frontendId(key);
246 | obj.key = key;
247 | obj.uuid = getUuid(); //to mark it as changed..
248 | data.setFrontend(obj);
249 | };
250 |
251 | api.putFrontends = function(array) {
252 | array.forEach(function(e) {
253 | api.putFrontend(e.key, e.obj);
254 | });
255 | };
256 |
257 | api.putBackend = function (key, obj) {
258 | // var id = data.backendId(key);
259 | obj.key = key;
260 | obj.uuid = getUuid(); //to mark it as changed..
261 | if (obj.health && obj.health.httpVersion === 'HTTP/1.1' && !obj.host) {
262 | throw Error('host is required when the health check uses httpVersion=HTTP/1.1');
263 | }
264 | data.setBackend(obj);
265 | };
266 |
267 | api.putBackends = function(array) {
268 | array.forEach(function(e) {
269 | api.putBackend(e.key, e.obj);
270 | });
271 | };
272 |
273 | api.deleteFrontend = function (key) {
274 | var id = data.frontendId(key);
275 | var row = data.frontends.get(id);
276 | if (row) data.frontends.rm(id);
277 | else if (response) response('Frontend not found: ' + key);
278 | };
279 |
280 | var deleteFrontends = function (keys) {
281 | var touched;
282 | keys.forEach(function(key) {
283 | var id = data.frontendId(key);
284 | var row = data.frontends.get(id);
285 | if (row) {
286 | data.frontends.rm(id);
287 | touched = true;
288 | }
289 | });
290 | return touched;
291 | };
292 |
293 | api.deleteFrontends = function(keys) {
294 | var touched = deleteFrontends(keys);
295 | if (!touched && response) response();
296 | };
297 |
298 | api.deleteBackend = function (key) {
299 | var id = data.backendId(key);
300 | var row = data.backends.get(id);
301 | if (row) data.backends.rm(id);
302 | else if (response) response();
303 | };
304 |
305 | var deleteBackends = function (keys) {
306 | var touched;
307 | keys.forEach(function(key) {
308 | var id = data.backendId(key);
309 | var row = data.backends.get(id);
310 | if (row) {
311 | data.backends.rm(id);
312 | touched = true;
313 | }
314 | });
315 | return touched;
316 | };
317 |
318 | api.deleteBackends = function(keys) {
319 | var touched = deleteBackends(keys);
320 | if (!touched && response) response();
321 | };
322 |
323 | // function inspect(arg) {
324 | // return util.inspect(arg, { depth: 10, colors: true });
325 | // }
326 |
327 | // api.updateFrontend = function (key, obj) {
328 | // var id = data.frontendId(key);
329 | // var row = data.frontends.get(id);
330 | // var oldFrontend = {};
331 | // obj = obj || {};
332 | // if (row) {
333 | // row = row.toJSON();
334 | // oldFrontend = extend(true, {}, row); //deep copy row
335 | // }
336 | // console.log(inspect(oldFrontend));
337 | // console.log(inspect(obj));
338 | // var frontend = oldFrontend ? extend(true, oldFrontend, obj) : obj;
339 | // frontend.rules = obj.rules;
340 | // frontend.uuid = getUuid(); //to mark it as changed..
341 | // frontend.key = key;
342 | // console.log(inspect(frontend));
343 | // console.log(response);
344 | // data.setFrontend(frontend);
345 | // };
346 |
347 | // api.updateBackend = function (key, obj) {
348 | // var id = data.backendId(key);
349 | // var row = data.backends.get(id);
350 | // var oldBackend = {};;
351 | // obj = obj || {};
352 | // if (row) {
353 | // row = row.toJSON();
354 | // oldBackend = extend(true, {}, row); //deep copy row
355 | // }
356 | // var backend = oldBackend ? extend(true, oldBackend, obj) : obj;
357 | // backend.uuid = getUuid(); //to mark it as changed..
358 | // backend.key = key;
359 | // data.setBackend(backend);
360 | // };
361 |
362 | api.bulkSet = function(ops) {
363 | ops = ops || {};
364 | var touched;
365 | if (ops.delete) {
366 | if (ops.delete.backends) touched = touched || deleteBackends(ops.delete.backends);
367 | if (ops.delete.frontends) touched = touched || deleteFrontends(ops.delete.frontends);
368 | }
369 | if (ops.put) {
370 | if (ops.put.backends && ops.put.backends.length) {
371 | touched = true;
372 | api.putBackends(ops.put.backends);
373 | }
374 | if (ops.put.frontend) {
375 | touched = true;
376 | api.putFrontend(ops.put.frontend.key, ops.put.frontend.obj);
377 | }
378 | }
379 | if (!touched && response) response();
380 | };
381 |
382 | api.getHaproxyConfig = function () {
383 | return haproxyManager.latestConfig;
384 | };
385 |
386 | return api;
387 | };
388 |
389 | // module.exports({ ipc: true});
390 | // console.log(process._arguments);
391 | // console.log(extend(true, {a:1}, {a:2, b:2}));
392 | // module.exports({ ipc: true });
393 |
394 | // var p = fs.readJsonSync('/home/michieljoris/src/node-haproxy/temp/persisted');
395 | // console.log(p);
396 |
--------------------------------------------------------------------------------
/src/ipc-client.js:
--------------------------------------------------------------------------------
1 | var ipc = require('node-ipc');
2 | var util = require('util');
3 | var VOW = require('dougs_vow');
4 |
5 | ipc.config.id = 'haproxy-client';
6 | ipc.config.retry = 1000;
7 | ipc.config.silent = true;
8 |
9 | function getUuid() {
10 | return 'xxxx-xxxx-xxxx-xxxx-xxxx-xxxx-xxxx-xxxx'.replace(/[x]/g, function(c) {
11 | return (Math.random()*16|0).toString(16);
12 | });
13 | }
14 |
15 | var haproxy = function(call, args) {
16 | var vow = VOW.make();
17 | var uuid = getUuid();
18 | ipc.connectTo(
19 | 'haproxy',
20 | function(){
21 | ipc.of.haproxy.emit(
22 | 'api',
23 | {
24 | id : ipc.config.id,
25 | call : call,
26 | args: args,
27 | uuid: uuid
28 | }
29 | );
30 | ipc.of.haproxy.on(
31 | 'disconnect',
32 | function(){
33 | console.log('disconnected from haproxy'.notice);
34 | vow.break('node-haproxy is not running..');
35 | }
36 | );
37 | ipc.of.haproxy.on(
38 | uuid,
39 | function(result){
40 | if (result.error) vow.break(result.error); //callback(result.error, null);
41 | else vow.keep(result.data); //callback(null, result.data);
42 | // console.log('got a message from haproxy : ', util.inspect(result, {depth: 10, colors: true }));
43 | }
44 | );
45 | }
46 | );
47 | return vow.promise;
48 | };
49 |
50 | haproxy.close = function() {
51 | ipc.config.maxRetries = 0;
52 | ipc.disconnect('haproxy');
53 |
54 | };
55 |
56 | module.exports = haproxy;
57 |
58 | // test
59 | // module.exports('getFrontends', [], function(error, result) {
60 | // console.log(error, result);
61 | // });
62 |
--------------------------------------------------------------------------------
/test/ipc-server.js:
--------------------------------------------------------------------------------
1 | var ipc=require('node-ipc');
2 |
3 | ipc.config.id = 'haproxy';
4 | ipc.config.retry= 1500;
5 | ipc.config.silent = true;
6 |
7 | ipc.serve(
8 | function(){
9 | ipc.server.on(
10 | 'api',
11 | function(data,socket){
12 | console.log(data);
13 | //ipc.log('got a message from'.debug, (data.id).variable, (data.message).data);
14 | ipc.server.emit(
15 | socket,
16 | 'result',
17 | {
18 | id : ipc.config.id,
19 | data : data
20 | }
21 | );
22 | }
23 | );
24 | }
25 | );
26 |
27 | // ipc.server.define.listen['app.message']='This event type listens for message strings as value of data key.';
28 |
29 | ipc.server.start();
30 |
--------------------------------------------------------------------------------
/test/test-flic.js:
--------------------------------------------------------------------------------
1 | var flic = require('flic');
2 | var Node = flic.node;
3 |
4 |
5 | var node = new Node(function(err){
6 | if (err) {
7 | console.log(err);
8 | return;
9 | }
10 | console.log('client is online!');
11 |
12 | node.tell('haproxy:call', 'getBackends', null, null, function(err, result){
13 | if (err) return console.log(err);
14 | console.log(result);
15 |
16 | });
17 | node.tell('haproxy:call', 'getFrontends', null, null, function(err, result){
18 | if (err) return console.log(err);
19 | console.log(result);
20 | });
21 | });
22 |
23 |
24 | //From api.js
25 | // function flic(api, port) {
26 | // var flic = require('flic');
27 | // var Bridge = flic.bridge;
28 | // var Node = flic.node;
29 |
30 | // // Default port is 8221
31 | // port = typeof port === 'number' ? port : 8221;
32 |
33 | // // Bridge can be in any process, and nodes can be in any process
34 | // var bridge = new Bridge();
35 | // // var bridge = new Bridge(port); //not working???
36 |
37 | // var node = new Node('haproxy', function(err){
38 | // if (err) log._e(err);
39 | // else log._i('node-haproxy is online!');
40 | // });
41 |
42 | // node.on('call', function(fn, param1, param2, callback){
43 | // console.log(fn, param1, param2);
44 | // if (api[fn]) callback(null, api[fn](param1, param2));
45 | // else callback('No such function: ' + fn, null);
46 | // });
47 |
48 | // }
49 |
50 |
51 |
52 |
--------------------------------------------------------------------------------