92 | This paragraph starts in HTML ...
93 |
96 | ... and this paragraph finishes in HTML.
97 |
98 |
100 | Now we will use JavaScript to display even numbers in italics and
101 | odd numbers in bold.
102 |
103 |
117 |
118 |
119 | """
120 |
121 | # Original webpage
122 | webpage = """
123 |
124 | The correct answer is 3.0 4.0:
125 |
144 |
145 |
147 |
148 | """
149 |
150 |
151 | htmlast = htmlparser.parse(webpage,lexer=htmllexer)
152 | graphics.initialize() # let's start rendering a webpage
153 | htmlinterp.interpret(htmlast)
154 | graphics.finalize() # we're done rendering this webpage
155 |
156 |
157 |
158 |
--------------------------------------------------------------------------------
/src/programming_languages/ps3_p5_actual_answer.py:
--------------------------------------------------------------------------------
1 | # Detecting Ambiguity
2 | #
3 | # A grammar is ambiguous if there exists a string in the language of that
4 | # grammar that has two (or more) parse trees. Equivalently, a grammar is
5 | # ambiguous if there are two (or more) different sequences of rewrite rules
6 | # that arrive at the same final string.
7 | #
8 | # Ambiguity is a critical concept in natural languages and in programming
9 | # languages. If we are not careful, our formal grammars for languages like
10 | # JavaScript will have ambiguity.
11 | #
12 | # In this problem you will write a procedure isambig(grammar,start,tokens)
13 | # that takes as input a grammar with a finite number of possible
14 | # derivations and a string and returns True (the value True, not the string
15 | # "True") if those tokens demonstrate that the grammar is ambiguous
16 | # starting from that start symbol (i.e., because two different sequences of
17 | # rewrite rules can arrive at those tokens).
18 | #
19 | # For example:
20 | #
21 | # grammar1 = [ # Rule Number
22 | # ("S", [ "P", ] ), # 0
23 | # ("S", [ "a", "Q", ]) , # 1
24 | # ("P", [ "a", "T"]), # 2
25 | # ("P", [ "c" ]), # 3
26 | # ("Q", [ "b" ]), # 4
27 | # ("T", [ "b" ]), # 5
28 | # ]
29 | #
30 | # In this grammar, the tokens ["a", "b"] do demonstrate that the
31 | # grammar is ambiguous because there are two different sequences of
32 | # rewrite rules to obtain them:
33 | #
34 | # S --0-> P --2-> a T --5-> a b
35 | #
36 | # S --1-> a Q --4-> a b
37 | #
38 | # (I have written the number of the rule used inside the arrow for
39 | # clarity.) The two sequences are [0,2,5] and [1,4].
40 | #
41 | # However, the tokens ["c"] do _not_ demonstrate that the grammar is
42 | # ambiguous, because there is only one derivation for it:
43 | #
44 | # S --0-> P --3-> c
45 | #
46 | # So even though the grammar is ambiguous, the tokens ["c"] do not
47 | # demonstrate that: there is only one sequence [0,3].
48 | #
49 | # Important Assumption: In this problem the grammar given to you will
50 | # always have a finite number of possible derivations. So only a
51 | # finite set of strings will be in the language of the grammar. (You could
52 | # test this with something like cfginfinite, so we'll just assume it.)
53 | #
54 | # Hint 1: Consider something like "expand" from the end of the Unit, but
55 | # instead of just enumerating utterances, enumerate (utterance,derivation)
56 | # pairs. For a derivation, you might use a list of the rule indexes as we
57 | # did in the example above.
58 | #
59 | # Hint 2: Because the grammar has only a finite number of derivations, you
60 | # can just keep enumerating new (utterance,derivation) pairs until you
61 | # cannot find any that are not already enumerated.
62 |
63 | def expand(tokens_and_derivation, grammar):
64 |     (tokens, derivation) = tokens_and_derivation
65 |     for token_pos in range(len(tokens)): # for each token
66 |         for rule_index in range(len(grammar)): # for each rule
67 |             rule = grammar[rule_index]
68 |             if tokens[token_pos] == rule[0]: # token is on LHS of rule
69 |                 yield ((tokens[:token_pos] + rule[1] + tokens[token_pos+1:]), derivation + [rule_index])
70 |
71 | def isambig(grammar, start, utterance):
72 |     enumerated = [ ([start], []) ]
73 |     while True:
74 |         new_enumerated = enumerated
75 |
76 |         # ENUMERATE
77 |         for u in enumerated:
78 |             for i in expand(u, grammar):
79 |                 if i not in new_enumerated:
80 |                     new_enumerated = new_enumerated + [i]
81 |
82 |         if new_enumerated != enumerated:
83 |             enumerated = new_enumerated
84 |         else:
85 |             break
86 |
87 |     return len([x for x in enumerated if x[0] == utterance]) > 1 # ambiguous iff two or more derivations
88 |
89 | # We have provided a few test cases. You will likely want to add your own.
90 |
91 | grammar1 = [
92 | ("S", [ "P", ]),
93 | ("S", [ "a", "Q", ]) ,
94 | ("P", [ "a", "T"]),
95 | ("P", [ "c" ]),
96 | ("Q", [ "b" ]),
97 | ("T", [ "b" ]),
98 | ]
99 | print '-' * 20 + ' 1 ' + '-' * 20
100 | print isambig(grammar1, "S", ["a", "b"]) == True
101 | print isambig(grammar1, "S", ["c"]) == False
102 |
103 | grammar2 = [
104 | ("A", [ "B", ]),
105 | ("B", [ "C", ]),
106 | ("C", [ "D", ]),
107 | ("D", [ "E", ]),
108 | ("E", [ "F", ]),
109 | ("E", [ "G", ]),
110 | ("E", [ "x", "H", ]),
111 | ("F", [ "x", "H"]),
112 | ("G", [ "x", "H"]),
113 | ("H", [ "y", ]),
114 | ]
115 | print '-' * 20 + ' 2 ' + '-' * 20
116 | print isambig(grammar2, "A", ["x", "y"]) == True
117 | print isambig(grammar2, "E", ["y"]) == False
118 |
119 | grammar3 = [ # Rivers in Kenya
120 | ("A", [ "B", "C"]),
121 | ("A", [ "D", ]),
122 | ("B", [ "Dawa", ]),
123 | ("C", [ "Gucha", ]),
124 | ("D", [ "B", "Gucha"]),
125 | ("A", [ "E", "Mbagathi"]),
126 | ("A", [ "F", "Nairobi"]),
127 | ("E", [ "Tsavo" ]),
128 | ("F", [ "Dawa", "Gucha" ])
129 | ]
130 | print '-' * 20 + ' 3 ' + '-' * 20
131 | print isambig(grammar3, "A", ["Dawa", "Gucha"]) == True
132 | print isambig(grammar3, "A", ["Dawa", "Gucha", "Nairobi"]) == False
133 | print isambig(grammar3, "A", ["Tsavo"]) == False
134 |
--------------------------------------------------------------------------------
/src/programming_languages/ps4_parser.py:
--------------------------------------------------------------------------------
1 | work_count = 0 # track one notion of "time taken"
2 |
3 | def addtoset(theset,index,elt):
4 | if not (elt in theset[index]):
5 | theset[index] = [elt] + theset[index]
6 | return True
7 | return False
8 |
9 | def parse(tokens,grammar):
10 | global work_count
11 | work_count = 0
12 | tokens = tokens + [ "end_of_input_marker" ]
13 | chart = {}
14 | start_rule = grammar[0]
15 | for i in xrange(len(tokens)+1):
16 | chart[i] = []
17 | start_state = (start_rule[0], [], start_rule[1], 0)
18 | chart[0] = [start_state]
19 | for i in xrange(len(tokens)):
20 | while True:
21 | changes = False
22 | for state in chart[i]:
23 | # State === x -> a b . c d , j
24 | (x, ab, cd, j) = state
25 |
26 | # Current State == x -> a b . c d , j
27 | # Option 1: For each grammar rule c -> p q r
28 | # (where the c's match)
29 | # make a next state c -> . p q r , i
30 | # English: We're about to start parsing a "c", but
31 | # "c" may be something like "exp" with its own
32 | # production rules. We'll bring those production rules in.
33 | next_states = [ (rule[0], [], rule[1], i)
34 | for rule in grammar
35 | if cd <> [] and cd[0] == rule[0] ]
36 | work_count = work_count + len(grammar)
37 | for next_state in next_states:
38 | changes = addtoset(chart,i,next_state) or changes
39 |
40 | # Current State == x -> a b . c d , j
41 | # Option 2: If tokens[i] == c,
42 | # make a next state x -> a b c . d , j
43 | # in chart[i+1]
44 | # English: We're looking to parse token c next,
45 | # and the current token is exactly c! Aren't we lucky!
46 | # So we can parse over it and move on to chart[i+1].
47 | if cd <> [] and tokens[i] == cd[0]:
48 | next_state = (x, ab + [cd[0]], cd[1:], j)
49 | changes = addtoset(chart,i+1,next_state) or changes
50 |
51 | # Current State == x -> a b . c d , j
52 | # Option 3: If cd is [], the state is just x -> a b . , j
53 | # for each p -> q . x r , l in chart[j]
54 | # make a new state p -> q x . r , l
55 | # in chart[i]
56 | # English: We just finished parsing an "x" with this token,
57 | # but that may have been a sub-step (like matching "exp -> 2"
58 | # in "2+3"). We should update the higher-level rules as well.
59 | next_states = [ (jstate[0], jstate[1] + [x], (jstate[2])[1:],
60 | jstate[3] )
61 | for jstate in chart[j]
62 | if cd == [] and
63 | jstate[2] <> [] and
64 | (jstate[2])[0] == x ]
65 |
66 | work_count = work_count + len(chart[j])
67 | for next_state in next_states:
68 | changes = addtoset(chart,i,next_state) or changes
69 |
70 | # We're done if nothing changed!
71 | if not changes:
72 | break
73 |
74 | ## This block prints the chart so you can see the parser's progress.
75 | ## Comment it out if you don't want the output.
76 | for i in range(len(tokens)):
77 | print "== chart " + str(i)
78 | for state in chart[i]:
79 | x, ab, cd, j = state
80 | print " " + x + " ->",
81 | for sym in ab:
82 | print " " + sym,
83 | print " .",
84 | for sym in cd:
85 | print " " + sym,
86 | print " from " + str(j)
87 |
88 | # Uncomment this block if you'd like to see the chart printed
89 | # in cases where it's important to see quotes in the grammar
90 | # for i in range(len(tokens)):
91 | # print "== chart " + str(i)
92 | # for state in chart[i]:
93 | # x = state[0]
94 | # ab = state[1]
95 | # cd = state[2]
96 | # j = state[3]
97 | # print " " + x.__repr__() + " ->",
98 | # for sym in ab:
99 | # print " " + sym.__repr__(),
100 | # print " .",
101 | # for sym in cd:
102 | # print " " + sym.__repr__(),
103 | # print " from " + str(j)
104 |
105 | accepting_state = (start_rule[0], start_rule[1], [], 0)
106 | return accepting_state in chart[len(tokens)-1]
107 |
108 | def parentheses():
109 | grammar = [
110 | ("S", ["P" ]) ,
111 | ("P", ["(" , "P", ")" ]),
112 | ("P", [ ]) ,
113 | ]
114 | tokens = [ "(", "(", ")", ")"]
115 | #tokens = [ "(", "(", "(", ")"]
116 | result=parse(tokens, grammar)
117 | print result
118 |
119 | def ps4_p1():
120 | grammar = [
121 | ("S", ["id", "(", "OPTARGS", ")"]),
122 | ("OPTARGS", []),
123 | ("OPTARGS", ["ARGS"]),
124 | ("ARGS", ["exp", ",", "ARGS"]),
125 | ("ARGS", ["exp"]),
126 | ]
127 | tokens = ["id", "(", "exp", ",", "exp", ")"]
128 | result = parse(tokens, grammar)
129 | print result
130 |
131 | if __name__ == "__main__":
132 | ps4_p1()
133 |
134 |
--------------------------------------------------------------------------------
/src/programming_languages/ps2_p1.py:
--------------------------------------------------------------------------------
1 | # Hexadecimal Numbers
2 | #
3 | # In this exercise you will write a lexical analyzer that breaks strings up
4 | # into whitespace-separated identifiers and numbers. An identifier is a
5 | # sequence of one or more upper- or lower-case letters. In this exercise,
6 | # however, there are two types of numbers: decimal numbers, and
7 | # _hexadecimal_ numbers.
8 | #
9 | # Humans usually write numbers using "decimal" or "base 10" notation. The
10 | # number 234 means 2*10^2 + 3*10^1 + 4*10^0.
11 | #
12 | # It is also possible to write numbers using other "bases", like "base 16"
13 | # or "hexadecimal". Computers often use base 16 because 16 is a convenient
14 | # power of two (i.e., it is a closer fit to the "binary" system that
15 | # computers use internally). A hexadecimal number always starts with the
16 | # two-character prefix "0x" so that you know not to mistake it for a decimal
17 | # number. The number 0x234 means
18 | # 2 * 16^2
19 | # + 3 * 16^1
20 | # + 4 * 16^0
21 | # = 564 decimal.
22 | #
23 | # Because base 16 is larger than base 10, the letters 'a' through 'f' are
24 | # used to represent the numbers '10' through '15'. So the hexadecimal
25 | # number 0xb is the same as the decimal number 11. When read out loud, the
26 | # "0x" is often pronounced like "hex". "0x" must always be followed by at
27 | # least one hexadecimal digit to count as a hexadecimal number.
28 | #
29 | # Modern programming languages like Python can understand hexadecimal
30 | # numbers natively! Try it:
31 | #
32 | # print 0x234 # uncomment me to see 564 printed
33 | # print 0xb # uncomment me to see 11 printed
34 | #
35 | # This provides an easy way to test your knowledge of hexadecimal.
36 | #
37 | # For this assignment you must write token definition rules (e.g., t_ID,
38 | # t_NUM_hex) that will break up a string of whitespace-separated
39 | # identifiers and numbers (either decimal or hexadecimal) into ID and NUM
40 | # tokens. If the token is an ID, you should store its text in the
41 | # token.value field. If the token is a NUM, you must store its numerical
42 | # value (NOT a string) in the token.value field. This means that if a
43 | # hexadecimal string is found, you must convert it to a decimal value.
44 | #
45 | # Hint 1: When presented with a hexadecimal string like "0x2b4", you can
46 | # convert it to a decimal number in stages, reading it from left to right:
47 | # number = 0 # '0x'
48 | # number = number * 16
49 | # number = number + 2 # '2'
50 | # number = number * 16
51 | # number = number + 11 # 'b'
52 | # number = number * 16
53 | # number = number + 4 # '4'
54 | # Of course, since you don't know the number of digits in advance, you'll
55 | # probably want some sort of loop. There are other ways to convert a
56 | # hexadecimal string to a number. You may use any way that works.
57 | #
58 | # Hint 2: The Python function ord() will convert a single letter into
59 | # an ordered internal numerical representation. This allows you to perform
60 | # simple arithmetic on numbers:
61 | #
62 | # print ord('c') - ord('a') == 2
63 |
64 | import ply.lex as lex
65 |
66 | tokens = ('NUM', 'ID')
67 |
68 | ####
69 | # Fill in your code here.
70 | ####
71 |
72 | def t_ID(token):
73 | r'[A-Za-z]+'
74 | return token
75 |
76 | def t_NUM_hex(token):
77 | r'0x[0-9a-f]+'
78 | token.value = int(eval(token.value)) # cheeky!
79 | token.type = 'NUM'
80 | return token
81 |
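# The rule above leans on eval() for brevity. Purely as a sketch of what
# Hint 1 describes (and not part of the required solution), here is one way
# the conversion could be done by hand, reading the digits left to right and
# using ord() as in Hint 2. The helper name hex_string_to_int is my own; the
# lexer does not call it, and int(text, 16) would also work.
def hex_string_to_int(text):
    number = 0
    for digit in text[2:]:                       # skip the leading "0x"
        if digit.isdigit():
            value = ord(digit) - ord('0')        # '0'..'9' -> 0..9
        else:
            value = ord(digit) - ord('a') + 10   # 'a'..'f' -> 10..15
        number = number * 16 + value
    return number

# For example, hex_string_to_int("0x2b4") == 692 and
# hex_string_to_int("0x19") == 25.
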
82 | def t_NUM_decimal(token):
83 | r'[0-9]+'
84 | token.value = int(token.value) # won't work on hex numbers!
85 | token.type = 'NUM'
86 | return token
87 |
88 | t_ignore = ' \t\v\r'
89 |
90 | def t_error(t):
91 | print "Lexer: unexpected character " + t.value[0]
92 | t.lexer.skip(1)
93 |
94 | # We have included some testing code to help you check your work. You will
95 | # probably want to add your own additional tests.
96 | lexer = lex.lex()
97 |
98 | def test_lexer(input_string):
99 | lexer.input(input_string)
100 | result = [ ]
101 | while True:
102 | tok = lexer.token()
103 | if not tok: break
104 | result = result + [(tok.type, tok.value)]
105 | return result
106 |
107 | question1 = "0x19 equals 25" # 0x19 = (1*16) + 9
108 | answer1 = [('NUM', 25), ('ID', 'equals'), ('NUM', 25) ]
109 |
110 | print '---'
111 | print "question1"
112 | print test_lexer(question1)
113 | print test_lexer(question1) == answer1
114 | print '---'
115 |
116 | question2 = "0xfeed MY 0xface"
117 | answer2 = [('NUM', 65261), ('ID', 'MY'), ('NUM', 64206) ]
118 |
119 | print '---'
120 | print "question2"
121 | print test_lexer(question2)
122 | print test_lexer(question2) == answer2
123 | print '---'
124 |
125 | question3 = "tricky 0x0x0x"
126 | answer3 = [('ID', 'tricky'), ('NUM', 0), ('ID', 'x'), ('NUM', 0), ('ID', 'x')]
127 | print '---'
128 | print "question3"
129 | print test_lexer(question3)
130 | print test_lexer(question3) == answer3
131 | print '---'
132 |
133 | question4 = "in 0xdeed"
134 | print test_lexer(question4)
135 |
136 | question5 = "where is the 0xbeef"
137 | print test_lexer(question5)
138 |
139 | question6 = "5 is 0x5"
140 | print '---'
141 | print test_lexer(question6)
142 | print test_lexer(question6) == [('NUM', 5), ('ID', 'is'), ('NUM', 5)]
143 | print '---'
144 |
145 | question7 = "0 is 0x0"
146 | print '---'
147 | print test_lexer(question7)
148 | print test_lexer(question7) == [('NUM', 0), ('ID', 'is'), ('NUM', 0)]
149 | print '---'
150 |
151 | question8 = "tricky 0xabc0x"
152 | print '---'
153 | print test_lexer(question8)
154 | print test_lexer(question8) == [('ID', 'tricky'), ('NUM', 43968), ('ID', 'x')]
155 | print '---'
156 |
--------------------------------------------------------------------------------
/[post] monty_hall_problem.md:
--------------------------------------------------------------------------------
1 | I was **extremely** disappointed that a unit that went into an extraordinary level of detail of Bayes nets, inference, sampling, etc., referred to the Monty Hall problem as an obligatory curiosity and then answered it by saying "I'm so, so very clever, bite me Monty."
2 |
3 | Rather than just moan, this is my process of applying Bayes nets to the Monty Hall problem. I've marked this as a community wiki; please feel free to edit, update, append, etc. Also, I might be completely wrong about much of my reasoning; tell me!
4 |
5 | - - -
6 |
7 | I want to draw a Bayes net representing the Monty Hall problem. Nodes are random variables, so what are the random variables?
8 |
9 | - Before I make a choice the prize exists behind some door. Let's call this `Prize (P)`, which is discrete and may have values `[Door1, Door2, Door3]`
10 | - I choose a door at the beginning. Let's call this `First selection (F)`, which is discrete and may have values `[Door1, Door2, Door3]`.
11 | - Monty, the host, opens a door. Let's call this `Monty opens (M)`, which is discrete and may have values `[Door1, Door2, Door3]`.
12 |
13 | What are the edges?
14 |
15 | - Where the `Prize` is will affect which door `Monty opens`; he'll never open the door the `Prize` is behind! Hence `P -> M`.
16 | - Which door I open will affect which door Monty opens. He can't re-open the door I've just opened! Hence `F -> M`.
17 |
18 | Interesting...this looks familiar. This looks like a particular triplet covered in the "D Separation (3.35)" unit! This triplet tells us:
19 |
20 | 1. `P` and `F` are **absolutely independent**. In the absence of other information they do not provide information about one another. (This makes sense!)
21 | 2. `P` and `F` are **conditionally dependent on M**. Given M there is a link between `P` and `F`.
22 |
23 | For me, point 2 is mind-blowing! There is a connection between `P` and `F` in the Bayes net, whereas every ounce of intuition in my mind asserts that there is absolutely no connection between the two. *Without even doing any calculations* we've just discovered something totally unintuitive about the Monty Hall problem.
24 |
25 | I think we're going to need four tables, drawn out below:
26 |
27 | +---------+ +---------+
28 | | P(F) | | P(P) |
29 | +---------+ +---------+
30 | | D1 | 1/3| | D1 | 1/3|
31 | |----|----| |----|----|
32 | | D2 | 1/3| | D2 | 1/3|
33 | |---------| |---------|
34 | | D3 | 1/3| | D3 | 1/3|
35 | +----v----+ +----v----+
36 |
37 | +--------------+ +--------------+
38 | | P(M|F) | | P(M|P) |
39 | |---+---+------+ |---+---+------+
40 | | M | F | | | M | P | |
41 | |---|---|------| |---|---|------|
42 | | D1| D1| 0 | | D1| D1| 0 |
43 | |--------------| |--------------|
44 | | D1| D2| 1/2 | | D1| D2| 1/2 |
45 | |--------------| |--------------|
46 | | D1| D3| 1/2 | | D1| D3| 1/2 |
47 | |--------------| |--------------|
48 | | D2| D1| 1/2 | | D2| D1| 1/2 |
49 | |--------------| |--------------|
50 | | D2| D2| 0 | | D2| D2| 0 |
51 | |--------------| |--------------|
52 | | D2| D3| 1/2 | | D2| D3| 1/2 |
53 | |--------------| |--------------|
54 | | D3| D1| 1/2 | | D3| D1| 1/2 |
55 | |--------------| |--------------|
56 | | D3| D2| 1/2 | | D3| D2| 1/2 |
57 | |--------------| |--------------|
58 | | D3| D3| 0 | | D3| D3| 0 |
59 | +--------------+ +--------------+
60 |
61 | Keep in mind the constraints of the problem: Monty cannot re-open the door I opened, and Monty will never open the door with the prize behind it.
62 |
63 | Also, I think, given that M has two incident edges, we actually need a `P(M | F,P)` table, but to save space I've excluded it and used intuition in its place.
64 |
65 | Suppose I've chosen D1, then Monty chooses D3 - just like the lecture video. Should we switch? Well:
66 |
67 | $$\alpha \buildrel\triangle\over = P(P = D1 | F = D1, M = D3)$$
68 | $$\beta \buildrel\triangle\over = P(P = D2 | F = D1, M = D3)$$
69 |
70 | If $$\beta \gt \alpha$$ then we should switch. Else we should not switch. In English, beta is "If I chose D1, and Monty chose D3, is the prize behind D2?".
71 |
72 | Rather than go to the trouble of calculating alpha, we just note that:
73 |
74 | $$\alpha + \beta = 1$$
75 |
76 | This is true because of the constraints of the problem; Monty will never reveal the door containing the prize, hence the prize may only be behind doors D1 or D2.
77 |
78 | Hence, we are actually trying to determine if:
79 |
80 | $$\beta \gt \frac{1}{2}$$
81 |
82 | Using conditional probability:
83 |
84 | $$\beta = \frac{P(P=D2, F=D1, M=D3)}{P(F=D1, M=D3)}$$
85 |
86 | Using material from Section 4.2 (Enumeration):
87 |
88 | $$\beta = \frac{P(P=D2) \times P(F=D1) \times P(M=D3 | P=D2, F=D1)}{P(F=D1) \times P(M=D3 | F=D1)}$$
89 |
90 | $$\beta = \frac{\frac{1}{3} \times \frac{1}{3} \times P(M=D3 | P=D2, F=D1)}{\frac{1}{3} \times P(M=D3 | F=D1)}$$
91 |
92 | The conditional in the denominator can be read straight out of the `P(M|F)` table:
93 |
94 | $$\beta = \frac{\frac{1}{3} \times \frac{1}{3} \times P(M=D3 | P=D2, F=D1)}{\frac{1}{3} \times \frac{1}{2}}$$
95 |
96 | $$\beta = \frac{2}{3} \times P(M=D3 | P=D2, F=D1)$$
97 |
98 | That leaves the conditional:
99 |
100 | $$\gamma \buildrel\triangle\over = P(M=D3 | P=D2, F=D1)$$
101 |
102 | $$\beta = \frac{2}{3} \times \gamma$$
103 |
104 | Pause and consider what gamma actually is. If I choose D1, and the prize is behind D2...what door will Monty choose?
105 |
106 | - D1? No! It's already open.
107 | - D2? No! The prize is behind there.
108 | - D3? Yes.
109 |
110 | **Monty will always choose D3**!
111 |
112 | $$\gamma = 1$$
113 |
114 | $$\beta = \frac{2}{3}$$
115 |
116 | QED.
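
- - -

Appendix: since it's easy to get turned around with conditional probabilities, here is a small sanity check (my own addition, not from the course). It enumerates the joint distribution implied by the tables above, under the same assumption I used for `P(M | F, P)`: Monty opens a door that is neither my pick nor the prize door, uniformly at random among the remaining choices. It then conditions on `F = D1, M = D3` and prints beta.

```python
from fractions import Fraction

doors = ["D1", "D2", "D3"]
joint = {}
for prize in doors:
    for first in doors:
        # Monty opens a door that is neither my pick nor the prize door,
        # uniformly at random among the remaining options.
        options = [d for d in doors if d != first and d != prize]
        for monty in options:
            joint[(prize, first, monty)] = (Fraction(1, 3) * Fraction(1, 3)
                                            * Fraction(1, len(options)))

# Condition on the evidence F = D1, M = D3.
evidence = sum(p for (prize, first, monty), p in joint.items()
               if first == "D1" and monty == "D3")
beta = sum(p for (prize, first, monty), p in joint.items()
           if prize == "D2" and first == "D1" and monty == "D3") / evidence

print(beta)  # 2/3, so switching wins
```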
--------------------------------------------------------------------------------
/src/programming_languages/final/final_1.py:
--------------------------------------------------------------------------------
1 | # Underscoring the Magnitude
2 | #
3 | # Focus: Units 1 and 2, Regular Expressions and Lexical Analysis
4 | #
5 | # In this problem you will use regular expressions to specify tokens for a
6 | # part of a new programming language. You must handle seven types of
7 | # tokens:
8 | #
9 | #
10 | # PLUS +
11 | # MINUS -
12 | # TIMES *
13 | # DIVIDE /
14 | # IDENT my_variable Caps_Are_OK
15 | # STRING 'yes' "also this"
16 | # NUMBER 123 123_456_789
17 | #
18 | # The last three merit a more detailed explanation.
19 | #
20 | # An IDENT token is a non-empty sequence of lower- and/or upper-case
21 | # letters and underscores, but the first character cannot be an underscore.
22 | # (Letters are a-z and A-Z only.) The value of an IDENT token is the string
23 | # matched.
24 | #
25 | # A STRING token is zero or more of any character surrounded by 'single
26 | # quotes' or "double quotes". In this language, there are no escape
27 | # sequences, so "this\" is a string containing five characters. The value
28 | # of a STRING token is the string matched with the quotes removed.
29 | #
30 | # A NUMBER is a non-empty sequence of digits (0-9) and/or underscores,
31 | # except that the first character cannot be an underscore. Many real-world
32 | # languages actually support this, to make large numbers easier to read.
33 | # All NUMBERs in this language are positive integers; negative signs and/or
34 | # periods are not part of NUMBERs. The value of a NUMBER is the integer
35 | # value of its digits with all of the underscores removed: the value of
36 | # "12_34" is 1234 (the integer).
37 | #
38 | # For this problem we do *not* care about line number information. Only the
39 | # types and values of tokens matter. Whitespace characters are ' \t\v\r'
40 | # (and we have already filled them in for you below).
41 | #
42 | # Complete the lexer below.
43 |
44 | import ply.lex as lex
45 |
46 | tokens = ('PLUS', 'MINUS', 'TIMES', 'DIVIDE',
47 | 'IDENT', 'STRING', 'NUMBER')
48 |
49 | #####
50 | #
51 |
52 | # Place your token definition rules here.
53 |
54 | #
55 | #####
56 |
57 | t_ignore = ' \t\v\r'
58 |
59 | def t_error(t):
60 | print "Lexer: unexpected character " + t.value[0]
61 | t.lexer.skip(1)
62 |
63 | # PLUS +
64 | # MINUS -
65 | # TIMES *
66 | # DIVIDE /
67 | t_PLUS = r'\+'
68 | t_MINUS = r'\-'
69 | t_TIMES = r'\*'
70 | t_DIVIDE = r'\/'
71 |
72 | # An IDENT token is a non-empty sequence of lower- and/or upper-case
73 | # letters and underscores, but the first character cannot be an underscore.
74 | # (Letters are a-z and A-Z only.) The value of an IDENT token is the string
75 | # matched.
76 | t_IDENT = r'[a-zA-Z][a-zA-Z_]*'
77 |
78 | # A STRING token is zero or more of any character surrounded by 'single
79 | # quotes' or "double quotes". In this language, there are no escape
80 | # sequences, so "this\" is a string containing five characters. The value
81 | # of a STRING token is the string matched with the quotes removed.
82 | def t_STRING(token):
83 | #r'\'.*?\'|\".*?\"' !!AI my answer, don't need non-greedy operator.
84 | r'\'[^\']*\'|\"[^\"]*\"'
85 | token.value = token.value[1:-1]
86 | return token
87 |
88 | # A NUMBER is a non-empty sequence of digits (0-9) and/or underscores,
89 | # except that the first character cannot be an underscore. Many real-world
90 | # languages actually support this, to make large numbers easier to read.
91 | # All NUMBERs in this language are positive integers; negative signs and/or
92 | # periods are not part of NUMBERs. The value of a NUMBER is the integer
93 | # value of its digits with all of the underscores removed: the value of
94 | # "12_34" is 1234 (the integer).
95 | #
96 | def t_NUMBER(token):
97 | r'[0-9][0-9_]*'
98 | integer = token.value.replace("_","")
99 | token.value = int(integer)
100 | return token
101 |
102 | # We have included some testing code to help you check your work. Since
103 | # this is the final exam, you will definitely want to add your own tests.
104 | lexer = lex.lex()
105 |
106 | def test_lexer(input_string):
107 | lexer.input(input_string)
108 | result = [ ]
109 | while True:
110 | tok = lexer.token()
111 | if not tok: break
112 | result.append((tok.type,tok.value))
113 | return result
114 |
115 | question1 = " + - / * "
116 | answer1 = [('PLUS', '+'), ('MINUS', '-'), ('DIVIDE', '/'), ('TIMES', '*')]
117 |
118 | print "test 1"
119 | print test_lexer(question1) == answer1
120 |
121 | question2 = """ 'string "nested" \' "inverse 'nested'" """
122 | answer2 = [('STRING', 'string "nested" '), ('STRING', "inverse 'nested'")]
123 | print "test 2"
124 | print test_lexer(question2) == answer2
125 |
126 | question3 = """ 12_34 5_6_7_8 0______1 1234 """
127 | answer3 = [('NUMBER', 1234), ('NUMBER', 5678), ('NUMBER', 1), ('NUMBER', 1234)]
128 | print "test 3"
129 | print test_lexer(question3) == answer3
130 |
131 | question4 = """ 'he'llo w0rld 33k """
132 | answer4 = [('STRING', 'he'), ('IDENT', 'llo'), ('IDENT', 'w'), ('NUMBER',
133 | 0), ('IDENT', 'rld'), ('NUMBER', 33), ('IDENT', 'k')]
134 | print "test 4"
135 | print test_lexer(question4) == answer4
136 |
137 | question5 = """hello world"""
138 | answer5 = [('IDENT','hello'), ('IDENT','world')]
139 | print "test 5"
140 | print test_lexer(question5) == answer5
141 |
142 | question6 = """'hello' "world" """
143 | answer6 = [('STRING','hello'), ('STRING','world')]
144 | print "test 6"
145 | print test_lexer(question6) == answer6
146 |
147 | question7 = """ 'string "nested" \' """
148 | answer7 = [('STRING', 'string "nested" ')]
149 | print "test 7"
150 | print test_lexer(question7) == answer7
151 |
152 | question8 = """ "inverse 'nested'" """
153 | answer8 = [('STRING', "inverse 'nested'")]
154 | print "test 8"
155 | print test_lexer(question8) == answer8
156 |
157 |
--------------------------------------------------------------------------------
/src/programming_languages/ps2_p3.py:
--------------------------------------------------------------------------------
1 | # JavaScript: Comments & Keywords
2 | #
3 | # In this exercise you will write token definition rules for all of the
4 | # tokens in our subset of JavaScript *except* IDENTIFIER, NUMBER and
5 | # STRING. In addition, you will handle // end of line comments
6 | # as well as /* delimited comments */.
7 | #
8 | # We will assume that JavaScript is case sensitive and that keywords like
9 | # 'if' and 'true' must be written in lowercase. There are 26 possible
10 | # tokens that you must handle. The 'tokens' variable below has been
11 | # initialized below, listing each token's formal name (i.e., the value of
12 | # token.type). In addition, each token has its associated textual string
13 | # listed in a comment. For example, your lexer must convert && to a token
14 | # with token.type 'ANDAND' (unless the && is found inside a comment).
15 | #
16 | # Hint 1: Use an exclusive state for /* comments */. You may want to define
17 | # t_comment_ignore and t_comment_error as well.
18 |
19 | import ply.lex as lex
20 |
21 | def test_lexer(lexer,input_string):
22 | lexer.input(input_string)
23 | result = [ ]
24 | while True:
25 | tok = lexer.token()
26 | if not tok: break
27 | result = result + [tok.type]
28 | return result
29 |
30 | tokens = (
31 | 'ANDAND', # &&
32 | 'COMMA', # ,
33 | 'DIVIDE', # /
34 | 'ELSE', # else
35 | 'EQUAL', # =
36 | 'EQUALEQUAL', # ==
37 | 'FALSE', # false
38 | 'FUNCTION', # function
39 | 'GE', # >=
40 | 'GT', # >
41 | # 'IDENTIFIER', #### Not used in this problem.
42 | 'IF', # if
43 | 'LBRACE', # {
44 | 'LE', # <=
45 | 'LPAREN', # (
46 | 'LT', # <
47 | 'MINUS', # -
48 | 'NOT', # !
49 | # 'NUMBER', #### Not used in this problem.
50 | 'OROR', # ||
51 | 'PLUS', # +
52 | 'RBRACE', # }
53 | 'RETURN', # return
54 | 'RPAREN', # )
55 | 'SEMICOLON', # ;
56 | # 'STRING', #### Not used in this problem.
57 | 'TIMES', # *
58 | 'TRUE', # true
59 | 'VAR', # var
60 | )
61 |
62 | states = (
63 | ('javascriptmultilinecomment', 'exclusive'),
64 | )
65 |
66 | t_ignore = ' \t\r\f\v' # whitespace
67 |
68 | reserved = {
69 | 'else': 'ELSE',
70 | 'false': 'FALSE',
71 | 'function': 'FUNCTION',
72 | 'if': 'IF',
73 | 'return': 'RETURN',
74 | 'true': 'TRUE',
75 | 'var': 'VAR',
76 | }
77 |
78 | def t_eolcomment(token):
79 | r'//[^\n]*'
80 | pass
81 |
82 | t_ANDAND = r'&&'
83 | t_COMMA = r','
84 | t_DIVIDE = r'/'
85 | t_ELSE = r'else'
86 | t_EQUALEQUAL = r'=='
87 | t_EQUAL = r'='
88 | t_GE = r'>='
89 | t_GT = r'>'
90 | t_LBRACE = r'\{'
91 | t_LE = r'<='
92 | t_LPAREN = r'\('
93 | t_LT = r'<'
94 | t_MINUS = r'-'
95 | t_NOT = r'!'
96 | t_OROR = r'\|\|'
97 | t_PLUS = r'\+'
98 | t_RBRACE = r'\}'
99 | t_RPAREN = r'\)'
100 | t_SEMICOLON = r';'
101 | t_TIMES = r'\*'
102 |
103 | # -----------------------------------------------------------------------------
104 | # We are _not_ going to return an IDENTIFIER from here. Hence we expect
105 | # any matching string to be in the 'reserved' dictionary.
106 | # -----------------------------------------------------------------------------
107 | def t_IDENTIFIER(token):
108 | r'[a-z]+'
109 | token.type = reserved[token.value]
110 | return token
111 | # -----------------------------------------------------------------------------
112 |
113 | def t_newline(t):
114 | r'\n'
115 | t.lexer.lineno += 1
116 |
117 | def t_error(t):
118 | print "JavaScript Lexer: Illegal character line %s: %s" % (t.lexer.lineno, t.value[0])
119 | t.lexer.skip(1)
120 |
121 | # -----------------------------------------------------------------------------
122 | # 'javascriptmultilinecomment' state.
123 | # -----------------------------------------------------------------------------
124 | t_javascriptmultilinecomment_ignore = ' \t\r\f\v'
125 |
126 | def t_javascriptmultilinecomment(token):
127 | r'\/\*'
128 | token.lexer.begin('javascriptmultilinecomment')
129 |
130 | def t_javascriptmultilinecomment_end(token):
131 | r'\*\/'
132 | token.lexer.lineno += token.value.count('\n')
133 | token.lexer.begin('INITIAL')
134 |
135 | def t_javascriptmultilinecomment_error(token):
136 | token.lexer.skip(1)
137 | # -----------------------------------------------------------------------------
138 |
139 | # We have included two test cases to help you debug your lexer. You will
140 | # probably want to write some of your own.
141 |
142 | lexer = lex.lex()
143 |
144 | def test_lexer(input_string):
145 | lexer.input(input_string)
146 | result = []
147 | while True:
148 | tok = lexer.token()
149 | if not tok: break
150 | result.append(tok.type)
151 | return result
152 |
153 | input1 = """ - ! && () * , / ; { || } + < <= = == > >= else false function
154 | if return true var """
155 |
156 | output1 = ['MINUS', 'NOT', 'ANDAND', 'LPAREN', 'RPAREN', 'TIMES', 'COMMA',
157 | 'DIVIDE', 'SEMICOLON', 'LBRACE', 'OROR', 'RBRACE', 'PLUS', 'LT', 'LE',
158 | 'EQUAL', 'EQUALEQUAL', 'GT', 'GE', 'ELSE', 'FALSE', 'FUNCTION', 'IF',
159 | 'RETURN', 'TRUE', 'VAR']
160 |
161 | print test_lexer(input1) == output1
162 |
163 | input2 = """
164 | if // else mystery
165 | =/*=*/=
166 | true /* false
167 | */ return"""
168 |
169 | output2 = ['IF', 'EQUAL', 'EQUAL', 'TRUE', 'RETURN']
170 |
171 | print test_lexer(input2) == output2
172 |
173 | input3 = """
174 | if /* true // else
175 | */
176 | """
177 | output3 = ['IF']
178 | print test_lexer(input3) == output3
179 |
180 |
--------------------------------------------------------------------------------
/src/programming_languages/final/final_2.py:
--------------------------------------------------------------------------------
1 | # Terrible Tuples
2 | #
3 | # Focus: Units 3 and 4, Grammars and Parsing
4 | #
5 | # In this problem you will use context-free grammars to specify some
6 | # expressions for part of a new programming language. We will specify tuples
7 | # and lists, as in Python. We will consider four types of expressions.
8 | #
9 | # 1. An expression can be a single NUMBER token. In this case, your parser
10 | # should return ("number",XYZ) where XYZ is the value of the NUMBER
11 | # token.
12 | #
13 | # 2. An expression can be LPAREN expression RPAREN . In this case, your
14 | # parser should return the value of the expression inside the parentheses.
15 | #
16 | # 3. An expression can be LPAREN "a list of more than one comma-separated
17 | # expressions" RPAREN. This should remind you of tuples in Python:
18 | #
19 | # (1,2,3)
20 | #
21 | # The inner expressions are 1 2 and 3, and they are separated by commas.
22 | # In this case, your parser should return ("tuple", ...) where ... is a
23 | # list of the child expression values. For example, for (1,2) you should
24 | # return ("tuple",[("number",2),("number",3)]).
25 | #
26 | # 4. An expression can be LBRACKET "a list of one or more comma-separated
27 | # expressions" RBRACKET. This should remind you of lists in Python:
28 | #
29 | # [7,8,9]
30 | #
31 | # These parse exactly like tuples, except that they use square brackets
32 | # instead of parentheses, and singleton lists like [7] are valid. Your
33 | # parser should return ("list", ...) as above, so [7,8] would return
34 | # ("list",[("number",7),("number",8)]).
35 | #
36 | # Complete the parser below.
37 |
38 | import ply.lex as lex
39 | import ply.yacc as yacc
40 |
41 | start = 'exp' # the start symbol in our grammar
42 |
43 | #####
44 | #
45 |
46 | # Place your grammar definition rules here.
47 |
48 | #
49 | #####
50 |
51 | # 1. An expression can be a single NUMBER token. In this case, your parser
52 | # should return ("number",XYZ) where XYZ is the value of the NUMBER
53 | # token.
54 | def p_exp_number(p):
55 | r'exp : NUMBER'
56 | p[0] = ("number", p[1])
57 |
58 | # 2. An expression can be LPAREN expression RPAREN . In this case, your
59 | # parser should return the value of the expression inside the parentheses.
60 | def p_exp_paren(p):
61 | r'exp : LPAREN exp RPAREN'
62 | p[0] = p[2]
63 |
64 | # 3. An expression can be LPAREN "a list of more than one comma-separated
65 | # expressions" RPAREN. This should remind you of tuples in Python:
66 | #
67 | # (1,2,3)
68 | #
69 | # The inner expressions are 1 2 and 3, and they are separated by commas.
70 | # In this case, your parser should return ("tuple", ...) where ... is a
71 | # list of the child expression values. For example, for (1,2) you should
72 | # return ("tuple",[("number",2),("number",3)]).
73 | #
74 | # !!AI this is unusually tricky because e.g. (4) is not a tuple, it's
75 | # a number, so we can't re-use the nonterminal tupleelements to terminate
76 | # the parse. Odd!
77 | def p_exp_tuple(p):
78 | r'exp : LPAREN tupleelements RPAREN'
79 | p[0] = ("tuple", p[2])
80 |
81 | def p_tupleelements(p):
82 | r'tupleelements : exp COMMA tupleelements_end'
83 | p[0] = [p[1]] + p[3]
84 |
85 | def p_tupleelements_end_more(p):
86 | r'tupleelements_end : tupleelements'
87 | p[0] = p[1]
88 |
89 | def p_tupleelements_end_finished(p):
90 | r'tupleelements_end : exp'
91 | p[0] = [p[1]]
92 |
93 | # 4. An expression can be LBRACKET "a list of one or more comma-separated
94 | # expressions" RBRACKET. This should remind you of lists in Python:
95 | #
96 | # [7,8,9]
97 | #
98 | # These parse exactly like tuples, except that they use square brackets
99 | # instead of parentheses, and singleton lists like [7] are valid. Your
100 | # parser should return ("list", ...) as above, so [7,8] would return
101 | # ("list",[("number",7),("number",8)]).
102 | #
103 | # !!AI this is much easier than tuples, as e.g. [4] is a list.
104 | def p_exp_list(p):
105 | 'exp : LBRACKET listelements RBRACKET'
106 | p[0] = ("list", p[2])
107 |
108 | def p_listelements_single(p):
109 | 'listelements : exp'
110 | p[0] = [p[1]]
111 |
112 | def p_listelements_morethanone(p):
113 | 'listelements : exp COMMA listelements'
114 | p[0] = [p[1]] + p[3]
115 |
116 | def p_error(p):
117 | raise SyntaxError
118 |
119 | # We have provided a lexer for you. You should not change it.
120 |
121 | tokens = ('LPAREN', 'RPAREN', 'LBRACKET', 'RBRACKET', 'NUMBER', 'COMMA')
122 |
123 | def t_NUMBER(token):
124 | r"[0-9]+"
125 | token.value = int(token.value)
126 | return token
127 |
128 | t_ignore = ' \t\v\r'
129 | t_COMMA = r','
130 | t_LPAREN = r'\('
131 | t_RPAREN = r'\)'
132 | t_LBRACKET = r'\['
133 | t_RBRACKET = r'\]'
134 |
135 | def t_error(t):
136 | print "Lexer: unexpected character " + t.value[0]
137 | t.lexer.skip(1)
138 |
139 | # We have included some testing code to help you check your work. Since
140 | # this is the final exam, you will definitely want to add your own tests.
141 | lexer = lex.lex()
142 |
143 | def test(input_string):
144 | lexer.input(input_string)
145 | parser = yacc.yacc()
146 | try:
147 | parse_tree = parser.parse(input_string, lexer=lexer)
148 | return parse_tree
149 | except:
150 | return "error"
151 |
152 | question1 = " 123 "
153 | answer1 = ('number', 123)
154 | print "test 1"
155 | print test(question1) == answer1
156 |
157 | question2 = " (123) "
158 | print "test 2"
159 | print test(question2) == answer1
160 |
161 | question3 = " (1,2,3) "
162 | answer3 = ('tuple', [('number', 1), ('number', 2), ('number', 3)])
163 | print "test 3"
164 | print test(question3) == answer3
165 |
166 | question4 = " [123] "
167 | answer4 = ('list', [('number', 123)])
168 | print "test 4"
169 | print test(question4) == answer4
170 |
171 | question5 = " [1,2,3] "
172 | answer5 = ('list', [('number', 1), ('number', 2), ('number', 3)])
173 | print "test 5"
174 | print test(question5) == answer5
175 |
176 | question6 = " [(1,2),[3,[4]]] "
177 | answer6 = ('list', [('tuple', [('number', 1), ('number', 2)]), ('list', [('number', 3), ('list', [('number', 4)])])])
178 | print "test 6"
179 | print test(question6) == answer6
180 |
181 | question7 = " (1,2) [3,4) "
182 | answer7 = "error"
183 | print "test 7"
184 | print test(question7) == answer7
185 |
186 |
187 |
188 |
--------------------------------------------------------------------------------
/[book] html5 and javascript web apps (oreilly).md:
--------------------------------------------------------------------------------
1 | # HTML5 and JavaScript Web Apps
2 |
3 | O'Reilly, 2012
4 |
5 | ## Ch1 - Client-Side Architecture
6 |
7 | - Fat clients are back.
8 | - Before HTML5, server-side templating and JS for niceties.
9 | - Browsers are becoming application platforms.
10 | - Cross-browser DOM libraries like jQuery are not enough; they do not give you a client-side architecture.
11 | - Architecture of heavy, HTML5-driven UI still in its infancy, but this is the topic of the book.
12 |
13 | ## Ch2 - The Mobile Web
14 |
15 | - Support mobile first, then desktop.
16 | - Why?
17 | - You think about constrained resolutions, flexible layouts.
18 | - Device sensors.
19 | - Code-quality, battery life.
20 | - Bleeding edge.
21 | - WebKit on iOS and Android
22 | - Android moving to Dolphin.
23 | - Opera Mobile.
24 | - Internet Explorer Mobile.
25 | - QA
26 | - Preferable to use actual devices.
27 | - Emulators available at [mobilexweb](http://www.mobilexweb.com/emulators).
28 |
29 | ## Ch3 - Building for the Mobile Web
30 |
31 | - Success dependent on design and performance.
32 | - Consistent across all platforms.
33 | - Constrained by CPU/GPU and network throughput/latency.
34 | - Reference: Mobile Design Pattern Gallery (O'Reilly), for UI patterns for native apps.
35 | - Want to use hardware acceleration where possible.
36 | - Functions like `translate3d`, `scale3d`, `translateZ`.
37 | - CSS features like `gradient`, `box-shadow`, `borders`, `background-repeat` cause many repaints, taxing on GPU and battery life.
38 | - p36: use JavaScript to swap class names, and let CSS handle animation.
39 | - Decouples JavaScript from CSS.
40 | - Sliding, flipping, and rotating animations.
41 | - How to debug frames per second and hardware acceleration in Safari.
42 | - p50: fetching and caching
43 | - Code for looking for pages with a `fetch` class name and then using `ajax()` to pre-fetch and then insert into `localStorage`.
44 | - p53: write AJAX response text into a sandboxed `iframe`.
45 | - Browser does DOM parsing and sanitisation for you.
46 | - Just as fast, sometimes faster, than usual `innerHTML` approach.
47 | - p56: network detection. online? slow?
48 | - p61: *single page* approach
49 | - All content in one page. Each subpage wrapped in a div.
50 | - **jQuery Mobile.**
51 | - p61: example.
52 | - **jQTouch**
53 | - Basic widgets and animations.
54 | - Lacks support for multiple platforms.
55 | - p63: example
56 |   - In jQuery Mobile and jQTouch you write specially structured HTML. When loaded, the library reconfigures pages and turns regular links into AJAX-based animated ones.
57 | - p64: *no page structure* approach
58 | - Light markup, not tied to specific DOM structure.
59 | - **[xui](http://xuijs.com/)**
60 | - Comes from PhoneGap.
61 | - DOM manipulation for mobile environment.
62 | - Very light.
63 | - p65: *100% JavaScript Driven* approach
64 | - **Sencha Touch**
65 | - Don't write HTML. UI and app in JavaScript.
66 | - p66: example
67 | - **Wink Toolkit**
68 | - Small following.
69 | - JavaScript helpers, UI via JavaScript.
70 | - p68: example
71 | - **The-M-Project**
72 | - On top of jQuery and jQuery Mobile.
73 | - MVC, Content Binding, Dynamic Value Computing, Event Handling.
74 | - Much more than just fancy UI.
75 | - p70: example.
76 | - Other frameworks: SproutCore, Jo, Zepto, LungoJS, …, but not fit-for-purpose.
77 | - Mobile debugging
78 | - **[weinre](http://people.apache.org/~pmuellr/weinre/)**
79 | - Like a remote Firebug.
80 | - **Adobe Shadow.**
81 | - Again like remote Firebug.
82 | - **Opera Dragonfly**
83 | - For Opera Mobile Emulator.
84 |
85 | ## Ch4 - The Desktop Web
86 |
87 | - Moving towards client-side generation of view. Backend delivers just data over JSON / XML.
88 | - Pros: better UX, less bandwidth, offline capable.
89 | - Cons: security of locally stored data, speed.
90 | - Feature detection
91 | - **modernizr.js** or simple JavaScript to detect client-side capabilities.
92 | - **FormFactor.js** for detecting resolution.
93 | - Sometimes, however, you unfortunately need to parse `userAgent` string, especially for bugs.
94 | - **ua-parser** for parsing useragent.
95 | - **Platform.js** is another userAgent detection method.
96 | - **MobileESP** for server-side userAgent detection.
97 | - Compression.
98 | - GZIP resources and JSON/XML responses.
99 | - Minification, p90
100 | - **JSLint** then **JSMin**
101 | - **Packer**, popular and advanced.
102 | - **Dojo ShrinkSafe**, popular.
103 | - **YUI Compressor**, safety of JSMin with high compression of ShrinkSafe.
104 | - p91, **CompressorRater** to compare.
105 | - p91, **grunt**.
106 | - Command-line build tool for frontend projects.
107 | - node.js package.
108 | - **Jawr**, tunable JavaScript/CSS packager.
109 | - **Ziproxy**, forwarding, noncaching, compressing HTTP proxy.
110 | - MVC
111 | - TodoMVC for comparing many MVC frameworks with a simple Todo app.
112 | - **Backbone**
113 | - Framework of choice.
114 |     - Uses **Underscore.js** heavily.
115 | - Data is models. Created, validated, destroyed, saved to server.
116 | - Any UI action change to model triggers `change` event. Views notified and update themselves.
117 | - p99: `Model` and `Collection` example.
118 | - RESTful URI endpoints for models.
119 | - **Ember**, p101
120 | - Formerly Amber.js and SproutCore 2.0.
121 | - Made by Apple
122 | - less wiring than Backbone.
123 | - p101: example
124 | - **Angular**, p102
125 | - Made by Google.
126 | - p103: example.
127 | - Dependency injection.
128 | - **Batman**
129 | - Created by Shopify.
130 | - Similar to Knockout and Angular
131 | - p104: model, server synch.
132 | - **Knockout**
133 | - Three core features
134 | - Observable and dependency tracking.
135 | - Declarative bindings.
136 | - Templating.
--------------------------------------------------------------------------------
/src/programming_languages/graphics.py:
--------------------------------------------------------------------------------
1 | # Wes Weimer
2 | #
3 | # This allows students with minimal knowledge to produce pretty pictures of
4 | # HTML webpages.
5 | #
6 | import sys
7 | import subprocess
8 | import os
9 |
10 | # If you want the output filenames to be different (e.g., based on
11 | # environment variables), just change them here.
12 | output_latex_filename = "./student"
13 |
14 | # If you want to put the static images elsewhere, just change this.
15 | image_directory = "images/" # make "" for current directory
16 |
17 | # The example image output requires these packages:
18 | #
19 | # pdflatex (aka "pdftex")
20 | # imagemagick (for "convert")
21 | # ghostscript (called by "convert")
22 |
23 | outfile = None
24 | logfile = None
25 |
26 | import base64
27 | import json
28 | import sys
29 |
30 |
31 | def word(x):
32 | global outfile
33 | for i in range(len(x)):
34 | if x[i] == '_':
35 | outfile.write("\_")
36 | elif x[i] != '\\':
37 | outfile.write(x[i])
38 | outfile.write (" ")
39 |
40 | def warning(x):
41 | global outfile
42 | outfile.write("{\\color{red}{\\bf{" + x + "}}}")
43 |
44 | closetags = []
45 |
46 | def pushclosing(x):
47 | global closetags
48 | closetags = [x] + closetags
49 | def begintag(tag,args):
50 | global outfile
51 | global logfile
52 | tag = tag.lower()
53 | # need "IMG"
54 | logfile.write("TAG + " + tag + "\n")
55 | if tag == "a":
56 | if "href" in args:
57 | target = args["href"]
58 | outfile.write("\\href{" + target + "}{\underline{")
59 | pushclosing("}}")
60 | else:
61 | warning("invalid 'a' tag: no 'href' argument")
62 | pushclosing("")
63 | elif tag == "img":
64 | if "src" in args:
65 | target = args["src"]
66 | filename = image_directory + target
67 | if os.path.isfile(filename):
68 | if "height" in args and "width" in args:
69 | h = args["height"]
70 | w = args["width"]
71 | outfile.write("\\includegraphics[height=" + h + "px, width=" + w + "px]{" + filename + "}")
72 | pushclosing("")
73 | else:
74 | outfile.write("\\includegraphics{" + filename + "}")
75 | pushclosing("")
76 | else:
77 | warning("'img' " + target + " not found (predefined local images only, sorry)")
78 | pushclosing("")
79 | else:
80 | warning("invalid 'img' tag: no 'src' argument")
81 | pushclosing("")
82 | elif tag == "b" or tag == "strong":
83 | outfile.write("\\textbf{")
84 | pushclosing("}")
85 | elif tag == "ul":
86 | outfile.write("\\begin{itemize}")
87 | pushclosing("\\end{itemize}")
88 | elif tag == "ol":
89 | outfile.write("\\begin{enumerate}")
90 | pushclosing("\\end{enumerate}")
91 | elif tag == "li":
92 | outfile.write("\\item{")
93 | pushclosing("}")
94 | elif tag == "big":
95 | outfile.write("{\\Large ")
96 | pushclosing("}")
97 | elif tag == "tt" or tag == "code":
98 | outfile.write("{\\tt ")
99 | pushclosing("}")
100 | elif tag == "small":
101 | outfile.write("{\\footnotesize ")
102 | pushclosing("}")
103 | elif tag == "i" or tag == "em":
104 | outfile.write("\\emph{")
105 | pushclosing("}")
106 | elif tag == "hr":
107 | outfile.write("{\\begin{center} \\line(1,0){400} \\end{center}}")
108 | pushclosing("")
109 | elif tag == "h1":
110 | outfile.write("\\section*{")
111 | pushclosing("}")
112 | elif tag == "h2":
113 | outfile.write("\\subsection*{")
114 | pushclosing("}")
115 | elif tag == "h3":
116 | outfile.write("\\subsubsection*{")
117 | pushclosing("}")
118 | elif tag == "p" or tag == "br":
119 | outfile.write("\n~\n\n\\noindent ")
120 | pushclosing("\n")
121 | else:
122 | pushclosing("")
123 |
124 | def endtag():
125 | global outfile
126 | global logfile
127 | global closetags
128 | if closetags == []:
129 | raise IndexError
130 | tag = closetags[0]
131 | closetags = closetags[1:]
132 | logfile.write("TAG -\n")
133 | outfile.write(tag)
134 |
135 | def initialize():
136 | global outfile
137 | global logfile
138 | global output_latex_filename
139 | outfile = open(output_latex_filename + ".tex",'w+')
140 | logfile = open(output_latex_filename + ".taglog",'w+')
141 | outfile.write("""
142 | \\documentclass{article}
143 | \\usepackage{fullpage}
144 | \\usepackage{hyperref}
145 | \\hypersetup{
146 | colorlinks,%
147 | citecolor=blue,%
148 | filecolor=blue,%
149 | linkcolor=blue,%
150 | urlcolor=blue
151 | }
152 | \\usepackage{graphicx}
153 | \\usepackage{color}
154 | \\usepackage{url}
155 | \\usepackage{geometry}
156 | \\pagestyle{empty}
157 | \\begin{document}
158 | \\mag 1440
159 | """)
160 |
161 | def finalize():
162 | global outfile
163 | global logfile
164 | logfile.close()
165 | outfile.write("""
166 | \\end{document}
167 | """)
168 | #print "Writing TEX Output: " + output_latex_filename + ".tex"
169 | outfile.close()
170 | #print "Rendering PDF Graphics: " + output_latex_filename + ".pdf"
171 | cmd = "pdflatex " + output_latex_filename + ".tex > /dev/null < /dev/null"
172 | subprocess.call(cmd,shell=True)
173 | #print "Rendering PNG Graphics: " + output_latex_filename + ".png"
174 | cmd = "convert " + output_latex_filename + ".pdf " + \
175 | output_latex_filename + ".png"
176 | subprocess.call(cmd,shell=True)
177 |
178 |
179 |
--------------------------------------------------------------------------------
/src/programming_languages/unit5_l9/graphics.py:
--------------------------------------------------------------------------------
1 | # Wes Weimer
2 | #
3 | # This allows students with minimal knowledge to produce pretty pictures of
4 | # HTML webpages.
5 | #
6 | import sys
7 | import subprocess
8 | import os
9 |
10 | # If you want the output filenames to be different (e.g., based on
11 | # environment variables), just change them here.
12 | output_latex_filename = "./student"
13 |
14 | # If you want to put the static images elsewhere, just change this.
15 | image_directory = "images/" # make "" for current directory
16 |
17 | # The example image output requires these packages:
18 | #
19 | # pdflatex (aka "pdftex")
20 | # imagemagick (for "convert")
21 | # ghostscript (called by "convert")
22 |
23 | outfile = None
24 | logfile = None
25 |
26 | import base64
27 | import json
28 | import sys
29 |
30 |
31 | def word(x):
32 | global outfile
33 | for i in range(len(x)):
34 | if x[i] == '_':
35 | outfile.write("\_")
36 | elif x[i] != '\\':
37 | outfile.write(x[i])
38 | outfile.write (" ")
39 |
40 | def warning(x):
41 | global outfile
42 | outfile.write("{\\color{red}{\\bf{" + x + "}}}")
43 |
44 | closetags = []
45 |
46 | def pushclosing(x):
47 | global closetags
48 | closetags = [x] + closetags
49 | def begintag(tag,args):
50 | global outfile
51 | global logfile
52 | tag = tag.lower()
53 | # need "IMG"
54 | logfile.write("TAG + " + tag + "\n")
55 | if tag == "a":
56 | if "href" in args:
57 | target = args["href"]
58 | outfile.write("\\href{" + target + "}{\underline{")
59 | pushclosing("}}")
60 | else:
61 | warning("invalid 'a' tag: no 'href' argument")
62 | pushclosing("")
63 | elif tag == "img":
64 | if "src" in args:
65 | target = args["src"]
66 | filename = image_directory + target
67 | if os.path.isfile(filename):
68 | if "height" in args and "width" in args:
69 | h = args["height"]
70 | w = args["width"]
71 | outfile.write("\\includegraphics[height=" + h + "px, width=" + w + "px]{" + filename + "}")
72 | pushclosing("")
73 | else:
74 | outfile.write("\\includegraphics{" + filename + "}")
75 | pushclosing("")
76 | else:
77 | warning("'img' " + target + " not found (predefined local images only, sorry)")
78 | pushclosing("")
79 | else:
80 | warning("invalid 'img' tag: no 'src' argument")
81 | pushclosing("")
82 | elif tag == "b" or tag == "strong":
83 | outfile.write("\\textbf{")
84 | pushclosing("}")
85 | elif tag == "ul":
86 | outfile.write("\\begin{itemize}")
87 | pushclosing("\\end{itemize}")
88 | elif tag == "ol":
89 | outfile.write("\\begin{enumerate}")
90 | pushclosing("\\end{enumerate}")
91 | elif tag == "li":
92 | outfile.write("\\item{")
93 | pushclosing("}")
94 | elif tag == "big":
95 | outfile.write("{\\Large ")
96 | pushclosing("}")
97 | elif tag == "tt" or tag == "code":
98 | outfile.write("{\\tt ")
99 | pushclosing("}")
100 | elif tag == "small":
101 | outfile.write("{\\footnotesize ")
102 | pushclosing("}")
103 | elif tag == "i" or tag == "em":
104 | outfile.write("\\emph{")
105 | pushclosing("}")
106 | elif tag == "hr":
107 | outfile.write("{\\begin{center} \\line(1,0){400} \\end{center}}")
108 | pushclosing("")
109 | elif tag == "h1":
110 | outfile.write("\\section*{")
111 | pushclosing("}")
112 | elif tag == "h2":
113 | outfile.write("\\subsection*{")
114 | pushclosing("}")
115 | elif tag == "h3":
116 | outfile.write("\\subsubsection*{")
117 | pushclosing("}")
118 | elif tag == "p" or tag == "br":
119 | outfile.write("\n~\n\n\\noindent ")
120 | pushclosing("\n")
121 | else:
122 | pushclosing("")
123 |
124 | def endtag():
125 | global outfile
126 | global logfile
127 | global closetags
128 | if closetags == []:
129 | raise IndexError
130 | tag = closetags[0]
131 | closetags = closetags[1:]
132 | logfile.write("TAG -\n")
133 | outfile.write(tag)
134 |
135 | def initialize():
136 | global outfile
137 | global logfile
138 | global output_latex_filename
139 | outfile = open(output_latex_filename + ".tex",'w+')
140 | logfile = open(output_latex_filename + ".taglog",'w+')
141 | outfile.write("""
142 | \\documentclass{article}
143 | \\usepackage{fullpage}
144 | \\usepackage{hyperref}
145 | \\hypersetup{
146 | colorlinks,%
147 | citecolor=blue,%
148 | filecolor=blue,%
149 | linkcolor=blue,%
150 | urlcolor=blue
151 | }
152 | \\usepackage{graphicx}
153 | \\usepackage{color}
154 | \\usepackage{url}
155 | \\usepackage{geometry}
156 | \\pagestyle{empty}
157 | \\begin{document}
158 | \\mag 1440
159 | """)
160 |
161 | def finalize():
162 | global outfile
163 | global logfile
164 | logfile.close()
165 | outfile.write("""
166 | \\end{document}
167 | """)
168 | #print "Writing TEX Output: " + output_latex_filename + ".tex"
169 | outfile.close()
170 | #print "Rendering PDF Graphics: " + output_latex_filename + ".pdf"
171 | cmd = "pdflatex " + output_latex_filename + ".tex > /dev/null < /dev/null"
172 | subprocess.call(cmd,shell=True)
173 | #print "Rendering PNG Graphics: " + output_latex_filename + ".png"
174 | cmd = "convert " + output_latex_filename + ".pdf " + \
175 | output_latex_filename + ".png"
176 | subprocess.call(cmd,shell=True)
177 |
178 |
179 |
--------------------------------------------------------------------------------
/src/programming_languages/ps6/graphics.py:
--------------------------------------------------------------------------------
1 | # Wes Weimer
2 | #
3 | # This allows students with minimal knowledge to produce pretty pictures of
4 | # HTML webpages.
5 | #
6 | import sys
7 | import subprocess
8 | import os
9 |
10 | # If you want the output filenames to be different (e.g., based on
11 | # environment variables), just change them here.
12 | output_latex_filename = "./student"
13 |
14 | # If you want to put the static images elsewhere, just change this.
15 | image_directory = "images/" # make "" for current directory
16 |
17 | # The example image output requires these packages:
18 | #
19 | # pdflatex (aka "pdftex")
20 | # imagemagick (for "convert")
21 | # ghostscript (called by "convert")
22 |
23 | outfile = None
24 | logfile = None
25 |
26 | import base64
27 | import json
28 | import sys
29 |
30 |
31 | def word(x):
32 | global outfile
33 | for i in range(len(x)):
34 | if x[i] == '_':
35 | outfile.write("\_")
36 | elif x[i] != '\\':
37 | outfile.write(x[i])
38 | outfile.write (" ")
39 |
40 | def warning(x):
41 | global outfile
42 | outfile.write("{\\color{red}{\\bf{" + x + "}}}")
43 |
44 | closetags = []
45 |
46 | def pushclosing(x):
47 | global closetags
48 | closetags = [x] + closetags
49 | def begintag(tag,args):
50 | global outfile
51 | global logfile
52 | tag = tag.lower()
53 | # need "IMG"
54 | logfile.write("TAG + " + tag + "\n")
55 | if tag == "a":
56 | if "href" in args:
57 | target = args["href"]
58 | outfile.write("\\href{" + target + "}{\underline{")
59 | pushclosing("}}")
60 | else:
61 | warning("invalid 'a' tag: no 'href' argument")
62 | pushclosing("")
63 | elif tag == "img":
64 | if "src" in args:
65 | target = args["src"]
66 | filename = image_directory + target
67 | if os.path.isfile(filename):
68 | if "height" in args and "width" in args:
69 | h = args["height"]
70 | w = args["width"]
71 | outfile.write("\\includegraphics[height=" + h + "px, width=" + w + "px]{" + filename + "}")
72 | pushclosing("")
73 | else:
74 | outfile.write("\\includegraphics{" + filename + "}")
75 | pushclosing("")
76 | else:
77 | warning("'img' " + target + " not found (predefined local images only, sorry)")
78 | pushclosing("")
79 | else:
80 | warning("invalid 'img' tag: no 'src' argument")
81 | pushclosing("")
82 | elif tag == "b" or tag == "strong":
83 | outfile.write("\\textbf{")
84 | pushclosing("}")
85 | elif tag == "ul":
86 | outfile.write("\\begin{itemize}")
87 | pushclosing("\\end{itemize}")
88 | elif tag == "ol":
89 | outfile.write("\\begin{enumerate}")
90 | pushclosing("\\end{enumerate}")
91 | elif tag == "li":
92 | outfile.write("\\item{")
93 | pushclosing("}")
94 | elif tag == "big":
95 | outfile.write("{\\Large ")
96 | pushclosing("}")
97 | elif tag == "tt" or tag == "code":
98 | outfile.write("{\\tt ")
99 | pushclosing("}")
100 | elif tag == "small":
101 | outfile.write("{\\footnotesize ")
102 | pushclosing("}")
103 | elif tag == "i" or tag == "em":
104 | outfile.write("\\emph{")
105 | pushclosing("}")
106 | elif tag == "hr":
107 | outfile.write("{\\begin{center} \\line(1,0){400} \\end{center}}")
108 | pushclosing("")
109 | elif tag == "h1":
110 | outfile.write("\\section*{")
111 | pushclosing("}")
112 | elif tag == "h2":
113 | outfile.write("\\subsection*{")
114 | pushclosing("}")
115 | elif tag == "h3":
116 | outfile.write("\\subsubsection*{")
117 | pushclosing("}")
118 | elif tag == "p" or tag == "br":
119 | outfile.write("\n~\n\n\\noindent ")
120 | pushclosing("\n")
121 | else:
122 | pushclosing("")
123 |
124 | def endtag():
125 | global outfile
126 | global logfile
127 | global closetags
128 | if closetags == []:
129 | raise IndexError
130 | tag = closetags[0]
131 | closetags = closetags[1:]
132 | logfile.write("TAG -\n")
133 | outfile.write(tag)
134 |
135 | def initialize():
136 | global outfile
137 | global logfile
138 | global output_latex_filename
139 | outfile = open(output_latex_filename + ".tex",'w+')
140 | logfile = open(output_latex_filename + ".taglog",'w+')
141 | outfile.write("""
142 | \\documentclass{article}
143 | \\usepackage{fullpage}
144 | \\usepackage{hyperref}
145 | \\hypersetup{
146 | colorlinks,%
147 | citecolor=blue,%
148 | filecolor=blue,%
149 | linkcolor=blue,%
150 | urlcolor=blue
151 | }
152 | \\usepackage{graphicx}
153 | \\usepackage{color}
154 | \\usepackage{url}
155 | \\usepackage{geometry}
156 | \\pagestyle{empty}
157 | \\begin{document}
158 | \\mag 1440
159 | """)
160 |
161 | def finalize():
162 | global outfile
163 | global logfile
164 | logfile.close()
165 | outfile.write("""
166 | \\end{document}
167 | """)
168 | #print "Writing TEX Output: " + output_latex_filename + ".tex"
169 | outfile.close()
170 | #print "Rendering PDF Graphics: " + output_latex_filename + ".pdf"
171 | cmd = "pdflatex " + output_latex_filename + ".tex > /dev/null < /dev/null"
172 | subprocess.call(cmd,shell=True)
173 | #print "Rendering PNG Graphics: " + output_latex_filename + ".png"
174 | cmd = "convert " + output_latex_filename + ".pdf " + \
175 | output_latex_filename + ".png"
176 | subprocess.call(cmd,shell=True)
177 |
178 |
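179 | # A minimal usage sketch of the API above (not a definitive driver): it assumes
180 | # pdflatex and ImageMagick's "convert" are installed, as noted at the top of
181 | # this file, and simply renders a heading and one short paragraph.
182 | if __name__ == "__main__":
183 |     initialize()                  # open student.tex and student.taglog
184 |     begintag("h1", {})            # emits \section*{ and pushes the closing }
185 |     word("Hello")
186 |     endtag()                      # pops and writes the pending }
187 |     begintag("p", {})             # paragraph break (\noindent)
188 |     begintag("b", {})             # \textbf{
189 |     word("Rendered")
190 |     endtag()
191 |     word("via LaTeX.")
192 |     endtag()
193 |     finalize()                    # \end{document}, then pdflatex and convert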
--------------------------------------------------------------------------------
/[admn] commands.md:
--------------------------------------------------------------------------------
1 | ## Commands
2 |
3 | - `tshark` HTTP traffic on loopback port 8000:
4 |
5 | sudo tshark -i lo -V -T text -f "tcp port 8000" -d "tcp.port==8000,http" -R "http.request"
6 |
7 | - `ffmpeg` conversion for iPad:
8 |
9 | ffmpeg -i ${input_file} -acodec libfaac -ac 2 -ab 192k -s 1024x768 -vcodec libx264 -vprofile baseline -tune film -preset slower -b:v 1200k -f mp4 -threads 0 ${output_file}
10 |
11 | - Sort `du` output by size, with human-readable sizes (numeric-sort the raw `du` output, then re-run `du -hs` on each path; a one-pass variant follows):
12 |
13 | du | sort -nr | cut -f2- | xargs du -hs
14 |
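- Simpler one-pass alternative where `sort` supports `-h` (human-numeric sort, GNU coreutils 7.5+; a sketch, not tested on BSD sort):

    du -h | sort -hr
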
15 | - streaming tcpdump directly to a Windows Wireshark instance (untested)
16 |
17 |     tcpdump -w - -v -i eth0 port 80 | "c:\program files\wireshark\wireshark.exe" -k -i -
18 |
19 | - Python profiling
20 |
21 | # Execute command with profiling
22 | python -u -m cProfile -o profile.stats script.py arg1 arg2 --hostname blah
23 |
24 | # Sort stats by total time spent in function
25 | python -c "import pstats; p = pstats.Stats(\"profile.stats\"); p.sort_stats('time').print_stats(20)"
26 |
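    # Browse the same stats interactively (stock pstats module; type "help" at its prompt)
    python -m pstats profile.stats
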
27 | - How to run Wireshark on Mac OS X
28 |
29 | # Install XQuartz, install Wireshark.
30 | # Run XQuartz via Spotlight
31 | # Open a new Terminal, run:
32 |
33 | open /Applications/Wireshark.app/
34 |
35 | # A new xterm instance opens, but Wireshark isn't visible. In the xterm instance run:
36 |
37 |     export DISPLAY=:0
38 |
39 | # Wireshark should now be visible.
40 |
41 | - Useful httrack one-liner to mirror a site, its 'near' non-HTML resources, and every page one link away, into a sensible directory structure.
42 |
43 | httrack http://www.cs.columbia.edu/~smaskey/CS6998/ -W -O "/Users/ai/websites/smaskey" --extended-parsing --mirrorlinks --structure=4 +*.pdf
44 |
45 | - Another httrack one-liner.
46 |
47 | httrack "http://www.bradblock.com.s3-website-us-west-1.amazonaws.com/mll.html" --mirror-wizard --path "/home/ubuntu/websites" --near --structure=4 --sockets=1 --priority=7
48 |
49 | - Setting up SAMBA on RedHat
50 | - `yum install samba samba-client`
51 |     - Replace `/etc/samba/smb.conf` contents with the following, replacing `${ip_address}` with the local IP address:
52 |
53 | [global]
54 |
55 | workgroup = DCL
56 | local master = no
57 | preferred master = no
58 | server string = %L Samba %v
59 | interfaces = 127.0.0.1 ${ip_address}
60 | socket address = ${ip_address}
61 | log file = /var/log/samba/log.%m
62 | max log size = 50
63 | security = share
64 | passdb backend = tdbsam
65 | load printers = no
66 | cups options = raw
67 |
68 | [root]
69 |
70 | comment = Root Directory
71 | path = /
72 | read only = no
73 | writable = yes
74 | printable = no
75 | public = yes
76 | force user = root
77 |
78 | - Add a root user to SAMBA by executing
79 |
80 | smbpasswd -a root
81 |
82 | - Set SAMBA to load on startup by executing:
83 |
84 | chkconfig smb on && chkconfig nmb on
85 |
86 | - Enable SAMBA by executing:
87 |
88 | service smb start && service nmb start
89 |
90 | - Browse to `\\${hostname}`
91 |
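- To verify the share from a Unix client (a sketch; assumes `smbclient` is installed):

    smbclient -L //${hostname} -U root
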
92 | - Setting up IP connectivity on a fresh RedHat install
93 | - Edit `/etc/sysconfig/network-scripts/ifcfg-eth0` and make sure at least the following lines are present (adjust values as appropriate):
94 |
95 | DEVICE=eth0
96 | BOOTPROTO=none
97 | DNS1=172.19.1.83
98 | DNS2=172.18.10.55
99 | DOMAIN=datcon.co.uk
100 | GATEWAY=10.224.0.1
101 | IPADDR=10.224.104.2
102 | NETMASK=255.255.0.0
103 | ONBOOT=yes
104 | DEFROUTE=yes
105 |
106 | - Assign the IP address to the Ethernet interface:
107 |
108 |     ip addr add 10.224.104.2/16 broadcast 10.224.255.255 dev eth0
109 |
110 | - Add a default IP route to the default gateway:
111 |
112 | route add default gw 10.224.0.1 eth0
113 |
114 | - Turn up the Ethernet interface:
115 |
116 | ifconfig eth0 up
117 |
118 | - Enable networking at startup:
119 |
120 | chkconfig network on
121 |
122 | - Install latest GCC on Mac OS X
123 |
124 | # Install homebrew.
125 | brew update
126 | brew tap homebrew/dupes
127 | brew install gcc --use-llvm --enable-all-languages --enable-profiled-build
128 |
129 | - To get GCC 4.7 working on Mac we have to force distutils to give up using `-Qunused-arguments`. It's painful, so we hack it real hard:
130 |
131 | import distutils.sysconfig
132 |         for key in distutils.sysconfig.get_config_vars():  # populates _config_vars on first call
133 | if key in ['CONFIG_ARGS', 'PY_CFLAGS', 'CFLAGS']:
134 | distutils.sysconfig._config_vars[key] = distutils.sysconfig._config_vars[key].replace("-Qunused-arguments ", "")
135 |
136 | - GPG symmetric encryption and compression
137 |
138 | gpg --symmetric --cipher-algo "AES256" --digest-algo "SHA512" --compress-algo "BZIP2" --passphrase "password" -o test.svg.gpg test.svg
139 |
140 | - GPG symmetric decryption and decompression
141 |
142 | gpg --passphrase "password" test.svg.gpg
143 |
144 | - OpenSSL symmetric encryption and compression
145 |
146 | bzip2 --stdout -- test.svg | /usr/local/Cellar/openssl/1.0.1e/bin/openssl enc -aes-256-ctr -salt -k "password" > test.svg.openssl
147 |
148 | - OpenSSL symmetric decryption and decompression
149 |
150 | /usr/local/Cellar/openssl/1.0.1e/bin/openssl enc -aes-256-ctr -d -salt -k "password" -in test.svg.openssl | bunzip2 > test.svg.2
151 |
152 | - With OpenSSL 1.0.1e, AES-CBC-HMAC-SHA1 segfaults and AES-GCM always fails; they are not intended for command-line usage.
153 |
154 | - OpenSSL encrypt and HMAC simultaneously (`pigz` is a parallel gzip implementation)
155 |
156 | cat test.svg | pigz --best -c | /usr/local/Cellar/openssl/1.0.1e/bin/openssl enc -aes-256-ctr -salt -k "password" | tee test.svg.2 | /usr/local/Cellar/openssl/1.0.1e/bin/openssl sha256 -hmac "password"
157 |
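- OpenSSL verify HMAC, then decrypt and decompress (counterpart to the above; a sketch: recompute the HMAC over the ciphertext and compare it by hand with the value printed at encryption time)

    /usr/local/Cellar/openssl/1.0.1e/bin/openssl sha256 -hmac "password" test.svg.2
    /usr/local/Cellar/openssl/1.0.1e/bin/openssl enc -aes-256-ctr -d -salt -k "password" -in test.svg.2 | pigz -d > test.svg.3
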
158 | - - -
159 |
160 | ### CentOS initial setup
161 |
162 | - Install with "Software development workstation" selected.
163 | - Set up Solarized theme for gnome-terminal
164 |
165 | git clone git://github.com/sigurdga/gnome-terminal-colors-solarized.git
166 | cd gnome-terminal-colors-solarized
167 |
168 | # make it light
169 | ./solarize
170 |
171 | # make it dark
172 | ./solarize
173 |
174 | # then restart terminal
175 |
176 | - Set up "Oh My ZSH" with my custom theme
177 |
178 | curl -L https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh | sh
179 |
180 |
181 | - Set up gvim
--------------------------------------------------------------------------------