├── .github └── workflows │ └── pythonapp.yml ├── .gitignore ├── DS-Python-Implementations ├── Basic │ ├── deque │ │ ├── Deque.py │ │ └── palchecker.py │ ├── node │ │ └── Node.py │ ├── queue │ │ ├── Queue.py │ │ └── hotPotato.py │ └── stack │ │ ├── Stack.py │ │ ├── divideBy2.py │ │ ├── divideByN.py │ │ ├── parChecker.py │ │ └── parsChecker.py ├── DP │ └── makeChange.py ├── Graph │ └── README.md ├── README.md ├── Recursion │ ├── sum.py │ ├── toStr.py │ ├── tree.py │ └── triangle.py ├── Search │ ├── binarySearch.py │ ├── hash.py │ ├── orderedSequentialSearch.py │ ├── search.py │ └── sequentialSearch.py ├── Sort │ ├── bubbleSort.py │ ├── insertSort.py │ ├── selectionSort.py │ └── shortBubbleSort.py └── Tree │ └── README.md ├── Data_Structure_with_Python ├── Basic │ ├── deque │ │ ├── Deque.py │ │ └── palchecker.py │ ├── node │ │ └── Node.py │ ├── queue │ │ ├── Queue.py │ │ └── hotPotato.py │ └── stack │ │ ├── Stack.py │ │ ├── divideBy2.py │ │ ├── divideByN.py │ │ ├── parChecker.py │ │ └── parsChecker.py ├── DP │ └── makeChange.py ├── Graph │ └── README.md ├── README.md ├── Recursion │ ├── sum.py │ ├── toStr.py │ ├── tree.py │ └── triangle.py ├── Search │ ├── binarySearch.py │ ├── hash.py │ ├── orderedSequentialSearch.py │ └── sequentialSearch.py ├── Sort │ ├── bubbleSort.py │ ├── insertSort.py │ ├── selectionSort.py │ └── shortBubbleSort.py └── Tree │ └── README.md ├── Deep-Learning-Specialization ├── Convolutional-Neural-Networks │ ├── CNN-Applications.py │ ├── CNN-Certificate.pdf │ ├── CNN-StepbyStep.py │ ├── __pycache__ │ │ └── cnn_utils.cpython-36.pyc │ ├── cnn_utils.py │ └── datasets │ │ ├── test_signs.h5 │ │ └── train_signs.h5 ├── Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization │ ├── Optimation │ │ ├── README.md │ │ ├── appendix.tex │ │ ├── arxiv.sty │ │ ├── image_normalization,py │ │ ├── optimization.tex │ │ ├── references.bib │ │ ├── run.sh │ │ ├── template.aux │ │ ├── template.bbl │ │ ├── template.blg │ │ ├── template.fdb_latexmk │ │ ├── template.fls │ │ ├── template.log │ │ ├── template.pdf │ │ └── template.tex │ └── README.md ├── NOTES │ └── 0318 │ │ ├── Course1.tex │ │ ├── Course2.tex │ │ ├── Course3.tex │ │ ├── Course4.tex │ │ ├── Course5.tex │ │ ├── DeepLearningSpecializatoin-Notes-0318-afternoon.pdf │ │ ├── README.md │ │ ├── appendix.tex │ │ ├── arxiv.sty │ │ ├── img │ │ ├── C1-Certificate.pdf │ │ ├── C2-Certificate.pdf │ │ ├── C3-Certificate.pdf │ │ ├── C4-Certificate.pdf │ │ ├── c1 │ │ │ ├── backprop_kiank.png │ │ │ ├── classification_kiank.png │ │ │ ├── final outline.png │ │ │ ├── grad_summary.png │ │ │ ├── logistic-regression.png │ │ │ └── nn-figure.png │ │ ├── c2 │ │ │ ├── RMSprop.png │ │ │ ├── batch_vs_mini_batch_cost.png │ │ │ ├── bias-variance.png │ │ │ ├── bn.png │ │ │ ├── early-stopping.png │ │ │ ├── ewa.png │ │ │ ├── gradient-checking.png │ │ │ └── human-level_performance.png │ │ ├── c4 │ │ │ ├── alexnet.png │ │ │ ├── anchor2.png │ │ │ ├── cgimage.png │ │ │ ├── cnn-examples.png │ │ │ ├── conv-block.png │ │ │ ├── fail2recognize.png │ │ │ ├── google-net.png │ │ │ ├── google-net2.png │ │ │ ├── identity-block.png │ │ │ ├── inception-naive.png │ │ │ ├── inception-reduced.png │ │ │ ├── inception_keras.png │ │ │ ├── lenet.png │ │ │ ├── nms.png │ │ │ ├── plain-vs-resnet.png │ │ │ ├── rcnn1.png │ │ │ ├── receptiveField.png │ │ │ ├── resblock.png │ │ │ ├── resnet-34.jpg │ │ │ ├── resnet.png │ │ │ ├── siamese.png │ │ │ ├── sliding-window.png │ │ │ ├── sliding-window2.png │ │ │ ├── sliding-window3.png │ │ │ ├── stagedG.png │ │ │ ├── style-cost.png 
│ │ │ ├── style-transfer.png │ │ │ ├── triplet-loss.png │ │ │ ├── vgg16.png │ │ │ ├── visualize-convnets.png │ │ │ ├── visualize-convnets2.png │ │ │ ├── yolo1.png │ │ │ ├── yolo2.png │ │ │ ├── yolo3.png │ │ │ ├── yolo4.png │ │ │ ├── yolo5.png │ │ │ └── yolo6.png │ │ └── c5 │ │ │ ├── diff-rnns.jpg │ │ │ ├── forward-rnn.png │ │ │ ├── language-model.png │ │ │ ├── name-enttity-task.png │ │ │ └── simple-rnn-notations.png │ │ ├── introduction.tex │ │ ├── references.bib │ │ ├── template.tex │ │ └── update.tex ├── Neural-Networks-and-Deep-Learning │ ├── Logistic_Regression_With_a_Neural_Network_mindset.py │ ├── NNDL-Certificate.pdf │ ├── README.md │ ├── __pycache__ │ │ └── lr_utils.cpython-36.pyc │ ├── datasets │ │ ├── test_catvnoncat.h5 │ │ └── train_catvnoncat.h5 │ ├── images │ │ ├── cat_in_iran.jpg │ │ └── gargouille.jpg │ └── lr_utils.py ├── README.md ├── Sequence-Models │ └── README.md └── Structuring-Machine-Learning-Projects │ └── README.md ├── Gaming-Under-Uncertainty ├── Game-10Point30 │ ├── 1player-dealer-exp.py │ ├── 1player-player-10P50-updated.py │ ├── 1player-player-10P50.py │ ├── README.md │ ├── cvpr.sty │ ├── cvpr_eso.sty │ ├── egbib.bib │ ├── eso-pic.sty │ ├── ieee.bst │ ├── main.aux │ ├── main.bbl │ ├── main.blg │ ├── main.fdb_latexmk │ ├── main.fls │ ├── main.log │ ├── main.pdf │ ├── main.tex │ ├── nplayers-exp.py │ ├── run.sh │ ├── two-five.py │ └── win-2players.py └── README.md ├── LeetCode ├── 0001.py ├── 0002.py ├── 0004.py ├── 0012.py ├── 0013.py ├── 0033.py ├── 0034.py ├── 0035.py ├── 0050.py ├── 0056.py ├── 0069.py ├── 0074.py ├── 0081.py ├── 0100-2.py ├── 0100.py ├── 0101.py ├── 0102.py ├── 0104.py ├── 0107.py ├── 0110-2.py ├── 0110.py ├── 0111-2.py ├── 0111.py ├── 0121-2.py ├── 0121.py ├── 0153.py ├── 0154.py ├── 0167.py ├── 0204.py ├── 0222-2.py ├── 0222.py ├── 0240-2.py ├── 0240.py ├── 0270.py ├── 0278.py ├── 0367.py ├── 0374.py ├── 0410.py ├── 0633.py ├── 0637.py ├── 0653.py ├── 0655.py ├── 0701.py ├── 0704.py ├── 0812.py ├── 0976.py ├── 0993-2.py ├── 0993.py └── 1103.py ├── MathematicalFundations ├── Conv Optimization Review (Cont'd).pdf ├── Conv Optimization Review.pdf ├── Gaussian Process.pdf ├── Hidden Markov Network.pdf ├── Linear Algebra Review.pdf ├── Multivariate Gaussian (Cont'd).pdf ├── Multivariate Gaussian.pdf └── Prob Review.pdf ├── README.md └── requirements.txt /.github/workflows/pythonapp.yml: -------------------------------------------------------------------------------- 1 | name: Python application 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | - uses: actions/checkout@v1 12 | - name: Set up Python 3.7 13 | uses: actions/setup-python@v1 14 | with: 15 | python-version: 3.7 16 | - name: Install dependencies 17 | run: | 18 | python -m pip install --upgrade pip 19 | pip install -r requirements.txt 20 | - name: Lint with flake8 21 | run: | 22 | pip install flake8 23 | # stop the build if there are Python syntax errors or undefined names 24 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 25 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 26 | flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 27 | - name: Test with pytest 28 | run: | 29 | pip install pytest 30 | pytest 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Object files 2 | *.o 3 | *.ko 4 | *.obj 5 | *.elf 6 | 7 | # Precompiled Headers 8 | *.gch 9 | *.pch 10 | 11 | # Libraries 12 | *.lib 13 | *.a 14 | *.la 15 | *.lo 16 | 17 | # Shared objects (inc. Windows DLLs) 18 | *.dll 19 | *.so 20 | *.so.* 21 | *.dylib 22 | 23 | # Executables 24 | *.exe 25 | *.out 26 | *.app 27 | *.i*86 28 | *.x86_64 29 | *.hex 30 | 31 | # Debug files 32 | *.dSYM/ 33 | *.su 34 | -------------------------------------------------------------------------------- /DS-Python-Implementations/Basic/deque/Deque.py: -------------------------------------------------------------------------------- 1 | class Deque: 2 | def __init__(self): 3 | self.items = [] 4 | 5 | def isEmpty(self): 6 | return self.items == [] 7 | 8 | def addFront(self, item): 9 | self.items.append(item) 10 | 11 | def addRear(self, item): 12 | self.items.append(0, item) 13 | 14 | def removeFront(self, item): 15 | return self.items.pop() 16 | 17 | def removeRear(self, item): 18 | return self.items.pop(0) 19 | 20 | def size(self): 21 | return len(self.items) -------------------------------------------------------------------------------- /DS-Python-Implementations/Basic/deque/palchecker.py: -------------------------------------------------------------------------------- 1 | from pythonds.basic.deque import Deque 2 | 3 | def palchecker(aString): 4 | d = Deque() 5 | 6 | for ch in aString: 7 | d.addRear(ch) 8 | 9 | sideEqual = True 10 | # print(sideEqual, d.size()) 11 | 12 | while sideEqual and d.size() > 1: 13 | first = d.removeFront() 14 | last = d.removeRear() 15 | # print(first) 16 | if first != last: 17 | sideEqual = False 18 | 19 | return sideEqual 20 | 21 | print(palchecker('abcdcba')) 22 | print(palchecker("adbsdfdsf")) -------------------------------------------------------------------------------- /DS-Python-Implementations/Basic/node/Node.py: -------------------------------------------------------------------------------- 1 | class Node: 2 | def __init__(self.initdata): 3 | self.data = initdata 4 | self.next = None 5 | 6 | def getData(self): 7 | return self.data 8 | 9 | def getNet(self): 10 | return self.next 11 | 12 | def setData(self.newdata): 13 | self.data = newdata 14 | 15 | def setNext(self, newnext): 16 | self.next = newnext 17 | 18 | # Continue from PG72 -------------------------------------------------------------------------------- /DS-Python-Implementations/Basic/queue/Queue.py: -------------------------------------------------------------------------------- 1 | class Queue: 2 | def __init__(self): 3 | self.items = [] 4 | 5 | def isEmpty(self): 6 | return self.items == [] 7 | 8 | def enqueue(self, item): 9 | self.items.insert(0, item) 10 | 11 | def dequeue(self): 12 | return self.items.pop() 13 | 14 | def size(self): 15 | return len(self.items) -------------------------------------------------------------------------------- /DS-Python-Implementations/Basic/queue/hotPotato.py: -------------------------------------------------------------------------------- 1 | # HotPotato Problem 2 | 3 | from pythonds.basic.queue import Queue 4 | 5 | def hotPotato(nameList, num): 6 | q = Queue() 7 | for name in nameList: 8 | q.enqueue(name) 9 | 10 | while q.size() > 1: 11 | for i in 
range(num): 12 | q.enqueue(q.dequeue()) 13 | 14 | q.dequeue() 15 | 16 | return q.dequeue() 17 | 18 | print(hotPotato(["Bill","David","Susan","Jane","Kent","Brad"],7)) -------------------------------------------------------------------------------- /DS-Python-Implementations/Basic/stack/Stack.py: -------------------------------------------------------------------------------- 1 | # LIFO Stack Class 2 | # Created by Kai Yi on 09/09/2019 3 | # Stack can also be loaded by pythonds 4 | 5 | class Stack(): 6 | def __init__(self): 7 | self.items = [] 8 | 9 | def isEmpty(self): 10 | return self.items == [] 11 | 12 | def push(self, item): 13 | self.items.append(item) 14 | 15 | def pop(self): 16 | return self.items.pop() 17 | 18 | def peek(self): 19 | return self.items[len(self.items)-1] 20 | 21 | def size(self): 22 | return len(self.items) -------------------------------------------------------------------------------- /DS-Python-Implementations/Basic/stack/divideBy2.py: -------------------------------------------------------------------------------- 1 | from pythonds.basic.stack import Stack 2 | 3 | def divideBy2(decimal): 4 | s = Stack() 5 | 6 | while decimal > 0: 7 | number = decimal % 2 8 | s.push(number) 9 | decimal = decimal // 2 10 | 11 | final = "" 12 | while not s.isEmpty(): 13 | final += str(s.pop()) 14 | 15 | return final 16 | 17 | print(divideBy2(15)) 18 | print(divideBy2(8)) 19 | -------------------------------------------------------------------------------- /DS-Python-Implementations/Basic/stack/divideByN.py: -------------------------------------------------------------------------------- 1 | # 10 to N (2-16) 2 | 3 | from pythonds.basic.stack import Stack 4 | 5 | def divideByN(decimal, base): 6 | 7 | digits = "0123456789ABCDEF" 8 | s = Stack() 9 | 10 | while decimal > 0: 11 | number = decimal % base 12 | s.push(number) 13 | decimal = decimal // base 14 | 15 | final = '' 16 | while not s.isEmpty(): 17 | final += digits[s.pop()] 18 | 19 | return final 20 | 21 | print(divideByN(25, 2)) 22 | print(divideByN(25, 16)) 23 | 24 | # Continue from PG45 of Python Data Structure 25 | -------------------------------------------------------------------------------- /DS-Python-Implementations/Basic/stack/parChecker.py: -------------------------------------------------------------------------------- 1 | # ParChecker, created by Kai Yi on 09/09/2019 2 | 3 | from pythonds.basic.stack import Stack 4 | 5 | def parChecker(symbolString): 6 | s = Stack() 7 | index = 0 8 | balanced = True 9 | while index < len(symbolString) and balanced: 10 | symbol = symbolString[index] 11 | if symbol == '(': 12 | s.push(symbol) 13 | else: 14 | if s.isEmpty(): 15 | balanced = False 16 | else: 17 | s.pop() 18 | index += 1 19 | 20 | if balanced and s.isEmpty(): 21 | return True 22 | else: 23 | return False 24 | 25 | print(parChecker('(((()))')) # False 26 | print(parChecker('((()))')) # True -------------------------------------------------------------------------------- /DS-Python-Implementations/Basic/stack/parsChecker.py: -------------------------------------------------------------------------------- 1 | # parsChecker created by Kai Yi on 09/09/2019 2 | 3 | from pythonds.basic.stack import Stack 4 | 5 | def parsChecker(symbolString): 6 | s = Stack() 7 | balanced = True 8 | index = 0 9 | while balanced and index < len(symbolString): 10 | symbol = symbolString[index] 11 | if symbol in '{[(': 12 | s.push(symbol) 13 | else: 14 | if s.isEmpty(): 15 | balanced = False 16 | else: 17 | top = s.pop() 18 | if not matches(top, 
symbol): 19 | balanced = False 20 | index += 1 21 | if balanced and s.isEmpty(): 22 | return True 23 | else: 24 | return False 25 | 26 | def matches(open, close): 27 | opens = '{[(' 28 | closes = '}])' 29 | return opens.index(open) == closes.index(close) 30 | 31 | print(parsChecker('{{{{]]}')) # False 32 | print(parsChecker('(){[]}')) # True -------------------------------------------------------------------------------- /DS-Python-Implementations/DP/makeChange.py: -------------------------------------------------------------------------------- 1 | def dpMakeChange(coinValueList, change, minCoins, coinsUsed): 2 | for cent in range(change+1): 3 | coinCount = cent 4 | newCoin = 1 5 | for j in [k for k in coinValueList if k <= cent]: 6 | if minCoins[cent - j] + 1 < coinCount: 7 | coinCount = minCoins[cent - j] + 1 8 | newCoin = j 9 | minCoins[cent] = coinCount 10 | coinsUsed[cent] = newCoin 11 | return minCoins[change] 12 | 13 | def printCoins(coinsUsed, change): 14 | coin = change 15 | while coin > 0: 16 | thisCoin = coinsUsed[coin] 17 | print(thisCoin) 18 | coin = coin - thisCoin 19 | 20 | def main(): 21 | amnt = 63 22 | clist = [1,5,10,21,25] 23 | coinsUsed = [0] * (amnt+1) 24 | coinCount = [0] * (amnt+1) 25 | 26 | print("Making change for {} requires".format(amnt)) 27 | print(dpMakeChange(clist, amnt, coinCount, coinsUsed), "coins") 28 | print("They are: ") 29 | printCoins(coinsUsed, amnt) 30 | print("The used list is as follows:") 31 | print(coinsUsed) 32 | 33 | main() -------------------------------------------------------------------------------- /DS-Python-Implementations/Graph/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/DS-Python-Implementations/Graph/README.md -------------------------------------------------------------------------------- /DS-Python-Implementations/README.md: -------------------------------------------------------------------------------- 1 | This is the review of data structure (concepts and usages) with python. Most of the information is based on [Problem Solving with Algorithms and Data Structures using Python](https://runestone.academy/runestone/books/published/pythonds/index.html) and the corresponding non-official [github](https://github.com/facert/python-data-structure-cn). 2 | 3 | For the master of graphs and trees needs more practice, I will append more LeetCode solution to help the understanding of them, for which I have not present anything in the corresponding repos so far. I may extend sth on my own. 4 | -------------------------------------------------------------------------------- /DS-Python-Implementations/Recursion/sum.py: -------------------------------------------------------------------------------- 1 | # For recursion, the most import part is to find recursive equation 2 | 3 | # Three laws of recursion 4 | # A recursive algorithm must have a base case. 5 | # A recursive algorithm must change its state and move toward the base case. 6 | # A recursive algorithm must call itself, recursively. 
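# Example trace of listSum, the function defined just below (the one-element list is the base case;
# each recursive call shrinks the list, moving toward that base case):
#   listSum([1, 2, 3]) -> 1 + listSum([2, 3]) -> 1 + (2 + listSum([3])) -> 1 + (2 + 3) = 6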
7 | 8 | def listSum(numList): 9 | if len(numList) == 1: 10 | return numList[0] 11 | else: 12 | return numList[0] + listSum(numList[1:]) 13 | 14 | print(listSum([1,2,3,4,5,6])) -------------------------------------------------------------------------------- /DS-Python-Implementations/Recursion/toStr.py: -------------------------------------------------------------------------------- 1 | def toStr(n, base): 2 | convertString = '0123456789ABCDEF' 3 | 4 | if n < base: 5 | return convertString[n] 6 | else: 7 | return toStr(n//base, base) + convertString[n%base] 8 | 9 | print(toStr(1453, 16)) -------------------------------------------------------------------------------- /DS-Python-Implementations/Recursion/tree.py: -------------------------------------------------------------------------------- 1 | import turtle 2 | 3 | def tree(branchLen,t): 4 | if branchLen > 5: 5 | t.forward(branchLen) 6 | t.right(20) 7 | tree(branchLen-15,t) 8 | t.left(40) 9 | tree(branchLen-15,t) 10 | t.right(20) 11 | t.backward(branchLen) 12 | 13 | def main(): 14 | t = turtle.Turtle() 15 | myWin = turtle.Screen() 16 | t.left(90) 17 | t.up() 18 | t.backward(100) 19 | t.down() 20 | t.color("green") 21 | tree(75,t) 22 | myWin.exitonclick() 23 | 24 | main() -------------------------------------------------------------------------------- /DS-Python-Implementations/Recursion/triangle.py: -------------------------------------------------------------------------------- 1 | import turtle 2 | 3 | def drawTriangle(points,color,myTurtle): 4 | myTurtle.fillcolor(color) 5 | myTurtle.up() 6 | myTurtle.goto(points[0][0],points[0][1]) 7 | myTurtle.down() 8 | myTurtle.begin_fill() 9 | myTurtle.goto(points[1][0],points[1][1]) 10 | myTurtle.goto(points[2][0],points[2][1]) 11 | myTurtle.goto(points[0][0],points[0][1]) 12 | myTurtle.end_fill() 13 | 14 | def getMid(p1,p2): 15 | return ( (p1[0]+p2[0]) / 2, (p1[1] + p2[1]) / 2) 16 | 17 | def sierpinski(points,degree,myTurtle): 18 | colormap = ['blue','red','green','white','yellow','violet','orange'] 19 | drawTriangle(points,colormap[degree],myTurtle) 20 | 21 | if degree > 0: 22 | sierpinski([points[0], getMid(points[0], points[1]), getMid(points[0], points[2])], degree-1, myTurtle) 23 | sierpinski([points[1], getMid(points[0], points[1]), getMid(points[1], points[2])], degree-1, myTurtle) 24 | sierpinski([points[2], getMid(points[2], points[1]), getMid(points[0], points[2])], degree-1, myTurtle) 25 | 26 | def main(): 27 | myTurtle = turtle.Turtle() 28 | myWin = turtle.Screen() 29 | myPoints = [[-100,-50],[0,100],[100,-50]] 30 | sierpinski(myPoints,3,myTurtle) 31 | myWin.exitonclick() 32 | main() 33 | 34 | # Continue from PG100, Turtle Maze -------------------------------------------------------------------------------- /DS-Python-Implementations/Search/binarySearch.py: -------------------------------------------------------------------------------- 1 | def binarySearch(aList, item): 2 | left = 0 3 | right = len(aList) - 1 4 | found = False 5 | 6 | while left <= right and not found: 7 | mid = (left + right) / 2 8 | if aList[mid] == item: 9 | found = True 10 | elif aList[mid] < item: 11 | left = mid + 1 12 | else: 13 | right = mid - 1 14 | 15 | return found 16 | 17 | testlist = [0, 1, 2, 8, 13, 17, 19, 32, 42] 18 | print(binarySearch(testlist, 3)) 19 | print(binarySearch(testlist, 13)) -------------------------------------------------------------------------------- /DS-Python-Implementations/Search/hash.py: -------------------------------------------------------------------------------- 1 | # O(1) 
time complexity -------------------------------------------------------------------------------- /DS-Python-Implementations/Search/orderedSequentialSearch.py: -------------------------------------------------------------------------------- 1 | def orderedSequentialSearch(aList, item): 2 | pos = 0 3 | found = False 4 | stop = False 5 | while pos < len(aList) and not found and not stop: 6 | if aList[pos] == item: 7 | found = True 8 | else: 9 | if aList[pos] > item: 10 | stop = True 11 | else: 12 | pos = pos + 1 13 | return found 14 | 15 | testlist = [0, 1, 2, 8, 13, 17, 19, 32, 42,] 16 | print(orderedSequentialSearch(testlist, 3)) 17 | print(orderedSequentialSearch(testlist, 13)) -------------------------------------------------------------------------------- /DS-Python-Implementations/Search/search.py: -------------------------------------------------------------------------------- 1 | # Sequential Search: Time O(n), Space O(1) 2 | def sequentialSearch(aList, target): 3 | for i in range(len(aList)): 4 | if aList[i] == target: 5 | return i 6 | return False 7 | 8 | # early break sequential search: Time O(n), Space O(1) 9 | def earlyBreakSequentialSearch(aList, target): 10 | for i in range(len(aList)): 11 | if aList[i] == target: 12 | return i 13 | elif aList[i] > target: 14 | return False 15 | return False 16 | 17 | # binary search: Time O(logn), Space O(1) 18 | def binarySearch(aList, target): 19 | left = 0 20 | right = len(aList) - 1 21 | while left <= right: 22 | mid = (left + right) >> 1 23 | if aList[mid] == target: 24 | return mid 25 | elif aList[mid] > target: 26 | right = mid - 1 27 | else: 28 | left = mid + 1 29 | return False 30 | 31 | testList = [1,2,3,4,5,6] 32 | print(sequentialSearch(testList, 3)) 33 | print(sequentialSearch(testList, 7)) 34 | print(earlyBreakSequentialSearch(testList, 3)) 35 | print(earlyBreakSequentialSearch(testList, 7)) 36 | print(binarySearch(testList, 3)) 37 | print(binarySearch(testList, 7)) -------------------------------------------------------------------------------- /DS-Python-Implementations/Search/sequentialSearch.py: -------------------------------------------------------------------------------- 1 | def sequentialSearch(aList, item): 2 | pos = 0 3 | found = False 4 | 5 | while pos < len(aList) and not found: 6 | if aList[pos] == item: 7 | found = True 8 | else: 9 | pos += 1 10 | 11 | return found 12 | 13 | testList = [1,2,3,4,5,6] 14 | print(sequentialSearch(testList, 3)) 15 | print(sequentialSearch(testList, 7)) -------------------------------------------------------------------------------- /DS-Python-Implementations/Sort/bubbleSort.py: -------------------------------------------------------------------------------- 1 | # bubbleSort created by Kai Yi on 09/09/2019 2 | 3 | def bubbleSort(aList): 4 | for i in range(len(aList)-1, 0, -1): 5 | for j in range(i): 6 | if aList[j+1] < aList[j]: 7 | aList[j+1], aList[j] = aList[j], aList[j+1] 8 | 9 | alist = [54,26,93,17,77,31,44,55,20] 10 | bubbleSort(alist) 11 | print(alist) -------------------------------------------------------------------------------- /DS-Python-Implementations/Sort/insertSort.py: -------------------------------------------------------------------------------- 1 | def insertionSort(aList): 2 | for i in range(1, len(aList)): 3 | value = aList[i] 4 | pointer = i 5 | 6 | while pointer > 0 and aList[pointer-1]>value: 7 | aList[pointer] = aList[pointer-1] 8 | pointer = pointer - 1 9 | 10 | aList[pointer] = value 11 | 12 | alist = [54,26,93,17,77,31,44,55,20] 13 | insertionSort(alist) 14 
| print(alist) 15 | 16 | # Continue from PG133 -------------------------------------------------------------------------------- /DS-Python-Implementations/Sort/selectionSort.py: -------------------------------------------------------------------------------- 1 | def selectionSort(aList): 2 | for i in range(len(aList)-1, 0, -1): 3 | mmax = 0 4 | for j in range(1, i+1): 5 | if aList[mmax] < aList[j]: 6 | mmax = j 7 | aList[i], aList[mmax] = aList[mmax], aList[i] 8 | 9 | alist = [54,26,93,17,77,31,44,55,20] 10 | selectionSort(alist) 11 | print(alist) -------------------------------------------------------------------------------- /DS-Python-Implementations/Sort/shortBubbleSort.py: -------------------------------------------------------------------------------- 1 | # if sorted, then return. 2 | 3 | def shortBubbleSort(aList): 4 | exchange = True 5 | length = len(aList) - 1 6 | while length > 0 and exchange: 7 | exchange = False 8 | for i in range(length): 9 | if aList[i+1] < aList[i]: 10 | exchange = True 11 | aList[i], aList[i+1] = aList[i+1], aList[i] 12 | length = length - 1 13 | 14 | alist = [54,26,93,17,77,31,44,55,20] 15 | shortBubbleSort(alist) 16 | print(alist) -------------------------------------------------------------------------------- /DS-Python-Implementations/Tree/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/DS-Python-Implementations/Tree/README.md -------------------------------------------------------------------------------- /Data_Structure_with_Python/Basic/deque/Deque.py: -------------------------------------------------------------------------------- 1 | class Deque: 2 | def __init__(self): 3 | self.items = [] 4 | 5 | def isEmpty(self): 6 | return self.items == [] 7 | 8 | def addFront(self, item): 9 | self.items.append(item) 10 | 11 | def addRear(self, item): 12 | self.items.append(0, item) 13 | 14 | def removeFront(self, item): 15 | return self.items.pop() 16 | 17 | def removeRear(self, item): 18 | return self.items.pop(0) 19 | 20 | def size(self): 21 | return len(self.items) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Basic/deque/palchecker.py: -------------------------------------------------------------------------------- 1 | from pythonds.basic.deque import Deque 2 | 3 | def palchecker(aString): 4 | d = Deque() 5 | 6 | for ch in aString: 7 | d.addRear(ch) 8 | 9 | sideEqual = True 10 | # print(sideEqual, d.size()) 11 | 12 | while sideEqual and d.size() > 1: 13 | first = d.removeFront() 14 | last = d.removeRear() 15 | # print(first) 16 | if first != last: 17 | sideEqual = False 18 | 19 | return sideEqual 20 | 21 | print(palchecker('abcdcba')) 22 | print(palchecker("adbsdfdsf")) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Basic/node/Node.py: -------------------------------------------------------------------------------- 1 | class Node: 2 | def __init__(self.initdata): 3 | self.data = initdata 4 | self.next = None 5 | 6 | def getData(self): 7 | return self.data 8 | 9 | def getNet(self): 10 | return self.next 11 | 12 | def setData(self.newdata): 13 | self.data = newdata 14 | 15 | def setNext(self, newnext): 16 | self.next = newnext 17 | 18 | # Continue from PG72 -------------------------------------------------------------------------------- /Data_Structure_with_Python/Basic/queue/Queue.py: 
-------------------------------------------------------------------------------- 1 | class Queue: 2 | def __init__(self): 3 | self.items = [] 4 | 5 | def isEmpty(self): 6 | return self.items == [] 7 | 8 | def enqueue(self, item): 9 | self.items.insert(0, item) 10 | 11 | def dequeue(self): 12 | return self.items.pop() 13 | 14 | def size(self): 15 | return len(self.items) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Basic/queue/hotPotato.py: -------------------------------------------------------------------------------- 1 | # HotPotato Problem 2 | 3 | from pythonds.basic.queue import Queue 4 | 5 | def hotPotato(nameList, num): 6 | q = Queue() 7 | for name in nameList: 8 | q.enqueue(name) 9 | 10 | while q.size() > 1: 11 | for i in range(num): 12 | q.enqueue(q.dequeue()) 13 | 14 | q.dequeue() 15 | 16 | return q.dequeue() 17 | 18 | print(hotPotato(["Bill","David","Susan","Jane","Kent","Brad"],7)) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Basic/stack/Stack.py: -------------------------------------------------------------------------------- 1 | # LIFO Stack Class 2 | # Created by Kai Yi on 09/09/2019 3 | # Stack can also be loaded by pythonds 4 | 5 | class Stack(): 6 | def __init__(self): 7 | self.items = [] 8 | 9 | def isEmpty(self): 10 | return self.items == [] 11 | 12 | def push(self, item): 13 | self.items.append(item) 14 | 15 | def pop(self): 16 | return self.items.pop() 17 | 18 | def peek(self): 19 | return self.items[len(self.items)-1] 20 | 21 | def size(self): 22 | return len(self.items) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Basic/stack/divideBy2.py: -------------------------------------------------------------------------------- 1 | from pythonds.basic.stack import Stack 2 | 3 | def divideBy2(decimal): 4 | s = Stack() 5 | 6 | while decimal > 0: 7 | number = decimal % 2 8 | s.push(number) 9 | decimal = decimal // 2 10 | 11 | final = "" 12 | while not s.isEmpty(): 13 | final += str(s.pop()) 14 | 15 | return final 16 | 17 | print(divideBy2(15)) 18 | print(divideBy2(8)) 19 | -------------------------------------------------------------------------------- /Data_Structure_with_Python/Basic/stack/divideByN.py: -------------------------------------------------------------------------------- 1 | # 10 to N (2-16) 2 | 3 | from pythonds.basic.stack import Stack 4 | 5 | def divideByN(decimal, base): 6 | 7 | digits = "0123456789ABCDEF" 8 | s = Stack() 9 | 10 | while decimal > 0: 11 | number = decimal % base 12 | s.push(number) 13 | decimal = decimal // base 14 | 15 | final = '' 16 | while not s.isEmpty(): 17 | final += digits[s.pop()] 18 | 19 | return final 20 | 21 | print(divideByN(25, 2)) 22 | print(divideByN(25, 16)) 23 | 24 | # Continue from PG45 of Python Data Structure 25 | -------------------------------------------------------------------------------- /Data_Structure_with_Python/Basic/stack/parChecker.py: -------------------------------------------------------------------------------- 1 | # ParChecker, created by Kai Yi on 09/09/2019 2 | 3 | from pythonds.basic.stack import Stack 4 | 5 | def parChecker(symbolString): 6 | s = Stack() 7 | index = 0 8 | balanced = True 9 | while index < len(symbolString) and balanced: 10 | symbol = symbolString[index] 11 | if symbol == '(': 12 | s.push(symbol) 13 | else: 14 | if s.isEmpty(): 15 | balanced = False 16 | else: 17 | s.pop() 18 | index += 1 19 | 20 | 
if balanced and s.isEmpty(): 21 | return True 22 | else: 23 | return False 24 | 25 | print(parChecker('(((()))')) # False 26 | print(parChecker('((()))')) # True -------------------------------------------------------------------------------- /Data_Structure_with_Python/Basic/stack/parsChecker.py: -------------------------------------------------------------------------------- 1 | # parsChecker created by Kai Yi on 09/09/2019 2 | 3 | from pythonds.basic.stack import Stack 4 | 5 | def parsChecker(symbolString): 6 | s = Stack() 7 | balanced = True 8 | index = 0 9 | while balanced and index < len(symbolString): 10 | symbol = symbolString[index] 11 | if symbol in '{[(': 12 | s.push(symbol) 13 | else: 14 | if s.isEmpty(): 15 | balanced = False 16 | else: 17 | top = s.pop() 18 | if not matches(top, symbol): 19 | balanced = False 20 | index += 1 21 | if balanced and s.isEmpty(): 22 | return True 23 | else: 24 | return False 25 | 26 | def matches(open, close): 27 | opens = '{[(' 28 | closes = '}])' 29 | return opens.index(open) == closes.index(close) 30 | 31 | print(parsChecker('{{{{]]}')) # False 32 | print(parsChecker('(){[]}')) # True -------------------------------------------------------------------------------- /Data_Structure_with_Python/DP/makeChange.py: -------------------------------------------------------------------------------- 1 | def dpMakeChange(coinValueList, change, minCoins, coinsUsed): 2 | for cent in range(change+1): 3 | coinCount = cent 4 | newCoin = 1 5 | for j in [k for k in coinValueList if k <= cent]: 6 | if minCoins[cent - j] + 1 < coinCount: 7 | coinCount = minCoins[cent - j] + 1 8 | newCoin = j 9 | minCoins[cent] = coinCount 10 | coinsUsed[cent] = newCoin 11 | return minCoins[change] 12 | 13 | def printCoins(coinsUsed, change): 14 | coin = change 15 | while coin > 0: 16 | thisCoin = coinsUsed[coin] 17 | print(thisCoin) 18 | coin = coin - thisCoin 19 | 20 | def main(): 21 | amnt = 63 22 | clist = [1,5,10,21,25] 23 | coinsUsed = [0] * (amnt+1) 24 | coinCount = [0] * (amnt+1) 25 | 26 | print("Making change for {} requires".format(amnt)) 27 | print(dpMakeChange(clist, amnt, coinCount, coinsUsed), "coins") 28 | print("They are: ") 29 | printCoins(coinsUsed, amnt) 30 | print("The used list is as follows:") 31 | print(coinsUsed) 32 | 33 | main() -------------------------------------------------------------------------------- /Data_Structure_with_Python/Graph/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Data_Structure_with_Python/Graph/README.md -------------------------------------------------------------------------------- /Data_Structure_with_Python/README.md: -------------------------------------------------------------------------------- 1 | This is the review of data structure (concepts and usages) with python. Most of the information is based on [Problem Solving with Algorithms and Data Structures using Python](https://runestone.academy/runestone/books/published/pythonds/index.html) and the corresponding non-official [github](https://github.com/facert/python-data-structure-cn). 2 | 3 | For the master of graphs and trees needs more practice, I will append more LeetCode solution to help the understanding of them, for which I have not present anything in the corresponding repos so far. I may extend sth on my own. 
4 | -------------------------------------------------------------------------------- /Data_Structure_with_Python/Recursion/sum.py: -------------------------------------------------------------------------------- 1 | # For recursion, the most import part is to find recursive equation 2 | 3 | # Three laws of recursion 4 | # A recursive algorithm must have a base case. 5 | # A recursive algorithm must change its state and move toward the base case. 6 | # A recursive algorithm must call itself, recursively. 7 | 8 | def listSum(numList): 9 | if len(numList) == 1: 10 | return numList[0] 11 | else: 12 | return numList[0] + listSum(numList[1:]) 13 | 14 | print(listSum([1,2,3,4,5,6])) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Recursion/toStr.py: -------------------------------------------------------------------------------- 1 | def toStr(n, base): 2 | convertString = '0123456789ABCDEF' 3 | 4 | if n < base: 5 | return convertString[n] 6 | else: 7 | return toStr(n//base, base) + convertString[n%base] 8 | 9 | print(toStr(1453, 16)) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Recursion/tree.py: -------------------------------------------------------------------------------- 1 | import turtle 2 | 3 | def tree(branchLen,t): 4 | if branchLen > 5: 5 | t.forward(branchLen) 6 | t.right(20) 7 | tree(branchLen-15,t) 8 | t.left(40) 9 | tree(branchLen-15,t) 10 | t.right(20) 11 | t.backward(branchLen) 12 | 13 | def main(): 14 | t = turtle.Turtle() 15 | myWin = turtle.Screen() 16 | t.left(90) 17 | t.up() 18 | t.backward(100) 19 | t.down() 20 | t.color("green") 21 | tree(75,t) 22 | myWin.exitonclick() 23 | 24 | main() -------------------------------------------------------------------------------- /Data_Structure_with_Python/Recursion/triangle.py: -------------------------------------------------------------------------------- 1 | import turtle 2 | 3 | def drawTriangle(points,color,myTurtle): 4 | myTurtle.fillcolor(color) 5 | myTurtle.up() 6 | myTurtle.goto(points[0][0],points[0][1]) 7 | myTurtle.down() 8 | myTurtle.begin_fill() 9 | myTurtle.goto(points[1][0],points[1][1]) 10 | myTurtle.goto(points[2][0],points[2][1]) 11 | myTurtle.goto(points[0][0],points[0][1]) 12 | myTurtle.end_fill() 13 | 14 | def getMid(p1,p2): 15 | return ( (p1[0]+p2[0]) / 2, (p1[1] + p2[1]) / 2) 16 | 17 | def sierpinski(points,degree,myTurtle): 18 | colormap = ['blue','red','green','white','yellow','violet','orange'] 19 | drawTriangle(points,colormap[degree],myTurtle) 20 | 21 | if degree > 0: 22 | sierpinski([points[0], getMid(points[0], points[1]), getMid(points[0], points[2])], degree-1, myTurtle) 23 | sierpinski([points[1], getMid(points[0], points[1]), getMid(points[1], points[2])], degree-1, myTurtle) 24 | sierpinski([points[2], getMid(points[2], points[1]), getMid(points[0], points[2])], degree-1, myTurtle) 25 | 26 | def main(): 27 | myTurtle = turtle.Turtle() 28 | myWin = turtle.Screen() 29 | myPoints = [[-100,-50],[0,100],[100,-50]] 30 | sierpinski(myPoints,3,myTurtle) 31 | myWin.exitonclick() 32 | main() 33 | 34 | # Continue from PG100, Turtle Maze -------------------------------------------------------------------------------- /Data_Structure_with_Python/Search/binarySearch.py: -------------------------------------------------------------------------------- 1 | def binarySearch(aList, item): 2 | left = 0 3 | right = len(aList) - 1 4 | found = False 5 | 6 | while left <= right and not found: 7 | mid = 
(left + right) / 2 8 | if aList[mid] == item: 9 | found = True 10 | elif aList[mid] < item: 11 | left = mid + 1 12 | else: 13 | right = mid - 1 14 | 15 | return found 16 | 17 | testlist = [0, 1, 2, 8, 13, 17, 19, 32, 42] 18 | print(binarySearch(testlist, 3)) 19 | print(binarySearch(testlist, 13)) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Search/hash.py: -------------------------------------------------------------------------------- 1 | # O(1) time complexity -------------------------------------------------------------------------------- /Data_Structure_with_Python/Search/orderedSequentialSearch.py: -------------------------------------------------------------------------------- 1 | def orderedSequentialSearch(aList, item): 2 | pos = 0 3 | found = False 4 | stop = False 5 | while pos < len(aList) and not found and not stop: 6 | if aList[pos] == item: 7 | found = True 8 | else: 9 | if aList[pos] > item: 10 | stop = True 11 | else: 12 | pos = pos + 1 13 | return found 14 | 15 | testlist = [0, 1, 2, 8, 13, 17, 19, 32, 42,] 16 | print(orderedSequentialSearch(testlist, 3)) 17 | print(orderedSequentialSearch(testlist, 13)) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Search/sequentialSearch.py: -------------------------------------------------------------------------------- 1 | def sequentialSearch(aList, item): 2 | pos = 0 3 | found = False 4 | 5 | while pos < len(aList) and not found: 6 | if aList[pos] == item: 7 | found = True 8 | else: 9 | pos += 1 10 | 11 | return found 12 | 13 | testList = [1,2,3,4,5,6] 14 | print(sequentialSearch(testList, 3)) 15 | print(sequentialSearch(testList, 7)) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Sort/bubbleSort.py: -------------------------------------------------------------------------------- 1 | # bubbleSort created by Kai Yi on 09/09/2019 2 | 3 | def bubbleSort(aList): 4 | for i in range(len(aList)-1, 0, -1): 5 | for j in range(i): 6 | if aList[j+1] < aList[j]: 7 | aList[j+1], aList[j] = aList[j], aList[j+1] 8 | 9 | alist = [54,26,93,17,77,31,44,55,20] 10 | bubbleSort(alist) 11 | print(alist) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Sort/insertSort.py: -------------------------------------------------------------------------------- 1 | def insertionSort(aList): 2 | for i in range(1, len(aList)): 3 | value = aList[i] 4 | pointer = i 5 | 6 | while pointer > 0 and aList[pointer-1]>value: 7 | aList[pointer] = aList[pointer-1] 8 | pointer = pointer - 1 9 | 10 | aList[pointer] = value 11 | 12 | alist = [54,26,93,17,77,31,44,55,20] 13 | insertionSort(alist) 14 | print(alist) 15 | 16 | # Continue from PG133 -------------------------------------------------------------------------------- /Data_Structure_with_Python/Sort/selectionSort.py: -------------------------------------------------------------------------------- 1 | def selectionSort(aList): 2 | for i in range(len(aList)-1, 0, -1): 3 | mmax = 0 4 | for j in range(1, i+1): 5 | if aList[mmax] < aList[j]: 6 | mmax = j 7 | aList[i], aList[mmax] = aList[mmax], aList[i] 8 | 9 | alist = [54,26,93,17,77,31,44,55,20] 10 | selectionSort(alist) 11 | print(alist) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Sort/shortBubbleSort.py: 
-------------------------------------------------------------------------------- 1 | # if sorted, then return. 2 | 3 | def shortBubbleSort(aList): 4 | exchange = True 5 | length = len(aList) - 1 6 | while length > 0 and exchange: 7 | exchange = False 8 | for i in range(length): 9 | if aList[i+1] < aList[i]: 10 | exchange = True 11 | aList[i], aList[i+1] = aList[i+1], aList[i] 12 | length = length - 1 13 | 14 | alist = [54,26,93,17,77,31,44,55,20] 15 | shortBubbleSort(alist) 16 | print(alist) -------------------------------------------------------------------------------- /Data_Structure_with_Python/Tree/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Data_Structure_with_Python/Tree/README.md -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Convolutional-Neural-Networks/CNN-Applications.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import h5py 4 | import matplotlib.pyplot as plt 5 | from PIL import Image 6 | from scipy import ndimage 7 | import tensorflow as tf 8 | from tensorflow.python.framework import ops 9 | from cnn_utils import * 10 | 11 | np.random.seed(1) 12 | 13 | # Loading the data (signs) 14 | X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset() 15 | 16 | # Example of a picture 17 | index = 5 18 | plt.imshow(X_train_orig[index]) 19 | # print(Y_train_orig.shape, np.squeeze(Y_train_orig).shape) 20 | print('y = ' + str(np.squeeze(Y_train_orig)[index])) 21 | # print('y = ' + str(np.squeeze(Y_train_orig[:,index]))) 22 | plt.show() 23 | 24 | X_train = X_train_orig / 255. 25 | X_test = X_test_orig / 255. 26 | Y_train = convert_to_one_hot(Y_train_orig, 6).T 27 | Y_test = convert_to_one_hot(Y_test_orig, 6).T 28 | print ("number of training examples = " + str(X_train.shape[0])) 29 | print ("number of test examples = " + str(X_test.shape[0])) 30 | print ("X_train shape: " + str(X_train.shape)) 31 | print ("Y_train shape: " + str(Y_train.shape)) 32 | print ("X_test shape: " + str(X_test.shape)) 33 | print ("Y_test shape: " + str(Y_test.shape)) 34 | conv_layers = {} 35 | 36 | def create_placeholders(n_H0, n_W0, n_C0, n_y): 37 | ''' 38 | Creates the placeholders for the tensorflow session. 39 | 40 | Arguments: 41 | n_H0 -- scalar, height of an input image 42 | n_W0 -- scalar, width of an input image 43 | n_C0 -- scalar, number of channels of the input 44 | n_y -- scalar, number of classes 45 | 46 | Returns: 47 | X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype 'float' 48 | Y -- placeholder for the input labels, of shape [None, n_y] and dtype 'float' 49 | ''' 50 | 51 | X = tf.placeholder(shape=[None, n_H0, n_W0, n_C0], dtype=tf.float32) 52 | Y = tf.placeholder(shape=[None, n_y], dtype=tf.float32) 53 | 54 | return X, Y 55 | 56 | X, Y = create_placeholders(64, 64, 3, 6) 57 | print('X = {}'.format(X)) 58 | print('Y = {}'.format(Y)) 59 | 60 | # Continue from Initialize parameters. 
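# A minimal sketch of the "Initialize parameters" step referenced above, assuming the notebook's
# TensorFlow 1.x API and its usual filter shapes W1: [4, 4, 3, 8] and W2: [2, 2, 8, 16]
# (the shapes are an assumption for illustration, not taken from this file).
def initialize_parameters():
    '''Initializes the two conv-filter weight tensors with Xavier initialization.'''
    tf.set_random_seed(1)  # keep runs reproducible, as elsewhere in this repo
    W1 = tf.get_variable('W1', [4, 4, 3, 8], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    W2 = tf.get_variable('W2', [2, 2, 8, 16], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    return {'W1': W1, 'W2': W2}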
61 | # https://walzqyuibvvdjprisbmedy.coursera-apps.org/notebooks/week1/Convolution_model_Application_v1a.ipynb#1.2---Initialize-parameters -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Convolutional-Neural-Networks/CNN-Certificate.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Convolutional-Neural-Networks/CNN-Certificate.pdf -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Convolutional-Neural-Networks/CNN-StepbyStep.py: -------------------------------------------------------------------------------- 1 | ''' Implement convolutional neural network with CONV and POOL layer with numpy. 2 | Motivated by https://walzqyuibvvdjprisbmedy.coursera-apps.org/notebooks/week1/Convolution_model_Step_by_Step_v2a.ipynb 3 | Created by Kai Yi on March 1st, 2020 4 | ''' 5 | 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | import h5py 9 | 10 | # matplotlib inline 11 | plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots 12 | plt.rcParams['image.interpolation'] = 'nearest' 13 | plt.rcParams['image.cmap'] = 'gray' 14 | 15 | np.random.seed(1) # keep all the random function calss consistent 16 | 17 | def zero_pad(X, pad): 18 | ''' 19 | Pad with zeros all images of the dataset X. 20 | The padding is applied to the height and width of an image other than the example number and channel. 21 | 22 | Argument: 23 | X -- python numpy array of shape (m, n_H, n_W, n_C) reprensenting a batch of m images 24 | pad -- integer, amount of padding around each image on vertical and horizontal dimensions. 25 | 26 | Returns: 27 | X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C) 28 | ''' 29 | 30 | X_pad = np.pad(X, ((0,0), (pad,pad), (pad,pad), (0,0)), mode='constant', constant_values=(0,0)) 31 | # (pad, pad) := (pad before, pad after), identical padding 32 | 33 | return X_pad 34 | 35 | # test for zero_pad function 36 | np.random.seed(1) 37 | x = np.random.randn(4, 3, 3, 2) 38 | x_pad = zero_pad(x, 2) 39 | print('x.shape = \n', x.shape) 40 | print('x_pad.shape = \n', x_pad.shape) 41 | print('x[1,1] = \n', x[1,1]) 42 | print('x[1,1] = \n', x_pad[1,1]) 43 | 44 | fig, axarr = plt.subplots(1, 2) 45 | axarr[0].set_title('x') 46 | axarr[0].imshow(x[0,:,:,0], cmap='gray') 47 | axarr[1].set_title('x_pad') 48 | axarr[1].imshow(x_pad[0,:,:,0], cmap='gray') 49 | plt.show() 50 | 51 | def conv_single_step(a_slice_prev, W, b): 52 | ''' 53 | Apply one filter defined by parameters W on a single sclie (a_slice_prev) of the output activation of the previous layer. 54 | 55 | Argument: 56 | a_slice_prev -- slice of input data of shape (f,f,n_C_prev) 57 | W -- Weight parameters contained in a window - matrix of shape (f,f,n_C_prev) 58 | b -- Bias parameters contained in a window - matrix of shape (1,1,1) 59 | 60 | Returns: 61 | Z -- a scalar value, the result of convolving the sliding window (W,b) on a slice x of the input data 62 | ''' 63 | 64 | # Element-wise product between a_slice_prev and W. 65 | s = np.multiply(a_slice_prev, W) 66 | # Sum over all entries of the volume s. 67 | Z = np.sum(s) 68 | # Add bias b to Z. Cast b to a float() so than Z results in a scalar value. 
69 | Z = Z + float(b) 70 | 71 | return Z 72 | 73 | # test for conv_single_step 74 | np.random.seed(1) 75 | a_slice_prev = np.random.randn(4,4,3) 76 | W = np.random.randn(4,4,3) 77 | b = np.random.randn(1,1,1) 78 | 79 | Z = conv_single_step(a_slice_prev, W, b) 80 | print('Z = ', Z) 81 | 82 | def conv_forward(A_prev, W, b, hparameters): 83 | ''' 84 | Implements the forward propagation for a convolution function. 85 | 86 | Arguments: 87 | A_prev -- Output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev) 88 | W -- Weights, numpy array of shape (f, f, n_C_prev, n_C) 89 | b -- Biases, numpy array of shape (1, 1, 1, n_C) 90 | hparameters -- python dictionary containing 'stride' and 'pad' 91 | 92 | Returns: 93 | Z -- conv output, numpy array of shape (m, n_H, n_W, n_C) 94 | cache -- cache of values needed for the conv_backward() function 95 | ''' 96 | 97 | # Retrieve dimensions from A_prev's shape 98 | (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape 99 | # Retrive dimensions from W's shape 100 | (f, f, n_C_prev, n_C) = W.shape 101 | # Retrive information from ''hparameters'' 102 | stride, pad = hparameters['stride'], hparameters['pad'] 103 | 104 | # Compute the dimensions of the CONV output volume 105 | n_H = int((n_H_prev - f + 2*pad) / stride + 1) 106 | n_W = int((n_W_prev - f + 2*pad) / stride + 1) 107 | # Initialize the output volume Z with zeros 108 | Z = np.zeros([m, n_H, n_W, n_C]) 109 | # Create A_prev pad by padding A_prev 110 | A_prev_pad = zero_pad(A_prev, pad) 111 | 112 | for i in range(m): 113 | a_prev_pad = A_prev_pad[i] 114 | for h in range(n_H): 115 | vert_start = h * stride 116 | vert_end = vert_start + f 117 | for w in range(n_W): 118 | horiz_start = w * stride 119 | horiz_end = horiz_start + f 120 | for c in range(n_C): 121 | a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] 122 | Z[i, h, w, c] = conv_single_step(a_slice_prev, W[:,:,:,c], b[:,:,:,c]) 123 | assert(Z.shape == (m, n_H, n_W, n_C)) 124 | 125 | cache = (A_prev, W, b, hparameters) 126 | 127 | return Z, cache 128 | 129 | # test for conv_forward 130 | np.random.seed(1) 131 | A_prev = np.random.randn(10,5,7,4) 132 | W = np.random.randn(3,3,4,8) 133 | b = np.random.randn(1,1,1,8) 134 | hparameters = {'pad' : 1, 135 | 'stride' : 2} 136 | Z, cache_conv = conv_forward(A_prev, W, b, hparameters) 137 | print("Z's mean =\n", np.mean(Z)) 138 | print("Z[3,2,1] =\n", Z[3,2,1]) 139 | print("cache_conv[0][1][2][3] =\n", cache_conv[0][1][2][3]) 140 | 141 | def pool_forward(A_prev, hparameters, mode='max'): 142 | ''' 143 | Implements the forward pass of the pooling layer 144 | 145 | Arguments: 146 | A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev) 147 | hparameters -- python dictionary containing 'f' and 'stride' 148 | mode -- the pooling mode you would like to use, 'max' or 'average' 149 | 150 | Returns: 151 | A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C) 152 | cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters 153 | ''' 154 | 155 | (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape 156 | f, stride = hparameters['f'], hparameters['stride'] 157 | 158 | n_H = int((n_H_prev - f) / stride + 1) 159 | n_W = int((n_W_prev - f) / stride + 1) 160 | n_C = n_C_prev 161 | 162 | A = np.zeros([m, n_H, n_W, n_C]) 163 | 164 | for i in range(m): 165 | for h in range(n_H): 166 | vert_start = h * stride 167 | vert_end = vert_start + f 168 | for w in range(n_W): 169 | horiz_start = w * stride 
170 | horiz_end = horiz_start + f 171 | for c in range(n_C): 172 | a_slice_prev = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] 173 | 174 | if mode == 'max': 175 | A[i, h, w, c] = np.max(a_slice_prev) 176 | elif mode == 'average': 177 | A[i, h, w, c] = np.mean(a_slice_prev) 178 | 179 | cache = (A_prev, hparameters) 180 | assert(A.shape == (m, n_H, n_W, n_C)) 181 | 182 | return A, cache 183 | 184 | # test for pool forward 185 | np.random.seed(1) 186 | A_prev = np.random.randn(2,5,5,3) 187 | hparameters = {'stride' : 1, 'f' : 3} 188 | 189 | A, cache = pool_forward(A_prev, hparameters) 190 | print("mode = max") 191 | print("A.shape = " + str(A.shape)) 192 | print("A =\n", A) 193 | print() 194 | A, cache = pool_forward(A_prev, hparameters, mode = "average") 195 | print("mode = average") 196 | print("A.shape = " + str(A.shape)) 197 | print("A =\n", A) -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Convolutional-Neural-Networks/__pycache__/cnn_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Convolutional-Neural-Networks/__pycache__/cnn_utils.cpython-36.pyc -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Convolutional-Neural-Networks/cnn_utils.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import h5py 4 | import matplotlib.pyplot as plt 5 | import tensorflow as tf 6 | from tensorflow.python.framework import ops 7 | 8 | def load_dataset(): 9 | train_dataset = h5py.File('datasets/train_signs.h5', "r") 10 | train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features 11 | train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels 12 | 13 | test_dataset = h5py.File('datasets/test_signs.h5', "r") 14 | test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features 15 | test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels 16 | 17 | classes = np.array(test_dataset["list_classes"][:]) # the list of classes 18 | 19 | train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0])) 20 | test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0])) 21 | 22 | return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes 23 | 24 | 25 | def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0): 26 | """ 27 | Creates a list of random minibatches from (X, Y) 28 | 29 | Arguments: 30 | X -- input data, of shape (input size, number of examples) (m, Hi, Wi, Ci) 31 | Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples) (m, n_y) 32 | mini_batch_size - size of the mini-batches, integer 33 | seed -- this is only for the purpose of grading, so that you're "random minibatches are the same as ours. 34 | 35 | Returns: 36 | mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y) 37 | """ 38 | 39 | m = X.shape[0] # number of training examples 40 | mini_batches = [] 41 | np.random.seed(seed) 42 | 43 | # Step 1: Shuffle (X, Y) 44 | permutation = list(np.random.permutation(m)) 45 | shuffled_X = X[permutation,:,:,:] 46 | shuffled_Y = Y[permutation,:] 47 | 48 | # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case. 
49 | num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning 50 | for k in range(0, num_complete_minibatches): 51 | mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:] 52 | mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:] 53 | mini_batch = (mini_batch_X, mini_batch_Y) 54 | mini_batches.append(mini_batch) 55 | 56 | # Handling the end case (last mini-batch < mini_batch_size) 57 | if m % mini_batch_size != 0: 58 | mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:] 59 | mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:] 60 | mini_batch = (mini_batch_X, mini_batch_Y) 61 | mini_batches.append(mini_batch) 62 | 63 | return mini_batches 64 | 65 | 66 | def convert_to_one_hot(Y, C): 67 | Y = np.eye(C)[Y.reshape(-1)].T 68 | return Y 69 | 70 | 71 | def forward_propagation_for_predict(X, parameters): 72 | """ 73 | Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX 74 | 75 | Arguments: 76 | X -- input dataset placeholder, of shape (input size, number of examples) 77 | parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3" 78 | the shapes are given in initialize_parameters 79 | 80 | Returns: 81 | Z3 -- the output of the last LINEAR unit 82 | """ 83 | 84 | # Retrieve the parameters from the dictionary "parameters" 85 | W1 = parameters['W1'] 86 | b1 = parameters['b1'] 87 | W2 = parameters['W2'] 88 | b2 = parameters['b2'] 89 | W3 = parameters['W3'] 90 | b3 = parameters['b3'] 91 | # Numpy Equivalents: 92 | Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1 93 | A1 = tf.nn.relu(Z1) # A1 = relu(Z1) 94 | Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2 95 | A2 = tf.nn.relu(Z2) # A2 = relu(Z2) 96 | Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3 97 | 98 | return Z3 99 | 100 | def predict(X, parameters): 101 | 102 | W1 = tf.convert_to_tensor(parameters["W1"]) 103 | b1 = tf.convert_to_tensor(parameters["b1"]) 104 | W2 = tf.convert_to_tensor(parameters["W2"]) 105 | b2 = tf.convert_to_tensor(parameters["b2"]) 106 | W3 = tf.convert_to_tensor(parameters["W3"]) 107 | b3 = tf.convert_to_tensor(parameters["b3"]) 108 | 109 | params = {"W1": W1, 110 | "b1": b1, 111 | "W2": W2, 112 | "b2": b2, 113 | "W3": W3, 114 | "b3": b3} 115 | 116 | x = tf.placeholder("float", [12288, 1]) 117 | 118 | z3 = forward_propagation_for_predict(x, params) 119 | p = tf.argmax(z3) 120 | 121 | sess = tf.Session() 122 | prediction = sess.run(p, feed_dict = {x: X}) 123 | 124 | return prediction 125 | 126 | #def predict(X, parameters): 127 | # 128 | # W1 = tf.convert_to_tensor(parameters["W1"]) 129 | # b1 = tf.convert_to_tensor(parameters["b1"]) 130 | # W2 = tf.convert_to_tensor(parameters["W2"]) 131 | # b2 = tf.convert_to_tensor(parameters["b2"]) 132 | ## W3 = tf.convert_to_tensor(parameters["W3"]) 133 | ## b3 = tf.convert_to_tensor(parameters["b3"]) 134 | # 135 | ## params = {"W1": W1, 136 | ## "b1": b1, 137 | ## "W2": W2, 138 | ## "b2": b2, 139 | ## "W3": W3, 140 | ## "b3": b3} 141 | # 142 | # params = {"W1": W1, 143 | # "b1": b1, 144 | # "W2": W2, 145 | # "b2": b2} 146 | # 147 | # x = tf.placeholder("float", [12288, 1]) 148 | # 149 | # z3 = forward_propagation(x, params) 150 | # p = tf.argmax(z3) 151 | # 152 | # with tf.Session() as sess: 153 | # prediction = sess.run(p, feed_dict = {x: X}) 154 | # 155 | # return 
prediction -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Convolutional-Neural-Networks/datasets/test_signs.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Convolutional-Neural-Networks/datasets/test_signs.h5 -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Convolutional-Neural-Networks/datasets/train_signs.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Convolutional-Neural-Networks/datasets/train_signs.h5 -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Description: 3 | 4 | The project hosts an aesthetic and simple LaTeX style suitable for "preprint" publications such as arXiv and bio-arXiv. 5 | It is based on the [**nips_2018.sty**](https://media.nips.cc/Conferences/NIPS2018/Styles/nips_2018.sty) style. 6 | 7 | This styling maintains the aesthetic of NIPS but adds and changes features to make it (IMO) even better and more suitable for preprints. 8 | The result looks fairly different from the NIPS style, so readers won't be confused into thinking that the preprint was published in NIPS. 9 | 10 | ### Why NIPS? 11 | Because the NIPS styling is a comfortable single-column format that is very aesthetic and convenient for reading. 12 | 13 | ## Usage: 14 | 1. Use document class **article**. 15 | 2. Copy **arxiv.sty** to the folder containing your tex file. 16 | 3. Add `\usepackage{arxiv}` after `\documentclass{article}`. 17 | 4. The only packages used in the style file are **geometry** and **fancyhdr**. Do not reimport them. 18 | 19 | See **template.tex**. 20 | 21 | ## Project files: 22 | 1. **arxiv.sty** - the style file. 23 | 2. **template.tex** - a sample template that uses the **arxiv style**. 24 | 3. **references.bib** - the bibliography source file for template.tex. 25 | 4. **template.pdf** - a sample output of the template file that demonstrates the design provided by the arxiv style. 26 | 27 | 28 | ## Handling References when submitting to arXiv.org 29 | The most convenient way to manage references is to use an external BibTeX file and point to it from the main file. 30 | However, this requires running the [bibtex](http://www.bibtex.org/) tool to "compile" the `.bib` file and create a `.bbl` file containing "bibitems" that can be inserted directly into the main tex file. 31 | Unfortunately, the arXiv TeX environment ([Tex Live](https://www.tug.org/texlive/)) does not do that. 32 | So the easiest way when submitting to arXiv is to create a single self-contained .tex file that contains the references. 33 | This can be done by running the BibTeX command on your machine, inserting the content of the generated `.bbl` file into the `.tex` file, and commenting out the `\bibliography{references}` command that points to the external references file. 34 | 35 | Below are the commands that should be run in the project folder: 36 | 1. Run `$ latex template` 37 | 2. Run `$ bibtex template` 38 | 3.
A `template.bbl` file will be generated (make sure it is there) 39 | 4. Copy the content of `template.bbl` into `template.tex`, inside the `\begin{thebibliography}` environment. 40 | 5. Comment out the `\bibliography{references}` command in `template.tex`. 41 | 6. You are ready to submit to arXiv.org. 42 | 43 | 44 | ## General Notes: 45 | 1. For help, comments, praise, bug reports or change requests, you can contact the author at: kourgeorge/at/gmail.com. 46 | 2. You can use, redistribute and do whatever you like with this project; however, the author takes no responsibility for any use of it. 47 | 3. If you start another project based on this project, it would be nice to mention/link to this project. 48 | 4. You are very welcome to contribute to this project. 49 | 5. A good-looking two-column template can be found at https://github.com/brenhinkeller/preprint-template.tex. 50 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/appendix.tex: -------------------------------------------------------------------------------- 1 | \newpage 2 | \section*{Appendix} -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/image_normalization,py: -------------------------------------------------------------------------------- 1 | # Per-class image normalization snippet. It assumes images_train and images_test are float arrays of shape (N, H, W, d), sorted by class in blocks of 400 images, with the channel count d defined elsewhere. 2 | # Each class block of the training set is standardized with its own per-channel mean and std, and the same statistics are applied to the corresponding slice of the test set. 3 | img_per_class = 400 4 | for i in range(0, images_train.shape[0], img_per_class): 5 | mean = images_train[i:i + img_per_class].mean( 6 | axis=(0, 1, 2), keepdims=True) 7 | std = images_train[i:i + img_per_class].std( 8 | axis=(0, 1, 2), keepdims=True) 9 | assert mean.shape == (1, 1, 1, d) and std.shape == (1, 1, 1, d) 10 | images_train[i:i + img_per_class] = ( 11 | images_train[i:i + img_per_class] - mean) / std 12 | images_test[i:i + img_per_class] = ( 13 | images_test[i:i + img_per_class] - mean) / std -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/optimization.tex: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/optimization.tex -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/references.bib: -------------------------------------------------------------------------------- 1 | @inproceedings{kour2014real, 2 | title={Real-time segmentation of on-line handwritten arabic script}, 3 | author={Kour, George and Saabne, Raid}, 4 | booktitle={Frontiers in Handwriting Recognition (ICFHR), 2014 14th International Conference on}, 5 | pages={417--422}, 6 | year={2014}, 7 | organization={IEEE} 8 | } 9 | 10 | @inproceedings{kour2014fast, 11 | title={Fast classification of handwritten on-line Arabic characters}, 12 | author={Kour, George and Saabne, Raid}, 13 | booktitle={Soft Computing and Pattern Recognition (SoCPaR), 2014 6th International Conference of}, 14 | pages={312--318}, 15 | year={2014}, 16 | organization={IEEE} 17 | } 18 |
@article{hadash2018estimate, 20 | title={Estimate and Replace: A Novel Approach to Integrating Deep Neural Networks with Existing Applications}, 21 | author={Hadash, Guy and Kermany, Einat and Carmeli, Boaz and Lavi, Ofer and Kour, George and Jacovi, Alon}, 22 | journal={arXiv preprint arXiv:1804.09028}, 23 | year={2018} 24 | } 25 | @ARTICLE{py06nimg, 26 | author = {Paul A. Yushkevich and Joseph Piven and Cody Hazlett, Heather and 27 | Gimpel Smith, Rachel and Sean Ho and James C. Gee and Guido Gerig}, 28 | title = {User-Guided {3D} Active Contour Segmentation of 29 | Anatomical Structures: Significantly Improved Efficiency and Reliability}, 30 | journal = {Neuroimage}, 31 | year = {2006}, 32 | volume = {31}, 33 | number = {3}, 34 | pages = {1116--1128}, 35 | } 36 | @article{yaniv2018simpleitk, 37 | title={SimpleITK image-analysis notebooks: a collaborative environment for education and reproducible research}, 38 | author={Yaniv, Ziv and Lowekamp, Bradley C and Johnson, Hans J and Beare, Richard}, 39 | journal={Journal of digital imaging}, 40 | volume={31}, 41 | number={3}, 42 | pages={290--303}, 43 | year={2018}, 44 | publisher={Springer} 45 | } 46 | @inproceedings{cciccek20163d, 47 | title={3D U-Net: learning dense volumetric segmentation from sparse annotation}, 48 | author={{\c{C}}i{\c{c}}ek, {\"O}zg{\"u}n and Abdulkadir, Ahmed and Lienkamp, Soeren S and Brox, Thomas and Ronneberger, Olaf}, 49 | booktitle={International conference on medical image computing and computer-assisted intervention}, 50 | pages={424--432}, 51 | year={2016}, 52 | organization={Springer} 53 | } 54 | @inproceedings{yang2019disentangling, 55 | title={Disentangling latent hands for image synthesis and pose estimation}, 56 | author={Yang, Linlin and Yao, Angela}, 57 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, 58 | pages={9877--9886}, 59 | year={2019} 60 | } 61 | @inproceedings{hara3dcnns, 62 | author={Kensho Hara and Hirokatsu Kataoka and Yutaka Satoh}, 63 | title={Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?}, 64 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, 65 | pages={6546--6555}, 66 | year={2018}, 67 | } 68 | @inproceedings{he2016deep, 69 | title={Deep residual learning for image recognition}, 70 | author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, 71 | booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, 72 | pages={770--778}, 73 | year={2016} 74 | } -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/run.sh: -------------------------------------------------------------------------------- 1 | xelatex template.tex 2 | bibtex template.aux 3 | xelatex template.tex 4 | xelatex template.tex 5 | evince template.pdf -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/template.aux: -------------------------------------------------------------------------------- 1 | \relax 2 | \providecommand\hyper@newdestlabel[2]{} 3 | \providecommand\HyperFirstAtBeginDocument{\AtBeginDocument} 4 | \HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined 5 | \global\let\oldcontentsline\contentsline 6 | 
\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}} 7 | \global\let\oldnewlabel\newlabel 8 | \gdef\newlabel#1#2{\newlabelxx{#1}#2} 9 | \gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}} 10 | \AtEndDocument{\ifx\hyper@anchor\@undefined 11 | \let\contentsline\oldcontentsline 12 | \let\newlabel\oldnewlabel 13 | \fi} 14 | \fi} 15 | \global\let\hyper@last\relax 16 | \gdef\HyperFirstAtBeginDocument#1{#1} 17 | \providecommand*\HyPL@Entry[1]{} 18 | \bibstyle{unsrt} 19 | \bibdata{references} 20 | \HyPL@Entry{0<>} 21 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/template.bbl: -------------------------------------------------------------------------------- 1 | \begin{thebibliography}{} 2 | 3 | \end{thebibliography} 4 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/template.blg: -------------------------------------------------------------------------------- 1 | This is BibTeX, Version 0.99d (TeX Live 2017/Debian) 2 | Capacity: max_strings=100000, hash_size=100000, hash_prime=85009 3 | The top-level auxiliary file: template.aux 4 | The style file: unsrt.bst 5 | I found no \citation commands---while reading file template.aux 6 | Database file #1: references.bib 7 | You've used 0 entries, 8 | 1791 wiz_defined-function locations, 9 | 445 strings with 3524 characters, 10 | and the built_in function-call counts, 18 in all, are: 11 | = -- 0 12 | > -- 0 13 | < -- 0 14 | + -- 0 15 | - -- 0 16 | * -- 2 17 | := -- 7 18 | add.period$ -- 0 19 | call.type$ -- 0 20 | change.case$ -- 0 21 | chr.to.int$ -- 0 22 | cite$ -- 0 23 | duplicate$ -- 0 24 | empty$ -- 1 25 | format.name$ -- 0 26 | if$ -- 1 27 | int.to.chr$ -- 0 28 | int.to.str$ -- 0 29 | missing$ -- 0 30 | newline$ -- 3 31 | num.names$ -- 0 32 | pop$ -- 0 33 | preamble$ -- 1 34 | purify$ -- 0 35 | quote$ -- 0 36 | skip$ -- 1 37 | stack$ -- 0 38 | substring$ -- 0 39 | swap$ -- 0 40 | text.length$ -- 0 41 | text.prefix$ -- 0 42 | top$ -- 0 43 | type$ -- 0 44 | warning$ -- 0 45 | while$ -- 0 46 | width$ -- 0 47 | write$ -- 2 48 | (There was 1 error message) 49 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/template.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/template.pdf -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/Optimation/template.tex: -------------------------------------------------------------------------------- 1 | \documentclass{article} 2 | 3 | 4 | \usepackage{arxiv} 5 | 6 | \renewcommand{\contentsname}{目录} 7 | \renewcommand{\abstractname}{摘要} 8 | \renewcommand{\refname}{参考文献} 9 | \renewcommand{\figurename}{图} 10 | \renewcommand{\tablename}{表} 11 | \renewcommand{\appendixname}{附录} 12 | \renewcommand{\listfigurename}{图} 13 | \renewcommand{\listtablename}{表} 14 | 15 | 
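% The \renewcommand lines above localize the standard caption names to Chinese: 目录 = Contents, 摘要 = Abstract, 参考文献 = References, 图 = Figure, 表 = Table, 附录 = Appendix.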
\usepackage[utf8]{inputenc} % allow utf-8 input 16 | \usepackage[T1]{fontenc} % use 8-bit T1 fonts 17 | \usepackage{hyperref} % hyperlinks 18 | \usepackage{url} % simple URL typesetting 19 | \usepackage{booktabs} % professional-quality tables 20 | \usepackage{amsfonts} % blackboard math symbols 21 | \usepackage{nicefrac} % compact symbols for 1/2, etc. 22 | \usepackage{microtype} % microtypography 23 | \usepackage{lipsum} 24 | 25 | \usepackage{graphicx} 26 | 27 | \usepackage[justification=centering]{caption} 28 | \usepackage{subcaption} 29 | 30 | \usepackage{listings} 31 | \usepackage{xcolor} 32 | 33 | \usepackage{amsmath,amssymb} 34 | \DeclareMathOperator{\E}{\mathbb{E}} 35 | 36 | \renewcommand{\baselinestretch}{1.2} 37 | \usepackage{xeCJK} 38 | \usepackage{fontspec} 39 | \setCJKmainfont{SimSun} % or \setCJKmainfont{KaiTi} 40 | \setCJKmonofont{SimSun} 41 | \setmainfont{Times New Roman} 42 | 43 | \lstset{ 44 | numbers=left, 45 | numberstyle= \tiny, 46 | keywordstyle= \color{ blue!70}, 47 | commentstyle= \color{red!50!green!50!blue!50}, 48 | frame=shadowbox, 49 | rulesepcolor= \color{ red!20!green!20!blue!20} , 50 | escapeinside=``, 51 | xleftmargin=2em,xrightmargin=2em, aboveskip=1em, 52 | framexleftmargin=2em 53 | } 54 | 55 | \title{Improving Deep Neural Networks: Hyperparameter tuning, Regularization and Optimization\thanks{This paper is part of the weekly report from March 1st to March 7th, 2020.}} 56 | 57 | 58 | \author{ 59 | Kai Yi\thanks{The author received his B.Eng. with honors from the Department of Software Engineering, Xi'an Jiaotong University in June 2019. His current research interests include cognition-based artificial intelligence, machine learning, computer vision and computational psychology. His homepage is kaiyi.me. He is now planning to pursue PhD studies.}\\ 60 | % Department of Computer Science\\ 61 | % Cranberry-Lemon University\\ 62 | % Pittsburgh, PA 15213 \\ 63 | \texttt{williamyi96@gmail.com} \\} 64 | 65 | \begin{document} 66 | \maketitle 67 | 68 | \begin{abstract} 69 | \lipsum[1] 70 | \end{abstract} 71 | 72 | 73 | % keywords can be removed 74 | % \keywords{First keyword \and Second keyword \and More} 75 | 76 | 77 | \input{optimization.tex} 78 | 79 | % \input{experiments.tex} 80 | 81 | % \input{cnn-encoder.tex} 82 | 83 | \section*{Acknowledgements} 84 | \bibliographystyle{unsrt} 85 | \bibliography{references} %%% Remove comment to use the external .bib file (using bibtex). 86 | %%% and comment out the ``thebibliography'' section.
87 | 88 | \input{appendix.tex} 89 | 90 | \end{document} 91 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-Optimization/README.md -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/DeepLearningSpecializatoin-Notes-0318-afternoon.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/DeepLearningSpecializatoin-Notes-0318-afternoon.pdf -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Description: 3 | 4 | The project hosts an aesthetic and simple LaTeX style suitable for "preprint" publications such as arXiv and bio-arXiv. 5 | It is based on the [**nips_2018.sty**](https://media.nips.cc/Conferences/NIPS2018/Styles/nips_2018.sty) style. 6 | 7 | This styling maintains the aesthetic of NIPS but adds and changes features to make it (IMO) even better and more suitable for preprints. 8 | The result looks fairly different from the NIPS style, so readers won't be confused into thinking that the preprint was published in NIPS. 9 | 10 | ### Why NIPS? 11 | Because the NIPS styling is a comfortable single-column format that is very aesthetic and convenient for reading. 12 | 13 | ## Usage: 14 | 1. Use document class **article**. 15 | 2. Copy **arxiv.sty** to the folder containing your tex file. 16 | 3. Add `\usepackage{arxiv}` after `\documentclass{article}`. 17 | 4. The only packages used in the style file are **geometry** and **fancyhdr**. Do not reimport them. 18 | 19 | See **template.tex**. 20 | 21 | ## Project files: 22 | 1. **arxiv.sty** - the style file. 23 | 2. **template.tex** - a sample template that uses the **arxiv style**. 24 | 3. **references.bib** - the bibliography source file for template.tex. 25 | 4. **template.pdf** - a sample output of the template file that demonstrates the design provided by the arxiv style. 26 | 27 | 28 | ## Handling References when submitting to arXiv.org 29 | The most convenient way to manage references is to use an external BibTeX file and point to it from the main file. 30 | However, this requires running the [bibtex](http://www.bibtex.org/) tool to "compile" the `.bib` file and create a `.bbl` file containing "bibitems" that can be inserted directly into the main tex file. 31 | Unfortunately, the arXiv TeX environment ([Tex Live](https://www.tug.org/texlive/)) does not do that. 32 | So the easiest way when submitting to arXiv is to create a single self-contained .tex file that contains the references. 33 | This can be done by running the BibTeX command on your machine, inserting the content of the generated `.bbl` file into the `.tex` file, and commenting out the `\bibliography{references}` command that points to the external references file. 34 | 35 | Below are the commands that should be run in the project folder: 36 | 1.
Run `$ latex template` 37 | 2. Run `$ bibtex template` 38 | 3. A `template.bbl` file will be generated (make sure it is there) 39 | 4. Copy the content of `template.bbl` into `template.tex`, inside the `\begin{thebibliography}` environment. 40 | 5. Comment out the `\bibliography{references}` command in `template.tex`. 41 | 6. You are ready to submit to arXiv.org. 42 | 43 | 44 | ## General Notes: 45 | 1. For help, comments, praise, bug reports or change requests, you can contact the author at: kourgeorge/at/gmail.com. 46 | 2. You can use, redistribute and do whatever you like with this project; however, the author takes no responsibility for any use of it. 47 | 3. If you start another project based on this project, it would be nice to mention/link to this project. 48 | 4. You are very welcome to contribute to this project. 49 | 5. A good-looking two-column template can be found at https://github.com/brenhinkeller/preprint-template.tex. 50 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/appendix.tex: -------------------------------------------------------------------------------- 1 | \newpage 2 | \section*{Appendix} 3 | % =========================================================================== 4 | 5 | \subsection*{Raw Images}\label{append1} 6 | 7 | \begin{figure}[!htbp] 8 | \begin{subfigure}{.47\textwidth} 9 | \centering 10 | % include first image 11 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw0.png} 12 | % \caption{Put your sub-caption here} 13 | \label{fig:001} 14 | \end{subfigure} 15 | \begin{subfigure}{.47\textwidth} 16 | \centering 17 | % include second image 18 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw1.png} 19 | % \caption{Put your sub-caption here} 20 | \label{fig:002} 21 | \end{subfigure} 22 | 23 | \end{figure} 24 | 25 | \begin{figure}[!htbp] 26 | 27 | \begin{subfigure}{.47\textwidth} 28 | \centering 29 | % include third image 30 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw2.png} 31 | % \caption{Put your sub-caption here} 32 | \label{fig:003} 33 | \end{subfigure} 34 | \begin{subfigure}{.47\textwidth} 35 | \centering 36 | % include fourth image 37 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw3.png} 38 | % \caption{Put your sub-caption here} 39 | \label{fig:004} 40 | \end{subfigure} 41 | 42 | \end{figure} 43 | 44 | % ===================================== 45 | \begin{figure}[!htbp] 46 | 47 | \begin{subfigure}{.47\textwidth} 48 | \centering 49 | % include first image 50 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw4.png} 51 | % \caption{Put your sub-caption here} 52 | \label{fig:005} 53 | \end{subfigure} 54 | \begin{subfigure}{.47\textwidth} 55 | \centering 56 | % include second image 57 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw5.png} 58 | % \caption{Put your sub-caption here} 59 | \label{fig:006} 60 | \end{subfigure} 61 | % \caption{Put your caption here} 62 | \label{005} 63 | \end{figure} 64 | 65 | \begin{figure}[!htbp] 66 | \begin{subfigure}{.47\textwidth} 67 | \centering 68 | % include first image 69 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw4.png} 70 | % \caption{Put your sub-caption here} 71 | \label{fig:005} 72 | \end{subfigure} 73 | \begin{subfigure}{.47\textwidth} 74 | \centering 75 | % include second image 76 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw5.png} 77 | % \caption{Put your sub-caption here} 78 | \label{fig:006} 79 | \end{subfigure} 80 | % \caption{Put your caption here} 81 | \label{005} 82 |
\end{figure} 83 | 84 | \begin{figure}[!htbp] 85 | \begin{subfigure}{.47\textwidth} 86 | \centering 87 | % include first image 88 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw6.png} 89 | % \caption{Put your sub-caption here} 90 | \label{fig:005} 91 | \end{subfigure} 92 | \begin{subfigure}{.47\textwidth} 93 | \centering 94 | % include second image 95 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw7.png} 96 | % \caption{Put your sub-caption here} 97 | \label{fig:006} 98 | \end{subfigure} 99 | % \caption{Put your caption here} 100 | \label{005} 101 | \end{figure} 102 | 103 | \newpage 104 | % ===================================== 105 | \begin{figure}[!htbp] 106 | \begin{subfigure}{1.0\textwidth} 107 | \centering 108 | % include first image 109 | \includegraphics[width=1.0\linewidth]{img/raw_imgs/raw8.png} 110 | % \caption{Put your sub-caption here} 111 | \label{fig:005} 112 | \end{subfigure} 113 | % \caption{Put your caption here} 114 | \label{005} 115 | \end{figure} -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/arxiv.sty: -------------------------------------------------------------------------------- 1 | \NeedsTeXFormat{LaTeX2e} 2 | 3 | \ProcessOptions\relax 4 | 5 | % fonts 6 | \renewcommand{\rmdefault}{ptm} 7 | \renewcommand{\sfdefault}{phv} 8 | 9 | % set page geometry 10 | \usepackage[verbose=true,letterpaper]{geometry} 11 | \AtBeginDocument{ 12 | \newgeometry{ 13 | textheight=9in, 14 | textwidth=6.5in, 15 | top=1in, 16 | headheight=14pt, 17 | headsep=25pt, 18 | footskip=30pt 19 | } 20 | } 21 | 22 | \widowpenalty=10000 23 | \clubpenalty=10000 24 | \flushbottom 25 | \sloppy 26 | 27 | \usepackage{fancyhdr} 28 | \fancyhf{} 29 | \pagestyle{fancy} 30 | \renewcommand{\headrulewidth}{0pt} 31 | \fancyheadoffset{0pt} 32 | \rhead{\scshape A preprint - \today} 33 | \cfoot{\thepage} 34 | 35 | 36 | %Handling Keywords 37 | \def\keywordname{{\bfseries \emph Keywords}}% 38 | \def\keywords#1{\par\addvspace\medskipamount{\rightskip=0pt plus1cm 39 | \def\and{\ifhmode\unskip\nobreak\fi\ $\cdot$ 40 | }\noindent\keywordname\enspace\ignorespaces#1\par}} 41 | 42 | % font sizes with reduced leading 43 | \renewcommand{\normalsize}{% 44 | \@setfontsize\normalsize\@xpt\@xipt 45 | \abovedisplayskip 7\p@ \@plus 2\p@ \@minus 5\p@ 46 | \abovedisplayshortskip \z@ \@plus 3\p@ 47 | \belowdisplayskip \abovedisplayskip 48 | \belowdisplayshortskip 4\p@ \@plus 3\p@ \@minus 3\p@ 49 | } 50 | \normalsize 51 | \renewcommand{\small}{% 52 | \@setfontsize\small\@ixpt\@xpt 53 | \abovedisplayskip 6\p@ \@plus 1.5\p@ \@minus 4\p@ 54 | \abovedisplayshortskip \z@ \@plus 2\p@ 55 | \belowdisplayskip \abovedisplayskip 56 | \belowdisplayshortskip 3\p@ \@plus 2\p@ \@minus 2\p@ 57 | } 58 | \renewcommand{\footnotesize}{\@setfontsize\footnotesize\@ixpt\@xpt} 59 | \renewcommand{\scriptsize}{\@setfontsize\scriptsize\@viipt\@viiipt} 60 | \renewcommand{\tiny}{\@setfontsize\tiny\@vipt\@viipt} 61 | \renewcommand{\large}{\@setfontsize\large\@xiipt{14}} 62 | \renewcommand{\Large}{\@setfontsize\Large\@xivpt{16}} 63 | \renewcommand{\LARGE}{\@setfontsize\LARGE\@xviipt{20}} 64 | \renewcommand{\huge}{\@setfontsize\huge\@xxpt{23}} 65 | \renewcommand{\Huge}{\@setfontsize\Huge\@xxvpt{28}} 66 | 67 | % sections with less space 68 | \providecommand{\section}{} 69 | \renewcommand{\section}{% 70 | \@startsection{section}{1}{\z@}% 71 | {-2.0ex \@plus -0.5ex \@minus -0.2ex}% 72 | { 1.5ex \@plus 0.3ex \@minus 0.2ex}% 73 | {\large\bf\raggedright}% 74 | } 75 | 
\providecommand{\subsection}{} 76 | \renewcommand{\subsection}{% 77 | \@startsection{subsection}{2}{\z@}% 78 | {-1.8ex \@plus -0.5ex \@minus -0.2ex}% 79 | { 0.8ex \@plus 0.2ex}% 80 | {\normalsize\bf\raggedright}% 81 | } 82 | \providecommand{\subsubsection}{} 83 | \renewcommand{\subsubsection}{% 84 | \@startsection{subsubsection}{3}{\z@}% 85 | {-1.5ex \@plus -0.5ex \@minus -0.2ex}% 86 | { 0.5ex \@plus 0.2ex}% 87 | {\normalsize\bf\raggedright}% 88 | } 89 | \providecommand{\paragraph}{} 90 | \renewcommand{\paragraph}{% 91 | \@startsection{paragraph}{4}{\z@}% 92 | {1.5ex \@plus 0.5ex \@minus 0.2ex}% 93 | {-1em}% 94 | {\normalsize\bf}% 95 | } 96 | \providecommand{\subparagraph}{} 97 | \renewcommand{\subparagraph}{% 98 | \@startsection{subparagraph}{5}{\z@}% 99 | {1.5ex \@plus 0.5ex \@minus 0.2ex}% 100 | {-1em}% 101 | {\normalsize\bf}% 102 | } 103 | \providecommand{\subsubsubsection}{} 104 | \renewcommand{\subsubsubsection}{% 105 | \vskip5pt{\noindent\normalsize\rm\raggedright}% 106 | } 107 | 108 | % float placement 109 | \renewcommand{\topfraction }{0.85} 110 | \renewcommand{\bottomfraction }{0.4} 111 | \renewcommand{\textfraction }{0.1} 112 | \renewcommand{\floatpagefraction}{0.7} 113 | 114 | \newlength{\@abovecaptionskip}\setlength{\@abovecaptionskip}{7\p@} 115 | \newlength{\@belowcaptionskip}\setlength{\@belowcaptionskip}{\z@} 116 | 117 | \setlength{\abovecaptionskip}{\@abovecaptionskip} 118 | \setlength{\belowcaptionskip}{\@belowcaptionskip} 119 | 120 | % swap above/belowcaptionskip lengths for tables 121 | \renewenvironment{table} 122 | {\setlength{\abovecaptionskip}{\@belowcaptionskip}% 123 | \setlength{\belowcaptionskip}{\@abovecaptionskip}% 124 | \@float{table}} 125 | {\end@float} 126 | 127 | % footnote formatting 128 | \setlength{\footnotesep }{6.65\p@} 129 | \setlength{\skip\footins}{9\p@ \@plus 4\p@ \@minus 2\p@} 130 | \renewcommand{\footnoterule}{\kern-3\p@ \hrule width 12pc \kern 2.6\p@} 131 | \setcounter{footnote}{0} 132 | 133 | % paragraph formatting 134 | \setlength{\parindent}{\z@} 135 | \setlength{\parskip }{5.5\p@} 136 | 137 | % list formatting 138 | \setlength{\topsep }{4\p@ \@plus 1\p@ \@minus 2\p@} 139 | \setlength{\partopsep }{1\p@ \@plus 0.5\p@ \@minus 0.5\p@} 140 | \setlength{\itemsep }{2\p@ \@plus 1\p@ \@minus 0.5\p@} 141 | \setlength{\parsep }{2\p@ \@plus 1\p@ \@minus 0.5\p@} 142 | \setlength{\leftmargin }{3pc} 143 | \setlength{\leftmargini }{\leftmargin} 144 | \setlength{\leftmarginii }{2em} 145 | \setlength{\leftmarginiii}{1.5em} 146 | \setlength{\leftmarginiv }{1.0em} 147 | \setlength{\leftmarginv }{0.5em} 148 | \def\@listi {\leftmargin\leftmargini} 149 | \def\@listii {\leftmargin\leftmarginii 150 | \labelwidth\leftmarginii 151 | \advance\labelwidth-\labelsep 152 | \topsep 2\p@ \@plus 1\p@ \@minus 0.5\p@ 153 | \parsep 1\p@ \@plus 0.5\p@ \@minus 0.5\p@ 154 | \itemsep \parsep} 155 | \def\@listiii{\leftmargin\leftmarginiii 156 | \labelwidth\leftmarginiii 157 | \advance\labelwidth-\labelsep 158 | \topsep 1\p@ \@plus 0.5\p@ \@minus 0.5\p@ 159 | \parsep \z@ 160 | \partopsep 0.5\p@ \@plus 0\p@ \@minus 0.5\p@ 161 | \itemsep \topsep} 162 | \def\@listiv {\leftmargin\leftmarginiv 163 | \labelwidth\leftmarginiv 164 | \advance\labelwidth-\labelsep} 165 | \def\@listv {\leftmargin\leftmarginv 166 | \labelwidth\leftmarginv 167 | \advance\labelwidth-\labelsep} 168 | \def\@listvi {\leftmargin\leftmarginvi 169 | \labelwidth\leftmarginvi 170 | \advance\labelwidth-\labelsep} 171 | 172 | % create title 173 | \providecommand{\maketitle}{} 174 | \renewcommand{\maketitle}{% 175 | \par 176 
| \begingroup 177 | \renewcommand{\thefootnote}{\fnsymbol{footnote}} 178 | % for perfect author name centering 179 | \renewcommand{\@makefnmark}{\hbox to \z@{$^{\@thefnmark}$\hss}} 180 | % The footnote-mark was overlapping the footnote-text, 181 | % added the following to fix this problem (MK) 182 | \long\def\@makefntext##1{% 183 | \parindent 1em\noindent 184 | \hbox to 1.8em{\hss $\m@th ^{\@thefnmark}$}##1 185 | } 186 | \thispagestyle{empty} 187 | \@maketitle 188 | \@thanks 189 | %\@notice 190 | \endgroup 191 | \let\maketitle\relax 192 | \let\thanks\relax 193 | } 194 | 195 | % rules for title box at top of first page 196 | \newcommand{\@toptitlebar}{ 197 | \hrule height 2\p@ 198 | \vskip 0.25in 199 | \vskip -\parskip% 200 | } 201 | \newcommand{\@bottomtitlebar}{ 202 | \vskip 0.29in 203 | \vskip -\parskip 204 | \hrule height 2\p@ 205 | \vskip 0.09in% 206 | } 207 | 208 | % create title (includes both anonymized and non-anonymized versions) 209 | \providecommand{\@maketitle}{} 210 | \renewcommand{\@maketitle}{% 211 | \vbox{% 212 | \hsize\textwidth 213 | \linewidth\hsize 214 | \vskip 0.1in 215 | \@toptitlebar 216 | \centering 217 | {\LARGE\sc \@title\par} 218 | \@bottomtitlebar 219 | \textsc{A Preprint}\\ 220 | \vskip 0.1in 221 | \def\And{% 222 | \end{tabular}\hfil\linebreak[0]\hfil% 223 | \begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}\ignorespaces% 224 | } 225 | \def\AND{% 226 | \end{tabular}\hfil\linebreak[4]\hfil% 227 | \begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}\ignorespaces% 228 | } 229 | \begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}\@author\end{tabular}% 230 | \vskip 0.4in \@minus 0.1in \center{\today} \vskip 0.2in 231 | } 232 | } 233 | 234 | % add conference notice to bottom of first page 235 | \newcommand{\ftype@noticebox}{8} 236 | \newcommand{\@notice}{% 237 | % give a bit of extra room back to authors on first page 238 | \enlargethispage{2\baselineskip}% 239 | \@float{noticebox}[b]% 240 | \footnotesize\@noticestring% 241 | \end@float% 242 | } 243 | 244 | % abstract styling 245 | \renewenvironment{abstract} 246 | { 247 | \centerline 248 | {\large \bfseries \scshape Abstract} 249 | \begin{quote} 250 | } 251 | { 252 | \end{quote} 253 | } 254 | 255 | \endinput 256 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/C1-Certificate.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/C1-Certificate.pdf -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/C2-Certificate.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/C2-Certificate.pdf -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/C3-Certificate.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/C3-Certificate.pdf -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/C4-Certificate.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/C4-Certificate.pdf -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c1/backprop_kiank.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c1/backprop_kiank.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c1/classification_kiank.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c1/classification_kiank.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c1/final outline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c1/final outline.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c1/grad_summary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c1/grad_summary.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c1/logistic-regression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c1/logistic-regression.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c1/nn-figure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c1/nn-figure.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c2/RMSprop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c2/RMSprop.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c2/batch_vs_mini_batch_cost.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c2/batch_vs_mini_batch_cost.png -------------------------------------------------------------------------------- 
/Deep-Learning-Specialization/NOTES/0318/img/c2/bias-variance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c2/bias-variance.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c2/bn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c2/bn.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c2/early-stopping.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c2/early-stopping.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c2/ewa.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c2/ewa.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c2/gradient-checking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c2/gradient-checking.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c2/human-level_performance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c2/human-level_performance.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/alexnet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/alexnet.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/anchor2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/anchor2.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/cgimage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/cgimage.png -------------------------------------------------------------------------------- 
/Deep-Learning-Specialization/NOTES/0318/img/c4/cnn-examples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/cnn-examples.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/conv-block.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/conv-block.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/fail2recognize.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/fail2recognize.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/google-net.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/google-net.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/google-net2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/google-net2.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/identity-block.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/identity-block.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/inception-naive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/inception-naive.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/inception-reduced.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/inception-reduced.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/inception_keras.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/inception_keras.png 
-------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/lenet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/lenet.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/nms.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/nms.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/plain-vs-resnet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/plain-vs-resnet.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/rcnn1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/rcnn1.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/receptiveField.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/receptiveField.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/resblock.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/resblock.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/resnet-34.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/resnet-34.jpg -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/resnet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/resnet.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/siamese.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/siamese.png 
-------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/sliding-window.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/sliding-window.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/sliding-window2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/sliding-window2.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/sliding-window3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/sliding-window3.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/stagedG.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/stagedG.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/style-cost.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/style-cost.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/style-transfer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/style-transfer.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/triplet-loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/triplet-loss.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/vgg16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/vgg16.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/visualize-convnets.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/visualize-convnets.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/visualize-convnets2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/visualize-convnets2.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/yolo1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/yolo1.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/yolo2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/yolo2.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/yolo3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/yolo3.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/yolo4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/yolo4.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/yolo5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/yolo5.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c4/yolo6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c4/yolo6.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c5/diff-rnns.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c5/diff-rnns.jpg -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c5/forward-rnn.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c5/forward-rnn.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c5/language-model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c5/language-model.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c5/name-enttity-task.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c5/name-enttity-task.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/img/c5/simple-rnn-notations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/NOTES/0318/img/c5/simple-rnn-notations.png -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/introduction.tex: -------------------------------------------------------------------------------- 1 | \section{Introduction} 2 | This section will mainly talk about our designed algorithm and how it works. 3 | 4 | % \subsection{Data Organization} 5 | 6 | % \subsection{Data Visualization} 7 | 8 | % \subsection{Data Importing} 9 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/references.bib: -------------------------------------------------------------------------------- 1 | @inproceedings{kour2014real, 2 | title={Real-time segmentation of on-line handwritten arabic script}, 3 | author={Kour, George and Saabne, Raid}, 4 | booktitle={Frontiers in Handwriting Recognition (ICFHR), 2014 14th International Conference on}, 5 | pages={417--422}, 6 | year={2014}, 7 | organization={IEEE} 8 | } 9 | 10 | @inproceedings{kour2014fast, 11 | title={Fast classification of handwritten on-line Arabic characters}, 12 | author={Kour, George and Saabne, Raid}, 13 | booktitle={Soft Computing and Pattern Recognition (SoCPaR), 2014 6th International Conference of}, 14 | pages={312--318}, 15 | year={2014}, 16 | organization={IEEE} 17 | } 18 | 19 | @article{hadash2018estimate, 20 | title={Estimate and Replace: A Novel Approach to Integrating Deep Neural Networks with Existing Applications}, 21 | author={Hadash, Guy and Kermany, Einat and Carmeli, Boaz and Lavi, Ofer and Kour, George and Jacovi, Alon}, 22 | journal={arXiv preprint arXiv:1804.09028}, 23 | year={2018} 24 | } 25 | 26 | @inproceedings{zeiler2014visualizing, 27 | title={Visualizing and understanding convolutional networks}, 28 | author={Zeiler, Matthew D and Fergus, Rob}, 29 | booktitle={European conference on computer vision}, 30 | pages={818--833}, 31 | year={2014}, 32 | organization={Springer} 33 | } 34 | 35 | @inproceedings{taigman2014deepface, 36 | title={Deepface: Closing the gap to human-level performance in face verification}, 37 | author={Taigman, Yaniv and Yang, Ming and Ranzato, Marc'Aurelio 
and Wolf, Lior}, 38 | booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, 39 | pages={1701--1708}, 40 | year={2014} 41 | } 42 | 43 | @inproceedings{schroff2015facenet, 44 | title={Facenet: A unified embedding for face recognition and clustering}, 45 | author={Schroff, Florian and Kalenichenko, Dmitry and Philbin, James}, 46 | booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, 47 | pages={815--823}, 48 | year={2015} 49 | } 50 | 51 | @inproceedings{ren2015faster, 52 | title={Faster r-cnn: Towards real-time object detection with region proposal networks}, 53 | author={Ren, Shaoqing and He, Kaiming and Girshick, Ross and Sun, Jian}, 54 | booktitle={Advances in neural information processing systems}, 55 | pages={91--99}, 56 | year={2015} 57 | } 58 | 59 | @inproceedings{he2017mask, 60 | title={Mask r-cnn}, 61 | author={He, Kaiming and Gkioxari, Georgia and Doll{\'a}r, Piotr and Girshick, Ross}, 62 | booktitle={Proceedings of the IEEE international conference on computer vision}, 63 | pages={2961--2969}, 64 | year={2017} 65 | } 66 | 67 | @inproceedings{girshick2015fast, 68 | title={Fast r-cnn}, 69 | author={Girshick, Ross}, 70 | booktitle={Proceedings of the IEEE international conference on computer vision}, 71 | pages={1440--1448}, 72 | year={2015} 73 | } 74 | 75 | @inproceedings{girshick2014rich, 76 | title={Rich feature hierarchies for accurate object detection and semantic segmentation}, 77 | author={Girshick, Ross and Donahue, Jeff and Darrell, Trevor and Malik, Jitendra}, 78 | booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, 79 | pages={580--587}, 80 | year={2014} 81 | } 82 | @inproceedings{redmon2016you, 83 | title={You only look once: Unified, real-time object detection}, 84 | author={Redmon, Joseph and Divvala, Santosh and Girshick, Ross and Farhadi, Ali}, 85 | booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, 86 | pages={779--788}, 87 | year={2016} 88 | } 89 | 90 | @inproceedings{szegedy2015going, 91 | title={Going deeper with convolutions}, 92 | author={Szegedy, Christian and Liu, Wei and Jia, Yangqing and Sermanet, Pierre and Reed, Scott and Anguelov, Dragomir and Erhan, Dumitru and Vanhoucke, Vincent and Rabinovich, Andrew}, 93 | booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, 94 | pages={1--9}, 95 | year={2015} 96 | } 97 | 98 | @article{lin2013network, 99 | title={Network in network}, 100 | author={Lin, Min and Chen, Qiang and Yan, Shuicheng}, 101 | journal={arXiv preprint arXiv:1312.4400}, 102 | year={2013} 103 | } 104 | @article{simonyan2014very, 105 | title={Very deep convolutional networks for large-scale image recognition}, 106 | author={Simonyan, Karen and Zisserman, Andrew}, 107 | journal={arXiv preprint arXiv:1409.1556}, 108 | year={2014} 109 | } 110 | 111 | @inproceedings{krizhevsky2012imagenet, 112 | title={Imagenet classification with deep convolutional neural networks}, 113 | author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E}, 114 | booktitle={Advances in neural information processing systems}, 115 | pages={1097--1105}, 116 | year={2012} 117 | } 118 | 119 | @article{lecun1998gradient, 120 | title={Gradient-based learning applied to document recognition}, 121 | author={LeCun, Yann and Bottou, L{\'e}on and Bengio, Yoshua and Haffner, Patrick}, 122 | journal={Proceedings of the IEEE}, 123 | volume={86}, 124 | number={11}, 125 | pages={2278--2324}, 126 | 
year={1998}, 127 | publisher={Ieee} 128 | } 129 | 130 | @inproceedings{koch2015siamese, 131 | title={Siamese neural networks for one-shot image recognition}, 132 | author={Koch, Gregory and Zemel, Richard and Salakhutdinov, Ruslan}, 133 | booktitle={ICML deep learning workshop}, 134 | volume={2}, 135 | year={2015}, 136 | organization={Lille} 137 | } 138 | 139 | @ARTICLE{py06nimg, 140 | author = {Paul A. Yushkevich and Joseph Piven and Cody Hazlett, Heather and 141 | Gimpel Smith, Rachel and Sean Ho and James C. Gee and Guido Gerig}, 142 | title = {User-Guided {3D} Active Contour Segmentation of 143 | Anatomical Structures: Significantly Improved Efficiency and Reliability}, 144 | journal = {Neuroimage}, 145 | year = {2006}, 146 | volume = {31}, 147 | number = {3}, 148 | pages = {1116--1128}, 149 | } 150 | @article{yaniv2018simpleitk, 151 | title={SimpleITK image-analysis notebooks: a collaborative environment for education and reproducible research}, 152 | author={Yaniv, Ziv and Lowekamp, Bradley C and Johnson, Hans J and Beare, Richard}, 153 | journal={Journal of digital imaging}, 154 | volume={31}, 155 | number={3}, 156 | pages={290--303}, 157 | year={2018}, 158 | publisher={Springer} 159 | } 160 | @inproceedings{cciccek20163d, 161 | title={3D U-Net: learning dense volumetric segmentation from sparse annotation}, 162 | author={{\c{C}}i{\c{c}}ek, {\"O}zg{\"u}n and Abdulkadir, Ahmed and Lienkamp, Soeren S and Brox, Thomas and Ronneberger, Olaf}, 163 | booktitle={International conference on medical image computing and computer-assisted intervention}, 164 | pages={424--432}, 165 | year={2016}, 166 | organization={Springer} 167 | } 168 | @inproceedings{yang2019disentangling, 169 | title={Disentangling latent hands for image synthesis and pose estimation}, 170 | author={Yang, Linlin and Yao, Angela}, 171 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, 172 | pages={9877--9886}, 173 | year={2019} 174 | } 175 | @inproceedings{hara3dcnns, 176 | author={Kensho Hara and Hirokatsu Kataoka and Yutaka Satoh}, 177 | title={Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?}, 178 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, 179 | pages={6546--6555}, 180 | year={2018}, 181 | } 182 | @inproceedings{he2016deep, 183 | title={Deep residual learning for image recognition}, 184 | author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, 185 | booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, 186 | pages={770--778}, 187 | year={2016} 188 | } -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/template.tex: -------------------------------------------------------------------------------- 1 | \documentclass{article} 2 | 3 | 4 | \usepackage{arxiv} 5 | 6 | \usepackage[utf8]{inputenc} % allow utf-8 input 7 | \usepackage[T1]{fontenc} % use 8-bit T1 fonts 8 | \usepackage{hyperref} % hyperlinks 9 | \usepackage{url} % simple URL typesetting 10 | \usepackage{booktabs} % professional-quality tables 11 | \usepackage{amsfonts} % blackboard math symbols 12 | \usepackage{nicefrac} % compact symbols for 1/2, etc. 
13 | \usepackage{microtype} % microtypography 14 | \usepackage{lipsum} 15 | 16 | \usepackage{hyperref} 17 | 18 | \usepackage{amsmath} 19 | 20 | \usepackage{graphicx} 21 | 22 | \usepackage[justification=centering]{caption} 23 | \usepackage{subcaption} 24 | 25 | \usepackage{listings} 26 | \usepackage{xcolor} 27 | \lstset{ 28 | numbers=left, 29 | numberstyle= \tiny, 30 | keywordstyle= \color{ blue!70}, 31 | commentstyle= \color{red!50!green!50!blue!50}, 32 | frame=shadowbox, 33 | rulesepcolor= \color{ red!20!green!20!blue!20} , 34 | escapeinside=``, 35 | xleftmargin=2em,xrightmargin=2em, aboveskip=1em, 36 | framexleftmargin=2em 37 | } 38 | 39 | \title{Deep Learning Specialization\thanks{This is the specialization at Coursera taught by Professor Andrew Ng.}} 40 | 41 | 42 | \author{ 43 | Kai Yi\thanks{The author received his B.Eng. with honors from the Department of Software Engineering, Xi'an Jiaotong University, in June 2019. His current research interests include cognition-based artificial intelligence, machine learning, computer vision and computational psychology. His homepage is kaiyi.me. Now, he is planning to pursue PhD studies.}\\ 44 | % Department of Computer Science\\ 45 | % Cranberry-Lemon University\\ 46 | % Pittsburgh, PA 15213 \\ 47 | \texttt{williamyi96@gmail.com} \\ 48 | 49 | } 50 | 51 | \begin{document} 52 | \maketitle 53 | 54 | \begin{abstract} 55 | % \lipsum[1] 56 | 57 | % \lipsum[2] 58 | 59 | % \lipsum[3] 60 | 61 | % \lipsum[4] 62 | This paper collects my notes on the Deep Learning Specialization at Coursera taught by Prof. Andrew Ng. There are five courses in this specialization: \textit{Neural Networks and Deep Learning (NN Overview)}, \textit{Improving Deep Neural Networks: Hyperparameter tuning, Regularization and Optimization (Optimization)}, \textit{Structuring Machine Learning Projects (NN-Projects)}, \textit{Convolutional Neural Networks (CNNs)} and \textit{Sequence Models (SNNs)}. 63 | 64 | Course \textit{NN Overview} mainly covers the foundations of deep learning. It includes the major technology trends driving deep learning, the building, training and application of fully connected deep neural networks, the vectorization techniques that improve network efficiency, and the key parameters in a neural network's architecture. Course \textit{Optimization} presents the key techniques for improving neural networks' performance. It includes industry best practices for building deep learning applications, and a variety of tricks for hyperparameter tuning, regularization and optimization. More information about the remaining three courses will be added here later. 65 | 66 | \end{abstract} 67 | 68 | \newpage 69 | \tableofcontents 70 | \newpage 71 | \listoffigures 72 | \newpage 73 | \listoftables 74 | \newpage 75 | 76 | % keywords can be removed 77 | % \keywords{First keyword \and Second keyword \and More} 78 | 79 | 80 | % \input{update.tex} 81 | % \input{introduction.tex} 82 | % \input{data-preprocessing} 83 | \input{Course1.tex} 84 | \input{Course2.tex} 85 | \input{Course3.tex} 86 | \input{Course4.tex} 87 | \input{Course5.tex} 88 | % \input{experiments.tex} 89 | 90 | % \input{cnn-encoder.tex} 91 | 92 | \section*{Acknowledgements} 93 | This note is heavily based on Prof. Andrew Ng's course at Coursera. Thanks to Prof. Andrew Ng for teaching this wonderful course; I have learnt a lot from it. Besides, thanks to Coursera for providing me with full financial aid to finish all five courses in the specialization.
Moreover, parts of the note are borrowed from Mahmoud Badry's GitHub repository\footnote{https://github.com/mbadry1/DeepLearning.ai-Summary}. It gave me a clear picture of what is most important. 94 | 95 | \bibliographystyle{unsrt} 96 | \bibliography{references} %%% Remove comment to use the external .bib file (using bibtex). 97 | %%% and comment out the ``thebibliography'' section. 98 | 99 | % \input{appendix.tex} 100 | 101 | \end{document} 102 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/NOTES/0318/update.tex: -------------------------------------------------------------------------------- 1 | \section{Updates} 2 | \subsubsection*{March 1st to March 7th, 2020} 3 | 4 | Finished Items: 5 | 6 | \begin{itemize} 7 | \item Data Preprocessing (Data Visualization and Numpy Data Generation for Network Training). 8 | \item Experimented on single proteins No. 31, 33, 35, 43, 72 and 73 with ZD10. Obtained the ELBO, Error and KL of every protein and visualized them in Appendix \ref{append1}. 9 | \item Visualized the reconstruction results and the transform-removal results of every single protein (Appendix \ref{append2}). 10 | \item Experimented on multiple proteins (No. 31-33, No. 35-43, No. 72-73, No. 31-33-35-43-72-73) with ZD10. Obtained the ELBO, Error and KL of the different combinations and visualized them in Appendix \ref{append4}. 11 | \item Filled in the table of ELBO, Error and KL for reconstruction and removing transformations with ZD10 on multiple proteins (Tables \ref{construction-error1}, \ref{construction-error1}) and plotted them in Fig. \ref{mfigure2}. 12 | \item Visualized the reconstruction results and the transform-removal results of the above-mentioned combinations (Appendix \ref{append3}). 13 | \item Implemented a 3D U-Net as the encoder (Fig. \ref{U-Net}), though it has not been tested so far. 14 | \end{itemize} 15 | 16 | Issues to Be Solved: 17 | 18 | \begin{itemize} 19 | \item Followed the rules in Section \ref{image-normalization} and found that the ELBO of all proteins became exponentially large. 20 | \end{itemize} 21 | 22 | Conclusions: 23 | \begin{itemize} 24 | \item I have tested a 3-layer MLP encoder with ZD10 on different proteins and found no decrease in the reconstruction loss or the transform-removal loss. So, probably due to the limited input data and limited representational ability, there is no need to try a deeper MLP as the encoder. 25 | \item ZD10 works well for every single protein. We need a more sophisticated representation of the input features when feeding multiple proteins as input. Comparative experiments show that the MLP encoder works better with ZD50 than with ZD10 when dealing with multiple proteins. 26 | \item In most cases, training with multiple proteins does not affect the accuracy, and the network tends to learn the combined pose distributions (Appendix \ref{append3}). 27 | \end{itemize} 28 | 29 | Future Work: 30 | \begin{itemize} 31 | \item Implement a better-designed 3D U-Net and a 3D ResNet-18 with ZD50. 32 | \item Seek a better implementation of image normalization that satisfies our needs. 33 | \item Code refactoring and decoupling based on Yungeng's implementation of protein No. 72 with ZD10.
34 | \end{itemize} -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/NNDL-Certificate.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/NNDL-Certificate.pdf -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/README.md: -------------------------------------------------------------------------------- 1 | This is the first course of the Deep Learning Specialization at Coursera, taught by Prof. Andrew Ng. I'll summarize the key points from my perspective for future reference. 2 | 3 | Course I is very basic and contains a lot of equations. For this reason, I will write the notes in LaTeX. 4 | 5 | 6 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/__pycache__/lr_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/__pycache__/lr_utils.cpython-36.pyc -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/datasets/test_catvnoncat.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/datasets/test_catvnoncat.h5 -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/datasets/train_catvnoncat.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/datasets/train_catvnoncat.h5 -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/images/cat_in_iran.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/images/cat_in_iran.jpg -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/images/gargouille.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/images/gargouille.jpg -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Neural-Networks-and-Deep-Learning/lr_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import h5py 3 | 4 | def load_dataset(): 5 |
train_dataset = h5py.File('datasets/train_catvnoncat.h5', 'r') 6 | train_set_x_orig = np.array(train_dataset['train_set_x'][:]) # train set features 7 | train_set_y_orig = np.array(train_dataset['train_set_y'][:]) # train set labels 8 | 9 | test_dataset = h5py.File('datasets/test_catvnoncat.h5', 'r') 10 | test_set_x_orig = np.array(test_dataset['test_set_x'][:]) # test set features 11 | test_set_y_orig = np.array(test_dataset['test_set_y'][:]) # test set labels 12 | 13 | classes = np.array(test_dataset['list_classes'][:]) # the list of classes 14 | 15 | train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0])) 16 | test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0])) 17 | 18 | return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes 19 | -------------------------------------------------------------------------------- /Deep-Learning-Specialization/README.md: -------------------------------------------------------------------------------- 1 | # Deep Learning Specialization 2 | The Deep Learning Specialization contains several courses taught by Andrew Ng at deeplearning.ai. Here are my notes reflecting my understanding and effort while learning the courses in the specialization. I have really harvested a lot from the lectures. Thanks to Prof. Andrew Ng, who is a really excellent teacher and researcher, and also thanks to Coursera for providing me with financial aid. 3 | 4 | Overall, there are five courses in the Deep Learning Specialization; they are as follows: 5 | - [Neural Networks and Deep Learning](https://www.coursera.org/learn/neural-networks-deep-learning?specialization=deep-learning) 6 | - [Improving Deep Neural Networks: Hyperparameter tuning, Regularization and Optimization](https://www.coursera.org/learn/deep-neural-network?specialization=deep-learning) 7 | - [Structuring Machine Learning Projects](https://www.coursera.org/learn/machine-learning-projects?specialization=deep-learning) 8 | - [Convolutional Neural Networks](https://www.coursera.org/learn/convolutional-neural-networks?specialization=deep-learning) 9 | - [Sequence Models](https://www.coursera.org/learn/nlp-sequence-models) 10 | 11 | Next are the detailed notes for all the courses. 12 | 13 | ## Neural Networks and Deep Learning 14 | Status: Completed. Final Score: 97.8%. Finished on Feb. 13th, 2020. 15 | ## Convolutional Neural Networks 16 | Status: Completed. Final Score: 100%. Finished on Feb. 28th, 2020. 17 | 18 | #### Week 1 Project I: [Convolutional Neural Network - Step by Step](https://gitee.com/kaiyi96/Machine-Learning/blob/master/Deep-Learning-Specialization/Convolutional-Neural-Networks/CNN-StepbyStep.py) 19 | This project mainly covers implementing a convolutional neural network with CONV and POOL layers in numpy.
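To give a feel for what the step-by-step implementation involves, here is a minimal numpy sketch of zero-padding and a single convolution window; the helper names and shapes are illustrative and not necessarily the ones used in CNN-StepbyStep.py.

```python
import numpy as np

def zero_pad(X, pad):
    # X has shape (m, n_H, n_W, n_C); pad only the height and width dimensions with zeros.
    return np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode='constant')

def conv_single_step(a_slice, W, b):
    # a_slice and W both have shape (f, f, n_C_prev); b is a scalar bias.
    return np.sum(a_slice * W) + b

# Tiny usage example on random data.
x = np.random.randn(2, 5, 5, 3)        # batch of 2 "images" of size 5x5 with 3 channels
x_pad = zero_pad(x, 1)                 # -> shape (2, 7, 7, 3)
w, b = np.random.randn(3, 3, 3), 0.0   # one 3x3 filter over 3 input channels
z = conv_single_step(x_pad[0, 0:3, 0:3, :], w, b)
print(x_pad.shape, z)
```

A full CONV forward pass loops this single step over every output position and every filter, and the POOL forward pass is analogous with np.max or np.mean over each window.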
20 | 21 | #### Week 1 Project II: [Convolutional Neural Network - Applications](https://gitee.com/kaiyi96/Machine-Learning/blob/master/Deep-Learning-Specialization/Convolutional-Neural-Networks/CNN-Applications.py) -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Sequence-Models/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Sequence-Models/README.md -------------------------------------------------------------------------------- /Deep-Learning-Specialization/Structuring-Machine-Learning-Projects/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Deep-Learning-Specialization/Structuring-Machine-Learning-Projects/README.md -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/1player-dealer-exp.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | import time 4 | import copy 5 | 6 | iteration_times = 10000 7 | check_times = 5 8 | 9 | pokers = [] 10 | for i in range(10): 11 | pokers += [i+1] * 4 12 | pokers += [0.5] * 14 13 | 14 | player_success = 0 15 | ini_time = time.time() 16 | success_rate_cal = 0 17 | dealer_exp_cal, player_exp_cal = 0, 0 18 | dealer_decision_point = 7.0 19 | player_decision_point = 7.5 20 | 21 | for j in range(check_times): 22 | dealer_win, player_win = 0, 0 23 | dealer_exp, player_exp = 0, 0 24 | 25 | for i in range(iteration_times): 26 | dealer_definite_win = False 27 | player_definite_win = False 28 | 29 | Flag = True 30 | 31 | cur_pokers = copy.deepcopy(pokers) 32 | dealer_points, player_points = 0, 0 33 | 34 | # dealer init with a poker 35 | rand_dealer = random.randint(0, len(cur_pokers)-1) 36 | dealer_points += pokers[rand_dealer] 37 | cur_pokers.remove(pokers[rand_dealer]) 38 | # player init with a poker 39 | rand_player = random.randint(0, len(cur_pokers)-1) 40 | player_points += pokers[rand_player] 41 | cur_pokers.remove(pokers[rand_player]) 42 | 43 | # dealer round 44 | for j in range(1, 5): 45 | # print("dealer point is {}".format(dealer_points)) 46 | # if j==4 or dealer_points==10.5: 47 | # dealer_definite_win = True 48 | # break 49 | if j==4 and dealer_points==10.5: 50 | dealer_definite_win = True 51 | dealer_exp += 2 52 | elif j==4 or dealer_points==10.5: 53 | dealer_definite_win = True 54 | dealer_exp += 1 55 | if dealer_points >= dealer_decision_point: 56 | break 57 | 58 | mrandom = random.randint(0, len(cur_pokers)-1) 59 | dealer_points += cur_pokers[mrandom] 60 | cur_pokers.remove(cur_pokers[mrandom]) 61 | 62 | # player round 63 | if not dealer_definite_win: 64 | for k in range(1, 5): 65 | # print("player point is {}".format(player_points)) 66 | if player_points >= 10.5: 67 | dealer_definite_win = True 68 | dealer_exp += 1 69 | break 70 | # if player_points <= 10.5 and k==4: 71 | # player_definite_win = True 72 | # break 73 | elif player_points==10.5 and k==4: 74 | player_definite_win = True 75 | player_exp += 4 76 | elif player_points==10.5 or k==4: 77 | player_definite_win = True 78 | player_exp += 2 79 | 80 | if player_points >= player_decision_point: 81 | break 82 | 83 | mrandom = random.randint(0, len(cur_pokers)-1) 84 | 
player_points += cur_pokers[mrandom] 85 | cur_pokers.remove(cur_pokers[mrandom]) 86 | 87 | # print(dealer_points, player_points) 88 | # judge winner 89 | if dealer_definite_win: 90 | dealer_win += 1 91 | elif player_definite_win: 92 | player_win += 1 93 | else: 94 | if player_points > 10.5: 95 | dealer_win += 1 96 | dealer_exp += 1 97 | elif dealer_points > 10.5 and player_points < 10.5: 98 | player_win += 1 99 | player_exp += 1 100 | elif dealer_points < 10.5 and player_points < 10.5: 101 | if dealer_points >= player_points: 102 | dealer_win += 1 103 | dealer_exp += 1 104 | else: 105 | player_win += 1 106 | player_exp += 1 107 | 108 | success_rate_cal += float(dealer_win)/iteration_times 109 | dealer_exp_cal += float(dealer_exp)/iteration_times 110 | player_exp_cal += float(player_exp)/iteration_times 111 | print ('Success Rate is {}'.format(float(dealer_win)/iteration_times)) 112 | print ('Dealer Exp: {}, Player Exp: {}'.format( 113 | dealer_exp_cal, player_exp_cal 114 | )) 115 | 116 | fin_time = time.time() 117 | avg_rate = success_rate_cal/check_times 118 | avg_dealer_exp = dealer_exp_cal/check_times 119 | avg_player_exp = player_exp_cal/check_times 120 | print ('Average Success Rate is {}'.format(avg_rate)) 121 | print ('Average Dealer Exp is {}'.format(avg_dealer_exp)) 122 | print ('Average Player Exp is {}'.format(avg_player_exp)) 123 | print ('Running time is {} ms'.format(1000*(fin_time-ini_time))) -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/1player-player-10P50-updated.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | import random 4 | import time 5 | import copy 6 | 7 | iteration_times = 10000 8 | check_times = 50 9 | 10 | pokers = [] 11 | for i in range(10): 12 | pokers += [i+1] * 4 13 | pokers += [0.5] * 14 14 | 15 | dealer = [0] * 5 16 | player = [0] * 5 17 | dealer_success = 0 18 | ini_time = time.time() 19 | success_rate_cal = 0 20 | 21 | for j in range(check_times): 22 | for i in range(iteration_times): 23 | cur_pokers = copy.deepcopy(pokers) 24 | dealer_points, player_points = 0, 0 25 | # dealer init with a poker 26 | rand_dealer = random.randint(0, len(cur_pokers)-1) 27 | dealer_points += cur_pokers[rand_dealer] 28 | cur_pokers.remove(cur_pokers[rand_dealer]) 29 | # player init with a poker 30 | rand_player = random.randint(0, len(cur_pokers)-1) 31 | player_points += cur_pokers[rand_player] 32 | cur_pokers.remove(cur_pokers[rand_player]) 33 | # print(rand_dealer, rand_player) 34 | 35 | # dealer win 36 | for j in range(1, 5): 37 | # print(len(cur_pokers)) 38 | mrandom = random.randint(0, len(cur_pokers)-1) 39 | # print('mrandom is {}'.format(mrandom)) 40 | dealer_points += cur_pokers[mrandom] 41 | cur_pokers.remove(cur_pokers[mrandom]) 42 | 43 | if dealer_points > 10.5: 44 | break 45 | if j==4 and dealer_points==10.5: 46 | dealer_success += 1 47 | success_rate_cal += float(dealer_success)/iteration_times 48 | print ('Success Rate is {}'.format(float(dealer_success)/iteration_times)) 49 | 50 | fin_time = time.time() 51 | avg_rate = float(dealer_success)/iteration_times/check_times 52 | print ('Average Success Rate is {}'.format(avg_rate)) 53 | print ('Running time is {} ms'.format(1000*(fin_time-ini_time))) 54 | 55 | 56 | -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/1player-player-10P50.py:
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | import time 4 | import copy 5 | 6 | iteration_times = 10000 7 | check_times = 50 8 | 9 | pokers = [] 10 | for i in range(10): 11 | pokers += [i+1] * 4 12 | pokers += [0.5] * 14 13 | 14 | player_success = 0 15 | ini_time = time.time() 16 | success_rate_cal = 0 17 | dealer_decision_point = 7.0 18 | 19 | for j in range(check_times): 20 | for i in range(iteration_times): 21 | Flag = True 22 | 23 | cur_pokers = copy.deepcopy(pokers) 24 | dealer_points, player_points = 0, 0 25 | 26 | # dealer init with a poker 27 | rand_dealer = random.randint(0, len(cur_pokers)-1) 28 | cur_pokers.remove(pokers[rand_dealer]) 29 | # player init with a poker 30 | rand_player = random.randint(0, len(cur_pokers)-1) 31 | cur_pokers.remove(pokers[rand_player]) 32 | 33 | # dealer not win first 34 | dealer_num, dealer_points = 0, 0 35 | for j in range(1, 5): 36 | if j==4 or dealer_points==10.5: 37 | Flag = False 38 | break 39 | if dealer_points >= dealer_decision_point: 40 | break 41 | 42 | mrandom = random.randint(0, len(cur_pokers)-1) 43 | dealer_points += cur_pokers[mrandom] 44 | cur_pokers.remove(cur_pokers[mrandom]) 45 | 46 | if Flag: 47 | for k in range(1, 5): 48 | if player_points > 10.5: 49 | break 50 | if k==4 and player_points==10.5: 51 | player_success += 1 52 | 53 | mrandom = random.randint(0, len(cur_pokers)-1) 54 | player_points += cur_pokers[mrandom] 55 | cur_pokers.remove(cur_pokers[mrandom]) 56 | 57 | 58 | success_rate_cal += float(player_success)/iteration_times 59 | print ('Success Rate is {}'.format(float(player_success)/iteration_times)) 60 | 61 | fin_time = time.time() 62 | avg_rate = float(player_success)/iteration_times/check_times 63 | print ('Average Success Rate is {}'.format(avg_rate)) 64 | print ('Running time is {} s'.format((fin_time-ini_time))) -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/README.md: -------------------------------------------------------------------------------- 1 | This sub-project discusses several analyses of the multi-player game "Ten-and-a-Half" (十点半). It mainly analyzes the expected winnings of the dealer and of each player in the multi-player game (values rounded to four decimal places; N denotes the number of players). 2 | 3 | The following table was obtained (each group of figures comes from 50,000 Monte Carlo simulations, implemented in Python3): 4 | 5 | | N | Dealer | Player 1 | Player 2 | Player 3 | Player 4 | Player 5 | 6 | | --- | ------ | -------- | -------- | -------- | -------- | -------- | 7 | | 1 | 0.2074 | -0.2074 | - | - | - | - | 8 | | 2 | 0.5028 | -0.2510 | -0.2517 | - | - | - | 9 | | 3 | 0.8374 | -0.2803 | -0.2765 | -0.2806 | - | - | 10 | | 4 | 1.1851 | -0.2842 | -0.3016 | -0.2997 | -0.2996 | - | 11 | | 5 | 1.5206 | -0.2959 | -0.3031 | -0.3024 | -0.3047 | -0.3146 | 12 | 13 | Two assumptions: 14 | 15 | 1. The dealer's hit-or-stand decision point is 7, and every player's decision point is 7.5; 16 | 2. The deck is reshuffled after every round. 17 | 18 | Conclusions: 19 | 20 | 1. Since the expectations of the individual players are essentially the same for different numbers of players, a player's expected winnings do not depend on the dealing order; 21 | 2. Comparing the dealer's and the players' expectations, the dealer's expectation is always positive while each player's expectation is always negative, and the more players there are, the higher the dealer's expectation. 22 | 23 | For more details, see the [write-up](https://gitee.com/kaiyi96/Machine-Learning/raw/master/Game-10Point30/main.pdf) and the [experiment source code](https://gitee.com/kaiyi96/Machine-Learning/blob/master/Game-10Point30/nplayers-exp.py). 24 | -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/cvpr_eso.sty: -------------------------------------------------------------------------------- 1 | %% 2 | %% This is file `everyshi.sty', 3 | %% generated with the docstrip utility.
4 | %% 5 | %% The original source files were: 6 | %% 7 | %% everyshi.dtx (with options: `package') 8 | %% 9 | %% Copyright (C) [1994..1999] by Martin Schroeder. All rights reserved. 10 | %% 11 | %% This file is part of the EveryShi package 12 | %% 13 | %% This program may be redistributed and/or modified under the terms 14 | %% of the LaTeX Project Public License, either version 1.0 of this 15 | %% license, or (at your option) any later version. 16 | %% The latest version of this license is in 17 | %% CTAN:macros/latex/base/lppl.txt. 18 | %% 19 | %% Happy users are requested to send me a postcard. :-) 20 | %% 21 | %% The EveryShi package contains these files: 22 | %% 23 | %% everyshi.asc 24 | %% everyshi.dtx 25 | %% everyshi.dvi 26 | %% everyshi.ins 27 | %% everyshi.bug 28 | %% 29 | %% Error Reports in case of UNCHANGED versions to 30 | %% 31 | %% Martin Schr"oder 32 | %% Cr"usemannallee 3 33 | %% D-28213 Bremen 34 | %% Martin.Schroeder@ACM.org 35 | %% 36 | %% File: everyshi.dtx Copyright (C) 2001 Martin Schr\"oder 37 | \NeedsTeXFormat{LaTeX2e} 38 | \ProvidesPackage{everyshi} 39 | [2001/05/15 v3.00 EveryShipout Package (MS)] 40 | %% \CharacterTable 41 | %% {Upper-case \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z 42 | %% Lower-case \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z 43 | %% Digits \0\1\2\3\4\5\6\7\8\9 44 | %% Exclamation \! Double quote \" Hash (number) \# 45 | %% Dollar \$ Percent \% Ampersand \& 46 | %% Acute accent \' Left paren \( Right paren \) 47 | %% Asterisk \* Plus \+ Comma \, 48 | %% Minus \- Point \. Solidus \/ 49 | %% Colon \: Semicolon \; Less than \< 50 | %% Equals \= Greater than \> Question mark \? 51 | %% Commercial at \@ Left bracket \[ Backslash \\ 52 | %% Right bracket \] Circumflex \^ Underscore \_ 53 | %% Grave accent \` Left brace \{ Vertical bar \| 54 | %% Right brace \} Tilde \~} 55 | %% 56 | %% \iffalse meta-comment 57 | %% =================================================================== 58 | %% @LaTeX-package-file{ 59 | %% author = {Martin Schr\"oder}, 60 | %% version = "3.00", 61 | %% date = "15 May 2001", 62 | %% filename = "everyshi.sty", 63 | %% address = {Martin Schr\"oder 64 | %% Cr\"usemannallee 3 65 | %% 28213 Bremen 66 | %% Germany}, 67 | %% telephone = "+49-421-2239425", 68 | %% email = "martin@oneiros.de", 69 | %% pgp-Key = "2048 bit / KeyID 292814E5", 70 | %% pgp-fingerprint = "7E86 6EC8 97FA 2995 82C3 FEA5 2719 090E", 71 | %% docstring = "LaTeX package which provides hooks into 72 | %% \cs{shipout}. 
73 | %% } 74 | %% =================================================================== 75 | %% \fi 76 | 77 | \newcommand{\@EveryShipout@Hook}{} 78 | \newcommand{\@EveryShipout@AtNextHook}{} 79 | \newcommand*{\EveryShipout}[1] 80 | {\g@addto@macro\@EveryShipout@Hook{#1}} 81 | \newcommand*{\AtNextShipout}[1] 82 | {\g@addto@macro\@EveryShipout@AtNextHook{#1}} 83 | \newcommand{\@EveryShipout@Shipout}{% 84 | \afterassignment\@EveryShipout@Test 85 | \global\setbox\@cclv= % 86 | } 87 | \newcommand{\@EveryShipout@Test}{% 88 | \ifvoid\@cclv\relax 89 | \aftergroup\@EveryShipout@Output 90 | \else 91 | \@EveryShipout@Output 92 | \fi% 93 | } 94 | \newcommand{\@EveryShipout@Output}{% 95 | \@EveryShipout@Hook% 96 | \@EveryShipout@AtNextHook% 97 | \gdef\@EveryShipout@AtNextHook{}% 98 | \@EveryShipout@Org@Shipout\box\@cclv% 99 | } 100 | \newcommand{\@EveryShipout@Org@Shipout}{} 101 | \newcommand*{\@EveryShipout@Init}{% 102 | \message{ABD: EveryShipout initializing macros}% 103 | \let\@EveryShipout@Org@Shipout\shipout 104 | \let\shipout\@EveryShipout@Shipout 105 | } 106 | \AtBeginDocument{\@EveryShipout@Init} 107 | \endinput 108 | %% 109 | %% End of file `everyshi.sty'. 110 | -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/egbib.bib: -------------------------------------------------------------------------------- 1 | @article{RN1, 2 | author = {Yuzhen, Liu and Siqi, An and Qiong, Song and Changxiang Chen}, 3 | title = {Relationship between loneliness and family social support in elderly disabled elderly in Tangshan City (Chinese)}, 4 | journal = {Nursing research}, 5 | number = {07}, 6 | pages = {1057-1060}, 7 | ISSN = {1009-6493}, 8 | year = {2018}, 9 | type = {Journal Article} 10 | } 11 | @article{RN2, 12 | title={The moderating effect of loneliness on mental health and well-being in the elderly (Chinese)}, 13 | author={Na, Zhao and Mingjie, Zhou and Jianxin, Zhang}, 14 | journal={Chinese Journal of Gerontology}, 15 | volume={22}, 16 | pages={106}, 17 | year={2016} 18 | } 19 | @phdthesis{RN3, 20 | title={Research Progress on loneliness of the elderly (Chinese)}, 21 | author={Yuchen, Lai}, 22 | year={2012} 23 | } 24 | @article{hardt2012oauth, 25 | title={The OAuth 2.0 authorization framework}, 26 | author={Hardt, Dick}, 27 | year={2012} 28 | } 29 | @techreport{jones2012oauth, 30 | title={The oauth 2.0 authorization framework: Bearer token usage}, 31 | author={Jones, Michael and Hardt, Dick}, 32 | year={2012} 33 | } 34 | @book{richardson2008restful, 35 | title={RESTful web services}, 36 | author={Richardson, Leonard and Ruby, Sam}, 37 | year={2008}, 38 | publisher={O Reilly Media, Inc} 39 | } 40 | @inproceedings{pautasso2008restful, 41 | title={Restful web services vs. 
big'web services: making the right architectural decision}, 42 | author={Pautasso, Cesare and Zimmermann, Olaf and Leymann, Frank}, 43 | booktitle={Proceedings of the 17th international conference on World Wide Web}, 44 | pages={805--814}, 45 | year={2008}, 46 | organization={ACM} 47 | } 48 | @techreport{loreto2011known, 49 | title={Known issues and best practices for the use of long polling and streaming in bidirectional http}, 50 | author={Loreto, Salvatore and Saint-Andre, P and Salsano, S and Wilkins, G}, 51 | year={2011} 52 | } 53 | @article{crockford2006application, 54 | title={The application/json media type for javascript object notation (json)}, 55 | author={Crockford, Douglas}, 56 | year={2006} 57 | } 58 | @article{RN4, 59 | title={Risk factors for anxiety and depression in the elderly: a review}, 60 | author={Vink, D and Aartsen, M. J. and Schoevers, R. A.}, 61 | journal={Journal of Afferctive Disorders}, 62 | volume={106}, 63 | number={1}, 64 | pages={29-44}, 65 | year={2008}, 66 | } 67 | @inproceedings{RN5, 68 | title={The relationship between personality traits and loneliness of empty nesters (Chinese)}, 69 | author={Benxian, Yao and Yang, Li}, 70 | booktitle={National Symposium on Psychology}, 71 | year={2013}, 72 | } 73 | @article{RN7, 74 | title={Loneliness and social support of older people in China: a systematic literature review.}, 75 | author={Chen, Y. and Hicks, A and While, A. E.}, 76 | journal={Health \& Social Care in the Community}, 77 | volume={22}, 78 | number={2}, 79 | pages={113-123}, 80 | year={2014}, 81 | } 82 | @article{RN8, 83 | title={Factors associated with loneliness of noninstitutionalized and institutionalized older adults}, 84 | author={Prieto-Flores, M. E. and Forjaz, M. J. and Fernandez-Mayoralas, G and Rojo-Perez, F and Martinez-Martin, P}, 85 | journal={Journal of Aging \& Health}, 86 | volume={23}, 87 | number={1}, 88 | pages={177}, 89 | year={2011}, 90 | } 91 | @article{RN9, 92 | title={Social isolation, loneliness and health among older adults.}, 93 | author={Coyle, C. E. and Dugan, E}, 94 | journal={J Aging Health}, 95 | volume={24}, 96 | number={8}, 97 | pages={1346-1363}, 98 | year={2012}, 99 | } 100 | @article{RN10, 101 | title={Loneliness of older people aged 70: a comparison of two Finnish cohorts born 20 years apart.}, 102 | author={Eloranta, S. and Arve, S. and Isoaho, H. and Lehtonen, A. 
and Viitanen, M.}, 103 | journal={Archives of Gerontology \& Geriatrics}, 104 | volume={61}, 105 | number={2}, 106 | pages={254-260}, 107 | year={2015}, 108 | } 109 | @article{RN11, 110 | title={The three Department released fourth findings of the survey on the living conditions of the elderly in urban and rural areas in China (Chinese)}, 111 | author={Changzheng, Zhong}, 112 | journal={Social work in China}, 113 | number={29}, 114 | pages={6-6}, 115 | year={2016}, 116 | } 117 | @misc{RN12, 118 | author = {Bingzhi, Han}, 119 | title = {The medical security system basically realizes the full coverage of the elderly (Chinese)}, 120 | pages = {003}, 121 | month = {2016-10-10}, 122 | type = {Newspaper Article}, 123 | publisher = {Economic Daily} 124 | } 125 | @article{RN13, 126 | title={Elderly people in urban and rural areas have been significantly improved in their sense of well-being, and positive progress has been made in medical work in the elderly (Chinese)}, 127 | author={Xiang, Bai}, 128 | year={2016-10-09}, 129 | } 130 | @mastersthesis{RN14, 131 | title={The study of Chinese speech synthesis technology (Chinese)}, 132 | author={Shuiping, Shi}, 133 | year={2004}, 134 | school={Wanfang Data Resource System} 135 | } 136 | @article{RN15, 137 | title={Speech synthesis - brilliant prospects for great prospects (Chinese)}, 138 | author={Qingfeng, Liu}, 139 | year={2005-06-24}, 140 | } -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/main.aux: -------------------------------------------------------------------------------- 1 | \relax 2 | \providecommand\hyper@newdestlabel[2]{} 3 | \providecommand\HyperFirstAtBeginDocument{\AtBeginDocument} 4 | \HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined 5 | \global\let\oldcontentsline\contentsline 6 | \gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}} 7 | \global\let\oldnewlabel\newlabel 8 | \gdef\newlabel#1#2{\newlabelxx{#1}#2} 9 | \gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}} 10 | \AtEndDocument{\ifx\hyper@anchor\@undefined 11 | \let\contentsline\oldcontentsline 12 | \let\newlabel\oldnewlabel 13 | \fi} 14 | \fi} 15 | \global\let\hyper@last\relax 16 | \gdef\HyperFirstAtBeginDocument#1{#1} 17 | \providecommand*\HyPL@Entry[1]{} 18 | \HyPL@Entry{0<>} 19 | \@writefile{toc}{\contentsline {section}{\numberline {1}\hskip -1em.\nobreakspace {}游戏简介}{1}{section.1}} 20 | \@writefile{loa}{\contentsline {algorithm}{\numberline {1}{\ignorespaces ``十点半"游戏规则\relax }}{1}{algorithm.1}} 21 | \providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}} 22 | \newlabel{alg:A}{{1}{1}{``十点半"游戏规则\relax }{algorithm.1}{}} 23 | \@writefile{toc}{\contentsline {section}{\numberline {2}\hskip -1em.\nobreakspace {}双人博弈基本分析}{1}{section.2}} 24 | \@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces 多人博弈庄闲获胜期望分析(仅保留四位小数, N表示闲家数)\relax }}{1}{table.caption.1}} 25 | \newlabel{lab:1}{{1}{1}{多人博弈庄闲获胜期望分析(仅保留四位小数, N表示闲家数)\relax }{table.caption.1}{}} 26 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.1}\hskip -1em.\nobreakspace {}双人博弈庄闲五张牌十点半的概率分析}{1}{subsection.2.1}} 27 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.2}\hskip -1em.\nobreakspace {}双人博弈庄闲获胜期望分析}{1}{subsection.2.2}} 28 | \@writefile{toc}{\contentsline {section}{\numberline {3}\hskip -1em.\nobreakspace {}多人博弈庄闲获胜期望分析}{1}{section.3}} 29 | \@writefile{toc}{\contentsline {section}{\numberline {4}\hskip -1em.\nobreakspace {}结论}{1}{section.4}} 30 | 
-------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/main.bbl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Gaming-Under-Uncertainty/Game-10Point30/main.bbl -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/main.blg: -------------------------------------------------------------------------------- 1 | This is BibTeX, Version 0.99d (TeX Live 2017/Debian) 2 | Capacity: max_strings=100000, hash_size=100000, hash_prime=85009 3 | The top-level auxiliary file: main.aux 4 | I found no \citation commands---while reading file main.aux 5 | I found no \bibdata command---while reading file main.aux 6 | I found no \bibstyle command---while reading file main.aux 7 | You've used 0 entries, 8 | 0 wiz_defined-function locations, 9 | 83 strings with 482 characters, 10 | and the built_in function-call counts, 0 in all, are: 11 | = -- 0 12 | > -- 0 13 | < -- 0 14 | + -- 0 15 | - -- 0 16 | * -- 0 17 | := -- 0 18 | add.period$ -- 0 19 | call.type$ -- 0 20 | change.case$ -- 0 21 | chr.to.int$ -- 0 22 | cite$ -- 0 23 | duplicate$ -- 0 24 | empty$ -- 0 25 | format.name$ -- 0 26 | if$ -- 0 27 | int.to.chr$ -- 0 28 | int.to.str$ -- 0 29 | missing$ -- 0 30 | newline$ -- 0 31 | num.names$ -- 0 32 | pop$ -- 0 33 | preamble$ -- 0 34 | purify$ -- 0 35 | quote$ -- 0 36 | skip$ -- 0 37 | stack$ -- 0 38 | substring$ -- 0 39 | swap$ -- 0 40 | text.length$ -- 0 41 | text.prefix$ -- 0 42 | top$ -- 0 43 | type$ -- 0 44 | warning$ -- 0 45 | while$ -- 0 46 | width$ -- 0 47 | write$ -- 0 48 | (There were 3 error messages) 49 | -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/main.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Gaming-Under-Uncertainty/Game-10Point30/main.pdf -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/main.tex: -------------------------------------------------------------------------------- 1 | \documentclass[10pt,twocolumn,letterpaper]{article} 2 | 3 | \renewcommand{\contentsname}{目录} 4 | \renewcommand{\abstractname}{摘要} 5 | \renewcommand{\refname}{参考文献} 6 | \renewcommand{\figurename}{图} 7 | \renewcommand{\tablename}{表} 8 | \renewcommand{\appendixname}{附录} 9 | \renewcommand{\listfigurename}{图} 10 | \renewcommand{\listtablename}{表} 11 | % \renewcommand{\lstlistlistingname}{列表目录} 12 | % \renewcommand{\listname}{列表} 13 | 14 | \usepackage{cvpr} 15 | \usepackage{times} 16 | \usepackage{epsfig} 17 | \usepackage{graphicx} 18 | \usepackage{amsmath} 19 | \usepackage{amssymb} 20 | \usepackage{amsthm} 21 | % \usepackage{multicolumn} 22 | \usepackage{multirow} 23 | 24 | \usepackage{algorithm} 25 | \usepackage{algorithmic} 26 | 27 | \usepackage{listings} 28 | 29 | % \lstset{ % 30 | % language=python, % the language of the code 31 | % basicstyle=\footnotesize, % the size of the fonts that are used for the code 32 | % numbers=left, % where to put the line-numbers 33 | % numberstyle=\tiny\color{gray}, % the style that is used for the line-numbers 34 | % stepnumber=2, % the step between two line-numbers. 
If it's 1, each line 35 | % % will be numbered 36 | % numbersep=5pt, % how far the line-numbers are from the code 37 | % backgroundcolor=\color{white}, % choose the background color. You must add \usepackage{color} 38 | % showspaces=false, % show spaces adding particular underscores 39 | % showstringspaces=false, % underline spaces within strings 40 | % showtabs=false, % show tabs within strings adding particular underscores 41 | % frame=single, % adds a frame around the code 42 | % rulecolor=\color{black}, % if not set, the frame-color may be changed on line-breaks within not-black text (e.g. commens (green here)) 43 | % tabsize=2, % sets default tabsize to 2 spaces 44 | % captionpos=b, % sets the caption-position to bottom 45 | % breaklines=true, % sets automatic line breaking 46 | % breakatwhitespace=false, % sets if automatic breaks should only happen at whitespace 47 | % title=\lstname, % show the filename of files included with \lstinputlisting; 48 | % % also try caption instead of title 49 | % keywordstyle=\color{blue}, % keyword style 50 | % commentstyle=\color{dkgreen}, % comment style 51 | % stringstyle=\color{mauve}, % string literal style 52 | % escapeinside={\%*}{*)}, % if you want to add LaTeX within your code 53 | % morekeywords={*,...} % if you want to add more keywords to the set 54 | 55 | % Include other packages here, before hyperref. 56 | 57 | % If you comment hyperref and then uncomment it, you should delete 58 | % egpaper.aux before re-running latex. (Or just hit 'q' on the first latex 59 | % run, let it finish, and you should be clear). 60 | \usepackage[breaklinks=true,bookmarks=false]{hyperref} 61 | 62 | \usepackage[utf8]{inputenc} % allow utf-8 input 63 | \usepackage[T1]{fontenc} % use 8-bit T1 fonts 64 | \usepackage{hyperref} % hyperlinks 65 | \usepackage{url} % simple URL typesetting 66 | \usepackage{booktabs} % professional-quality tables 67 | \usepackage{amsfonts} % blackboard math symbols 68 | \usepackage{nicefrac} % compact symbols for 1/2, etc. 69 | \usepackage{microtype} % microtypography 70 | 71 | \usepackage{url} 72 | \usepackage{caption} 73 | % \usepackage{subfigure*} 74 | % \usepackage{paisubfigure*} 75 | \usepackage{graphicx} 76 | \usepackage{booktabs} 77 | % \usepackage{amsmath} 78 | 79 | \makeatletter 80 | \let\@afterindentfalse\@afterindenttrue 81 | \@afterindenttrue 82 | \makeatother 83 | \setlength{\parindent}{2em} 84 | 85 | \usepackage{amsmath,amssymb} 86 | \DeclareMathOperator{\E}{\mathbb{E}} 87 | 88 | \renewcommand{\baselinestretch}{1.2} 89 | \usepackage{xeCJK} 90 | \usepackage{fontspec} 91 | \setCJKmainfont{SimSun} %或\setCJKmainfont{KaiTi} 92 | \setCJKmonofont{SimSun} 93 | \setmainfont{Times New Roman} 94 | 95 | \cvprfinalcopy % *** Uncomment this line for the final submission 96 | 97 | % \def\cvprPaperID{****} % *** Enter the CVPR Paper ID here 98 | % \def\httilde{\mbox{\tt\raisebox{-.5ex}{\symbol{126}}}} 99 | 100 | % Pages are numbered in submission mode, and unnumbered in camera-ready 101 | %\ifcvprfinal\pagestyle{empty}\fi 102 | % \setcounter{page}{1} 103 | \begin{document} 104 | % \newtheorom{algorithm}{算法} 105 | \title{多人博弈``十点半”游戏的若干问题分析} 106 | 107 | \author{易凯\thanks{The author received his B.Eng with honor from Department of Software Engineering, Xi'an Jiaotong University in June 2019. His current research interests include cognition-based artificial intelligence, machine learning, computer vision and conputational psychology. His homepage is kaiyi.me. 
Now, he is planning to pursue PhD studies and internships.} 108 | % Institution1 address\\ 109 | % {\tt\small {yikai2015, yushuanghe1997, sherlockholmes, qyl916}@stu.xjtu.edu.cn} 110 | % For a paper whose authors are all at the same institution, 111 | % omit the following lines up until the closing ``}''. 112 | % Additional authors and addresses can be added with ``\and'', 113 | % just like the second author. 114 | % To save space, use either the email address or home page, not both 115 | } 116 | % \providecommand{\keywords}[1]{\textbf{\textit{Index terms---}} #1} 117 | % \providecommand{\keywords}[1]{\textbf{\textit{关键词---}} #1} 118 | 119 | \maketitle 120 | 121 | \section{游戏简介} 122 | ``十点半“游戏是多人博弈游戏,其由1个庄家和N个闲家构成。基本的游戏规则以及流程如下: 123 | 124 | \begin{algorithm}[!htp] 125 | \caption{``十点半"游戏规则} 126 | \label{alg:A} 127 | \begin{algorithmic} 128 | \STATE \textbf{步骤一}:庄家和N个闲家依次发一张牌(默认每次游戏结束重新洗牌)。所有数字牌记为实际点数,花牌记为半点(包括大小王)。 129 | \STATE \textbf{步骤二}:庄家决定轮。庄家可以自由选择补牌(持牌数上限为五张),庄家若总点数为十点半,则庄家赢。若总牌数为五张,若总点数小于等于十点半,则庄家赢(若等于十点半则闲家输双倍底金);若总点数大于十点半,则闲家赢,获得资本与底金同等数额。上述所有情况游戏结束。 130 | \STATE \textbf{步骤三}:闲家$i\in [1, N]$ 补牌,若闲家 $i$ 点数超过十点半,则闲家$i$输;若闲家 $i$ 点数等于十点半或者持牌数等于五且总点数小于等于十点半,则闲家 $i$ 赢,且庄家需付给 $i$ 底金的两倍(若持牌数等于五且总点数等于十点半,则庄家需付给 $i$ 底金的四倍)。 131 | \STATE \textbf{步骤四}:重复步骤三,直到所有闲家完成补牌。庄家与所有剩余闲家比点数,若庄家点数小于闲家$i$,则闲家$i$胜,否则,庄家胜。胜负额度均与闲家 $i$ 底金相同。游戏结束。 132 | \end{algorithmic} 133 | \end{algorithm} 134 | 135 | \section{双人博弈基本分析} 136 | 双人博弈指的是一庄一闲的基本情况,主要分析两点,庄家和闲家获得五张牌十点半的概率以及庄闲的数学期望。 137 | \subsection{双人博弈庄闲五张牌十点半的概率分析} 138 | 由于相关问题的概率分析较为复杂,因此通过数学模拟的手段进行分析。 139 | 140 | 经过50,000次迭代,庄家五张牌十点半的概率为0.012308, 闲家五张牌十点半的概率为0.010168。 141 | \subsection{双人博弈庄闲获胜期望分析} 142 | 此处做出两个假设: 143 | 144 | \begin{itemize} 145 | \item 庄家补牌或停牌决策点为7,任一闲家决策点为7.5; 146 | \item 每次结束一局均重新洗牌 147 | \end{itemize} 148 | 149 | 通过演算,庄家的胜率为0.6451,远大于0.5。此外,庄家的数学期望为 0.2074,闲家的数学期望为 -0.2074. 
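% A quick sanity check on the simulated expectations above, written out for reference:
% every bet in this game is settled directly between the dealer and one of the players,
% so the game is zero-sum and the expectations must satisfy
%   $\E[\text{dealer}] + \sum_{i=1}^{N} \E[\text{player}_i] = 0$.
% The two-player result above gives $0.2074 + (-0.2074) = 0$, and the rows of
% Table~\ref{lab:1} below agree up to Monte-Carlo rounding error, e.g.\ for $N=5$:
% $1.5206 \approx 0.2959 + 0.3031 + 0.3024 + 0.3047 + 0.3146 = 1.5207$.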
150 | 151 | \section{多人博弈庄闲获胜期望分析} 152 | 多人博弈庄闲获胜期望分析如 表 \ref{lab:1},其中每组数据均经过了50,000 次数学模拟,使用 Python3 实现。 153 | 154 | \begin{table} 155 | \centering 156 | \caption{多人博弈庄闲获胜期望分析(仅保留四位小数, N表示闲家数)} 157 | \label{lab:1} 158 | \small 159 | \begin{tabular}{c|c|c|c|c|c|c} 160 | \hline 161 | N & Dealer & Player 1 & Player 2 & Player 3 & Player 4 & Player 5\\ 162 | \hline 163 | 1 & 0.2074 & -0.2074 & - & - & - & - \\ 164 | 2 & 0.5028 & -0.2510 & -0.2517& - & - & - \\ 165 | 3 & 0.8374 & -0.2803 & -0.2765 & -0.2806& - & - \\ 166 | 4 & 1.1851 & -0.2842 & -0.3016 & -0.2997 & -0.2996& - \\ 167 | 5 & 1.5206 & -0.2959 & -0.3031 & -0.3024 & -0.3047 & -0.3146\\ 168 | \hline 169 | \end{tabular} 170 | \end{table} 171 | 172 | \section{结论} 173 | \begin{itemize} 174 | \item 通过表 \ref{lab:1} 不同闲家数时各个闲家的数学期望基本相同来看,闲家获胜的数学期望与发排顺序无关; 175 | \item 通过表 \ref{lab:1} 庄闲数学期望的对比来看,庄家的数学期望始终为正,闲家的数学期望始终为负,且闲家数量越多,庄家的数学期望越高。 176 | \end{itemize} 177 | 178 | % {\small 179 | % \bibliographystyle{unsrt} 180 | % \bibliography{egbib} 181 | % } 182 | 183 | \end{document} 184 | -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/nplayers-exp.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | import time 4 | import copy 5 | 6 | iteration_times = 10000 7 | check_times = 5 8 | players_num = 5 # num <= 9 9 | 10 | pokers = [] 11 | for i in range(10): 12 | pokers += [i+1] * 4 13 | pokers += [0.5] * 14 14 | 15 | player_success = 0 16 | ini_time = time.time() 17 | success_rate_cal = 0 18 | dealer_exp_cal, player_exp_cal = 0, [0]*players_num 19 | dealer_decision_point = 7.0 20 | player_decision_point = 7.5 21 | avg_player_exp = [0]*players_num 22 | 23 | for j in range(check_times): 24 | dealer_win, player_win = 0, [0]*players_num 25 | dealer_exp, player_exp = 0, [0]*players_num 26 | 27 | for i in range(iteration_times): 28 | dealer_definite_win = False 29 | player_definite_win = [False]*players_num 30 | 31 | cur_pokers = copy.deepcopy(pokers) 32 | dealer_points, player_points = 0, [0]*players_num 33 | 34 | # dealer init with a poker 35 | rand_dealer = random.randint(0, len(cur_pokers)-1) 36 | dealer_points += pokers[rand_dealer] 37 | cur_pokers.remove(pokers[rand_dealer]) 38 | # players init with a poker 39 | for m in range(players_num): 40 | rand_player = random.randint(0, len(cur_pokers)-1) 41 | # print(len(cur_pokers), rand_player) 42 | player_points[m] += pokers[rand_player] 43 | cur_pokers.remove(cur_pokers[rand_player]) 44 | # del cur_pokers[rand_dealer] 45 | 46 | # dealer round 47 | for j in range(1, 5): 48 | if j==4 and dealer_points==10.5: 49 | dealer_definite_win = True 50 | dealer_exp += 2*players_num 51 | for m in range(players_num): 52 | player_exp[m] -= 2 53 | elif j==4 or dealer_points==10.5: 54 | dealer_definite_win = True 55 | dealer_exp += 1*players_num 56 | for m in range(players_num): 57 | player_exp[m] -= 1 58 | if dealer_points >= dealer_decision_point: 59 | break 60 | 61 | mrandom = random.randint(0, len(cur_pokers)-1) 62 | dealer_points += cur_pokers[mrandom] 63 | cur_pokers.remove(cur_pokers[mrandom]) 64 | 65 | # player round 66 | if not dealer_definite_win: 67 | for m in range(players_num): 68 | for k in range(1, 5): 69 | if player_points[m] >= 10.5: 70 | dealer_definite_win = True 71 | dealer_exp += 1 72 | player_exp[m] -= 1 73 | break 74 | elif player_points[m]==10.5 and k==4: 75 | player_definite_win[m] = True 76 | player_exp[m] += 4 77 | dealer_exp -= 4 78 | elif 
player_points[m]==10.5 or k==4: 79 | player_definite_win[m] = True 80 | player_exp[m] += 2 81 | dealer_exp -= 2 82 | 83 | if player_points[m] >= player_decision_point: 84 | break 85 | 86 | mrandom = random.randint(0, len(cur_pokers)-1) 87 | player_points[m] += cur_pokers[mrandom] 88 | cur_pokers.remove(cur_pokers[mrandom]) 89 | 90 | # print(dealer_points, player_points) 91 | # judge winner 92 | if dealer_definite_win: 93 | dealer_win += 1 94 | else: 95 | for m in range(players_num): 96 | if player_definite_win[m]: 97 | player_win[m] += 1 98 | else: 99 | if player_points[m] > 10.5: 100 | dealer_win += 1 101 | dealer_exp += 1 102 | player_exp[m] -= 1 103 | elif dealer_points > 10.5 and player_points[m] < 10.5: 104 | player_win[m] += 1 105 | player_exp[m] += 1 106 | dealer_exp -= 1 107 | elif dealer_points < 10.5 and player_points[m] < 10.5: 108 | if dealer_points >= player_points[m]: 109 | dealer_win += 1 110 | dealer_exp += 1 111 | player_exp[m] -= 1 112 | else: 113 | player_win[m] += 1 114 | player_exp[m] += 1 115 | dealer_exp -= 1 116 | 117 | success_rate_cal += float(dealer_win)/iteration_times 118 | dealer_exp_cal += float(dealer_exp)/iteration_times 119 | print ('Success Rate is {}'.format(float(dealer_win)/iteration_times)) 120 | for m in range(players_num): 121 | player_exp_cal[m] += float(player_exp[m])/iteration_times 122 | print ('Dealer Exp: {}, Player {} Exp: {}'.format( 123 | dealer_exp_cal, m+1, player_exp_cal[m] 124 | )) 125 | 126 | fin_time = time.time() 127 | avg_rate = success_rate_cal/check_times 128 | avg_dealer_exp = dealer_exp_cal/check_times 129 | print ('Average Success Rate is {}'.format(avg_rate)) 130 | print ('Average Dealer Exp is {}'.format(avg_dealer_exp)) 131 | for m in range(players_num): 132 | avg_player_exp[m] = player_exp_cal[m]/check_times 133 | print ('Average Player Exp is {}'.format(avg_player_exp[m])) 134 | print ('Running time is {} s'.format(fin_time-ini_time)) -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/run.sh: -------------------------------------------------------------------------------- 1 | xelatex main.tex 2 | bibtex main.aux 3 | xelatex main.tex 4 | xelatex main.tex 5 | evince main.pdf -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/two-five.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | import random 4 | import time 5 | import copy 6 | 7 | iteration_times = 10000 8 | 9 | pokers = [] 10 | for i in range(10): 11 | pokers += [i+1] * 4 12 | pokers += [0.5] * 14 13 | cur_pokers = copy.deepcopy(pokers) 14 | print(len(pokers)) 15 | dealer = [0] * 5 16 | player = [0] * 5 17 | dealer_success = 0 18 | ini_time = time.time() 19 | for i in range(iteration_times): 20 | # dealer win 21 | dealer_num, dealer_points = 0, 0 22 | for j in range(1,6): 23 | # mrandom = random.randint(0, 53) 24 | # mrandom = [random.randint(0,53) for _ in range(iteration_times)] 25 | mrandom = np.random.randint(low=0, high=53, size=iteration_times) 26 | dealer_num += 1 27 | cur_pokers.remove(mrandom[i]) 28 | dealer_points += pokers[mrandom[i]] 29 | if dealer_points > 10.5: 30 | break 31 | if j==5 and dealer_points==10.5: 32 | dealer_success += 1 33 | fin_time = time.time() 34 | print ('Success Rate is {}, running time is {} ms'.format(float(dealer_success)/iteration_times, 1000*(fin_time-ini_time))) 35 | 36 | 37 | 
-------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/Game-10Point30/win-2players.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | import time 4 | import copy 5 | 6 | iteration_times = 10000 7 | check_times = 5 8 | 9 | pokers = [] 10 | for i in range(10): 11 | pokers += [i+1] * 4 12 | pokers += [0.5] * 14 13 | 14 | player_success = 0 15 | ini_time = time.time() 16 | success_rate_cal = 0 17 | dealer_decision_point = 7.0 18 | player_decision_point = 7.5 19 | 20 | for j in range(check_times): 21 | dealer_win, player_win = 0, 0 22 | dealer_exp, player_exp = 0, 0 23 | for i in range(iteration_times): 24 | dealer_definite_win = False 25 | player_definite_win = False 26 | 27 | Flag = True 28 | 29 | cur_pokers = copy.deepcopy(pokers) 30 | dealer_points, player_points = 0, 0 31 | 32 | # dealer init with a poker 33 | rand_dealer = random.randint(0, len(cur_pokers)-1) 34 | dealer_points += pokers[rand_dealer] 35 | cur_pokers.remove(pokers[rand_dealer]) 36 | # player init with a poker 37 | rand_player = random.randint(0, len(cur_pokers)-1) 38 | player_points += pokers[rand_player] 39 | cur_pokers.remove(pokers[rand_dealer]) 40 | 41 | # dealer round 42 | for j in range(1, 5): 43 | # print("dealer point is {}".format(dealer_points)) 44 | if j==4 or dealer_points==10.5: 45 | dealer_definite_win = True 46 | break 47 | if dealer_points >= dealer_decision_point: 48 | break 49 | 50 | mrandom = random.randint(0, len(cur_pokers)-1) 51 | dealer_points += cur_pokers[mrandom] 52 | cur_pokers.remove(cur_pokers[mrandom]) 53 | 54 | # player round 55 | if not dealer_definite_win: 56 | for k in range(1, 5): 57 | # print("player point is {}".format(player_points)) 58 | if player_points >= 10.5: 59 | dealer_definite_win = True 60 | break 61 | if player_points <= 10.5 and k==4: 62 | player_definite_win = True 63 | break 64 | if player_points >= player_decision_point: 65 | break 66 | 67 | mrandom = random.randint(0, len(cur_pokers)-1) 68 | player_points += cur_pokers[mrandom] 69 | cur_pokers.remove(cur_pokers[mrandom]) 70 | 71 | # print(dealer_points, player_points) 72 | # judge winner 73 | if dealer_definite_win: 74 | dealer_win += 1 75 | elif player_definite_win: 76 | player_win += 1 77 | else: 78 | if player_points > 10.5: 79 | dealer_win += 1 80 | elif dealer_points > 10.5 and player_points < 10.5: 81 | player_win += 1 82 | elif dealer_points < 10.5 and player_points < 10.5: 83 | if dealer_points >= player_points: 84 | dealer_win += 1 85 | else: 86 | player_win += 1 87 | 88 | success_rate_cal += float(dealer_win)/iteration_times 89 | print ('Success Rate is {}'.format(float(dealer_win)/iteration_times)) 90 | 91 | fin_time = time.time() 92 | avg_rate = success_rate_cal/check_times 93 | print ('Average Success Rate is {}'.format(avg_rate)) 94 | print ('Running time is {} ms'.format(1000*(fin_time-ini_time))) -------------------------------------------------------------------------------- /Gaming-Under-Uncertainty/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/Gaming-Under-Uncertainty/README.md -------------------------------------------------------------------------------- /LeetCode/0001.py: -------------------------------------------------------------------------------- 1 | # Given an array of integers, return indices of the two numbers such 
that they add up to a specific target. 2 | 3 | # You may assume that each input would have exactly one solution, and you may not use the same element twice. 4 | 5 | # Solved on 09/10/2019 by William 6 | 7 | def twoSum(aList, target): 8 | values = {} 9 | for i in range(len(aList)): 10 | needed_value = target - aList[i] 11 | if needed_value in values: 12 | return [values[needed_value], i] 13 | values[aList[i]] = i 14 | print(values) 15 | raise Exception('Not Found!') 16 | 17 | aList = [2,7,11,15] 18 | target = 9 19 | print(twoSum(aList, target)) 20 | 21 | -------------------------------------------------------------------------------- /LeetCode/0002.py: -------------------------------------------------------------------------------- 1 | """ 2 | You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each of their nodes contain a single digit. Add the two numbers and return it as a linked list. 3 | 4 | You may assume the two numbers do not contain any leading zero, except the number 0 itself. 5 | 6 | # Solved on 09/10/2019 by William 7 | """ 8 | 9 | from collections import deque 10 | 11 | def addTwoNumbers(l1, l2): 12 | dummy = ListNode(0); p = dummy 13 | carry = 0 14 | while l1 is not None or l2 is not None or carry != 0: 15 | sum_ = carry 16 | if l1 is not None: 17 | sum_ += l1.val 18 | l1 = l1.next 19 | if l2 is not None: 20 | sum_ += l2.val 21 | l2 = l2.next 22 | p.next = ListNode(sum_ % 10) 23 | p = p.next 24 | carry = sum_ // 10 25 | return dummy.next 26 | 27 | # Continue from here -------------------------------------------------------------------------------- /LeetCode/0004.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def calMid(self, nums): # sorted arrary: nums: 3 | mlength = len(nums) 4 | mid = mlength >> 1 5 | if mlength%2==0: 6 | return (nums[mid]+nums[mid-1])/2.0 7 | else: 8 | return nums[mid] 9 | 10 | def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float: 11 | m, n = len(nums1), len(nums2) 12 | 13 | if m == 0: 14 | return self.calMid(nums2) 15 | elif n == 0: 16 | return self.calMid(nums1) 17 | 18 | nums3 = [0] * (m+n) 19 | m_index, n_index = 0, 0 20 | for p_index in range(m+n): 21 | if nums1[m_index] >= nums2[n_index]: 22 | nums3[p_index] = nums2[n_index] 23 | n_index += 1 24 | else: 25 | nums3[p_index] = nums1[m_index] 26 | m_index += 1 27 | 28 | if m_index == m or n_index == n: 29 | break 30 | 31 | if m_index == m: 32 | for j in range(n_index, n): 33 | nums3[m+j] = nums2[j] 34 | elif n_index == n: 35 | for j in range(m_index, m): 36 | nums3[n+j] = nums1[j] 37 | 38 | # print(nums3) 39 | return self.calMid(nums3) 40 | 41 | 42 | -------------------------------------------------------------------------------- /LeetCode/0012.py: -------------------------------------------------------------------------------- 1 | # Greedy algorithm 2 | 3 | class Solution: 4 | def intToRoman(self, num): 5 | nums = [1000,900, 500,400, 100, 90, 50, 40, 10, 9, 5, 4, 1 ] 6 | symbols = ['M', 'CM','D','CD','C','XC','L','XL','X','IX','V','IV','I'] 7 | mstr = '' 8 | for i in range(len(nums)): 9 | while num >= nums[i]: 10 | mstr += symbols[i] 11 | num -= nums[i] 12 | return mstr 13 | -------------------------------------------------------------------------------- /LeetCode/0013.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def romanToInt(self, s): 3 | symbols = ['I', 'V', 'X', 'L', 'C', 'D', 
'M' ] 4 | values = [ 1 , 5 , 10, 50, 100, 500, 1000] 5 | sum = 0 6 | 7 | i = 0 8 | while i < len(s): 9 | curValue = values[symbols.index(s[i])] 10 | if i == len(s)-1: 11 | sum += curValue 12 | return sum 13 | nextValue = values[symbols.index(s[i+1])] 14 | if curValue < nextValue: 15 | i += 1 16 | sum += (nextValue - curValue) 17 | if i == len(s) - 1: 18 | return sum 19 | else: 20 | sum += curValue 21 | i += 1 22 | return sum 23 | 24 | -------------------------------------------------------------------------------- /LeetCode/0033.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def searchRot(self, nums): 3 | left = 0 4 | right = len(nums) - 1 5 | while left <= right: 6 | mid = (left + right) >> 1 7 | if mid == len(nums) - 1: 8 | return 0 9 | # print(left, right, mid) 10 | if nums[mid] > nums[mid+1]: 11 | return mid+1 12 | elif nums[mid] < nums[mid+1]: 13 | if nums[left] < nums[mid]: 14 | left = mid + 1 15 | elif nums[left] > nums[mid]: 16 | right = mid - 1 17 | elif nums[left] == nums[mid]: 18 | left = mid + 1 19 | return 0 20 | 21 | def search(self, nums, target): 22 | mlength = len(nums) 23 | if mlength == 1: 24 | if nums[0] == target: 25 | return 0 26 | else: 27 | return -1 28 | elif mlength == 0: 29 | return -1 30 | 31 | rotIndex = self.searchRot(nums) 32 | # print(rotIndex) 33 | 34 | left = 0 35 | right = mlength - 1 36 | while left <= right: 37 | mid = (left + right) >> 1 38 | nmid = (mid + rotIndex) % mlength 39 | if nums[nmid] == target: 40 | return nmid 41 | elif nums[nmid] > target: 42 | right = mid - 1 43 | elif nums[nmid] < target: 44 | left = mid + 1 45 | return -1 -------------------------------------------------------------------------------- /LeetCode/0034.py: -------------------------------------------------------------------------------- 1 | """ 2 | Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value. 3 | 4 | Your algorithm's runtime complexity must be in the order of O(log n). 5 | 6 | If the target is not found in the array, return [-1, -1]. 7 | 8 | """ 9 | 10 | class Solution: 11 | def searchRange(self, nums, target): 12 | left = 0 13 | right = len(nums) - 1 14 | while left<=right: 15 | mid = int(left + (right - left) / 2) 16 | if nums[mid] > target: 17 | right = mid - 1 18 | elif nums[mid < target: 19 | left = mid + 1 20 | elif nums[mid] == target: 21 | 22 | -------------------------------------------------------------------------------- /LeetCode/0035.py: -------------------------------------------------------------------------------- 1 | # Given a sorted array and a target value, return the index if the target is found. 2 | 3 | # If not, return the index where it would be if it wre inserted in order. 
4 | 5 | # Binary Search 0035, 0278 6 | 7 | class Solution: 8 | # Robust solution O(logn) 9 | def searchInsert(self, nums, target): 10 | for i in range(len(nums)): 11 | if nums[i] >= target: 12 | return i 13 | return len(nums) 14 | 15 | # Binary Search O(n) 16 | def BinarySearch(self, nums, target): 17 | left = 0 18 | right = len(nums) - 1 19 | res = -1 # not found 20 | while left <= right: 21 | mid = (left+right)/2 22 | if nums[mid] == target: 23 | return mid 24 | elif nums[mid] > target: 25 | right = mid - 1 26 | else: 27 | left = mid + 1 28 | res = left 29 | return res 30 | 31 | minput = [1,3,5,6] 32 | target = 5 33 | sol = Solution() 34 | output = sol.BinarySearch(minput, target) 35 | print(output) 36 | 37 | 38 | -------------------------------------------------------------------------------- /LeetCode/0050.py: -------------------------------------------------------------------------------- 1 | # Implement pow(x, n), which calculates x raised to the power n (xn). 2 | 3 | # class Solution: 4 | # def myPow(self, x, n): 5 | 6 | # TBD: 7 | # class Solution { 8 | # public: 9 | 10 | # double qpow(double a, long long b){ 11 | # double res = 1; 12 | # while(b){ 13 | # if(b&1) res = res*a; 14 | # b >>= 1; 15 | # a *= a; 16 | # } 17 | # return res; 18 | # } 19 | 20 | 21 | # double myPow(double x, long long n) { 22 | # if(n == 0) return 1; 23 | # if(n > 0) return qpow(x,n); 24 | # if(n < 0) return 1/qpow(x,-n); 25 | # return 1.0; 26 | # } 27 | # }; 28 | 29 | class Solution: 30 | def fastPow(self, x, n): 31 | if n == 0: 32 | return 1.0 33 | half = self.fastPow(x, n / 2) 34 | print(half, n) 35 | if n % 2 == 0: 36 | return half * half 37 | else: 38 | return half * half * x 39 | 40 | def myPow(self, x, n): 41 | N = n 42 | if N < 0: 43 | N = -N 44 | x = 1 / x 45 | print("x, N = {}, {}".format(x, N)) 46 | return self.fastPow(x, N) 47 | 48 | # There is some problems of this file. 
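Note on 0050.py above: as the closing comment says, the recursive version is broken under Python 3, because n / 2 is float division, so fastPow recurses on non-integer exponents (2.5, 1.25, ...) instead of halving an integer, and the parity test n % 2 == 0 no longer behaves as intended. The smallest fix is integer halving (half = self.fastPow(x, n // 2)); alternatively, the commented-out C++ qpow translates directly into an iterative square-and-multiply version, sketched below:

class Solution:
    def myPow(self, x: float, n: int) -> float:
        # Iterative fast exponentiation (square-and-multiply), O(log |n|).
        if n < 0:
            x, n = 1 / x, -n
        res = 1.0
        while n:
            if n & 1:        # lowest bit set: fold the current base into the result
                res *= x
            x *= x           # square the base
            n >>= 1          # move on to the next bit
        return res
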
-------------------------------------------------------------------------------- /LeetCode/0056.py: -------------------------------------------------------------------------------- 1 | # Time Limit Exceed 2 | class Solution: 3 | def merge(self, intervals: List[List[int]]) -> List[List[int]]: 4 | mlength = len(intervals) 5 | i = j = 0 6 | Shuffle = False 7 | while i< mlength: 8 | while j < mlength: 9 | if j==i: 10 | continue 11 | a1, b1 = intervals[i][0], intervals[j][0] 12 | a2, b2 = intervals[i][1], intervals[j][1] 13 | print(a1, b1, a2, b2) 14 | if b1> 1 15 | msquare = mid * mid 16 | if msquare > x: 17 | right = mid - 1 18 | elif msquare <= x: 19 | left = mid 20 | return left 21 | 22 | x = 9 23 | sol = Solution() 24 | print(sol.mySqrt(x)) 25 | -------------------------------------------------------------------------------- /LeetCode/0074.py: -------------------------------------------------------------------------------- 1 | # class Solution: 2 | # def searchMatrix(self, matrix, target): 3 | # top = 0 4 | # bottom = len(matrix) - 1 5 | # if bottom < 0: 6 | # return False 7 | # while top <= bottom: 8 | # mid = top + ((bottom - top) >> 1) 9 | # left = 0 10 | # right = len(matrix[0]) - 1 11 | # if right < 0: 12 | # return False 13 | # if matrix[mid][0] == target: 14 | # return True 15 | # elif matrix[mid][0] > target: 16 | # bottom = mid - 1 17 | # elif matrix[mid][0] < target: 18 | # while left <= right: 19 | # rowMid = left + ((right - left) >> 1) 20 | # if matrix[mid][rowMid] == target: 21 | # return True 22 | # elif matrix[mid][rowMid] > target: 23 | # right = rowMid - 1 24 | # elif matrix[mid][rowMid] < target: 25 | # left = rowMid + 1 26 | # top = mid + 1 27 | # return False 28 | 29 | class Solution: 30 | def searchMatrix(self, matrix, target): 31 | left = 0 32 | row = len(matrix) 33 | if row == 0: 34 | return False 35 | col = len(matrix[0]) 36 | right = row * col - 1 37 | while left <= right: 38 | mid = (left + right) >> 1 39 | nrow = mid / col 40 | ncol = mid % col 41 | if matrix[nrow][ncol] == target: 42 | return True 43 | elif matrix[nrow][ncol] > target: 44 | right = mid - 1 45 | elif matrix[nrow][ncol] < target: 46 | left = mid + 1 47 | return False 48 | 49 | matrix = [ 50 | [1, 3, 5, 7], 51 | [10, 11, 16, 20], 52 | [23, 30, 34, 50] 53 | ] 54 | target = 9 55 | 56 | sol = Solution() 57 | print(sol.searchMatrix(matrix, target)) -------------------------------------------------------------------------------- /LeetCode/0081.py: -------------------------------------------------------------------------------- 1 | # Runnin time: 68ms (70.98%), memory: 13.1MB (100%) 2 | 3 | class Solution: 4 | def searchRotLeftBound(self, nums): 5 | mlength = len(nums) 6 | left = 0 7 | right = mlength - 1 8 | while left <= right: 9 | mid = (left + right) >> 1 10 | if mid == mlength - 1: 11 | return 0 12 | if nums[mid] > nums[mid+1]: 13 | return mid+1 14 | else: 15 | if nums[left] > nums[mid]: 16 | right = mid - 1 17 | elif nums[left] == nums[mid]: 18 | left += 1 19 | else: 20 | left = mid + 1 21 | return 0 22 | 23 | def search(self, nums, target): 24 | if len(nums) == 0: 25 | return False 26 | elif len(nums) == 1: 27 | if nums[0] == target: 28 | return True 29 | else: 30 | return False 31 | 32 | rotIndex = self.searchRotLeftBound(nums) 33 | left = 0 34 | right = len(nums) - 1 35 | while left <= right: 36 | mid = (left + right) >> 1 37 | nmid = (mid + rotIndex) % len(nums) 38 | if nums[nmid] == target: 39 | return True 40 | elif nums[nmid] > target: 41 | right = mid - 1 42 | elif nums[nmid] < target: 43 
| left = mid + 1 44 | return False 45 | 46 | -------------------------------------------------------------------------------- /LeetCode/0100-2.py: -------------------------------------------------------------------------------- 1 | from collections import deque 2 | # Definition for a binary tree node. 3 | # class TreeNode: 4 | # def __init__(self, x): 5 | # self.val = x 6 | # self.left = None 7 | # self.right = None 8 | 9 | class Solution: 10 | def check(self, p, q): 11 | if p == None and q == None: 12 | return True 13 | elif p == None or q == None: 14 | return False 15 | elif p.val != q.val: 16 | return False 17 | return True 18 | 19 | def isSameTree(self, p: TreeNode, q: TreeNode) -> bool: 20 | deq = deque([(p, q)]) 21 | while deq: 22 | p, q = deq.popleft() 23 | # print(p.val, q.val) 24 | if not self.check(p, q): 25 | return False 26 | if p: 27 | deq.append([p.left, q.left]) 28 | deq.append([p.right, q.right]) 29 | return True -------------------------------------------------------------------------------- /LeetCode/0100.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | class Solution: 9 | def isSameTree(self, p: TreeNode, q: TreeNode) -> bool: 10 | if p == None and q == None: 11 | return True 12 | elif p == None or q == None: 13 | return False 14 | elif p.val != q.val: 15 | return False 16 | 17 | leftTree = self.isSameTree(p.left, q.left) 18 | rightTree = self.isSameTree(p.right, q.right) 19 | 20 | return leftTree and rightTree -------------------------------------------------------------------------------- /LeetCode/0101.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | class Solution: 9 | def isMirror(self, n1, n2): 10 | if n1 == None and n2 == None: 11 | return True 12 | elif n1 == None or n2 == None: 13 | return False 14 | return (n1.val==n2.val) and self.isMirror(n1.left, n2.right) and self.isMirror(n1.right, n2.left) 15 | 16 | def isSymmetric(self, root: TreeNode) -> bool: 17 | return self.isMirror(root, root) 18 | -------------------------------------------------------------------------------- /LeetCode/0102.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | # Typical module of Binary Tree Travesal 9 | class Solution: 10 | def levelOrder(self, root: TreeNode) -> List[List[int]]: 11 | levels = [] 12 | if root == None: 13 | return levels 14 | 15 | def helper(node, level): 16 | if level == len(levels): 17 | levels.append([]) 18 | 19 | levels[level].append(node.val) 20 | 21 | if node.left: 22 | helper(node.left, level+1) 23 | if node.right: 24 | helper(node.right, level+1) 25 | 26 | helper(root, 0) 27 | return levels -------------------------------------------------------------------------------- /LeetCode/0104.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 
2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | class Solution: 9 | def maxDepth(self, root: TreeNode) -> int: 10 | if not root: 11 | return 0 12 | left = self.maxDepth(root.left) 13 | right = self.maxDepth(root.right) 14 | return max(left, right) + 1 -------------------------------------------------------------------------------- /LeetCode/0107.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | class Solution: 9 | def levelOrderBottom(self, root: TreeNode) -> List[List[int]]: 10 | levels = [] 11 | if root == None: 12 | return levels 13 | 14 | def helper(node, level): 15 | if len(levels) == level: 16 | levels.append([]) 17 | 18 | levels[level].append(node.val) 19 | 20 | if node.left: 21 | helper(node.left, level+1) 22 | if node.right: 23 | helper(node.right, level+1) 24 | 25 | helper(root, 0) 26 | return levels[::-1] -------------------------------------------------------------------------------- /LeetCode/0110-2.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def calLevel(self, node): 3 | if not node: return 0 4 | left = self.calLevel(node.left) 5 | if left == -1: return -1 6 | right = self.calLevel(node.right) 7 | if right == -1: return -1 8 | return max(left, right) + 1 if abs(left-right)<=1 else -1 9 | 10 | def isBalanced(self, root: TreeNode) -> bool: 11 | return self.calLevel(root) != -1 12 | -------------------------------------------------------------------------------- /LeetCode/0110.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | class Solution: 9 | def calLevel(self, node): 10 | return 0 if not node else max(self.calLevel(node.left), self.calLevel(node.right)) + 1 11 | 12 | def isBalanced(self, root: TreeNode) -> bool: 13 | if not root: 14 | return True 15 | leftLevel = self.calLevel(root.left) 16 | rightLevel = self.calLevel(root.right) 17 | if abs(leftLevel-rightLevel) > 1: 18 | return False 19 | else: 20 | return self.isBalanced(root.left) and self.isBalanced(root.right) 21 | -------------------------------------------------------------------------------- /LeetCode/0111-2.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def minDepth(self, root): 3 | if not root: 4 | return 0 5 | 6 | children = [root.left, root.right] 7 | # if we're at leaf node 8 | if not any(children): 9 | return 1 10 | 11 | min_depth = float('inf') 12 | for c in children: 13 | if c: 14 | min_depth = min(self.minDepth(c), min_depth) 15 | return min_depth + 1 -------------------------------------------------------------------------------- /LeetCode/0111.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 
2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | class Solution: 9 | # flag = True 10 | # def calMinDepth(self, root): 11 | # if not root: 12 | # return 0 13 | 14 | # left = self.calMinDepth(root.left) 15 | # right = self.calMinDepth(root.right) 16 | # return min(left, right) + 1 17 | 18 | def calMaxDepth(self, root): 19 | if not root: 20 | return 0 21 | left = self.calMaxDepth(root.left) 22 | right = self.calMaxDepth(root.right) 23 | return max(left, right) + 1 24 | 25 | # def minDepth(self, root: TreeNode) -> int: 26 | # if self.calMaxDepth(root) == 2: 27 | # return 2 28 | # else: 29 | # return self.calMinDepth(root) 30 | def minDepth(self, root: TreeNode) -> int: 31 | if self.calMaxDepth(root) == 2: 32 | return 2 33 | 34 | if not root: 35 | return 0 36 | if not root.left and not root.right: 37 | return 1 38 | left = self.minDepth(root.left) 39 | right = self.minDepth(root.right) 40 | return min(left, right) + 1 41 | 42 | 43 | -------------------------------------------------------------------------------- /LeetCode/0121-2.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | class Solution: 4 | def maxProfit(self, prices: List[int]) -> int: 5 | minValue = sys.maxsize 6 | maxProfits = 0 7 | for i in range(len(prices)): 8 | if prices[i] < minValue: 9 | minValue = prices[i] 10 | else: 11 | if prices[i]-minValue > maxProfits: 12 | maxProfits = prices[i] - minValue 13 | return maxProfits -------------------------------------------------------------------------------- /LeetCode/0121.py: -------------------------------------------------------------------------------- 1 | # Running Time Exceed 2 | class Solution: 3 | def maxProfit(self, prices: List[int]) -> int: 4 | maxProf = 0 5 | for i in range(len(prices)): 6 | for j in range(i, len(prices)): 7 | curProf = prices[j]-prices[i] 8 | if curProf > maxProf: 9 | maxProf = curProf 10 | return maxProf -------------------------------------------------------------------------------- /LeetCode/0153.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def findMin(self, nums): 3 | mlength = len(nums) 4 | if mlength == 0: 5 | return -1 6 | elif mlength == 1: 7 | return nums[0] 8 | 9 | left = 0 10 | right = mlength - 1 11 | while left <= right: 12 | mid = (left + right) >> 1 13 | 14 | if mid == mlength - 1: # ascending order 15 | return nums[0] 16 | 17 | if nums[mid] > nums[mid+1]: 18 | return nums[mid+1] 19 | else: 20 | if nums[left] > nums[mid]: 21 | right = mid - 1 22 | else: 23 | left = mid + 1 24 | return nums[0] 25 | -------------------------------------------------------------------------------- /LeetCode/0154.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def findMin(self, nums): 3 | mlength = len(nums) 4 | if mlength == 0: 5 | return -1 6 | 7 | left = 0 8 | right = mlength - 1 9 | while left <= right: 10 | mid = (left + right) >> 1 11 | if mid == mlength - 1: 12 | return nums[0] 13 | 14 | if nums[mid] > nums[mid+1]: 15 | return nums[mid+1] 16 | else: 17 | if nums[left] > nums[mid]: 18 | right = mid - 1 19 | elif nums[left] == nums[mid]: 20 | left += 1 21 | elif nums[left] < nums[mid]: 22 | left = mid + 1 23 | return nums[0] 24 | 25 | # There is some problems of this file -------------------------------------------------------------------------------- /LeetCode/0167.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number. 3 | 4 | The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. 5 | 6 | Note: 7 | 8 | Your returned answers (both index1 and index2) are not zero-based. 9 | You may assume that each input would have exactly one solution and you may not use the same element twice. 10 | 11 | # Solved on 09/10/2019 by William""" 12 | 13 | def twoSum(numbers, target): 14 | values = {} 15 | for i in range(len(numbers)): 16 | needed_value = target - numbers[i] 17 | # print(needed_value, values) 18 | # if needed_value < 0: 19 | # raise Exception("Match Not Found!") 20 | if needed_value in values: 21 | return [values[needed_value]+1, i+1] 22 | values[numbers[i]] = i 23 | raise Exception("Match Not Found!") 24 | 25 | numbers = [2,7,11,15] 26 | target = 9 27 | print(twoSum(numbers, target)) -------------------------------------------------------------------------------- /LeetCode/0204.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def countPrimes(self, n: int) -> int: 3 | 4 | if n < 2: 5 | return 0 6 | 7 | isPrime = [1] * n 8 | isPrime[0] = isPrime[1] = 0 9 | 10 | for i in range(2, int(n ** 0.5) + 1): 11 | if isPrime[i]: 12 | isPrime[i * i:n:i] = [0] * ((n - 1 - i * i) // i + 1) 13 | 14 | return sum(isPrime) 15 | -------------------------------------------------------------------------------- /LeetCode/0222-2.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 
2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | class Solution: 9 | def countLevel(self, node): 10 | level = 0 11 | if node == None: 12 | return 0 13 | 14 | while node != None: 15 | level += 1 16 | node = node.left 17 | return level 18 | 19 | def countNodes(self, root: TreeNode) -> int: 20 | if root == None: 21 | return 0 22 | 23 | leftLevel = self.countLevel(root.left) 24 | rightLevel = self.countLevel(root.right) 25 | # print(leftLevel, rightLevel) 26 | 27 | if leftLevel == rightLevel: 28 | return self.countNodes(root.right) + (1< int: 10 | if root == None: 11 | return 0 12 | return self.countNodes(root.left) + self.countNodes(root.right) + 1 -------------------------------------------------------------------------------- /LeetCode/0240-2.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def searchMatrix(self, matrix, target): 3 | if len(matrix) == 0 or len(matrix[0]) == 0: 4 | return False 5 | 6 | left, top = 0, 0 7 | right, down = len(matrix[0])-1, len(matrix)-1 8 | 9 | if target < matrix[top][left] or target > matrix[down][right]: 10 | return False 11 | 12 | while left<=len(matrix[0])-1 and down>=0: 13 | # print(down, left) 14 | curPoint = matrix[down][left] 15 | if curPoint == target: 16 | return True 17 | elif curPoint > target: 18 | down -= 1 19 | elif curPoint < target: 20 | left += 1 21 | return False 22 | 23 | # [[1,4,7,11,15],[2,5,8,12,19],[3,6,9,16,22],[10,13,14,17,24],[18,21,23,26,30]] 24 | # 20 -------------------------------------------------------------------------------- /LeetCode/0240.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def binary_search(self, matrix, vertical, i, target): 3 | lo = 0 4 | hi = len(matrix[0])-1 if vertical else len(matrix)-1 5 | 6 | if vertical: 7 | while lo <= hi: 8 | mid = (lo + hi) >> 1 9 | if matrix[i][mid] == target: 10 | return True 11 | elif matrix[i][mid] > target: 12 | hi = mid - 1 13 | else: 14 | lo = mid + 1 15 | return False 16 | else: 17 | while lo <= hi: 18 | mid = (lo + hi) >> 1 19 | if matrix[mid][i] == target: 20 | return True 21 | elif matrix[mid][i] > target: 22 | hi = mid - 1 23 | else: 24 | lo = mid + 1 25 | return False 26 | 27 | 28 | def searchMatrix(self, matrix, target): 29 | if not matrix: 30 | return False 31 | # print('Checkpoint One Pass') # Pass 32 | 33 | for i in range(min(len(matrix), len(matrix[0]))): 34 | # print('i = {}'.format(i)) 35 | vertical_found = self.binary_search(matrix, True, i, target) 36 | horizontal_found = self.binary_search(matrix, False, i, target) 37 | # print('Checkpoint Two:') 38 | # print(vertical_found, horizontal_found) 39 | 40 | if vertical_found or horizontal_found: 41 | return True 42 | return False 43 | -------------------------------------------------------------------------------- /LeetCode/0270.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 
2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | import sys 9 | 10 | class Solution: 11 | def __init__(self): 12 | self.clValue = sys.maxsize 13 | self.tag = 0 14 | 15 | def closestValue(self, root: TreeNode, target: float) -> int: 16 | # print(self.clValue, self.tag) 17 | if root == None: 18 | return self.tag 19 | else: 20 | if self.clValue > abs(root.val-target): 21 | self.clValue = abs(root.val-target) 22 | self.tag = root.val 23 | 24 | self.closestValue(root.left, target) 25 | self.closestValue(root.right, target) 26 | 27 | return self.tag 28 | 29 | 30 | -------------------------------------------------------------------------------- /LeetCode/0278.py: -------------------------------------------------------------------------------- 1 | # The isBadVersion API is already defined for you. 2 | # @param version, an integer 3 | # @return a bool 4 | # def isBadVersion(version): 5 | 6 | # Binary Search 0035, 0278 7 | 8 | class Solution: 9 | # Linear Search O(n) 10 | def linearSearch(self, n): 11 | for i in range(n): 12 | if isBadVersion(i) == True: 13 | return i 14 | 15 | # Binary Search O(logn) 16 | def firstBadVersion(self, n): 17 | """ 18 | :type n: int 19 | :rtype: int 20 | """ 21 | left = 0 22 | right = n - 1 23 | res = -1 24 | while left<=right: 25 | mid = int((left+right)/2) 26 | if isBadVersion(mid) == False: 27 | left = mid + 1 28 | if isBadVersion(mid) == True: 29 | right = mid - 1 30 | res = left 31 | return res 32 | -------------------------------------------------------------------------------- /LeetCode/0367.py: -------------------------------------------------------------------------------- 1 | # Given a positive integer num, write a function which returns True if num is a perfect square else False. 2 | 3 | class Solution: 4 | def isPerfectSquare(self, num): 5 | left = 0 6 | right = num // 2 + 1 7 | while left < right: 8 | mid = (left + right + 1) >> 1 9 | msquare = mid * mid 10 | if msquare == num: 11 | return True 12 | elif msquare > num: 13 | right = mid - 1 14 | elif msquare < num: 15 | left = mid 16 | return False -------------------------------------------------------------------------------- /LeetCode/0374.py: -------------------------------------------------------------------------------- 1 | # The guess API is already defined for you. 
2 | # @return -1 if my number is lower, 1 if my number is higher, otherwise return 0 3 | # def guess(num: int) -> int: 4 | 5 | # Binary Search 0035, 0278, 0374 6 | 7 | class Solution: 8 | def guessNumber(self, n): 9 | left = 0 10 | right = n - 1 11 | res = -1 12 | while left<=right: 13 | mid = int( left + (right - left) / 2) 14 | if guess(mid) == 0: 15 | return mid 16 | elif guess(mid) == 1: 17 | left = mid + 1 18 | elif guess(mid) == -1: 19 | right = mid - 1 20 | res = left 21 | return res 22 | -------------------------------------------------------------------------------- /LeetCode/0410.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def splitArray(self, nums: List[int], m: int) -> int: 3 | low, high = max(nums), sum(nums) 4 | 5 | while low <= high: 6 | mid = (low + high) >> 1 7 | curTotal = 0 8 | count = 1 9 | for element in nums: 10 | curTotal += element 11 | if curTotal > mid: 12 | count += 1 13 | curTotal = element 14 | if count > m: 15 | low = mid + 1 16 | else: 17 | high = mid - 1 18 | return low -------------------------------------------------------------------------------- /LeetCode/0633.py: -------------------------------------------------------------------------------- 1 | # Given a non-negative integer c, your task is to decide whether there're two integers a and b such that a2 + b2 = c. 2 | 3 | class Solution: 4 | def msquare(self, c): 5 | left = 0 6 | right = c // 2 + 1 7 | while left < right: 8 | mid = (left + right + 1) >> 1 9 | msquare = mid * mid 10 | if msquare == c: 11 | left = mid 12 | elif msquare > c: 13 | right = mid - 1 14 | elif msquare < c: 15 | left = mid 16 | return left 17 | 18 | def judgeSquareSum(self, c): 19 | # get the binary sqrt of c 20 | left = 0 21 | right = self.msquare(c) 22 | while left <= right: 23 | msum = left * left + right * right 24 | if msum == c: 25 | return True 26 | elif msum > c: 27 | right -= 1 28 | elif msum < c: 29 | left += 1 30 | return False 31 | 32 | 33 | c = 4 34 | sol = Solution() 35 | print(sol.judgeSquareSum(c)) 36 | 37 | -------------------------------------------------------------------------------- /LeetCode/0637.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | class Solution: 9 | def averageOfLevels(self, root: TreeNode) -> List[float]: 10 | levels = [] 11 | if root == None: 12 | return levels 13 | 14 | def helper(node, level): 15 | if level == len(levels): 16 | levels.append([]) 17 | 18 | levels[level].append(node.val) 19 | 20 | if node.left: 21 | helper(node.left, level+1) 22 | if node.right: 23 | helper(node.right, level+1) 24 | 25 | helper(root, 0) 26 | 27 | avgLevels = [0] * len(levels) 28 | 29 | for i in range(len(levels)): 30 | curSum = 0 31 | for j in range(len(levels[i])): 32 | curSum += levels[i][j] 33 | avgLevels[i] = curSum / len(levels[i]) 34 | 35 | return avgLevels 36 | -------------------------------------------------------------------------------- /LeetCode/0653.py: -------------------------------------------------------------------------------- 1 | """ 2 | Given a Binary Search Tree and a target number, 3 | return true if there exist two elements in the BST such that their sum is equal to the given target. 
4 | 5 | # Solved on 09/10/2019 by William""" 6 | 7 | # BFS 8 | 9 | from collections import deque 10 | 11 | def findTarget(root, k): 12 | queue = deque() 13 | queue.append(root) 14 | nums = [] 15 | 16 | while queue: 17 | Queue = queue.popleft() 18 | 19 | if k - Queue.val in nums: 20 | return True 21 | nums.append(Queue.val) 22 | 23 | if Queue.left: 24 | queue.append(Queue.left) 25 | if Queue.right: 26 | queue.append(Queue.right) 27 | 28 | return False -------------------------------------------------------------------------------- /LeetCode/0655.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | class Solution: 9 | def calLevel(self, root): 10 | return 0 if root == None else max(self.calLevel(root.left), self.calLevel(root.right)) + 1 11 | 12 | def updateOutput(self, node, row, left, right): 13 | if node == None: 14 | return 15 | mid = (left + right) >> 1 16 | self.res[row][mid] = str(node.val) 17 | self.updateOutput(node.left, row+1, left, mid-1) 18 | self.updateOutput(node.right, row+1, mid+1, right) 19 | 20 | def printTree(self, root: TreeNode) -> List[List[str]]: 21 | height = self.calLevel(root) 22 | width = (1< TreeNode: 10 | if root == None: 11 | root = TreeNode(val) 12 | return root 13 | 14 | if root.val < val: 15 | root.right = self.insertIntoBST(root.right, val) 16 | elif root.val > val: 17 | root.left = self.insertIntoBST(root.left, val) 18 | 19 | return root 20 | -------------------------------------------------------------------------------- /LeetCode/0704.py: -------------------------------------------------------------------------------- 1 | # Given a sorted (in ascending order) integer array nums of n elements and a target value, 2 | 3 | # write a function to search target in nums. If target exists, then return its index, otherwise return -1. 4 | 5 | class Solution: 6 | def search(self, nums, target): 7 | res = [-1, -1] 8 | 9 | # find left bound 10 | left = 0 11 | right = len(nums) 12 | while left < right: 13 | mid = int(left + (right - left) / 2) 14 | if nums[mid] == target: 15 | right = mid 16 | elif nums[mid] > target: 17 | right = mid 18 | elif nums[mid] < target: 19 | left = mid + 1 20 | 21 | if left == len(nums): 22 | res[0] = -1 23 | elif nums[left] == target: 24 | res[0] = left 25 | else: 26 | res[0] = -1 27 | 28 | # find right bound 29 | left = 0 30 | right = len(nums) 31 | while left target: 36 | right = mid 37 | elif nums[mid] < target: 38 | left = mid + 1 39 | 40 | if left == 0: 41 | res[1] = -1 42 | elif nums[left-1] == target: 43 | res[1] = left - 1 44 | else: 45 | res[1] = -1 46 | 47 | return res 48 | 49 | # Another result 50 | # Given a sorted (in ascending order) integer array nums of n elements and a target value, 51 | 52 | # write a function to search target in nums. If target exists, then return its index, otherwise return -1. 
53 | 54 | class Solution: 55 | def search(self, nums, target): 56 | res = [-1, -1] 57 | 58 | # find left bound 59 | left = 0 60 | right = len(nums) - 1 61 | while left<=right: 62 | mid = int(left + (right - left) / 2) 63 | if nums[mid] == target: 64 | right = mid - 1 65 | elif nums[mid] > target: 66 | right = mid - 1 67 | elif nums[mid] < target: 68 | left = mid + 1 69 | 70 | # if left == len(nums) - 1 or right == 0: 71 | # res[0] = -1 72 | # else: 73 | # res[0] = right + 1 74 | if right == len(nums) - 1: 75 | res[0] = -1 76 | elif nums[right+1] == target: 77 | res[0] = right+1 78 | else: 79 | res[0] = -1 80 | 81 | # find right bound 82 | left = 0 83 | right = len(nums) - 1 84 | while left<=right: 85 | mid = int(left + (right - left) / 2) 86 | if nums[mid] == target: 87 | left = mid + 1 88 | elif nums[mid] > target: 89 | right = mid - 1 90 | elif nums[mid] < target: 91 | left = mid + 1 92 | 93 | if left == 0: 94 | res[1] = -1 95 | elif nums[left-1] == target: 96 | res[1] = left - 1 97 | else: 98 | res[1] = -1 99 | 100 | return res 101 | 102 | 103 | 104 | -------------------------------------------------------------------------------- /LeetCode/0812.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def largestTriangleArea(self, points: List[List[int]]) -> float: 3 | maxArea = 0 4 | for i in range(len(points)): 5 | for j in range(i, len(points)): 6 | for k in range(j, len(points)): 7 | curArea = self.Shoelace(points, i, j, k) 8 | if curArea > maxArea: 9 | maxArea = curArea 10 | return maxArea 11 | 12 | def Shoelace(self, points, i, j, k): 13 | curArea = 0.5* abs((points[i][0]*points[j][1] 14 | +points[j][0]*points[k][1] 15 | +points[k][0]*points[i][1]) - 16 | (points[i][1]*points[j][0] 17 | +points[j][1]*points[k][0] 18 | +points[k][1]*points[i][0])) 19 | print(curArea) 20 | return curArea -------------------------------------------------------------------------------- /LeetCode/0976.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def largestPerimeter(self, A): 3 | A.sort() 4 | A = A[::-1] # decending order of A 5 | 6 | for i in range(len(A)): 7 | a, b, c = A[i], A[i+1], A[i+2] 8 | if a+b>c and a+c>b and b+c>a: 9 | return (A[i] + A[i+1] + A[i+2]) 10 | if i+2 == len(A)-1: 11 | return 0 -------------------------------------------------------------------------------- /LeetCode/0993-2.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | class Solution: 9 | def isCousins(self, root: TreeNode, x: int, y: int) -> bool: 10 | parents = {} 11 | depths = {} 12 | def dfs(node, par = None): 13 | if node: 14 | depths[node.val] = 1 + depths[par.val] if par else 0 15 | parents[node.val] = par 16 | dfs(node.left, node) 17 | dfs(node.right, node) 18 | dfs(root) 19 | return depths[x]==depths[y] and parents[x]!=parents[y] 20 | 21 | 22 | -------------------------------------------------------------------------------- /LeetCode/0993.py: -------------------------------------------------------------------------------- 1 | # Definition for a binary tree node. 
2 | # class TreeNode: 3 | # def __init__(self, x): 4 | # self.val = x 5 | # self.left = None 6 | # self.right = None 7 | 8 | # There are some errors of this code 9 | 10 | class Solution: 11 | def __init__(self): 12 | self.cnt = True # true represents left, false represent right 13 | self.cnt1 = 0 14 | self.cnt2 = 0 15 | 16 | def findTarget(self, root, target, cnt): 17 | if root == None: 18 | return False, self.cnt1 if cnt else self.cnt2 19 | 20 | if cnt: 21 | self.cnt1 += 1 22 | elif not cnt: 23 | self.cnt2 += 1 24 | 25 | if root.val == target: 26 | return True 27 | elif root.val > target: 28 | self.findTarget(root.left, target, cnt) 29 | else: 30 | self.findTarget(root.right, target, cnt) 31 | 32 | def isCousins(self, root: TreeNode, x: int, y: int) -> bool: 33 | if root == None: 34 | return False 35 | 36 | # make sure x < y 37 | if x > y: 38 | x, y = y, x 39 | 40 | # Judge the interval (binary search) 41 | if x > root.val: 42 | self.isCousins(root.right, x, y) 43 | elif y < root.val: 44 | self.isCousins(root.left, x, y) 45 | 46 | xBound, cnt1 = self.findTarget(root.left, x, True) 47 | yBound, cnt2 = self.findTarget(root.right, y, False) 48 | 49 | if xBound and yBound and cnt1==cnt2 and cnt1!=0 and cnt2!=0: 50 | return True 51 | else: 52 | return False 53 | -------------------------------------------------------------------------------- /LeetCode/1103.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def distributeCandies(self, candies, num_people): 3 | disList = [0] * num_people 4 | i = 0 5 | res = candies 6 | 7 | while True: 8 | i += 1 9 | if res > i: 10 | mindex = int((i-1)%num_people) 11 | num_people[mindex] += i 12 | res = res - i 13 | else: 14 | mindex = int(i%num_people) 15 | num_people[mindex] += res 16 | return disList 17 | -------------------------------------------------------------------------------- /MathematicalFundations/Conv Optimization Review (Cont'd).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/MathematicalFundations/Conv Optimization Review (Cont'd).pdf -------------------------------------------------------------------------------- /MathematicalFundations/Conv Optimization Review.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/MathematicalFundations/Conv Optimization Review.pdf -------------------------------------------------------------------------------- /MathematicalFundations/Gaussian Process.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/MathematicalFundations/Gaussian Process.pdf -------------------------------------------------------------------------------- /MathematicalFundations/Hidden Markov Network.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/MathematicalFundations/Hidden Markov Network.pdf -------------------------------------------------------------------------------- /MathematicalFundations/Linear Algebra Review.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/MathematicalFundations/Linear Algebra Review.pdf -------------------------------------------------------------------------------- /MathematicalFundations/Multivariate Gaussian (Cont'd).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/MathematicalFundations/Multivariate Gaussian (Cont'd).pdf -------------------------------------------------------------------------------- /MathematicalFundations/Multivariate Gaussian.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/MathematicalFundations/Multivariate Gaussian.pdf -------------------------------------------------------------------------------- /MathematicalFundations/Prob Review.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WilliamYi96/Machine-Learning/75206fada2f7cf61fb3598559d3edde791b95a9f/MathematicalFundations/Prob Review.pdf -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Currently there is no plan to update this repo. 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib --------------------------------------------------------------------------------
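One packaging note: several scripts in this repository import numpy (for example two-five.py and win-2players.py under Gaming-Under-Uncertainty), which requirements.txt does not list. Assuming those scripts are meant to run in a fresh environment built from this file, a fuller requirements sketch would be:

matplotlib
numpy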