\xde\xc4\x00\xf0\xec\x94)>\xa9\xdf\xe4\x1a\xd7\xc36>\x83\xfb\xb2~\xe5\xa3\x14>lY\xf5\xa2\xd1\xaf`\xbe\xd3K\x82V\xa2\xdd\x81\xbe\x1c\x9d\xcb\xa7\x90\x8e\x96\xbe\x0b\x1e\xa3w\xeb\x1f\xa2\xbee\x92\xc7\xda[`\x92\xber\x9a\xaaS\xfdz\xb9>\xda\xa3\x90\xe8\x81\xdb\xd9>Z\xcd\xcav\x94\xa7\xec>\xf2\x84\x81\xb2\x93\x86\xf4>C\xda\xfd\xbf\xe8\x94\xe9>\x82\xa4\x12x\x17\x83\xfa\xbe\xb1\xc7\xcay\x93\xdc\x1a\xbf\xd43\x98\xe9*;*\xbf=\x00\xe0\xb5_\x9c0\xbf{\x8c\x98\xdb\x1a\xa3%\xbf\x83\xbcx\xf3"\xd6!?\x7f\xc6\x88\xa8j\xfaC?\x90h\xf7&0NQ?\xb8\x11\x07+\xe6ES?\x13(\xa7\x85gbH?\xd6H\xc3\xe1kH*\xbf>\xeb\x06k\x012U\xbfzq\xbd\xa1\xc3s`\xbf/F\xc3r\x8a\x10`\xbfx\x97D\xa6\xe6\xf6R\xbf\xfbm\xd7X/\xf3\xc9>a\x87M\x98\xb1\xe3O?u\x1f\xb7\x05\xed\x86V?r\x81\xd9\x88IDS?8\xa0\xb6\xfd\xf7\xb5D?\xc5c\x86\x14\xc3\xf2\x14?\xf3\x14V\x9bg\xd90\xbf\xfb\xc2\x0f\xf9\t16\xbf\x11\x10J\xa6\xc3\xa30\xbf"\xd2HB\xcd\x08 \xbfZ2\xa6\x89u\xaf\xf7\xbeEkd\x94s\x97\xf8>Xv\xff\x89Di\xff>\x9c.\xc0zr\xb5\xf4>\x13Y\xe6\xa9\x9a\xb4\xe1>\x9b\xcc\xcfX\x97\n\xbd>\rU\xe4\xcc\x97\xf2\xa7\xbe\nQh\xf7\xfa\xdf\xaf\xbe\xa1\xf0\x1bP\xf1\x92\xa2\xbe3(\x8d\xee\xe7\xfd\x8b\xbe\x8f\\C\x05(\x80$\xbc\xd3y\x8aG]pJ\xbc\xd3\x9dx\x00\xfegd\xbc\xa8\xb1\xbd\xa7\x9c5A<@\x81\xfd\x13\x99]\xb0<\x01\xd5\xa9V\x07D\xde<\x82\xb8\xa3\x13^\x18\x01=n\xc3\xe7\xec\xba\x1f\x18=\x93\xc4\x7fT\xd0\xe1\n=\xde\xa8I9}\xc1R\xbd\xf6Cxd\xb1\x13\x80\xbd\xe3\xb6S\xcf\x9c\xd5\x9f\xbd\xb1\xb2n\xa8\xe1/\xb4\xbdt\x05\xc4\xef\te\xb2\xbd\x11<;\x14-\xd0\xdd=\x08\xc75\x82\x9f\x8c\x08>\xc0\xdf\x0ed\xd6Z%>S?2\xd2\xfe\x118>\x19|\xbf\xd2\x95)9>\x8a\xf9\x9b0j\xe8O\xbe\x10;\x9d\xd65\xe3z\xbe\xad\x8a\xa3\xe2\xa5\xa5\x94\xbel\xde\x9b\xc7u\x88\xa4\xbe\x80\x1f\x10fh\xcb\xa5\xbe\xae 
\xf88\xd9e\xa5>\xed!\x0e\x87\xb5\x0f\xd5>>\xc4\xcbG\xc0\xc5\xec>\x82Q\xd3\x8d\xc3!\xf9>\xd6\x0fu"\xd0\x84\xf9>\xa4\x14)-\x0e5\xdb\xbe\xffr.\xcd\xf9~\x17\xbf\xba\xda\x8a\xf5\xbc\xe2,\xbfI\x13\xca\xec\x1d\x1c6\xbf\x85\xad\x83bL\xc24\xbf\xbcjKO(\xbd\xfa\xbe\xf8\xba\xd6\x87!\x8aB?\x15\xda*\x07\xc1\xe0T?8n\xc4\x1fL\xff[?\xa8\xae\x0e\x88\xa5\xccW?\x123|\x00\x84t7?\xc1\xabc\x8e\xf9uT\xbfe\x86\x15\xdai\xb4e\xbf\xaa\x1fg\xfa\xff\x88i\xbf)\xcei\xe4\xaabc\xbf\x94\xf50;/\xa0H\xbf\xbb\xa0x\x18\xd3\xeeN?\xfa,\xd3\xab\x893`?\xda\x02\xde\xc3@\xc8`?\xd6\xe8\xf8\x11j\x8dV?\xd0\xb9\xd3\x96\xe6}>?\x95\xd9\xcb;\xb7\x9e.\xbf\x1ex$\xb3)SA\xbf\xea.4\xc9\xad\xcb?\xbf\xce+\xcd\x83\xbd\xcc2\xbf\x03\x9a\xd6$\xb6\xea\x18\xbf\xe1\x912\xb3\x9de\xf1>~\xcc#bfr\n? i\xdd\x0b\x0f\xb4\x05?\xc9\x94\xd4\xec\x83\x82\xf6>\xea\x7f\x9b\x8be\xfc\xdb>\xea\xf5l\x91mU\x80\xbe\xf6\x1b\xe4h\x0c\xa9\xbc\xbe\xb0.\xac\'\xcdV\xb5\xbe\x9f\xf0\xf5\x85\xdbb\xa3\xbecOK7J\x05\x86\xbe\x82\xbc1Dnh>\xbc\xfb~2\xfd\xb3f^\xbc\x92!R\xcd\xbe\'h\xbc\x8e\xf2"\xba\x1ft\x97
\x85(K~\xe1,6>=G\xe22\x99\x02B>\xb8\xb2\xb6\xa8c)\x1e\xbe\x02w\x01\x81\xa2\x98q\xbe\xca\xd7\xd3\xfaQ)\x91\xbe\xa1\xa5\xe6F\xe2\x97\xa4\xbe\xcc\xd3Y\x98\xf2\xb0\xae\xbe\xd1\xf1=\xb4\x10\xa5\x8d\xbe\xdb\xfb\x891\x04\x18\xcd>\x06\xf9yR\x050\xea>p\x83I\x93\xee\x88\xfb>~9\xc6c\xf4{\x02?\xbb\x15\xc7u=\xba\xf0>-\xb3\xd2\x84\xaa\xbb\x10\xbf\xbbq\xb4o\xda\xb9,\xbf\xcb\xaa:v\xb8\x85:\xbf\xd2e^\x9be\xb2?\xbf\x06\x9eI\x82\x0c\x031\xbf\x1aT\xbfN\xba\x17:?\xff!/\x1d\x19\x9cV?\x96\x18\x1e\x7f\x0bib?\xfc\xbc4Y\x88nc?\xb7w\xa6\x90\x1b|U?\x17\xb9gL\x9d\xf4I\xbf\xde\xd0\xbe\xac\x1fxi\xbf\xf9\xdakJekr\xbfL\xb5\x1d\xba\xfc\x15q\xbf\xd5\xe8\xc7\xb6\xdc5b\xbf\xe2k-\x16\xa8\xb6:?1\x83\x81\x1d\xb1rd?:\x95?~\x0e\x8ej?\x0c\xdfv\xdf3\x97e?(iv\xe7$`U?\x84\xe3\x8c3UH\xfb>lY\xc8\xdd3AG\xbfv\xa8_q\xd6\x90K\xbfQU\xdc\x92\xb1\xa1C\xbf\xeb\xb3h\xe5\xf4\xa51\xbf\xcb\xf8\xc4\rx\xa3\x00\xbf:@\x9e\xeb\xf1\x89\x12?\xef0[\x00c\x95\x14?4=\xbci\x11\xb6\t?\x84\\\xd3-~\xae\xf4>6@\x17ER\xa6\xca>,\'p\x03|V\xc4\xbe\xc7\x83\x9bW\x0e\x14\xc6\xbe\xf1\x80)\xf9wC\xb8\xbe\xd7i\xb3\xd8\xadK\xa1\xbe\xc5\x02\x0e\xbf\x1f8x\xbe\xebQi\x1ecnS\xbcX\x9b*\x81L\xf1j\xbc\xc7\xe9\xf1;4\xddp \xbccOK7J\x05\x86\xbe\x9f\xf0\xf5\x85\xdbb\xa3\xbe\xb0.\xac\'\xcdV\xb5\xbe\xf6\x1b\xe4h\x0c\xa9\xbc\xbe\xea\xf5l\x91mU\x80\xbe\xea\x7f\x9b\x8be\xfc\xdb>\xc9\x94\xd4\xec\x83\x82\xf6> 
i\xdd\x0b\x0f\xb4\x05?~\xcc#bfr\n?\xe1\x912\xb3\x9de\xf1>\x03\x9a\xd6$\xb6\xea\x18\xbf\xce+\xcd\x83\xbd\xcc2\xbf\xea.4\xc9\xad\xcb?\xbf\x1ex$\xb3)SA\xbf\x95\xd9\xcb;\xb7\x9e.\xbf\xd0\xb9\xd3\x96\xe6}>?\xd6\xe8\xf8\x11j\x8dV?\xda\x02\xde\xc3@\xc8`?\xfa,\xd3\xab\x893`?\xbb\xa0x\x18\xd3\xeeN?\x94\xf50;/\xa0H\xbf)\xcei\xe4\xaabc\xbf\xaa\x1fg\xfa\xff\x88i\xbfe\x86\x15\xdai\xb4e\xbf\xc1\xabc\x8e\xf9uT\xbf\x123|\x00\x84t7?\xa8\xae\x0e\x88\xa5\xccW?8n\xc4\x1fL\xff[?\x15\xda*\x07\xc1\xe0T?\xf8\xba\xd6\x87!\x8aB?\xbcjKO(\xbd\xfa\xbe\x85\xad\x83bL\xc24\xbfI\x13\xca\xec\x1d\x1c6\xbf\xba\xda\x8a\xf5\xbc\xe2,\xbf\xffr.\xcd\xf9~\x17\xbf\xa4\x14)-\x0e5\xdb\xbe\xd6\x0fu"\xd0\x84\xf9>\x82Q\xd3\x8d\xc3!\xf9>>\xc4\xcbG\xc0\xc5\xec>\xed!\x0e\x87\xb5\x0f\xd5>\xae \xf88\xd9e\xa5>\x80\x1f\x10fh\xcb\xa5\xbel\xde\x9b\xc7u\x88\xa4\xbe\xad\x8a\xa3\xe2\xa5\xa5\x94\xbe\x10;\x9d\xd65\xe3z\xbe\x8a\xf9\x9b0j\xe8O\xbe\x19|\xbf\xd2\x95)9>S?2\xd2\xfe\x118>\xc0\xdf\x0ed\xd6Z%>\x08\xc75\x82\x9f\x8c\x08>\x11<;\x14-\xd0\xdd=t\x05\xc4\xef\te\xb2\xbd\xb1\xb2n\xa8\xe1/\xb4\xbd\xe3\xb6S\xcf\x9c\xd5\x9f\xbd\xf6Cxd\xb1\x13\x80\xbd\xde\xa8I9}\xc1R\xbd\x93\xc4\x7fT\xd0\xe1\n=n\xc3\xe7\xec\xba\x1f\x18=\x82\xb8\xa3\x13^\x18\x01=\x01\xd5\xa9V\x07D\xde<@\x81\xfd\x13\x99]\xb0<\xa8\xb1\xbd\xa7\x9c5A<\xd3\x9dx\x00\xfegd\xbc\xd3y\x8aG]pJ\xbc\x8f\\C\x05(\x80$\xbc3(\x8d\xee\xe7\xfd\x8b\xbe\xa1\xf0\x1bP\xf1\x92\xa2\xbe\nQh\xf7\xfa\xdf\xaf\xbe\rU\xe4\xcc\x97\xf2\xa7\xbe\x9b\xcc\xcfX\x97\n\xbd>\x13Y\xe6\xa9\x9a\xb4\xe1>\x9c.\xc0zr\xb5\xf4>Xv\xff\x89Di\xff>Ekd\x94s\x97\xf8>Z2\xa6\x89u\xaf\xf7\xbe"\xd2HB\xcd\x08 
\xbf\x11\x10J\xa6\xc3\xa30\xbf\xfb\xc2\x0f\xf9\t16\xbf\xf3\x14V\x9bg\xd90\xbf\xc5c\x86\x14\xc3\xf2\x14?8\xa0\xb6\xfd\xf7\xb5D?r\x81\xd9\x88IDS?u\x1f\xb7\x05\xed\x86V?a\x87M\x98\xb1\xe3O?\xfbm\xd7X/\xf3\xc9>x\x97D\xa6\xe6\xf6R\xbf/F\xc3r\x8a\x10`\xbfzq\xbd\xa1\xc3s`\xbf>\xeb\x06k\x012U\xbf\xd6H\xc3\xe1kH*\xbf\x13(\xa7\x85gbH?\xb8\x11\x07+\xe6ES?\x90h\xf7&0NQ?\x7f\xc6\x88\xa8j\xfaC?\x83\xbcx\xf3"\xd6!?{\x8c\x98\xdb\x1a\xa3%\xbf=\x00\xe0\xb5_\x9c0\xbf\xd43\x98\xe9*;*\xbf\xb1\xc7\xcay\x93\xdc\x1a\xbf\x82\xa4\x12x\x17\x83\xfa\xbeC\xda\xfd\xbf\xe8\x94\xe9>\xf2\x84\x81\xb2\x93\x86\xf4>Z\xcd\xcav\x94\xa7\xec>\xda\xa3\x90\xe8\x81\xdb\xd9>r\x9a\xaaS\xfdz\xb9>e\x92\xc7\xda[`\x92\xbe\x0b\x1e\xa3w\xeb\x1f\xa2\xbe\x1c\x9d\xcb\xa7\x90\x8e\x96\xbe\xd3K\x82V\xa2\xdd\x81\xbelY\xf5\xa2\xd1\xaf`\xbe\x83\xfb\xb2~\xe5\xa3\x14>\xa9\xdf\xe4\x1a\xd7\xc36>\xde\xc4\x00\xf0\xec\x94)>\xdc"\r\xc19\xc0\x11>f\xa4\xd8~`}\xee=\x8d\x90\x17^\xf3\x81\x9d=\xc1\xd6z\xbe\xc2.\xb4\xbdb\x07\x8b\xe2\t\xe2\xa4\xbdy\xcf\xc4\xe8$d\x89\xbd\xef\xac\x97\x12\x1b\xacc\xbdk\xbd\x19\x91\x18\xbe#\xbd\xc7@3I\r\xf0\x18=\xf1\xff8y\x1b\x83\x08=S\x07\xfa4\xdb)\xea 
~m3\xbcs,\xb5\xbd\xbf\xb3\x07\xbcB\xd9\x93\xa0\xfdZ\x8d\xbe\xa0\x12o\x94Q\xb4\x9e\xbe\xb0A\x82\x1e\x13\xb4\xa2\xbe\xafH\xf0\xbfJ\xb2\x86>\xa3l\x9a\xafy[\xc8>fNB\xc4U\xf2\xe1>\x14\xf9\xadj\xe0r\xf0>\xc6Y\xde~\xf7\x83\xf2>\xc2\xe2+\xb9\xe0\x1f\xbf>5\xd3\x11\x11Cd\x07\xbf~\xc1_\xebq\x98\x1f\xbf\xa2t\x8bX\x14^)\xbf\x99o\x9b\x82\x1c\xd7)\xbf\xa85\xc0\xa0\xac\x01\x0f\xbf\xde\xf46w\x0ch/?\xeb\xb9s\x92C\x00D?B\x02\xdf\x9e\xca-L?\x08w.\x95a\xa0I?h\xeb&\x0e\x99\xa33?L\x19\xb0\xd1N\xdb<\xbfKlU[J.R\xbfR\xf4J\x19T\x8eV\xbf\x8eo\xef\x0c\xc6&R\xbf\x89J\xd4p\xc9\x82=\xbf\xef\x99\\\x03#S1?K\xbd_me\xacG?\xa0\x92\xffZ)\x06J?\x13\x98\xe9\x11*mB?3\nu\x1fPQ-?\t%1}v\xb4\x07\xbf\xbe/?\xfc\xd0\xff%\xbfH1\xf7\xf9\x8e\xa2%\xbf}D9\xc1\x81\xde\x1a\xbf9\x98}\xf2\x19\x08\x04\xbf\xfah6\x83\x8fN\xa7>\x84\xe3\x86_0\x05\xed>\xe3\xfb\x93\xc07\xe8\xe9>\xbf@\xd0\xc7\xc8.\xdc>\xff\xb3\xb0|C/\xc3>\xf2\xd2\xeb\x9a\xf5\xd3\x85>L\x8c\xe6\x9eu\xee\x9a\xbe1\xcf|\x89\xabR\x96\xbe_\x86K\x83\xdcH\x85\xbe\x93h\xf9"\xeb\x05j\xbeg\xeb\xdbP\x97R7\xbe\x84+\'^\xd9O1>/\x82\x93\xa2\xcf\xa4+>\x85u`\n\xbc)\x17>\x8cZ!H\x01(\xf9=TE7z\xff\x99\xc9=wp0\xada\xe5\xad\xbd\xb9N\xfa\x81\xec\x8c\xa8\xbdCE\xfb5\x83*\x92\xbd\xd7\xdc|\x9a\xb5fq\xbd}\x08\xdb\xa7L\xe6A\xbd\xa8:N\x96\x96\x0f\x10=\x15\xb6\x0e4K.\x0f=\xd3MOx\xc4\x88\xf4<\x0c\xf9\xe4=\xe5E\xd1u\xdfSz\xaf\'\xc5>Q\x91\x8e\xea\xd19\x92\xbe-\xb7q\xd2\xa0\x11\xa1\xbeL\x05"v|D\x91\xbe\xa0hww\xf2\x13v\xbe\x83]7\xad\x1b\xebP\xbeCog\x87\x87v\xe8=8\xf4u\xbb\xe7\xea\xb8\xbde\xd9\xd8?\xc4\xa5\xb1\xbd~G\xed\xc7m-\xef=#\x9c\x83\x8f\xa4w\x1a>\xae\xb2\xddF\x84V9>c\xc8\xf0\x17\x84:O>\xa1e2\x97\xa8\x94N>W\xff\xcc\xfd\x89Pr\xbe\xfdw\x04\x96\xb6Z\x9e\xben\x8b\x82\xca&\x90\xb9\xbe\x8a\xad\x18\x1a\x1c\xed\xcb\xbe\xc1k^*\xaa\xd5\xcd\xbe\xa7\xe4B!\x80\x86\xdc>\xea\xb8\xc8\x8e\x0e\xf3\x08?+\x8atb,\x96"?\xf4:\xf6\x94^\xe21?\x95\xb4\xc7@\x01\xf52?\xa3\x91,\x9c\xb4\x92*\xbfa-\xd8\xffPL]\xbfE\x80\x88%\x06zs\xbf}\xd5E\xa4\xd4q\x80\xbf\xc5%\x80t\xc4v\x80\xbf\x12\x1a\xf4K]\tQ?{\x8c\x19\xc9\x82t\x98?\x17;\xb
9\x13\xd0f\xad?\r\x82\xb9\xe93\xbf\xb5?f\x8d\xb4_\x0c\xfa\xb3?PY\x83\nV\xc2\x87?\x92?\xac\xdaR\xca\xbc\xbf\xa1_\xc4\x17\xde\xf1\xcf\xbf\xed\xc1\xb9\xa6\x0e\xb4\xd4\xbfm\x85\xe5(\x0b"\xd1\xbf\x1c\xc5\x13\xbcY\x1b\xb3\xbfsZ\xce\xec\x9e\x94\xc7?\x8d_\x97\xd0$\xf3\xd8?G\xcc>\xa1\xd7e\xdc?\xcc\xb9\xeb}:\xeb\xd4?\x9f.\x02$)\xad\xbb?\x9b\xe9\xcf\xd5\xbf.\xba\xbf\xb7U\xf5\xc5\xe2\xf6\xcb\xbf\x07\xe4\x0e\xb3\r\x12\xcc\xbf^\x80K\xe9\x1cC\xc2\xbf\xc0\x8d\xa3i\x9c\xe3\xa8\xbf\xe7\x16\xa4/\r\x8c\x92?\xa3\x04$I\xe2m\xa6?T\xda\xb5?\x17\xff\xa3?\x83\\\xf7,\xc1\xdd\x96?\xc3\x86\x96\xf9\xcf\n~?\xd5\xe8\xae\xec\\2K\xbf\xe3h\x8dP\xa2\xa4i\xbf\xbf<\xfc\x89\xab\x86d\xbf4w r\xd3\x92T\xbf\xf5H\xef\x90\xdf\x1b9\xbf\xe7\x8f\x94V\xac\xa2\xd3\xbe\xda\x07\x84\x91\xbd\xc3\x14?\x05\x99@\x96\x9aW\x0e?\'@\xb0\xfd\x8c\xa3\xfa>\x1fYL5p\x85\xdd>\xd9!\xe3\x17A%\xa1>t\xdb;M\xd2\x92\xa7\xbe\xc5\x19U\xef\xbf!\xa0\xbe-\xe7\x88\rH\xd7\x88\xbe\xe6\x8f\xd1\xc5\xe6\xa1h\xbe5\x8c\xa0{E\x8f3\xbe\x94\xad\xc3\x13\rj">\xf2\xd2\x8aBw\x8d\xb9\xbd\xf6\xdct\xd3:\xa4\xcd=\x05\xe6\x90l{1\x06>\x03PL\xfd\x86\x0b*>@_\x82\x07r\xa9C>-\x19K9\x94\xcdP>\xd8\r]\x13:\xf5I\xbe\x03Ec\x8a2l\x8b\xbe\xediUW\x87\xd2\xac\xbe."\tH \x14\xc3\xbe\x1e.b$\xf3w\xce\xbe\x06\xf4W\xb7I\xec|>\xaa\x83\xec\xf7\xc3\x08\xf8>\x89X&\xefD\xf8\x16?\x98\xa6_\xd1\xba\xa2*?\x12k\xaeK:b3?\xf8Z\x00\x89\x9d\x99\x19??\xe6\xa5=\x10\x9bM\xbf\xd1+\xfb\x11kWj\xbffn\xb7M\xf4\xc6z\xbf\xf64\xa2\xae}z\x81\xbf\xcd;\xe5\xab\xc8>q\xbf\x1c\xc3\xf1\xd1\xcc5\x89?\xa2 
\xe1d9\xb4\xa5?\xa9T\x0b\x92;e\xb3?\xc0\xa7P\x1bRy\xb6?o\x18\xa8\xf3q\xcb\xa8?\x82\xe3\xb6\xe4\xf0\xb5\xac\xbfo\xf0W\xd9\xdf\xa4\xc9\xbf\xe5!\xee\xcd\xc2?\xd4\xbfW\x010\x99\x0c\xaf\xd4\xbf\xd7N/;\x85\xe7\xc6\xbfE\xb1`<\x14\x17\xb4?\xbf\x93\xae||\xa8\xd5?+\xe5\x0c\xad\x00x\xde?\xa5\x7f\xe5\x1e\xa4R\xdb?\xb5\x8bW}\x0b\xc3\xcc?\x05\x9e2\x8d\x14\xc1\x97\xbf\x0bI>\xe5\x95\x07\xca\xbf>\x03\x84\x96Y\x83\xd0\xbf\xdfu\xef\xa4\xbd\xf2\xc9\xbf\x97\x86I\xdd:)\xb9\xbf+e\x85\xfde\x12x\xbfM\xf2e\xe7\xd1\x18\xa6?Y\x14\xdf\x80\xf2\xc4\xa9?Q\xf7\xb8\xc7\xcf\xbc\xa1?i\xc9\xb53h\x11\x8f?CX\'1\xd4\x07a?\xfexMV\x05-j\xbf|KYH3\xeal\xbf\xe0\xfb\x9e{\xcawa\xbfG\xd0bo\xc0GK\xbfW3\xff\xeb~v"\xbf\x10\xa7m\xf1\xeb#\x15?\x04\x19\xf0p\xc8I\x17?uX\xe8\xdf\xf2\xca\x08?\xce\x18>\xf8\xe6\x1d\xf1>pQ\x1fl\xaeA\xc8>V;\xe3\xf1\xf7\x1b\xa6\xbeh\x01\xaah\xc4\xdb\xaa\xbe\xfdH\xca#3\\\x99\xbe\x8a\xc8\x88\x1ac\xcb~\xbe\x87+_\x0fX6U\xbe5/\xe5\xd7\x84u\x19>\xf4\xdc\xc4xE\x18&>\xf3\xd2%\x8a<\x9a\x8c\xbd\x11gQ\xa7\xfe\xe4\xef=\x93,\x0fL\xa9I\x18>l\x8ci\xd7:\x146>\x86N\xb4\xc3\xeajI>t\x84\xccN)\x8a@>\x07c\x10\xech\x85t\xbeV\xf3\x02\xed\xeed\x9d\xbe\x9a\xcb1(\xb9p\xb7\xbeH\xac\xf4\xe1\x84\x1c\xc8\xbeQaAq\xce\x84\xc4\xbe+#S> 
\x1c\xe2>\x050\xa9\xcc\x81\x8b\t?\xce3\x0e\xfe\x9a\xee!?\x86r\xf9\x8c\xaeT0?\x1c\r\xfb\n\x97\xbd-?\xd9\x95\x8dy\x04\xfe4\xbf\xca2\x06\xc5\xa9\xce_\xbfC\xdb0\x97\xe3\xc5s\xbflp\xe2\xdc\xd6\xb2\x7f\xbf\xe6\xee\xb1\xba\x82^|\xbf\x11\x801\xcf\xc5:l?\xe8\x81\xc8\xc0#F\x9c?j\xe9k\xe6\xd7k\xaf?\x18\xd1\x12\xac\x90\x17\xb6?\xe6\x01h\xcc,\x93\xb2?\x15\x07\xc3(\xb8\x85r\xbft\xdd1\xcf\xfc\xd8\xc1\xbf}\x90\x8b\xf1)\xfb\xd1\xbf\\S\xd2\x17\xed%\xd6\xbf)NE~\x9c\x07\xd1\xbf\xd9\x00\x02t\x13R\xa4\xbf\xe9]\xb0\xda\xae\xba\xcf?F\x143_\x88\x9e\xdd?\x89_\x85BS\xf9\xdf?z\xad\x0cm\xab\x19\xd6?7\xcb\x92\x1dz\xd1\xb5?\xce@_[\xce\x90\xc3\xbf\x15j\xf9yC\x88\xd1\xbf\xee&\x07x\xa4\xa0\xd0\xbf\x1f6\x9e\xe5\x83n\xc4\xbf\xd4\x1d\xd4\x8c|6\xa7\xbf\x13Nz\xad9B\xa0?@\x99\x11\x8fu\xc6\xad?Gz\xa9\xaf\x05\xed\xa8?\xa4\x13\xaeA\xa0\x06\x9b?\xa3\xff\xb4\xb4\x198\x7f?\xe8\xaf\xacp\xf5\xf6`\xbf\x10\x13\tbw\x15r\xbf/\\\xf9\x04\x86\xedj\xbf#\x97\xa4\x994\xa5Y\xbf\xae(\'E:\\<\xbf\x1dA]j\xb6\xf4\x00?\x14!\x03N\x8aI\x1f?\x02iL()\xf5\x14?F\xe3\x98\x97\xb0}\x01?\x94\x05/\xfc\xb8\xe4\xe1>\xc8\xa3~\xeft\xd3\x87>\xb48\x12=W%\xb3\xbeW\x89\x91\xf6\x8f|\xa7\xbe\xd2^y\\\xa3+\x91\xbe\x8d3\x0f\xfc\xe2\xd0o\xbew\xf8\xeb 
\xc1\xed0\xbe\xcd.&\x8b\x1ed0>\x01#\xaez\xf0\xed">\x80+Ac\xf9s\xd1=\x11\xe8\xfbT`V\x04>\xb8\xc1h7Zq&>\xd4\x0f|\x1d\xef\r@>\xfa\xf9\xa6\x96\x04\xefG>\xc6Dv\xf2f^T\xbe\xdd\x06K\xd4^\xad\x8a\xbe]\xcfU\x08\xfb"\xaa\xbe\x80\x93\xbd2+o\xc0\xbekw\x11\xb50\xab\xc7\xbe\xc2\x17\xea\x12\xde\x0e\xb9>\x01cS\xbc\x1d\xf2\xf8>]g|\x0b>\xee\x15?V\x0c\xe4\xcf\xec,(?\x0b\x99\xee\n(10?\x0f>\x8d\xee\x0f\xfe\xe2>J\xef\xb7\xfa\xf5\x86P\xbf\xdc\x8b)T*\x80j\xbf\xf0x7\xac\x9b\x96y\xbf,Il\x800&\x7f\xbf_\x81F\xcd\xa1Jc\xbfu\x87\xc1\x12\x85\xb8\x8e?nY\x00\xffA\t\xa7?\x99\xcc\xef\x117\x81\xb3?\x18."}\xb1A\xb5?\x94\xbb\xb1;\xb2R\xa2?\xc4\xee\xee-\xd9\xaa\xb3\xbf9>\xa4\x12_\xc4\xcc\xbf04%C\xe3l\xd5\xbf\xdd\xad\x0c\x17\x01\xb3\xd4\xbf(\xe4\xff\xe05\x82\xc3\xbfn\xe3\x92\xd2\xff\xb4\xc0?<\x85^\x98E\xbf\xd9?R,\x9d\x1c\x1a\xf6\xe0?8\xb4^pP\xde\xdc?<\x15uE\n\x06\xcb?\xf9u\x95\xaa\x81\x06\xb1\xbf\xf4:\x17\xffnv\xd0\xbfc\xf8\xafu\x0fZ\xd3\xbf\x95\xbc\x9d\x1c)\xe7\xcc\xbf\xa7\x1e#P\xe2\x91\xb9\xbf\xfd\xe2m\x88\x94|x?\xd6\x88\x88\xe6^\xee\xad?z\x11\x1aw\xb7\xce\xaf?\x8f\xf2\xec-\xfb\xce\xa4?\x86\xa78\xe5\x92\xe5\x90?\x9b>\xc7\xd6\x1b`Q?\xf1U\xb6T\xf60s\xbf\x85\'|Wq\xd0r\xbfj\xdaO\xb3\xb9\x91e\xbf\x81MQal\x90O\xbf\x1d\x84\xa5ZH\xeb\x1e\xbf\xa0\xf3\x99\x13c !?\xb5\x8e\x8c\xc8\x03\x00 ?\x82"1\x9f6\x1b\x10?\x06WW\x01\xd7\xfb\xf4>\x95o\x00}\x98x\xc8>y\xd7\'\x8bf\xbb\xb4\xbe\xd7\xbe\x0e\xa7\x02\x87\xb3\xbed\xb5\tO\x95U\xa1\xbe\xe45?\xdf\x87\xf2\x83\xbe\x00\r\x18\x01\x85\xffW\xbe\xa7\x10\x85k\x1c\t0>wk\xf1K(\x0e1>v\xd3\xa8M\xbf\xe3\x1a>D;\x83\xd7,\xbe\xed=\x1a\xe9\x00N\xed\xb7\x14>\xf9\x86\xbd\xc95\xe71>\xf5\xc0\xbc\x13J\x0fC>kl/|\xfbG">r\x81\xab\x8eX\xa9t\xbe\xb6\xa0R\x93)n\x9a\xbe\x92r\xca\xed\x02\x02\xb4\xbe^\x03{IT@\xc3\xbe\x7f?#D\xda\xb2\xb6\xbe\x89\x88_\xdf\xe1\n\xe4>\xbc\x1a\xbc-\xa5>\x08?DR\x16\xb6-\x1b 
?B\x82\x13\xd4\xf9\xa5+?2\xbc\xb0\x01\t]$?j\x0e\xea\x86-\x8c:\xbf\x80\xd4\xb2c\x11\xee_\xbfZe\xc1\xa2\x98\xafr\xbf\x06\x8a\x83Ro^|\xbf\xa57\x88/\xfd\xf9u\xbf\xa4\xee\xd31\xc7\xd8v?\xe0$\xd6[\xa1\x1c\x9e?eTo\xdcI?\xaf?3\x0c\x8bV\xfd\xdc\xb4?\x87\xa5\x0f\x8f\xa4\x7f\xaf?yQC\xba\x8c\xf0\x95\xbf\xb7hoPkB\xc4\xbf\xf3\xe1\x14\x0f2\xd3\xd2\xbf\xbf\xb2\x1c\x0f\xf1\n\xd6\xbf\xd7\xa0\xa3\xbbC\x1c\xcf\xbf\x99L1\x8bA\x9fp?\xa0
x\xb8&\x02P\x95\xb8\xbel\xdbO\xfdS\xe9\xd1\xbe\x80j\xb1\xc6\xc0\x97\xc4\xbeg\x10\xc1\x98\xef\x92\xad\xber9\xb5\x8c\xec\xf1\x89\xbe\xa1|\xae\x10A7&\xbeo\x16\x12\xcajGU>\xfb\xc5\xd9\x1cp]F>\xc5!\x94\xb8m",>C\x912\xf5\xdba\x06>\xfa\xd4\xac\xa9\x99\x91\xc3=2)\x91 \xe6\xb3\xc1\xbd\x137\x82\xf1ux\xb1\xbd\xd7\x06\xb5m)\xd21\xbeO\xe4\xeat\x10\xa1a\xbeQ\xe4W\x85\xeb\xb9\x80\xbe\xe6\xfa,\xdb\x82\x94\x94\xbelQ\x9elr$\x9a\xbe\xffNz\xa1\ti\xa4>\x04\x18\xed[3l\xd6>\xf5\x9f\xd9<0\xe0\xf2>r\xe6\x9fN\x9ek\x04?}fH\x03\xcc\'\t?]\xe1_:\xa6W\xf9\xben\x96\xeb3\x91V4\xbfB]\x110(\xb2N\xbf\xde\x1f\x93\xef\x0b\x1d]\xbf}3=\xa3a\xb7`\xbf&\x91\xa2\x0b\xe0S\xe7\xbe\x16hm\x9d\xc8\'z?\xb2\xff\x12A&\xf9\x91?\xd1\xb7\xd5\x14\xd2\xdc\x9d?`\xfa0\x1bY5\x9f?\xab\x95JJ\xb8i\x7f?\xfa\x9cc\xcc\xad\x9d\xa7\xbf\x87\x08\xe1\x06\xd2H\xbe\xbf\xff\x87/R\xe4\x0e\xc6\xbf\xa3\x16\xb8\x97/\xa8\xc4\xbf\xe4D\xfa\xd4\xa8\xed\xad\xbf\xeb\xfe\xc0\x1a\x03n\xbd?FV\t\xd3FT\xd2?\\B\t\xe9\xbez\xd7?\xfd\xc4OX\x07\x81\xd3?FA\x8f\xa17<\xbf?\x90\xae\xeb\xe8\x9bn\xb8\xbfj\x83X\xd1R\xcf\xcf\xbf\xf7x\x08\x8b\xf2\x02\xd2\xbf\x9cc5\x95g^\xca\xbf\x15\x83\xd3xg\x14\xb5\xbf\x8f\xc7\xb1\xa7\xd1\xa4\x98?(^c\x08\x93\xb9\xb3?4\xd9\xad\xd2\xdf\xe9\xb3?\x13!\xe2%\x90\x96\xa9?\xfc,\\\xf9Ic\x93?\xb27\xb5\x88\x0bjS\xbf\x1a&bN\xcbe\x81\xbf57\xe4k\xf5\xb7\x7f\xbf#\xd4b\xe2w\xdaq\xbfV\x83\x00{T\xe0X\xbf\xed\xa4\xa9h\x88\x8e\x13\xbf\xbc\xc3\xa3\xc8\x8f\xa95?\x10\xdb\x0fq\xc8.2?\\j\xa5;\x01\xef!?\x9b\xaa\xe6\xe6\xc8\x8a\x06?\xb4\xad\x05\xf5\xb5[\xd2>\\2-\xd0\xa0\xcb\xd2\xbe\xafi$l\xa4\xf9\xcd\xbe\xfc\x10\x1f\xf4\x13\xf4\xb9\xbe\xd5\xdc\xbb\xe0\xbd\x10\x9d\xbeJ\x98\x00\r\xc5\xadl\xbe[\xc3\xc1\xecM.V>b5\xc2Sq\xbbQ>]\'\x8c\xa0\x10\x11;>\x19\x15\xf29h\xc9\x1a>\x98\x90\xc3\xf3\xa5v\xeb=;/\xd2G}\xd7\xc0\xbd\xd63\xe9\xee\xdb\x08\xbe\xbd!\xbc\'q\xd0W\xa4\xbd\xd8\xe22(\xab\x9eH\xbeYt5\xd64nm\xbe\x12\x08jJ\x80\xe1\x85\xbeT;\xc6f\xed\xfa\x93\xbe\x9e_6u\x07Ok\xbe\xc5gV\xf8i\x9a\xc0>\xd8Z\xf8\xfbJ1\xe2>\xea\xd6X\xd2n\xb2\xf7>Ud\x140\xc9\x94\x03?\xb7\xf7r\t\x89\x11
\xf3>\x88L\xf5\xed\xbbN\x1f\xbf\x9d\xebD\xd9\xb3,@\xbf\xca\xc4sS4|R\xbf$\x15\x80\x87\x05F[\xbf\xf66\x0b\xa0U\xf5P\xbf\x92\xf57\xe8\x033d?G\xc3\xba\xb7?\xa6\x84?ly9v\x0c\xc8\x94?x1\x12\xf9\xaf!\x9b?\xbf6\xe0\xd2\xa0\xe4\x91?\xfe\x1e\x94\x81\xbb\x07\x91\xbf\xd3\xa5S+\xaa\xe1\xb2\xbf\xef\x83\x82\xff[\xd6\xc0\xbfZ DcdW\xc3\xbf\xfa\xc5\x93\xbb\xeb\xf6\xb8\xbfM\xbd\xa7\x0c\xa8[\xa0?hE\x08z\x9c\xa4\xc8?\x8a\x89\xa4;\xcb\xa8\xd3?\x1f\x8f:=/\xce\xd3?\xd25\x8d\x07\xb6\xf4\xc7?\xbe\'\xd3\xdc\xfezv\xbfbgg{\xea\xd3\xc6\xbf\x01\xe0tR\x17\x88\xd0\xbf\xde\xf5\x98\x86\x12-\xcd\xbf\xe42<\x99q\x1c\xc0\xbf8\x00\xfer\x8e\xc3\x8d\xbf\x0f\xc2Z\xd4\xae\xc0\xad?p\x147\xff\x8a\x01\xb4?\x91[\x8d\xee\x83\xf2\xae?\x98\x03Cbn\xb1\x9e?\xd1\xdcL\xa0\x17Rv?+#\xe9K)\xdcz\xbf\xf6\xf6\x96-\xcbe\x81\xbfd\x7f\x1e\x8fz\xa6w\xbf\x8f\xc5\x0b\x9f\xf1\xd5d\xbf\x12\xdd\x9b\xeb\x104A\xbfu{\x8b\xe5\x03I0?6>^\xc9B\xb35?\x06\xe3\xe6\x98\xef\x0c*?t\x81\x07\xe4\xf4=\x14?|S\xc7\x0cJ\xe4\xf0>\xfa\xd3]\xb0\x05\x92\xc8\xbe\xc4<>\xd5\xa5Z\xd3\xbe?\xb8m\x13\xb1\xad\xc4\xbeS4\x8d\x18M8\xac\xbe\x01\xa3\xbcm\xa7x\x86\xbe\xb3)t\x9bZ\xe4@>\x19\xfa\xe4]\x87\x93X>k<\x8b7Q\xa6G>\x88\xae\xc7\xaf\xc6G,>\xf2\xfeV\xa9\xa1\xca\x04>)\xc2\xeb-\xcb\x9f\xa8=\x11\xc2\x1a 
\xc8\x0f\xc6\xbd\xdc\xa6\xa1G\xd0x\xb3\xbd\x96\xae)P\xcbe\x94\xbd\x85)SV\x12aW\xbeN\xfa\xbbD2\xf8t\xbeb\xb5\xf0\xf9\xbeY\x88\xbe\xb2O\xfc\xe83\x8d\x89\xbeBm\x85\x0c5\x97\xa2>\xaa\x1aE\xa3\xb4x\xcf>\x9b\xa4\x93\xa1\x1a\xe6\xe8>;\xc2Ek\xe3\x87\xf9>\xb9\x13\'\xa8\x01\x85\xfb>\x95a\xaa.\xaau\xff\xbe\x0eJ\x1cy,P.\xbf\xd5\xa1\xc3\xbd`NE\xbf\x1eV\x9d\xee\xa12S\xbfi7\xe7)>\xebS\xbf\x13M\xef\xc7U+;?\xa4\x1a1\xaaj\xcet?\xc1\xc3\xfa\xb7\\E\x8a?\x11\x1d\xc1tO\xbf\x94?\xf3P\xd9[\xa4\xfa\x93?\xe7_\x91\xa5\x11\x0fN?y\xf6\xac\x1a|9\xa4\xbf#\xe2Wy\xd3R\xb7\xbfG\xf4l\xd7_"\xc0\xbfj\xbd>.<3\xbc\xbf\xe9$\xa6/\xfbZ\x9a\xbf\xfc\xf2\xce\xd6\xe2\x8c\xbb?\xea\x19o\xe5\xe7\xc9\xcd?=Ig!\x99\x12\xd2?\xd6\xc5}\x85\x1bA\xcc?5\xaf1\x93K\xd6\xb1?\xaa\xcb\x9b\x9cT\xcd\xb9\xbf\xa2s\x9d\x1d\x0cS\xcb\xbf\xbdn\xdc;\x83,\xcd\xbf\x84?(\xcfR4\xc4\xbfm\xc8-\xe3\xb1\x9c\xab\xbf\x06\xad\xf1\xf5\xb6\xec\x9f?\xf4\xf5j\xac\xcc\xf5\xb1?\x96\x8c\xeciS\xf8\xb0?\xa1\x1f\xe0\x8dy\xb2\xa4?\xde`\xfa\x8b;\xf5\x8b?\xcb\xdfP\xd6\xd5\\g\xbf\x12\x14\x98\xdf\xa0\xdc\x80\xbfE\xa4t\xea\xd8s|\xbf\xaa\xb7q\xf7\x07rn\xbfM\x15\xc4\xc5caS\xbf\xa8<"\x1ft!\x05?\xa3\xf4b\x81,\x7f6?\x10\xfb]\xeb\x1c.1?\xdc\xec\xe9p\x8e\x1a 
?\xec\x96\xed\xc0K\xc9\x02?\xae\xa3\xde\xc8\xd2\xa1\xbe>WP\xd6\xd6`\'\xd5\xbe\xf0\x91\xdf\xeb\x03\xdd\xcd\xbe\x03\xbddG~\x87\xb8\xbe\x9a)F\x8e\xbc\xc1\x99\xbe\xceI\x7f\xab\xcf\xa8b\xbe\xa1\xc0a\xe8\x94\xa5[>\xd6$\x1f\x07\xf1\xa7R>\xb4o\x18j\xbf\xea:>p\xd2\xd4\x05\x13%\x19>\xf2\x88b\x9b\xc1W\xe5=\xb0\xa5_Q\xd6o\xc8\xbd\xf3p\x8c\xe0\x16\xb9\xc0\xbd5;\xa0Y6I\xa5\xbd\x93\xb7\x06K"\x8e\x81\xbdc\x98\xd6\x92n?b\xbe\x0e\x86\x9c\x9d\x89\xc9y\xbe\xfe\xaeI!4n\x85\xbeG\x05\xd9\xee\xc2\x18j>PG\xca\x0c\xf9\xbc\xb7>\x9bg\x80\x0cO\xc2\xd7>\xb7W\xc8\x00\x95j\xed>\xc4\'|$s\x80\xf6>\xcav\xbb\x7f\x14\x01\xd2>\t\xc6\xcb\xb3\r0\x18\xbfL\xb1\x8a\x90\x89D6\xbf\xef\xc2\xa8\xe5l\'H\xbfy^Z\xa1\xdc\xadP\xbf/\t~k\x80B=\xbfn@\xcd0\xd1/a?\x1f\xc4\x80\x97\xc1\x02~?y\xf1An\xbe\x92\x8c?L\x9f\x91\xebF\x96\x91?tRW<\xd3\xe4\x82?\xcb,\x1f^\x8e\xa2\x90\xbf\xc7#\xc7\xe9\xb0\x07\xad\xbf\x902*\xee\x97[\xb8\xbf\xd5\x12\xbexS\x81\xba\xbfY\x1e\xe2;\x95\xb7\xad\xbf\x14$\xdc\xbf\xf6\xcb\xa4?v%\x15N\x8d\x1a\xc4?\x8f1GF\xc4\xed\xcd?\xcf(Q_\xf3\xa2\xcc?\x9a\x1f\x1e\xbb|)\xbf?Q\xb7\xfe\xa1\xf3V\x9c\xbfA\x11N\xd2\xa8\xdb\xc3\xbf"\x18d\xc9\t\x7f\xca\xbfQ\xfb7\x17 :\xc6\xbf\x9d\x91G\x12\xee\x8e\xb6\xbf\xfa\x9d\xfb\xbb\xe85\xd8>\xbb \x05\xfa\x8b\xd0\xab?\xc0\xd1Ik`\xe4\xb0?\xd8\xd6#\xba\xe6\xd2\xa8?\xc24\xa5\x94H\xef\x96?\xae\xbd\x1e\x9d4)d?r\x19+}\xbf[{\xbf\x99\xf5E\x81\xc0\xfd~\xbfm\xdf+\x89\xec\xf6s\xbf)\xdc\xc2\x81,\x88`\xbf\xd2b\xb1\x9f\xb2\x135\xbf\xb5\x12K]\xbb\x932?\xbfrG\x95}l4?|\x14\xe6\x17N#\'?e\x08\x05\x8fX\xff\x10?\x15\xba\xb5\x89\xcb\x02\xe8>\xc6`/\xbc\x1e\xd2\xd0\xbe\xcb*\xf1\xb3TL\xd3\xbe\xde\xba0uUS\xc3\xbe\xef\x01;\xb8\xc2\x05\xa9\xbe\xc2\xc3\xef\x17\n\xaa\x81\xbe\xe8Jt\xf6-\x8dR>\x18\x9c\xfd\xc7\x04\x10Z>\xfb\xd5\xe8\xf8{CG>\xfc\xcd\x81\xd0gp*>\x82\x19\xe2\xf2n\xb3\x01> 
joi7F\xb0\xbdS\'\x9c\xf4\x04\t\xc9\xbd\x8f=\x04\xbf1,\xb4\xbdW\rq\xea\xeb\x15\x94\xbd0PnE\x0e\xbdh\xbd\x9dq\x0c\xf80yh\xbe\x9b7S\x11C\xacz\xbe\xf6\x1exc\r\xfet\xbe\'\xbc\xe8\x8f\xdd\xdd\x9c>\xfd4\x96;\x9ct\xc4>b\xd4\x03Nm\x93\xde>\xfb\xd3\xcec\xdd\x9c\xed>$\x99>\xea\x0b\xab\xea>V\xc9\x8b\xafd\xbb\xfc\xbe6\xe5mb|\xdc$\xbf\xdaB^\xbc\x89\x87;\xbf\x06\x8c\x97\x99\x97\x85G\xbf\xbe\x19\x8c%\xd5{E\xbf\\\x12\xd2M|\xe3A?\x8a\xb8\xb5\n\x1ann?\x8aZ\xe0\xf4\xca\xdc\x81?\xd20\xc40\x0f\xcf\x8a?\xc8V\xaag\xc0h\x87?\xeb\x19]\xa6\xb5\xecd\xbf\xbc\x9b+\xaa9\x9b\x9f\xbf\xba\xcdG\xa5\x08\xb3\xb0\xbf\xbd\x9c\x8d!h\xf6\xb5\xbf\xf5\xd6\x92"\xe3\xb9\xb1\xbf\x98G\xc3\x9e\x8fyt\xbf|\x1e\x8c\xf1V7\xb7?ZXA\xe3\xaez\xc6?L\x19\xb87\xd6\xe5\xc9?L\x137\x1f\x83\xeb\xc2?\xf8\xca\xca\xf2\x80&\xa0?\xaf\xd5V\x9b\xcf\xd8\xb7\xbf<\x8b\x1b\xb3\xeb\xc2\xc5\xbfk\x85\x94\x99\xee\xfe\xc5\xbf\xd9\x80\x84r\x96\xb2\xbc\xbf\xe6\xdc\x15u}o\x9f\xbf\x95\x12\xb4\x8d(\xc5\xa0?\xca\xd8\xec\xca?@\xae?q\xf9\x1a1\x84\xec\xaa?\x15\xdd\xb5\x8b2\x16\x9f?\xe9%H\xeex\x17\x82?\x8c\xf1G\xb3w\xdbn\xbf\\\t\x98\xeb\xef\x1e~\xbf4\x85)b?\xc0w\xbfp \xcb%\\!h\xbf\xc4w\x9f0\x8c\x82K\xbf\x16\x97\x07\x17\xa0< 
?\xc57\x04M\xc9g5?\xc2s\xb8!\xbd1.?ShV\x1b\x84\xe7\x1a?\x7f(\xe7\x11@\xc2\xfc>u\x849[\x07|\x9a\xbe\xee\xe6\xd7\xed\x89\x98\xd5\xbeh\xd69\x9f:\xa5\xcb\xbe#\x1dKt\xb3\x93\xb5\xbe\xdc\xa1P\x1f\xe1\x10\x95\xbeMy\x02\x92\x05\xf1P\xbe\x06\xe2\xac\x85=\xa9^>\x87\x1a(\xe6\xb76R>\xa3GE\xd3V\xeb8>\x03Z\xb9\xefS\xda\x15>|pd\x04+\xf3\xdb=>\xa6\xd2\x8f\x9b)\xce\xbd\xee\x1c\xbc\xf6\x17?\xc1\xbd\xd4\x1d#,\x0e\xbc\xa4\xbdc\x16\xf7\xd3s\'\x80\xbd\xd1}\xf4k\x0c\x86G\xbd\x1d\x80\x1c\x05\x8b>l\xbe\xb2\xb7\xfdc\xe3\xe4t\xbe\xac\xb8Z}!\x19t>\x87]\x9c\x92V#\xaf>\x11h\xcf\x15\xa2\xdc\xcc>\xd9\xfc\xf5\\\x90\xfb\xe0>Z\xe6J@\x11\xb8\xe7>?\x17\xea\x03\x0c\\\xc0\xbe\xccv\xb5\x19\xff\xfd\x10\xbfu\xe7\x9fO\xd4},\xbf\r\x8db\xf9Q_=\xbfos\x8at?\x99\x97\xae\xa65I\x82?<\xfa!)*\x19\x85?E\x9b1\xbc\xf9\x80p?\x12\xb1N&\xf8\xfe\x8b\xbf\xec\x8a\xec\x98P\xad\xa4\xbfq\xf8-\x00\xf6f\xb0\xbfI\x14I\xd4\xe5\xd7\xb0\xbf\x04\\D\x81\xe1(\x9f\xbfl\xd1\xa7\xe6tM\xa4?^\x91\x9f\xb9\xe6N\xbe?\x17\xc1\xc8\x8dy4\xc5?\x03\xe2\xda\xa5\xcc:\xc3?3\x15\x83\xb06L\xb2?\xa1\xc3t\x04)\xc2\xa2\xbf\xaf\xd8)\xe3\x11\xca\xbf\xbf\x0c\xa2Q\x04\xaa\xc1\xc3\xbflP\x15\xcd4}\xbf\xbfS+\xb9\x94\x17\xdd\xac\xbfc\xc8\xc95\xca\xd1\x81?\x83\xde{1!\xc3\xa7?\xa0\x9dx\x13\xff\x85\xaa?\xe8\xc0\r\x86\xdd\x86\xa2?gJ\xcfyd\x89\x8f?S\rAOL\n3?\xb34w\xdcS(y\xbf\xf8h@\xb8\x9c\xa3y\xbf\xccV\xb3T\xc4_o\xbf\xbf\xd9l\xaa\x86=X\xbf%X\xe2\xdf1\xd3#\xbfxQ9N\xc9\xaa2?X\xe3\xbc\x8a\xde\xd31?f\xc7&\xb2\xcd!#?\xe3\x0c\xa0,\xd3r\n?V\x14\x06N\xa8f\xdd>\xaf*\x0f\xdc\xb9\r\xd3\xbes\xd4\x97\xde[\xce\xd1\xbe\xc8\xba\xf5/+\xd0\xc0\xbe\xd8\xf2\x7fuP\x98\xa4\xbe\xf9\xc9tS\n\xd5x\xbe\x14\x1f\xf7D\xc8\xb7Y>HH,z/}Y>lF\x96\xb9\xf6KE>\x1a
"
285 | ]
286 | },
287 | {
288 | "cell_type": "code",
289 | "execution_count": null,
290 | "metadata": {},
291 | "outputs": [],
292 | "source": [
293 | "# interactive threshold demo, evaluate this cell\n",
294 | "# You can safely skip this cell if any error occurs.\n",
295 | "from IPython.html.widgets import interact, interactive, fixed\n",
296 | "def demoThreshold(T):\n",
297 | " predict = (np.log(ponIm/poffIm)>=T)\n",
298 | " plt.figure(1)\n",
299 | " plt.imshow(predict)\n",
300 | " p = ROCpoint(predict, gt)\n",
301 | " plt.figure(2)\n",
302 | " plt.plot(x, y)\n",
303 | " plt.plot(p[0], p[1], '*')\n",
304 | " plt.xlabel('False positive rate')\n",
305 | " plt.ylabel('True positive rate')\n",
306 | "\n",
307 | "# compute ROC curve\n",
308 | "p = []\n",
309 | "for T in np.arange(-5, 5, step=0.1):\n",
310 | " predict = (np.log(ponIm/poffIm)>=T)\n",
311 | " p.append(ROCpoint(predict, gt))\n",
312 | "x = [v[0] for v in p]\n",
313 | "y = [v[1] for v in p]\n",
314 | " \n",
315 | "interact(demoThreshold, T=(-5, 5, 0.1))"
316 | ]
317 | },
318 | {
319 | "cell_type": "markdown",
320 | "metadata": {},
321 | "source": [
322 | "# HW2 Q5:\n",
323 | "1. Load another image (e.g. the butterfly or building), apply this edge detection algorithm, find a good threshold and display your result (6 points)\n",
324 | "2. Use $ \\frac{dG*I}{dx} $ instead of $\\frac{dI}{dx}$ for edge detection where $G$ is a Gaussian. Show results for a couple of different variances `sigma`. (8 points)"
325 | ]
326 | },
327 | {
328 | "cell_type": "code",
329 | "execution_count": null,
330 | "metadata": {},
331 | "outputs": [],
332 | "source": []
333 | }
334 | ],
335 | "metadata": {
336 | "anaconda-cloud": {},
337 | "kernelspec": {
338 | "display_name": "Python 3",
339 | "language": "python",
340 | "name": "python3"
341 | },
342 | "language_info": {
343 | "codemirror_mode": {
344 | "name": "ipython",
345 | "version": 3
346 | },
347 | "file_extension": ".py",
348 | "mimetype": "text/x-python",
349 | "name": "python",
350 | "nbconvert_exporter": "python",
351 | "pygments_lexer": "ipython3",
352 | "version": "3.8.8"
353 | }
354 | },
355 | "nbformat": 4,
356 | "nbformat_minor": 1
357 | }
358 |
--------------------------------------------------------------------------------
/HW2_part2/data/edge/boundaryMap/100075.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/boundaryMap/100075.bmp
--------------------------------------------------------------------------------
/HW2_part2/data/edge/boundaryMap/12074.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/boundaryMap/12074.bmp
--------------------------------------------------------------------------------
/HW2_part2/data/edge/boundaryMap/23025.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/boundaryMap/23025.bmp
--------------------------------------------------------------------------------
/HW2_part2/data/edge/boundaryMap/35010.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/boundaryMap/35010.bmp
--------------------------------------------------------------------------------
/HW2_part2/data/edge/boundaryMap/41004.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/boundaryMap/41004.bmp
--------------------------------------------------------------------------------
/HW2_part2/data/edge/boundaryMap/41025.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/boundaryMap/41025.bmp
--------------------------------------------------------------------------------
/HW2_part2/data/edge/boundaryMap/97017.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/boundaryMap/97017.bmp
--------------------------------------------------------------------------------
/HW2_part2/data/edge/snapshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/snapshot.png
--------------------------------------------------------------------------------
/HW2_part2/data/edge/trainImgs/100075.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/trainImgs/100075.jpg
--------------------------------------------------------------------------------
/HW2_part2/data/edge/trainImgs/12074.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/trainImgs/12074.jpg
--------------------------------------------------------------------------------
/HW2_part2/data/edge/trainImgs/23025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/trainImgs/23025.jpg
--------------------------------------------------------------------------------
/HW2_part2/data/edge/trainImgs/35010.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/trainImgs/35010.jpg
--------------------------------------------------------------------------------
/HW2_part2/data/edge/trainImgs/41004.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/trainImgs/41004.jpg
--------------------------------------------------------------------------------
/HW2_part2/data/edge/trainImgs/41025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/trainImgs/41025.jpg
--------------------------------------------------------------------------------
/HW2_part2/data/edge/trainImgs/97017.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW2_part2/data/edge/trainImgs/97017.jpg
--------------------------------------------------------------------------------
/HW3/Gibbs Sampling.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "\n",
8 | "\n",
9 | "\n",
10 | "# Gibbs sampling\n",
11 | "\n",
12 | "Originally by Weichao Qiu.\n",
13 | "\n",
14 | "If you find this ipython notebook is unclear or contains bugs, please post it on Piazza.\n",
15 | "If there's an error saying \"something is undefined\", please run the cell that contains the definition or use \"menu -> cell -> run all above\""
16 | ]
17 | },
18 | {
19 | "cell_type": "markdown",
20 | "metadata": {},
21 | "source": [
22 | "# Foreground/background classification.\n",
23 | "\n",
24 | "Here we consider a model for foreground/background classification that can include spatial context. Intuitively, neighboring pixels in the image are likely to belong to the same class, i.e. are likely to be either all background or all foreground. This is a form of prior knowledge, or natural statistic, which can be learnt by analyzing natural images.\n",
25 | "\n",
26 | "For pixel $i$, the foreground label is $ S_i = 1 $, and background label is $ S_i = -1 $.\n",
27 | "\n",
28 | "The prior term in the energy encourages neighbouring pixels to have the same intensity ($N(i)$ is the set of pixels neighboring $i$): \n",
29 | "$ E_p[S] = \\gamma \\sum_{i} \\sum_{j \\in N(i)} { - S_i S_j} $ \n",
30 | "\n",
31 | "The data term is defined as:\n",
32 | "\n",
33 | "$ E_d[S, I] = \\eta \\sum_{i} (I_i - S_i)^2 $\n",
34 | "\n",
35 | "\n",
36 | "These two terms are combined to get the energy.\n",
37 | "\n",
38 | "$ E[S] = E_p[S] + E_d[S, I] $\n",
39 | "\n",
40 | "Then the posterior of the labeling $S$ given the image $I$ (with temperature parameter $T$) is\n",
41 | "\n",
42 | "$ P(S|I) = \\frac{1}{Z} \\exp\\left( - \\frac{E[S]}{T} \\right) $"
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "metadata": {},
48 | "source": [
49 | "The block of code below initializes the ipython notebook"
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": null,
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "# Initiialization code\n",
59 | "%matplotlib inline\n",
60 | "import numpy as np\n",
61 | "# from pylab import imshow, show, get_cmap, imread, figure, subplots, title, subplot\n",
62 | "import matplotlib.pyplot as plt\n",
63 | "from numpy import random\n",
64 | "import pylab as pl"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {},
70 | "source": [
71 | "The block of code below loads an image and normalizes it to the range $[-1, 1]$."
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": null,
77 | "metadata": {},
78 | "outputs": [],
79 | "source": [
80 | "im = plt.imread('data/gibbs/cat4.jpg')\n",
81 | "plt.imshow(im)\n",
82 | "\n",
83 | "def myimshow(state):\n",
84 | " plt.imshow(state, interpolation='nearest')\n",
85 | "\n",
86 | "# Preprocess image to range (-1, 1)\n",
87 | "def preproc_data(im, scale=0.1, debug=False):\n",
88 | " import skimage.color\n",
89 | " import skimage.transform\n",
90 | " \n",
91 | " tinyim = skimage.transform.rescale(im, scale,multichannel=True)\n",
92 | " grayim = skimage.color.rgb2gray(tinyim)\n",
93 | "\n",
94 | " # Linear map the data to -1, 1\n",
95 | " scale = grayim.max() - grayim.min()\n",
96 | " data = 2 * (grayim - grayim.min()) / scale - 1\n",
97 | " if debug:\n",
98 | " print('original range:', grayim.min(), grayim.max())\n",
99 | " print('remapped range:', data.min(), data.max())\n",
100 | "\n",
101 | " return [data, tinyim]\n",
102 | "\n",
103 | "[data, im] = preproc_data(im, debug=True) # data is normalized image"
104 | ]
105 | },
106 | {
107 | "cell_type": "markdown",
108 | "metadata": {},
109 | "source": [
110 | "The block of code below defines the neighborhood structure for the Gibbs sampler."
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "metadata": {},
117 | "outputs": [],
118 | "source": [
119 | "def getneighor(y, x, h, w): # get 4-side neighbor\n",
120 | " n = []\n",
121 | " if (x != 0): n.append((y, x-1))\n",
122 | " if (x != w-1): n.append((y, x+1))\n",
123 | " if (y != 0): n.append((y-1, x))\n",
124 | " if (y != h-1): n.append((y+1, x))\n",
125 | " return n\n",
126 | "\n",
127 | "def poslist(h,w):\n",
128 | " '''Get point list of a grid'''\n",
129 | " pos = []\n",
130 | " for x in range(w):\n",
131 | " for y in range(h):\n",
132 | " pos.append((y, x))\n",
133 | " return pos"
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "metadata": {},
139 | "source": [
140 | "Define a utility function to compute energy."
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "metadata": {},
147 | "outputs": [],
148 | "source": [
149 | "def energy_prior(state, gamma):\n",
150 | " total = 0\n",
151 | " (h, w) = state.shape\n",
152 | " pos = poslist(h, w)\n",
153 | " for p in pos:\n",
154 | " neighbor = getneighor(p[0], p[1], h, w) # compute neighbor\n",
155 | " \n",
156 | " for n in neighbor:\n",
157 | " total += state[p[0]][p[1]] * state[n[0]][n[1]]\n",
158 | " E = - gamma * total\n",
159 | " return E\n",
160 | " \n",
161 | "def energy_data(state, data, eta):\n",
162 | " E = eta * sum((data - state)**2)\n",
163 | " return E\n",
164 | "\n",
165 | "def energy(state, data, gamma, eta):\n",
166 | " return energy_prior(state, gamma) + energy_data(state, data, eta)"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {},
172 | "source": [
173 | "Define the Gibbs sampler."
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": null,
179 | "metadata": {},
180 | "outputs": [],
181 | "source": [
182 | "def gibbs_sampler(state, data, gamma, eta, debug=False): # 0/1 state\n",
183 | " (h, w) = state.shape\n",
184 | " new_state = state.copy()\n",
185 | " pos = poslist(h, w)\n",
186 | " for p in pos:\n",
187 | " neighbor_pos = getneighor(p[0], p[1], h, w)\n",
188 | " neighbor_value = [new_state[n[0]][n[1]] for n in neighbor_pos]\n",
189 | "\n",
190 | " tmp1 = -gamma * -1 * sum(neighbor_value) # x_i = -1\n",
191 | " tmp2 = -gamma * 1 * sum(neighbor_value) # x_i = 1\n",
192 | " \n",
193 | " # add data term\n",
194 | " v = data[p[0]][p[1]]\n",
195 | " tmp1 += eta * (v - (-1))**2 # x_i = -1\n",
196 | " tmp2 += eta * (v - 1)**2 # x_i = 1\n",
197 | " \n",
198 | " tmp1 = np.exp(-tmp1)\n",
199 | " tmp2 = np.exp(-tmp2)\n",
200 | "\n",
201 | " p1 = tmp1 / (tmp1 + tmp2)\n",
202 | " prob = random.uniform() # roll a dice\n",
203 | " \n",
204 | " if (debug): print(p1)\n",
205 | " if (prob > p1):\n",
206 | " new_state[p[0]][p[1]] = 1\n",
207 | " else:\n",
208 | " new_state[p[0]][p[1]] = -1\n",
209 | " return new_state"
210 | ]
211 | },
212 | {
213 | "cell_type": "markdown",
214 | "metadata": {},
215 | "source": [
216 | "# Animation: sample with data term included\n",
217 | "Run this demo below; make sure to watch the animation as it happens!"
218 | ]
219 | },
220 | {
221 | "cell_type": "code",
222 | "execution_count": null,
223 | "metadata": {},
224 | "outputs": [],
225 | "source": [
226 | "from IPython.display import display, clear_output\n",
227 | "import time\n",
228 | "random_seed = 1 # Change this in your experiment\n",
229 | "random.seed(random_seed)\n",
230 | "\n",
231 | "(h, w) = data.shape\n",
232 | "mat = random.random((h,w))\n",
233 | "mat[mat>0.5] = 1\n",
234 | "mat[mat<=0.5] = -1\n",
235 | "random_state = mat\n",
236 | "\n",
237 | "\n",
238 | "# Initial the random state\n",
239 | "init_state = random_state\n",
240 | "\n",
241 | "# Set parameters\n",
242 | "gamma = 20\n",
243 | "eta = 1\n",
244 | "\n",
245 | "new_state = random_state.copy()\n",
246 | "E = [energy(init_state, data, gamma, eta)]# array of energies at each iteration\n",
247 | "\n",
248 | "\n",
249 | "f, ax = plt.subplots() # prepare animation\n",
250 | "for i in range(100):\n",
251 | " clear_output(wait=True)\n",
252 | " new_state = gibbs_sampler(new_state, data, gamma, eta)\n",
253 | " E.append(energy(new_state, data, gamma, eta))\n",
254 | " # time.sleep(1)\n",
255 | " myimshow(new_state)\n",
256 | " display(f)\n",
257 | "\n",
258 | "plt.title(\"Foreground\")\n",
259 | "mask = (new_state==1)\n",
260 | "fg = data.copy()\n",
261 | "x=range(30)\n",
262 | "\n",
263 | "plt.imshow(fg, cmap='gray', interpolation='nearest')\n",
264 | "plt.subplots()\n",
265 | "print(E)\n"
266 | ]
267 | },
268 | {
269 | "cell_type": "markdown",
270 | "metadata": {},
271 | "source": [
272 | "# Questions: Gibbs sampler\n",
273 | "Set random_seed to a different value (and tell me what it is in your homework!)\n",
274 | "1. Try a few different values of $ \\gamma $, $ \\eta $, including special case that only contains the prior term. What happens when the parameters change? (6 points)\n",
275 | "2. Run with different images, plot your results. Find two or three images from the web or your image collection. Can you find an image that causes the model to identify the foreground poorly? Include the image that you use. (4 points)\n",
276 | "3. Around what iteration does the sampler converge for the Einstein image with $ \\gamma = 20 $ and $ \\eta = 1 $ and how do you know it? Don't just say \"the image stopped changing very much\"! Hint: Check the energy of each state $S$.(6 points)"
277 | ]
278 | },
279 | {
280 | "cell_type": "code",
281 | "execution_count": null,
282 | "metadata": {},
283 | "outputs": [],
284 | "source": []
285 | }
286 | ],
287 | "metadata": {
288 | "anaconda-cloud": {},
289 | "kernelspec": {
290 | "display_name": "Python 3",
291 | "language": "python",
292 | "name": "python3"
293 | },
294 | "language_info": {
295 | "codemirror_mode": {
296 | "name": "ipython",
297 | "version": 3
298 | },
299 | "file_extension": ".py",
300 | "mimetype": "text/x-python",
301 | "name": "python",
302 | "nbconvert_exporter": "python",
303 | "pygments_lexer": "ipython3",
304 | "version": "3.8.8"
305 | }
306 | },
307 | "nbformat": 4,
308 | "nbformat_minor": 1
309 | }
310 |
--------------------------------------------------------------------------------
/HW3/data/gibbs/a.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW3/data/gibbs/a.jpeg
--------------------------------------------------------------------------------
/HW3/data/gibbs/b.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW3/data/gibbs/b.jpeg
--------------------------------------------------------------------------------
/HW3/data/gibbs/c.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW3/data/gibbs/c.jpeg
--------------------------------------------------------------------------------
/HW3/data/gibbs/cat3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW3/data/gibbs/cat3.jpg
--------------------------------------------------------------------------------
/HW3/data/gibbs/cat4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW3/data/gibbs/cat4.jpg
--------------------------------------------------------------------------------
/HW3/data/gibbs/d.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW3/data/gibbs/d.jpeg
--------------------------------------------------------------------------------
/HW3/data/gibbs/e.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW3/data/gibbs/e.jpeg
--------------------------------------------------------------------------------
/HW3/data/gibbs/f.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW3/data/gibbs/f.jpeg
--------------------------------------------------------------------------------
/HW3/data/gibbs/gibbs_demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW3/data/gibbs/gibbs_demo.jpg
--------------------------------------------------------------------------------
/HW3/data/gibbs/moon.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW3/data/gibbs/moon.jpg
--------------------------------------------------------------------------------
/HW4/DeepNetwork.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Deep Network\n",
8 | "## This training is super super slow, it can take up hours!!!\n",
9 | "\n",
10 | "Originally by the Tensorflow authors, modified by Donald Li for Fall 2017\n",
11 | " \n",
12 | "This homework requires [tensorflow](https://www.tensorflow.org/install/), please make sure it is correctly installed before running the codes."
13 | ]
14 | },
15 | {
16 | "cell_type": "markdown",
17 | "metadata": {},
18 | "source": [
19 | "# Initialization and Preparing DATA\n"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": null,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "#Start a new session for tensorflow\n",
29 | "import tensorflow.compat.v1 as tf\n",
30 | "sess = tf.InteractiveSession()\n",
31 | "\n",
32 | "#Download Mnist dataset\n",
33 | "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
34 | "\n",
35 | "tf.disable_eager_execution()\n",
36 | "tf.disable_v2_behavior()\n",
37 | "\n",
38 | "#Initialize weight\n",
39 | "def weight_variable(shape):\n",
40 | " initial = tf.truncated_normal(shape, stddev=0.1)\n",
41 | " return tf.Variable(initial)\n",
42 | "\n",
43 | "def bias_variable(shape):\n",
44 | " initial = tf.constant(0.1, shape=shape)\n",
45 | " return tf.Variable(initial)\n",
46 | "\n",
47 | "#Initialize place holder\n",
48 | "x = tf.placeholder(tf.float32, shape=[None, 28,28])\n",
49 | "y_ = tf.placeholder(tf.int32, shape=[None])"
50 | ]
51 | },
52 | {
53 | "cell_type": "markdown",
54 | "metadata": {},
55 | "source": [
56 | "Here, we define what a convolutional layer and pooling layers. The following is the structure of the current convolutional network.\n",
57 | "
"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "metadata": {},
64 | "outputs": [],
65 | "source": [
66 | "def conv2d(x, W):\n",
67 | " return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n",
68 | "\n",
69 | "def max_pool_2x2(x):\n",
70 | " return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n",
71 | " strides=[1, 2, 2, 1], padding='SAME')\n",
72 | "\n",
73 | "def avg_pool_2x2(x):\n",
74 | " return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1],\n",
75 | " strides=[1, 2, 2, 1], padding='SAME')\n"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "metadata": {},
82 | "outputs": [],
83 | "source": [
84 | "import numpy as np\n",
85 | "def next_batch(num, data, labels):\n",
86 | " '''\n",
87 | " Return a total of `num` random samples and labels. \n",
88 | " '''\n",
89 | " idx = np.arange(0 , len(data))\n",
90 | " np.random.shuffle(idx)\n",
91 | " idx = idx[:num]\n",
92 | " data_shuffle = data[idx]\n",
93 | " labels_shuffle = labels[idx]\n",
94 | "\n",
95 | " return data_shuffle, labels_shuffle"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "batch[1].shape"
105 | ]
106 | },
107 | {
108 | "cell_type": "markdown",
109 | "metadata": {},
110 | "source": [
111 | "We have to specify how many parameters there should be, and for the first convolution and pooling layer, what kind of how many units are there, and how they are being connected. Here, h is the hidden layer."
112 | ]
113 | },
114 | {
115 | "cell_type": "code",
116 | "execution_count": null,
117 | "metadata": {},
118 | "outputs": [],
119 | "source": [
120 | "W_conv1 = weight_variable([5, 5, 1, 32])\n",
121 | "b_conv1 = bias_variable([32])\n",
122 | "x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
123 | "h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n",
124 | "h_pool1 = max_pool_2x2(h_conv1)\n",
125 | "#h_pool1 = avg_pool_2x2(h_conv1)"
126 | ]
127 | },
128 | {
129 | "cell_type": "markdown",
130 | "metadata": {},
131 | "source": [
132 | "Of course, we can do similar thing to make the second layer of the network, it is important to notice that the number of input and output units are differnet in different layers, so when you want to decrease or increase the number of layer, you have to be careful about that. The output image size is 7*7 here."
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": null,
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "W_conv2 = weight_variable([5, 5, 32, 64])\n",
142 | "b_conv2 = bias_variable([64])\n",
143 | "\n",
144 | "h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n",
145 | "h_pool2 = max_pool_2x2(h_conv2)\n",
146 | "#h_pool2 = avg_pool_2x2(h_conv2)"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "We add a fully-connected layer to allow processing on the entire image, here we have used a ReLu function. Then do the drop out process to prevent overfitting, finally there is a read out layer to get the results. "
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": null,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "W_fc1 = weight_variable([7 * 7 * 64, 1024])\n",
163 | "b_fc1 = bias_variable([1024])\n",
164 | "\n",
165 | "h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n",
166 | "h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n",
167 | "\n",
168 | "keep_prob = tf.placeholder(tf.float32)\n",
169 | "h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
170 | "W_fc2 = weight_variable([1024, 10])\n",
171 | "b_fc2 = bias_variable([10])\n",
172 | "\n",
173 | "y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n",
174 | "y_onehot = tf.one_hot(y_,10)"
175 | ]
176 | },
177 | {
178 | "cell_type": "markdown",
179 | "metadata": {},
180 | "source": [
181 | "Here, we train and test the model. Instead of using least square, we are using cross entropy to be the term being oprtimized. We do not use a steepest gradient descent here, but using a ADAM alogrithm to do the optimization, in every 100 iterations, results will be printed out. For each batch of training, we feed in 50 images. This training is super super slow, it can take up hours!!!"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "metadata": {},
188 | "outputs": [],
189 | "source": [
190 | "# Defining cross-entropy here\n",
191 | "cross_entropy = tf.reduce_mean(\n",
192 | " tf.nn.softmax_cross_entropy_with_logits(labels=y_onehot, logits=y_conv))\n",
193 | "train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n",
194 | "correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_onehot, 1))\n",
195 | "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
196 | "\n",
197 | "#Do the training\n",
198 | "with tf.Session() as sess:\n",
199 | " sess.run(tf.global_variables_initializer())\n",
200 | " for i in range(5000):\n",
201 | " batch = next_batch(50,x_train,y_train)\n",
202 | " if i % 100 == 0:\n",
203 | " train_accuracy = accuracy.eval(feed_dict={\n",
204 | " x: batch[0], y_: batch[1], keep_prob: 1.0})\n",
205 | " print('step %d, training accuracy %g' % (i, train_accuracy))\n",
206 | " train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n",
207 | "\n",
208 | "#Test the accuracy\n",
209 | " print('test accuracy %g' % accuracy.eval(feed_dict={\n",
210 | " x: x_test, y_: y_test, keep_prob: 1.0}))"
211 | ]
212 | },
213 | {
214 | "cell_type": "markdown",
215 | "metadata": {},
216 | "source": [
217 | "# Softmax Regression Model\n",
218 | "\n",
219 | "First, we have to initialize place holders and variables. \n",
220 | "It will be better to run the initialization and data preparation cell again before running the following!"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": null,
226 | "metadata": {},
227 | "outputs": [],
228 | "source": [
229 | "#Initialize weights and bias for the regression model\n",
230 | "#Start a new session for tensorflow\n",
231 | "import tensorflow.compat.v1 as tf\n",
232 | "sess = tf.InteractiveSession()\n",
233 | "\n",
234 | "#Download Mnist dataset\n",
235 | "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
236 | "\n",
237 | "tf.disable_eager_execution()\n",
238 | "tf.disable_v2_behavior()\n",
239 | "\n",
240 | "#Initialize place holder\n",
241 | "x = tf.placeholder(tf.float32, shape=[None, 28,28])\n",
242 | "y_ = tf.placeholder(tf.int32, shape=[None])\n",
243 | "\n",
244 | "\n",
245 | "W = tf.Variable(tf.zeros([784,10]))\n",
246 | "b = tf.Variable(tf.zeros([10]))\n",
247 | "\n",
248 | "sess.run(tf.global_variables_initializer())"
249 | ]
250 | },
251 | {
252 | "cell_type": "markdown",
253 | "metadata": {},
254 | "source": [
255 | "The class is predicted using a softmax model, and the loss function uses a cross entropy in this case."
256 | ]
257 | },
258 | {
259 | "cell_type": "code",
260 | "execution_count": null,
261 | "metadata": {},
262 | "outputs": [],
263 | "source": [
264 | "#Class prediction\n",
265 | "x_oneD = tf.reshape(x, [-1, 784])\n",
266 | "y = tf.matmul(x_oneD,W) + b\n",
267 | "y_onehot = tf.one_hot(y_,10)\n",
268 | "#Loss function\n",
269 | "cross_entropy = tf.reduce_mean(\n",
270 | " tf.nn.softmax_cross_entropy_with_logits(labels=y_onehot, logits=y))"
271 | ]
272 | },
273 | {
274 | "cell_type": "markdown",
275 | "metadata": {},
276 | "source": [
277 | "The following cell do the training for the regression model, you can try to manipulate the number of training done to see what's happen."
278 | ]
279 | },
280 | {
281 | "cell_type": "code",
282 | "execution_count": null,
283 | "metadata": {},
284 | "outputs": [],
285 | "source": [
286 | "#Do the gradient decent training \n",
287 | "train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n",
288 | "for _ in range(1000): #You may want to try 10,100,1000,10000\n",
289 | " batch = next_batch(100,x_train,y_train)\n",
290 | " train_step.run(feed_dict={x: batch[0], y_: batch[1]})"
291 | ]
292 | },
293 | {
294 | "cell_type": "markdown",
295 | "metadata": {},
296 | "source": [
297 | "After training, calculate the prediction and see how off it is to the true value."
298 | ]
299 | },
300 | {
301 | "cell_type": "code",
302 | "execution_count": null,
303 | "metadata": {},
304 | "outputs": [],
305 | "source": [
306 | "#Check the prediction\n",
307 | "correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_onehot,1))\n",
308 | "\n",
309 | "#Calculate and print the accuracy\n",
310 | "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
311 | "print(\"Accuracy is\", accuracy.eval(feed_dict={x: x_test, y_: y_test}))"
312 | ]
313 | },
314 | {
315 | "cell_type": "markdown",
316 | "metadata": {},
317 | "source": [
318 | "# HW\n",
319 | "\n",
320 | "1. Try both average pooling and max-pooling for the convolutional neural network, does the results agree on what you have discussed in Question 2-2? What is the difference between them? (5 points)\n",
321 | "2. Modify the code to make it into a one-layer network (it is very simple), but you just have to be careful about the dimension of each output layer to make sure they are correct, do you find a accuracy difference between one-layer convolutional network and a two-layer convolutional network? Report the results and explain (5 points)\n",
322 | "3. Run the simple softmax regression model and see how is the results different from the convolutional neural network? You should change the number of training and see how the accuaracy changes in the regression model. (5 points)"
323 | ]
324 | },
325 | {
326 | "cell_type": "code",
327 | "execution_count": null,
328 | "metadata": {},
329 | "outputs": [],
330 | "source": []
331 | }
332 | ],
333 | "metadata": {
334 | "anaconda-cloud": {},
335 | "kernelspec": {
336 | "display_name": "Python 3",
337 | "language": "python",
338 | "name": "python3"
339 | },
340 | "language_info": {
341 | "codemirror_mode": {
342 | "name": "ipython",
343 | "version": 3
344 | },
345 | "file_extension": ".py",
346 | "mimetype": "text/x-python",
347 | "name": "python",
348 | "nbconvert_exporter": "python",
349 | "pygments_lexer": "ipython3",
350 | "version": "3.8.8"
351 | }
352 | },
353 | "nbformat": 4,
354 | "nbformat_minor": 1
355 | }
356 |
--------------------------------------------------------------------------------
/HW4/MNIST_data/t10k-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW4/MNIST_data/t10k-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/HW4/MNIST_data/t10k-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW4/MNIST_data/t10k-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/HW4/MNIST_data/train-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW4/MNIST_data/train-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/HW4/MNIST_data/train-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW4/MNIST_data/train-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/HW4/mnist_deep.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccvl/VisualCortexCourse/162ffc4d47a751397157ef4c4aa88a11cebad2b7/HW4/mnist_deep.png
--------------------------------------------------------------------------------