34 | [binary float64 array payload of this pickle, garbled and duplicated in extraction; elided]
35 | p13
36 | tp14
37 | b.
--------------------------------------------------------------------------------
/tests/regularizedtau/sourcethree.p:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I20
15 | I1
16 | tp6
17 | cnumpy
18 | dtype
19 | p7
20 | (S'f8'
21 | p8
22 | I0
23 | I1
24 | tp9
25 | Rp10
26 | (I3
27 | S'<'
28 | p11
29 | NNNI-1
30 | I-1
31 | I0
32 | tp12
33 | bI00
34 | S'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00dK\x7f\xdc\x1fU\xd9\xbf\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99S\x16<\xda\x1a\xe6\xbf\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00H\x95\xba\x87I\xdd\xf4\xbf\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf3\x98\xa7\x98\xa9H\xa2\xbf'
35 | p13
36 | tp14
37 | b.
--------------------------------------------------------------------------------
/tests/regularizedtau/sourcetwo.p:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I20
15 | I1
16 | tp6
17 | cnumpy
18 | dtype
19 | p7
20 | (S'f8'
21 | p8
22 | I0
23 | I1
24 | tp9
25 | Rp10
26 | (I3
27 | S'<'
28 | p11
29 | NNNI-1
30 | I-1
31 | I0
32 | tp12
33 | bI00
34 | S"u\xbf\xb3\xe6\xa3\x0b\xe3\xbf+\xce\x06&\x93`\xc6?\x1e?\xf2\xb5\x9d\x1e\xb4?G\xa5\xc7\xe8[\xa5\xea\xbf\xb4\xd6{\x85&\xd0\xd9\xbf(\xe8\xcb\xb1L\x88\xe8?R\x8e\xac\xc8\xe2\x1c\xfb\xbf\x85\xf1.\xba\x15\x1e\xd4?\x97^\xd1*\xc9!\xe1\xbf\xe5?\xab\xd8\x7f#\xee?3\x08\xe4\xc3\xdb\xc6\xea?\xd6\x15\xf7<\xa8\xb6\xd2?\xd2\xcfH\xd6H#\xd6?\xbb\xe6\xb5)\xcb\xd4\xcb\xbfAl\xc1l\xe1|\xe3?\x13=\xf7ts\xb2\xe5\xbf\\yU\x7f\xbbB\xcb\xbf[\xf6aq'~\xf2\xbf+\xb4#\xb5\x88\x9b\xee?\xa6\x88ML|t\xec\xbf"
35 | p13
36 | tp14
37 | b.
--------------------------------------------------------------------------------
/tests/regularizedtau/tau.p:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I5
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f8'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S':\xeb\x96T\x84\xe3\x97?\xc43\x1f\x0e\xf9\xee\xa3?F\xcaAz\x007\xa2?\x86\x8b\xce\t\x07\xce\xaa?\x99w\xa6\xc7\x9e\xcb\xa2?'
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
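A note on the .p fixtures above: they are protocol-0 pickles of NumPy arrays (hence the cnumpy.core.multiarray/_reconstruct opcodes and the '<f8' little-endian float64 dtype). A minimal loading sketch; the encoding='latin1' argument is only needed when unpickling these Python 2-era files under Python 3:

    import pickle

    # NumPy must be installed: unpickling reconstructs an ndarray.
    with open('tests/regularizedtau/tau.p', 'rb') as f:
        tau = pickle.load(f, encoding='latin1')  # latin1 keeps the raw bytes intact

    print(tau.shape, tau.dtype)  # (5,), dtype('<f8') for tau.p above

--------------------------------------------------------------------------------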
/tests/regularizedtau/tauscale.m:
--------------------------------------------------------------------------------
1 | function sigmas=tauscale(x,ktau,delta)
2 | % tau scales (row vector) of x for several constants ktau (row)
3 | %delta= "delta" for initial M-scale, default=0.5
4 | if nargin<3, delta=0.5; end
5 | sigmas=[]; s0=mscale(x,0,delta);
6 | %constant for consistency of s0
7 | c0=7.8464-34.6565*delta + 75.2573*delta^2 -62.5880*delta^3;
8 | s0=s0/c0;
9 | for k=ktau
10 | romed=mean(rho(x/(s0*k))); sig=k*s0*sqrt(romed);
11 | sigmas=[sigmas sig];
12 | end
13 |
14 | function r=rho(x) %Bisquare
15 | r= (1-(1-x.^2).^3 .*(abs(x)<=1))/3; %so that rho''(0)=2
--------------------------------------------------------------------------------
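For orientation, here is a minimal NumPy translation of tauscale.m above. The mscale helper (the initial M-scale, called as mscale(x, 0, delta) in the MATLAB file) is not part of this dump, so it is taken as a given here:

    import numpy as np

    def rho_bisquare(x):
        # bisquare rho, scaled so that rho''(0) = 2 (as in tauscale.m)
        return (1.0 - (1.0 - x ** 2) ** 3 * (np.abs(x) <= 1)) / 3.0

    def tauscale_sketch(x, ktau, delta=0.5, mscale=None):
        # tau scales of x for the constants in ktau; mscale is assumed to
        # behave like the MATLAB mscale(x, 0, delta) helper
        s0 = mscale(x, 0, delta)
        # constant for consistency of s0 (same polynomial as the .m file)
        c0 = 7.8464 - 34.6565 * delta + 75.2573 * delta ** 2 - 62.5880 * delta ** 3
        s0 = s0 / c0
        sigmas = []
        for k in np.atleast_1d(ktau):
            romed = np.mean(rho_bisquare(x / (s0 * k)))
            sigmas.append(k * s0 * np.sqrt(romed))
        return np.array(sigmas)

--------------------------------------------------------------------------------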
/tests/regularizedtau/toolboxutilities.py:
--------------------------------------------------------------------------------
1 | """
2 | ===================================================================
3 | UTILITIES
4 | This toolbox contains many useful functions, which are not related
5 | to solving the inverse problem itself.
6 | Marta Martinez-Camara, EPFL
7 | ===================================================================
8 | """
9 |
10 | # take the division operator from future versions
11 | from __future__ import division
12 | import linvpy as lp
13 |
14 |
15 | # -------------------------------------------------------------------
16 | # Getting the source vector
17 | # -------------------------------------------------------------------
18 | def getsource(sourcetype, sourcesize, k=1):
19 | import numpy as np
20 | import pickle
21 | import sys # to be able to exit
22 | if sourcetype == 'random': # Gaussian iid source, mu = 0, sigma = 1
23 | x = np.random.randn(sourcesize, 1) # source vector
24 | elif sourcetype == 'sparse':
25 | sparsity = int(k * sourcesize) # number of nonzero entries: fraction k of the source
26 | x = np.zeros((sourcesize, 1)) # initialization with a zero source
27 | p = np.random.permutation(sourcesize)
28 | nonz = p[0:sparsity] # getting random indexes for the nonzero values
29 | # get randomly the value for the non zero elements
30 | x[nonz] = np.random.randn(sparsity, 1)
31 | elif sourcetype == 'constant':
32 | x = np.zeros((sourcesize, 1)) # initialization with a zero source
33 | x[7:15] = 1 # making a piecewise source
34 |
35 | else: # unknown type of source
36 | sys.exit('unknown source type %s' % sourcetype) # die gracefully
37 | x = np.asarray(x)
38 | return x # what we were asked to deliver
39 |
40 |
41 | # -------------------------------------------------------------------
42 | # Getting the sensing matrix
43 | # -------------------------------------------------------------------
44 | def getmatrix(sourcesize, matrixtype, measurementsize, conditionnumber=1):
45 | import numpy as np
46 | import pickle
47 | import sys # to be able to exit
48 | if matrixtype == 'random': # Gaussian iid matrix, mu = 0, sigma = 1
49 | a = np.random.randn(measurementsize, sourcesize) # sensing matrix
50 | elif matrixtype == 'illposed':
51 | # start from a random matrix; its condition number is set below
52 | a = np.random.randn(measurementsize, sourcesize)
53 | u, s, v = np.linalg.svd(a) # get the svd decomposition
54 | nsv = min(sourcesize, measurementsize) # number of sv
55 | # modify the sv to make cond(A) = conditionnumber
56 | s[np.nonzero(s)] = np.linspace(conditionnumber, 1, nsv)
57 | sm = np.zeros((measurementsize, sourcesize))
58 | sm[:sourcesize, :sourcesize] = np.diag(s)
59 | a = np.dot(u, np.dot(sm, v)) # putting everything together
60 | else: # unknown type of matrix
61 | sys.exit('unknown matrix type %s' % matrixtype) # die gracefully
62 | a = np.asarray(a)
63 | return a # what we were asked to deliver
64 |
65 |
66 | # -------------------------------------------------------------------
67 | # Getting the measurements
68 | # -------------------------------------------------------------------
69 | def getmeasurements(a, x, noisetype, var=1, outlierproportion=0):
70 | import numpy as np
71 | import pickle
72 | import sys # to be able to exit
73 | import matplotlib.pyplot as plt
74 | # import statistics as st
75 | measurementsize = a.shape[0] # number of measurements
76 | y = np.dot(a, x) # noiseless measurements
77 | if noisetype == 'none': # noiseless case
78 | n = np.zeros((measurementsize, 1)) # zero noise
79 | elif noisetype == 'gaussian': # gaussian noise
80 | n = var * np.random.randn(measurementsize, 1)
81 | elif noisetype == 'outliers': # gaussian noise plus outliers
82 | # additive Gaussian noise
83 | n = var * np.random.randn(measurementsize, 1)
84 | p = np.random.permutation(measurementsize)
85 | # how many measurements are outliers
86 | noutliers = int(np.round(outlierproportion * measurementsize))
87 | outindex = p[0:noutliers] # getting random indexes for the outliers
88 | # the outliers have a variance ten times larger than clean data
89 | n[outindex] = np.var(y) * 10 * np.random.randn(noutliers, 1)
90 |
91 | else: # unknown type of additive noise
92 | sys.exit('unknown noise type %s' % noisetype) # die gracefully
93 | yn = y + n # get the measurements
94 | yn = np.asarray(yn)
95 | #plt.stem(n, 'b')
96 | # plt.show(block=True)
97 | #plt.stem(y, 'kd-')
98 | #plt.stem(yn, 'rs--')
99 | # plt.show() # show figure
100 |
101 | return yn # what we were asked to deliver
102 |
103 |
104 | # -------------------------------------------------------------------
105 | # Score functions for the robust regressors
106 | # -------------------------------------------------------------------
107 | def scorefunction(u, kind, clipping):
108 | import sys # to be able to exit
109 |
110 | # print"u =", u
111 |
112 | if kind == 'huber': # Huber score function
113 | score = huber(u, clipping) # get the estimate
114 | elif kind == 'squared':
115 | score = u
116 | elif kind == 'optimal':
117 | score = scoreoptimal(u, clipping)
118 | elif kind == 'tau':
119 | # here we compute the score function for the tau.
120 | # psi_tau = weighttau * psi_1 + psi_2
121 | weighttau = tauweights(u, 'optimal', clipping)
122 |
123 | #weighttau = lp.tau_weights_new(u, clipping)
124 |
125 | score = weighttau * \
126 | scoreoptimal(u, clipping[0]) + scoreoptimal(u, clipping[1])
127 | else: # unknown method
128 | sys.exit('unknown method %s' % kind) # die gracefully
129 | return score # return the score function that we need
130 |
131 |
132 | # -------------------------------------------------------------------
133 | # Huber score function
134 | # -------------------------------------------------------------------
135 | def huber(u, clipping):
136 | import numpy as np
137 | u = np.array(u) # converting to np array
138 | p = np.zeros(u.shape) # new array for the output
139 | u_abs = np.abs(u)
140 | i = u_abs <= clipping # logical array
141 | p[i] = u[i] # middle part of the function
142 | i = u_abs > clipping # outer part of the function
143 | p[i] = np.sign(u[i]) * clipping
144 | return p
145 |
146 |
147 | # -------------------------------------------------------------------
148 | # Optimal score function
149 | # -------------------------------------------------------------------
150 | def scoreoptimal(u, clipping):
151 | import numpy as np
152 | u = np.array(u)
153 | p = np.zeros(u.shape)
154 | uabs = np.abs(u) # u absolute values
155 | i = uabs <= 2 * clipping # central part of the score function
156 | p[i] = u[i] / clipping ** 2 / 3.25
157 | i = np.logical_and(uabs > 2 * clipping, uabs <= 3 * clipping)
158 | f = lambda z: (-1.944 * z / clipping ** 2 + 1.728 * z ** 3 / clipping ** 4 - 0.312 * z ** 5 / clipping ** 6 +
159 | 0.016 * z ** 7 / clipping ** 8) / 3.25
160 | p[i] = f(u[i])
161 | return p
162 |
163 |
164 | # -------------------------------------------------------------------
165 | # Rho functions
166 | # -------------------------------------------------------------------
167 | def rhofunction(u, kind, clipping):
168 | import sys # to be able to exit
169 | if kind == 'optimal': # optimal rho function
170 | r = rhooptimal(u, clipping) # get the estimate
171 | else: # unknown method
172 | sys.exit('unknown rho function %s' % kind) # die gracefully
173 | return r # return the rho function values that we need
174 |
175 |
176 | # -----------------------------------------------
177 | # Optimal loss function (rho)
178 | # -------------------------------------------------------------------
179 | def rhooptimal(u, clipping):
180 | """
181 | The Fast-Tau Estimator for Regression, Matias SALIBIAN-BARRERA, Gert WILLEMS, and Ruben ZAMAR
182 | www.tandfonline.com/doi/pdf/10.1198/106186008X343785
183 |
184 | The equation is found p. 611. To get the exact formula, it is necessary to use 3*c instead of c.
185 | """
186 |
187 | import numpy as np
188 | y = np.abs(u / clipping)
189 | r = np.ones(u.shape)
190 | i = y <= 2. # middle part of the curve
191 | r[i] = y[i] ** 2 / 2. / 3.25
192 | i = np.logical_and(y > 2, y <= 3) # intermediate part of the curve
193 | f = lambda z: (1.792 - 0.972 * z ** 2 + 0.432 * z **
194 | 4 - 0.052 * z ** 6 + 0.002 * z ** 8) / 3.25
195 | r[i] = f(y[i])
196 | return r
197 |
198 |
199 | # -------------------------------------------------------------------
200 | # Weight for the score in the tau
201 | # -------------------------------------------------------------------
202 | def tauweights(u, lossfunction, clipping):
203 | """
204 | This routine computes the 'weighttau', necessary to build the psi_tau function
205 | :param u: vector with all the arguments we pass to the weights, so we just need to compute this value once
206 | to find the psi_tau
207 | :param lossfunction: huber, bisquare, optimal, etc
208 | :param clipping: the two values of the clipping parameters corresponding to rho_1, rho_2
209 | :return:
210 | """
211 |
212 | import numpy as np
213 | import sys
214 | if np.sum(scoreoptimal(u, clipping[0]) * u) == 0:
215 | return np.zeros(u.shape)
216 | if lossfunction == 'optimal': # weights for the rho tau.
217 | w = np.sum(2. * rhooptimal(u, clipping[1]) - scoreoptimal(u, clipping[1]) * u) \
218 | / np.sum(scoreoptimal(u, clipping[0]) * u)
219 | else:
220 | sys.exit('unknown type of loss function %s' %
221 | lossfunction) # die gracefully
222 | return w
223 |
224 |
225 | # -------------------------------------------------------------------
226 | # Weight functions for the IRLS
227 | # -------------------------------------------------------------------
228 | def weights(u, kind, lossfunction, clipping, nmeasurements):
229 | import sys # to be able to exit
230 | import numpy as np
231 | if kind == 'M': # if M-estimator
232 | if lossfunction == 'huber': # with Huber loss function
233 | # call the huber score function
234 | z = scorefunction(u, 'huber', clipping)
235 | w = np.zeros(u.shape)
236 | i = np.nonzero(u)
237 | # definition of the weights for M-estimator
238 | w[i] = z[i] / (2 * u[i])
239 | elif lossfunction == 'squared': # with square function
240 | # call the ls score function
241 | z = scorefunction(u, 'squared', clipping)
242 | w = np.zeros(u.shape)
243 | i = np.nonzero(u)
244 | w[i] = z[i] / (2 * u[i])
245 | elif lossfunction == 'optimal':
246 | z = scorefunction(u, 'optimal', clipping)
247 | w = np.zeros(u.shape)
248 | i = np.nonzero(u)
249 | w[i] = z[i] / (2 * u[i])
250 | else: # unknown loss function
251 | sys.exit('unknown type of loss function %s' %
252 | lossfunction) # die gracefully
253 | elif kind == 'tau': # if tau estimator
254 | # scorefunction is what we call our psi function
255 | z = scorefunction(u, 'tau', clipping)
256 | w = np.zeros(u.shape)
257 |
258 | # only for the non zero u elements
259 | i = np.nonzero(u)
260 | w[i] = z[i] / (2 * nmeasurements * u[i])
261 | else:
262 | # unknown method
263 | sys.exit('unknown type of weights %s' % kind) # die gracefully
264 |
265 | return w
266 |
267 |
268 | # -------------------------------------------------------------------
269 | # M - scale estimator function
270 | # -------------------------------------------------------------------
271 | def mscaleestimator(u, tolerance, b, clipping, kind):
272 | import numpy as np
273 | maxiter = 100
274 | s = np.median(np.abs(u)) / .6745 # initial MAD estimation of the scale
275 | if (s==0):
276 | s=1.0
277 | rho_old = np.mean(rhofunction(u / s, kind, clipping)) - b
278 | k = 0
279 | while np.abs(rho_old) > tolerance and k < maxiter:
280 | #TODO : I added this test to avoid division by zero
281 | if (s == 0):
282 | s=1.0
283 | delta = rho_old / \
284 | np.mean(scorefunction(u / s, kind, clipping) * u / s) / s
285 | isqu = 1
286 | ok = 0
287 | while isqu < 30 and ok != 1:
288 | rho_new = np.mean(rhofunction(u / (s + delta), kind, clipping)) - b
289 | if np.abs(rho_new) < np.abs(rho_old):
290 | s = s + delta
291 | ok = 1
292 | else:
293 | delta /= 2
294 | isqu += 1
295 | if isqu == 30:
296 | # we tell it to stop, but we keep the iter for info
297 | maxiter = k
298 | rho_old = rho_new
299 | k += 1
300 | return np.abs(s)
301 |
302 |
303 | # -------------------------------------------------------------------
304 | # tau - scale ** 2
305 | # -------------------------------------------------------------------
306 | def tauscale(u, lossfunction, clipping, b, tolerance=1e-5):
307 | import numpy as np
308 | m, n = u.shape
309 | mscale = mscaleestimator(
310 | u, tolerance, b, clipping[0], lossfunction) # M scale
311 | #TODO : maybe remove this, I added it to avoid dividing by zero
312 | if (mscale == 0):
313 | mscale = 1.0
314 | tscale = mscale ** 2 * \
315 | (1 / m) * np.sum(rhofunction(u / mscale, lossfunction, clipping[1])
316 | ) # (tau scale) ** 2
317 | return tscale
318 |
319 |
--------------------------------------------------------------------------------
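For reference, the piecewise polynomials implemented by rhooptimal and scoreoptimal above correspond to the following closed forms (c is the clipping constant, y = |u|/c); scoreoptimal is exactly the derivative of rhooptimal:

    \rho(u) = \frac{1}{3.25}\begin{cases}
        y^2/2 & y \le 2 \\
        1.792 - 0.972\,y^2 + 0.432\,y^4 - 0.052\,y^6 + 0.002\,y^8 & 2 < y \le 3 \\
        3.25 & y > 3
    \end{cases}

    \psi(u) = \rho'(u) = \frac{1}{3.25}\begin{cases}
        u/c^2 & |u| \le 2c \\
        -1.944\,u/c^2 + 1.728\,u^3/c^4 - 0.312\,u^5/c^6 + 0.016\,u^7/c^8 & 2c < |u| \le 3c \\
        0 & |u| > 3c
    \end{cases}

--------------------------------------------------------------------------------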
/tests/regularizedtau/toolboxutilities_latest.py:
--------------------------------------------------------------------------------
1 | """
2 | ===================================================================
3 | UTILITIES
4 | This toolbox contains many useful functions, which are not related
5 | to solving the inverse problem itself.
6 | Marta Martinez-Camara, EPFL
7 | ===================================================================
8 | """
9 |
10 | # take the division operator from future versions
11 | from __future__ import division
12 |
13 |
14 | # -------------------------------------------------------------------
15 | # Getting the source vector
16 | # -------------------------------------------------------------------
17 | def getsource(sourcetype, sourcesize, k=1):
18 | import numpy as np
19 | import pickle
20 | import sys # to be able to exit
21 | if sourcetype == 'random': # Gaussian iid source, mu = 0, sigma = 1
22 | x = np.random.randn(sourcesize, 1) # source vector
23 | elif sourcetype == 'sparse':
24 | sparsity = int(k * sourcesize) # number of nonzero entries: fraction k of the source
25 | x = np.zeros((sourcesize, 1)) # initialization with a zero source
26 | p = np.random.permutation(sourcesize)
27 | nonz = p[0:sparsity] # getting random indexes for the nonzero values
28 | # get randomly the value for the non zero elements
29 | x[nonz] = np.random.randn(sparsity, 1)
30 | elif sourcetype == 'constant':
31 | x = np.zeros((sourcesize, 1)) # initialization with a zero source
32 | x[7:15] = 1 # making a piecewise source
33 |
34 | else: # unknown type of source
35 | sys.exit('unknown source type %s' % sourcetype) # die gracefully
36 | x = np.asarray(x)
37 | return x # what we were asked to deliver
38 |
39 |
40 | # -------------------------------------------------------------------
41 | # Getting the sensing matrix
42 | # -------------------------------------------------------------------
43 | def getmatrix(sourcesize, matrixtype, measurementsize, conditionnumber=1):
44 | import numpy as np
45 | import pickle
46 | import sys # to be able to exit
47 | if matrixtype == 'random': # Gaussian iid matrix, mu = 0, sigma = 1
48 | a = np.random.randn(measurementsize, sourcesize) # sensing matrix
49 | elif matrixtype == 'illposed':
50 | # start from a random matrix; its condition number is set below
51 | a = np.random.randn(measurementsize, sourcesize)
52 | u, s, v = np.linalg.svd(a) # get the svd decomposition
53 | nsv = min(sourcesize, measurementsize) # number of sv
54 | # modify the sv to make cond(A) = conditionnumber
55 | s[np.nonzero(s)] = np.linspace(conditionnumber, 1, nsv)
56 | sm = np.zeros((measurementsize, sourcesize))
57 | sm[:sourcesize, :sourcesize] = np.diag(s)
58 | a = np.dot(u, np.dot(sm, v)) # putting everything together
59 | else: # unknown type of matrix
60 | sys.exit('unknown matrix type %s' % matrixtype) # die gracefully
61 | a = np.asarray(a)
62 | return a # what we were asked to deliver
63 |
64 |
65 | # -------------------------------------------------------------------
66 | # Getting the measurements
67 | # -------------------------------------------------------------------
68 | def getmeasurements(a, x, noisetype, var=1, outlierproportion=0):
69 | import numpy as np
70 | import pickle
71 | import sys # to be able to exit
72 | import matplotlib.pyplot as plt
73 | # import statistics as st
74 | measurementsize = a.shape[0] # number of measurements
75 | y = np.dot(a, x) # noiseless measurements
76 | if noisetype == 'none': # noiseless case
77 | n = np.zeros((measurementsize, 1)) # zero noise
78 | elif noisetype == 'gaussian': # gaussian noise
79 | n = var * np.random.randn(measurementsize, 1)
80 | elif noisetype == 'outliers': # gaussian noise plus outliers
81 | # additive Gaussian noise
82 | n = var * np.random.randn(measurementsize, 1)
83 | p = np.random.permutation(measurementsize)
84 | # how many measurements are outliers
85 | noutliers = int(np.round(outlierproportion * measurementsize))
86 | outindex = p[0:noutliers] # getting random indexes for the outliers
87 | # the outliers have a variance ten times larger than clean data
88 | n[outindex] = np.var(y) * 10 * np.random.randn(noutliers, 1)
89 |
90 | else: # unknown type of additive noise
91 | sys.exit('unknown noise type %s' % noisetype) # die gracefully
92 | yn = y + n # get the measurements
93 | yn = np.asarray(yn)
94 | #plt.stem(n, 'b')
95 | # plt.show(block=True)
96 | #plt.stem(y, 'kd-')
97 | #plt.stem(yn, 'rs--')
98 | # plt.show() # show figure
99 |
100 | return yn # what we were asked to deliver
101 |
102 |
103 | # -------------------------------------------------------------------
104 | # Score functions for the robust regressors
105 | # -------------------------------------------------------------------
106 | def scorefunction(u, kind, clipping):
107 | import sys # to be able to exit
108 | if kind == 'huber': # Huber score function
109 | score = huber(u, clipping) # get the estimate
110 | elif kind == 'squared':
111 | score = u
112 | elif kind == 'optimal':
113 | score = scoreoptimal(u, clipping)
114 | elif kind == 'tau':
115 | # here we compute the score function for the tau.
116 | # psi_tau = weighttau * psi_1 + psi_2
117 | weighttau = tauweights(u, 'optimal', clipping)
118 | # print weighttau
119 |
120 | #weighttau = lp.tau_weights_new(u, clipping)
121 |
122 | score = weighttau * \
123 | scoreoptimal(u, clipping[0]) + scoreoptimal(u, clipping[1])
124 | else: # unknown method
125 | sys.exit('unknown method %s' % kind) # die gracefully
126 | return score # return the score function that we need
127 |
128 |
129 | # -------------------------------------------------------------------
130 | # Huber score function
131 | # -------------------------------------------------------------------
132 | def huber(u, clipping):
133 | import numpy as np
134 | u = np.array(u) # converting to np array
135 | p = np.zeros(u.shape) # new array for the output
136 | u_abs = np.abs(u)
137 | i = u_abs <= clipping # logical array
138 | p[i] = u[i] # middle part of the function
139 | i = u_abs > clipping # outer part of the function
140 | p[i] = np.sign(u[i]) * clipping
141 | return p
142 |
143 |
144 | # -------------------------------------------------------------------
145 | # Optimal score function
146 | # -------------------------------------------------------------------
147 | def scoreoptimal(u, clipping):
148 | import numpy as np
149 | u = np.array(u)
150 | p = np.zeros(u.shape)
151 | uabs = np.abs(u) # u absolute values
152 | i = uabs <= 2 * clipping # central part of the score function
153 | p[i] = u[i] / clipping ** 2 / 3.25
154 | i = np.logical_and(uabs > 2 * clipping, uabs <= 3 * clipping)
155 | f = lambda z: (-1.944 * z / clipping ** 2 + 1.728 * z ** 3 / clipping ** 4 - 0.312 * z ** 5 / clipping ** 6 +
156 | 0.016 * z ** 7 / clipping ** 8) / 3.25
157 | p[i] = f(u[i])
158 | return p
159 |
160 |
161 | # -------------------------------------------------------------------
162 | # Rho functions
163 | # -------------------------------------------------------------------
164 | def rhofunction(u, kind, clipping):
165 | import sys # to be able to exit
166 | if kind == 'optimal': # optimal rho function
167 | r = rhooptimal(u, clipping) # get the estimate
168 | else: # unknown method
169 | sys.exit('unknown rho function %s' % kind) # die gracefully
170 | return r # return the rho function values that we need
171 |
172 |
173 | # -----------------------------------------------
174 | # Optimal loss function (rho)
175 | # -------------------------------------------------------------------
176 | def rhooptimal(u, clipping):
177 | """
178 | The Fast-Tau Estimator for Regression, Matias SALIBIAN-BARRERA, Gert WILLEMS, and Ruben ZAMAR
179 | www.tandfonline.com/doi/pdf/10.1198/106186008X343785
180 |
181 | The equation is found p. 611. To get the exact formula, it is necessary to use 3*c instead of c.
182 | """
183 |
184 | import numpy as np
185 | u = np.array(u)
186 | y = np.abs(u / clipping)
187 | r = np.ones(u.shape)
188 | i = y <= 2. # middle part of the curve
189 | r[i] = y[i] ** 2 / 2. / 3.25
190 | i = np.logical_and(y > 2, y <= 3) # intermediate part of the curve
191 | f = lambda z: (1.792 - 0.972 * z ** 2 + 0.432 * z **
192 | 4 - 0.052 * z ** 6 + 0.002 * z ** 8) / 3.25
193 | r[i] = f(y[i])
194 | return r
195 |
196 |
197 | # -------------------------------------------------------------------
198 | # Weight for the score in the tau
199 | # -------------------------------------------------------------------
200 | def tauweights(u, lossfunction, clipping):
201 | """
202 | This routine computes the 'weighttau', necessary to build the psi_tau function
203 | :param u: vector with all the arguments we pass to the weights, so we just need to compute this value once
204 | to find the psi_tau
205 | :param lossfunction: huber, bisquare, optimal, etc
206 | :param clipping: the two values of the clipping parameters corresponding to rho_1, rho_2
207 | :return:
208 | """
209 |
210 | import numpy as np
211 | import sys
212 | if np.sum(scoreoptimal(u, clipping[0]) * u) == 0:
213 | # return np.zeros(u.shape)
214 | return np.ones(u.shape)
215 | if lossfunction == 'optimal': # weights for the rho tau.
216 | w = (np.sum(2. * rhooptimal(u, clipping[1]) - scoreoptimal(u, clipping[1]) * u)
217 | ) / np.sum(scoreoptimal(u, clipping[0]) * u)
218 | else:
219 | sys.exit('unknown type of loss function %s' %
220 | lossfunction) # die gracefully
221 | return w
222 |
223 |
224 | # -------------------------------------------------------------------
225 | # Weight functions for the IRLS
226 | # -------------------------------------------------------------------
227 | def weights(u, kind, lossfunction, clipping, nmeasurements):
228 | import sys # to be able to exit
229 | import numpy as np
230 | if kind == 'M': # if M-estimator
231 | if lossfunction == 'huber': # with Huber loss function
232 | # call the huber score function
233 | z = scorefunction(u, 'huber', clipping)
234 | w = np.zeros(u.shape)
235 | i = np.nonzero(u)
236 | # definition of the weights for M-estimator
237 | w[i] = z[i] / (2 * u[i])
238 | elif lossfunction == 'squared': # with square function
239 | # call the ls score function
240 | z = scorefunction(u, 'squared', clipping)
241 | w = np.zeros(u.shape)
242 | i = np.nonzero(u)
243 | w[i] = z[i] / (2 * u[i])
244 | elif lossfunction == 'optimal':
245 | z = scorefunction(u, 'optimal', clipping)
246 | w = np.zeros(u.shape)
247 | i = np.nonzero(u)
248 | w[i] = z[i] / (2 * u[i])
249 | else: # unknown loss function
250 | sys.exit('unknown type of loss function %s' %
251 | lossfunction) # die gracefully
252 | elif kind == 'tau': # if tau estimator
253 | # scorefunction is what we call our psi function
254 | z = scorefunction(u, 'tau', clipping)
255 | # if r = zero, weights are equal to one
256 | w = np.ones(u.shape)
257 |
258 | # only for the non zero u elements
259 | i = np.nonzero(u)
260 | w[i] = z[i] / (2 * nmeasurements * u[i])
261 | else:
262 | # unknown method
263 | sys.exit('unknown type of weights %s' % kind) # die gracefully
264 |
265 | return w
266 |
267 |
268 | # -------------------------------------------------------------------
269 | # M - scale estimator function
270 | # -------------------------------------------------------------------
271 | def mscaleestimator(u, tolerance, b, clipping, kind):
272 | import numpy as np
273 | maxiter = 100
274 |
275 | #TODO : changed by Guillaume
276 | u = np.array(u)
277 |
278 | s = np.median(np.abs(u)) / .6745 # initial MAD estimation of the scale
279 | # if (s==0):
280 | # s=1.0
281 | rho_old = np.mean(rhofunction(u / s, kind, clipping)) - b
282 | k = 0
283 | while np.abs(rho_old) > tolerance and k < maxiter:
284 |
285 | #TODO : I added this test to avoid division by zero
286 | # if (s == 0):
287 | # s=1.0
288 |
289 | # print 'Marta score function = ', scorefunction(u / s, kind, clipping)
290 | #
291 | # # TODO : remove this
292 | # print 'Marta mean = ', np.mean(scorefunction(u / s, kind, clipping) * u / s)
293 | # if np.mean(scorefunction(u / s, kind, clipping) * u / s) == 0:
294 | # print 'MARTA MEAN = 0 !'
295 |
296 | delta = rho_old / \
297 | np.mean(scorefunction(u / s, kind, clipping) * u / s) / s
298 | isqu = 1
299 | ok = 0
300 | while isqu < 30 and ok != 1:
301 | rho_new = np.mean(rhofunction(u / (s + delta), kind, clipping)) - b
302 | if np.abs(rho_new) < np.abs(rho_old):
303 | s = s + delta
304 | ok = 1
305 | else:
306 | delta /= 2
307 | isqu += 1
308 | if isqu == 30:
309 | # we tell it to stop, but we keep the iter for info
310 | maxiter = k
311 | rho_old = rho_new
312 | k += 1
313 | return np.abs(s)
314 |
315 |
316 | # -------------------------------------------------------------------
317 | # Looking for initial solutions
318 | # -------------------------------------------------------------------
319 | def getinitialsolution(y, a):
320 | import numpy as np
321 | import toolboxinverse_latest as inv
322 | import sys
323 |
324 | # line added to keep a constant initialx for testing purpose. remove this later
325 | #return np.array([-0.56076046, -2.96528342]).reshape(-1,1)
326 |
327 | # TODO : remove this line "return np.random.rand(a.shape[1])", only for testing purpose
328 | #return np.random.rand(a.shape[1])
329 | #return np.random.rand(a.shape[1])
330 |
331 | m = a.shape[0] # getting dimensions
332 | n = a.shape[1] # getting dimensions
333 | k = 0 # counting iterations
334 | while k < 100:
335 | perm = np.random.permutation(m)
336 | subsample = perm[0:n] # random subsample
337 | ysubsample = y[subsample] # random measurements
338 | asubsample = a[subsample, :] # getting the rows
339 | r = np.linalg.matrix_rank(asubsample)
340 |
341 | # we assume that in these cases asubsample is well conditioned
342 | if r == n:
343 | # use it to generate a solution
344 | initialx = inv.leastsquares(ysubsample, asubsample)
345 | return initialx
346 | else:
347 | k += 1
348 | if k == 100:
349 | # die gracefully
350 | sys.exit('I could not find initial solutions!')
351 |
352 |
353 | # -------------------------------------------------------------------
354 | # tau - scale ** 2
355 | # -------------------------------------------------------------------
356 | def tauscale(u, lossfunction, clipping, b, tolerance=1e-5):
357 | import numpy as np
358 |
359 | #TODO : uncomment this line and remove m=u.shape[0]
360 | # m, n = u.shape
361 |
362 | m = u.shape[0]
363 |
364 | mscale = mscaleestimator(u, tolerance, b, clipping[0], lossfunction) # M scale
365 |
366 | # if mscale is zero, tauscale is zero as well
367 | if (mscale == 0):
368 | tscale = 0
369 | else:
370 | # (tau scale) ** 2
371 | tscale = mscale ** 2 * (1 / m) * np.sum(rhofunction(u / mscale, lossfunction, clipping[1]))
372 | return tscale
373 |
374 |
--------------------------------------------------------------------------------
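As a quick usage sketch, the three generators in the toolbox above chain into a synthetic inverse problem as follows (sizes and noise settings are illustrative; the import path mirrors the one used in test_final.py below):

    from regularizedtau import toolboxutilities_latest as util

    sourcesize, measurementsize = 20, 30

    x = util.getsource('random', sourcesize)                 # ground truth, iid N(0, 1)
    a = util.getmatrix(sourcesize, 'illposed', measurementsize,
                       conditionnumber=100)                  # sensing matrix, cond(A) = 100
    y = util.getmeasurements(a, x, 'outliers', var=0.1,
                             outlierproportion=0.2)          # Gaussian noise + 20% outliers

    print(a.shape, x.shape, y.shape)                         # (30, 20) (20, 1) (30, 1)

--------------------------------------------------------------------------------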
/tests/test.py:
--------------------------------------------------------------------------------
1 | __author__ = 'GuillaumeBeaud'
2 |
3 |
4 | # print '=========================================== LIMIT ====================================='
5 |
6 |
7 | # ============================================== ABOVE IS OK =====================================
8 | # ============================================== DEMO =====================================
9 |
10 | import numpy as np
11 | import linvpy as lp
12 |
13 | a = np.matrix([[1, 2], [3, 4], [5, 6]])
14 | y = np.array([1, 2, 3])
15 |
16 | # Define your own loss function
17 | class CustomLoss(lp.LossFunction):
18 |
19 | # Set your custom clipping
20 | def __init__(self, clipping=1.5):
21 | lp.LossFunction.__init__(self, clipping)
22 | if clipping is None:
23 | self.clipping = 0.7
24 |
25 | # Define your rho function : you can copy paste this and just change what's
26 | # inside the unit_rho
27 | def rho(self, array):
28 | # rho function of your loss function on ONE single element
29 | def unit_rho(element):
30 | # Simply return element + clipping for example
31 | return element + self.clipping
32 | # Vectorize the function
33 | vfunc = np.vectorize(unit_rho)
34 | return vfunc(array)
35 |
36 | # Define your psi function as the derivative of the rho function : you can
37 | # copy paste this and just change what's inside the unit_psi
38 | def psi(self, array):
39 | # psi function of your loss function on ONE single element
40 | def unit_psi(element):
41 | # Simply return 1 for example
42 | return 1
43 | # Vectorize the function
44 | vfunc = np.vectorize(unit_psi)
45 | return vfunc(array)
46 |
47 | custom_tau = lp.TauEstimator(loss_function=CustomLoss)
48 | print custom_tau.estimate(a,y)
49 |
50 |
51 | # Define your own regularization
52 | class CustomRegularization(lp.Regularization):
53 | pass
54 | # Define your regularization function here
55 | def regularize(self, a, y, lamb=0):
56 | return np.ones(a.shape[1])
57 |
58 | # Create your custom tau estimator with custom loss and regularization functions
59 | # Pay attention to pass the loss function as a REFERENCE (without the "()"
60 | # after the name, and the regularization as an OBJECT, i.e. with the "()").
61 | custom_tau = lp.TauEstimator(regularization=CustomRegularization())
62 | print custom_tau.estimate(a,y)
--------------------------------------------------------------------------------
/tests/test_final.py:
--------------------------------------------------------------------------------
1 | __author__ = 'GuillaumeBeaud'
2 |
3 | import linvpy as lp
4 | import numpy as np
5 | import random
6 | from random import randint
7 | from tests import generate_random as gen
8 | from regularizedtau import toolboxutilities as util
9 | from regularizedtau import toolboxutilities_latest as util_l
10 | from regularizedtau import linvpy_latest as lp_l
11 |
12 | # ===================================== DEFINITIONS ===================================
13 |
14 | TESTING_ITERATIONS = 20
15 |
16 | LOSS_FUNCTIONS = [lp.Huber, lp.Bisquare, lp.Cauchy, lp.Optimal] # references to loss classes, not instances
17 |
18 |
19 | # ===================================== TESTS ====================================
20 |
21 | # sets the print precision to 10 decimals
22 | np.set_printoptions(precision=10)
23 |
24 | def plot_loss_functions(interval):
25 | for loss in LOSS_FUNCTIONS:
26 | loss = loss() # instantiates the loss functions
27 | loss.plot(interval)
28 |
29 |
30 | def test_MEstimator():
31 | for loss in LOSS_FUNCTIONS:
32 | m_estimator = lp.MEstimator(loss_function=loss) # creates an m-estimator with each of the loss functions
33 | for i in range(2, TESTING_ITERATIONS):
34 | # random (A,y) tuple with i rows and A has a random number of columns between i and i+100
35 | m_estimator.estimate(
36 | np.random.rand(i, i + randint(0, 100)),
37 | np.random.rand(i).reshape(-1)
38 | )
39 |
40 |
41 | def test_M_weights():
42 | toolbox_losses = ['huber', 'optimal']
43 | lp_losses = [lp.Huber, lp.Optimal]
44 |
45 | for i in range(0, 2):
46 | A = np.random.rand(randint(1, 10), randint(1, 10))
47 | clipping = np.random.uniform(0.1, 5)
48 |
49 | # creates an instance of the loss function with the current clipping
50 | my_loss = lp_losses[i](clipping=clipping)
51 |
52 | uw = util.weights(A, 'M', toolbox_losses[i], clipping, None)
53 | lw = my_loss.m_weights(A)
54 |
55 | np.testing.assert_allclose(uw, lw)
56 |
57 |
58 | def test_MEstimator_ill_conditioned():
59 | for loss in LOSS_FUNCTIONS:
60 | m_estimator = lp.MEstimator(loss_function=loss) # creates an m-estimator with each of the loss functions
61 | for i in range(2, TESTING_ITERATIONS):
62 | # random (A,y) ill conditioned tuple with i rows
63 | m_estimator.estimate(
64 | gen.generate_random_ill_conditioned(i)[0],
65 | gen.generate_random_ill_conditioned(i)[1].reshape(-1)
66 | )
67 |
68 |
69 | # this is a smoke test that only checks the function never crashes, not a value test
70 | def test_tikhonov():
71 | tiko = lp.Tikhonov()
72 | for i in range(2, TESTING_ITERATIONS):
73 | # random (A,y) ill conditioned tuple with i rows
74 | tiko.regularize(
75 | gen.generate_random_ill_conditioned(i)[0],
76 | gen.generate_random_ill_conditioned(i)[1].reshape(-1),
77 | lamb=randint(0, 20)
78 | )
79 |
80 |
81 | # tests the rho_optimal and psi_optimal of LinvPy VS rhooptimal and scoreoptimal of toolbox
82 | def test_Optimal():
83 | for i in range(2, TESTING_ITERATIONS):
84 | # random clipping between 0.1 and 5
85 | CLIPPING = np.random.uniform(0.1, 5)
86 |
87 | # creates an instance of lp.Optimal
88 | opt = lp.Optimal(clipping=CLIPPING)
89 |
90 | # generates a random vector of size between 1 and 100
91 | y = np.random.rand(randint(1, 100))
92 |
93 | # optimal rho function of toolbox and optimal rho function of LinvPy
94 | rho_util = util.rhooptimal(np.asarray(y), CLIPPING)
95 | rho_lp = opt.rho(y)
96 |
97 | # optimal psi function of toolbox and optimal psi function of LinvPy
98 | psi_util = util.scoreoptimal(np.asarray(y), CLIPPING)
99 | psi_lp = opt.psi(y)
100 |
101 | # returns an error if the toolbox's rhooptimal and lp.Optimal.rho() are not equal
102 | np.testing.assert_allclose(rho_lp, rho_util)
103 |
104 | # returns an error if the toolbox's scoreoptimal and lp.Optimal.psi() are not equal
105 | np.testing.assert_allclose(psi_lp, psi_util)
106 |
107 |
108 | # tests the scorefunction of LinvPy VS scorefunction of toolbox
109 | def test_scorefunction():
110 | for i in range(2, TESTING_ITERATIONS):
111 | # CLIPPINGS = two random numbers between 0.1 and 5
112 | CLIPPINGS = (np.random.uniform(0.1, 5), np.random.uniform(0.1, 5))
113 |
114 | # creates an instance of tau estimator with the two random clippings
115 | tau = lp.TauEstimator(clipping_1=CLIPPINGS[0], clipping_2=CLIPPINGS[1], loss_function=lp.Optimal)
116 |
117 | # y = random vector of size between 1 and 100
118 | y = np.random.rand(randint(1, 100))
119 |
120 | # toolbox's scorefunction
121 | score_util = util.scorefunction(np.asarray(y), 'tau', CLIPPINGS)
122 |
123 | # linvpy's scorefunction
124 | score_lp = tau.score_function(y)
125 |
126 | # returns an error if the toolbox's scorefunction and lp's scorefunction are not equal
127 | np.testing.assert_allclose(score_lp, score_util)
128 |
129 |
130 | # tests linvpy's mscale VS toolbox mscale
131 | def test_mscale():
132 | for i in range(2, TESTING_ITERATIONS):
133 | # generates a random clipping between 0.1 and 5
134 | CLIPPING = np.random.uniform(0.1, 5)
135 |
136 | # creates an instance of TauEstimator
137 | tau = lp.TauEstimator(clipping_1=CLIPPING, clipping_2=CLIPPING, loss_function=lp.Optimal)
138 |
139 | # generates a random vector of size between 1 and 100
140 | y = np.random.rand(randint(1, 100))
141 |
142 | # computes the mscale for linvpy and toolbox
143 | linvpy_scale = tau.m_scale(y)
144 | toolbox_scale = util.mscaleestimator(u=y, tolerance=1e-5, b=0.5, clipping=CLIPPING, kind='optimal')
145 |
146 | # verifies that both results are the same
147 | assert toolbox_scale == linvpy_scale
148 |
149 |
150 | def test_tau_scale():
151 | for i in range(2, TESTING_ITERATIONS):
152 | # generates random clipping between 0.1 and 5
153 | clipping_1 = np.random.uniform(0.1, 5)
154 | clipping_2 = np.random.uniform(0.1, 5)
155 |
156 | # generates a random vector of size between 1 and 100
157 | x = np.random.rand(randint(1, 100))
158 |
159 | my_tau = lp.TauEstimator(loss_function=lp.Optimal, clipping_1=clipping_1, clipping_2=clipping_2)
160 |
161 | linvpy_t = my_tau.tau_scale(x)
162 | util_t = util_l.tauscale(x, lossfunction='optimal', b=0.5, clipping=(clipping_1, clipping_2))
163 |
164 | np.testing.assert_allclose(linvpy_t, util_t)
165 |
166 |
167 | def test_M_estimator_VS_Marta():
168 | for i in range(3, TESTING_ITERATIONS):
169 | NOISE = np.random.uniform(0, 1.0)
170 | # NOISE = 0
171 | # lamb = np.random.uniform(0,1.0)
172 | lamb = 0
173 | clipping = np.random.uniform(0.1, 5)
174 |
175 | A, x, y, initial_vector, initial_scale = gen.gen_noise(i, i, NOISE)
176 |
177 | xhat_marta = lp_l.irls(
178 | matrix_a=A,
179 | vector_y=y,
180 | loss_function='huber',
181 | kind='M',
182 | regularization=lp_l.tikhonov_regularization,
183 | lamb=lamb,
184 | initial_x=initial_vector.reshape(-1, 1),
185 | scale=initial_scale,
186 | clipping=clipping)
187 |
188 | my_m = lp.MEstimator(clipping=clipping,
189 | loss_function=lp.Huber,
190 | scale=initial_scale,
191 | lamb=lamb)
192 |
193 | xhat_linvpy = my_m.estimate(A, y, initial_x=initial_vector)
194 |
195 | # print 'xhat marta = ', xhat_marta
196 | # print 'xhat linvpy = ', xhat_linvpy
197 | # print 'real x = ', x
198 | # very strict test; it passes sometimes and sometimes not (a difference of 0.0000001 makes it fail)
199 | np.testing.assert_allclose(xhat_linvpy, xhat_marta)
200 | # print '=========================================='
201 |
202 |
203 | # This test checks LinvPy2.0's tau estimator on all possible inputs and verifies there's no error.
204 | # NB: this DOES NOT test the mathematical correctness of outputs, it only tests that TauEstimator()
205 | # can handle any types of inputs without crashing.
206 | # For mathematical correctness, see test_TauEstimator_VS_Marta()
207 | def test_TauEstimator_alone():
208 | for i in range(2, TESTING_ITERATIONS):
209 |
210 | # tests all regularizations
211 | for reg in (lp.Tikhonov(), lp.Lasso()):
212 | # tests all loss functions
213 | for loss in LOSS_FUNCTIONS:
214 |
215 | # initializes random inputs
216 | lamb = randint(0, 20)
217 | c1 = np.random.uniform(0.1, 5)
218 | c2 = np.random.uniform(0.1, 5)
219 |
220 | # each clipping is chosen at random between a random number and None, with predominance for the number
221 | clipping_1 = random.choice([c1, c1, c1, None])
222 | clipping_2 = random.choice([c2, c2, c2, None])
223 |
224 | # creates a tau instance
225 | tau_estimator = lp.TauEstimator(
226 | loss_function=loss,
227 | regularization=reg,
228 | lamb=lamb,
229 | clipping_1=clipping_1,
230 | clipping_2=clipping_2) # creates a tau-estimator with each of the loss functions
231 |
232 | # random (A,y) tuple with i rows and A has a random number of columns between i and i+100
233 | tau_estimator.estimate(
234 | # A=np.random.rand(i, i + randint(0, 100)),
235 | a=np.random.rand(i, i + randint(0, 100)),
236 | y=np.random.rand(i).reshape(-1)
237 | )
238 |
239 |
240 | def test_score_function_is_odd():
241 | for loss in LOSS_FUNCTIONS:
242 |
243 | my_tau = lp.TauEstimator(loss_function=loss)
244 |
245 | # print 'loss = ', loss
246 |
247 | for i in range(2, TESTING_ITERATIONS):
248 |
249 | # generates a random vector of size 100 with negative and positive values
250 | y = np.random.randn(100)
251 |
252 | score = my_tau.score_function(y)
253 |
254 | # print y, score
255 |
256 | for i in range(len(score)):
257 | assert np.sign(score[i]) == np.sign(y[i])
258 |
259 | def test_TauEstimator_VS_Marta():
260 | for i in range(2, TESTING_ITERATIONS):
261 | # generates random clipping between 0.1 and 5
262 | clipping_1 = np.random.uniform(0.1, 5)
263 | clipping_2 = np.random.uniform(0.1, 5)
264 |
265 | # number of initial solutions passed to basictau (fixed to 1 here)
266 | n_initial_x = 1
267 |
268 | # generates a random matrix of size i x (i + randint(0, 10))
269 | A = np.random.rand(i, i + randint(0, 10))
270 |
271 | # generates a random vector of size i
272 | y = np.random.rand(i)
273 |
274 | my_tau = lp.TauEstimator(loss_function=lp.Optimal, clipping_1=clipping_1, clipping_2=clipping_2)
275 |
276 | linvpy_output = my_tau.estimate(a=A, y=y)
277 |
278 | marta_t = lp_l.basictau(
279 | a=A,
280 | y=np.matrix(y),
281 | loss_function='optimal',
282 | b=0.5,
283 | clipping=(clipping_1, clipping_2),
284 | ninitialx=n_initial_x
285 | )
286 |
287 | # print 'LinvPy Tau result = ', linvpy_output
288 | # print 'Marta Tau result = ', marta_t
289 | # print '========================'
290 | # print '========================'
291 | # print '========================'
292 | # print '========================'
293 |
294 | # asserts xhat are the same
295 | np.testing.assert_allclose(linvpy_output[0].reshape(-1, 1), marta_t[0])
296 |
297 | # test that simply covers the fast_tau for code coverage purposes
298 | def cover_fast_tau():
299 | my_tau = lp.TauEstimator()
300 | A = np.matrix([[2, 2], [3, 4], [7, 6]])
301 | y = np.array([1, 4, 3])
302 | my_tau.fast_estimate(A, y)
303 |
304 | # ===================================== MAIN ==================================
305 |
306 |
307 | # plot_loss_functions(15)
308 |
309 | test_TauEstimator_alone()
310 |
311 | test_TauEstimator_VS_Marta()
312 |
313 | test_M_weights()
314 |
315 | test_MEstimator()
316 |
317 | test_MEstimator_ill_conditioned()
318 |
319 | test_tikhonov()
320 |
321 | test_Optimal()
322 |
323 | test_scorefunction()
324 |
325 | test_mscale()
326 |
327 | test_tau_scale()
328 |
329 | test_M_estimator_VS_Marta()
330 |
331 | test_score_function_is_odd()
332 |
333 | cover_fast_tau()
--------------------------------------------------------------------------------