├── LICENSE README.txt ├── LICENSE-GPLv3.txt ├── README.md ├── VisemeNet_Annotation_README.md ├── data ├── ckpt │ └── pretrain_biwi │ │ └── copy_download_model_here ├── csv │ └── visemenet_intro │ │ ├── err.txt │ │ ├── test │ │ ├── clip_len.csv │ │ ├── file_dir.csv │ │ └── wav.csv │ │ ├── wav_mean_std.csv │ │ └── wav_raw.csv ├── output_viseme │ └── visemenet_intro │ │ ├── mayaparam_pred_cls.txt │ │ ├── mayaparam_pred_reg.txt │ │ └── mayaparam_viseme.txt └── test_audio │ ├── saved_param │ ├── maya_close_face.txt │ └── wav_mean_std.csv │ └── visemenet_intro.wav ├── main_test.py ├── maya_animation.py └── src ├── __init__.py ├── create_dataset_csv.py ├── eval_viseme.py ├── model.py ├── train_visemenet.py └── utl ├── __init__.py ├── load_param.py └── utl.py /LICENSE README.txt: -------------------------------------------------------------------------------- 1 | VisemeNet Copyright 2020 University of Massachusetts 2 | 3 | VisemeNet is licensed under the General Public License Version 3 (GPLv3), or under a Commercial License. 4 | 5 | The GPLv3 License may be found in the main directory under the file LICENSE-GPLv3.txt. 6 | 7 | To inquire into a Commercial License, please contact the University of Massachusetts Amherst Technology Transfer Office at tto@umass.edu, and copy to kalo@cs.umass.edu and yangzhou@cs.umass.edu. 8 | -------------------------------------------------------------------------------- /LICENSE-GPLv3.txt: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 
12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. 
For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 
83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 
117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 
146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 
216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. 
If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 
336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. 
If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 
428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # VisemeNet Code Readme 2 | 3 | ## Environment 4 | 5 | + Python 3.5 6 | + Tensorflow 1.1.0 7 | + Cudnn 5.0 8 | 9 | ## Python Package 10 | 11 | + numpy 12 | + scipy 13 | + python_speech_features 14 | + matplotlib 15 | 16 | ## Input/Output 17 | 18 | + Input audio needs to be 44.1kHz, 16-bit, WAV format 19 | + Output visemes are applicable to the JALI-based face-rig, see [HERE](http://www.dgp.toronto.edu/~elf/jali.html) 20 | 21 | ## JALI Viseme Annotation Dataset 22 | 23 | + BIWI dataset with well-annotated JALI viseme parameters. [[DATASET](https://www.dropbox.com/sh/oj13tvq9ggf2puz/AADBPyRUcyisFtKgCoDmNhLHa?dl=0)] [[README](VisemeNet_Annotation_README.md)] 24 | 25 | ## At test time: 26 | 27 | 1. **Create and install required envs and packages** 28 | ``` 29 | conda create -n visnet python=3.5 30 | 31 | # take care of your OS and python version, here is a Linux-64bit with Python3.5 link 32 | pip install --ignore-installed --upgrade https://download.tensorflow.google.cn/linux/gpu/tensorflow_gpu-1.1.0-cp35-cp35m-linux_x86_64.whl 33 | 34 | pip install PYTHON_PACKAGE_REQUIRED 35 | ``` 36 | 2. **Download this repository to your local machine:** 37 | ``` 38 | git clone https://github.com/yzhou359/VisemeNet_tensorflow.git 39 | 40 | cd VisemeNet_tensorflow 41 | ``` 42 | 3. 
**Prepare data and model:** 43 | * convert your test audio files into WAV format, put it to the directory data/test_audio/ 44 | * download the public face rig model from [HERE](https://www.dropbox.com/sh/7nbqgwv0zz8pbk9/AAAghy76GVYDLqPKdANcyDuba?dl=0), put all 4 files to data/ckpt/pretrain_biwi/ 45 | 46 | 4. **Forward inference:** 47 | * put your test audio file name in file 'main_test.py', line 7. 48 | * Then run command line 49 | ``` 50 | python main_test.py 51 | ``` 52 | The result locates at: 53 | ``` 54 | data/output_viseme/[your_audio_file_name]/mayaparam_viseme.txt 55 | ``` 56 | 5. **JALI animation in Maya:** 57 | * put your test audio file name in file 'maya_animation.py', line 4. 58 | * Then run 'maya_animation.py' in Maya with JALI environment to create talking face animation automatically. (If using different version of JALI face rig, the name of phoneme/co-articulation variable might varies.) 59 | * UPDATE: 'maya_animation.py' has been updated with the [public face rig](http://www.dgp.toronto.edu/~elf/jali.html) annotations. Feel free to play with it! 60 | 61 | -------------------------------------------------------------------------------- /VisemeNet_Annotation_README.md: -------------------------------------------------------------------------------- 1 | # VisemeNet Annotation README 2 | 3 | ## Dataset 4 | 5 | BIWI dataset, 14 speakers (8 female, 6 male) 6 | 7 | ## Structure 8 | 9 | There are 14 folders representing for 14 different speaker in BIWI dataset, such as 'F1', 'F2', ... 10 | 11 | In each folder, 12 | 13 | - file_dir.csv : an info file containing the frame range information in the annotation file for each video clip. 14 | 15 | ``` 16 | # FORMAT 17 | video_name start_frame_index frame_lenght 18 | ``` 19 | 20 | - maya_param_public_model.csv : the viseme annotation file for face rig http://www.dgp.toronto.edu/%7Eelf/jali.html] 21 | 22 | + Each line represents the viseme parameter values in each frame. 
23 | + The viseme parameters are in this order 24 | ``` 25 | 'JALI.translateX', 'JALI.translateY', 'AAA', 'Eh', 'AHH', 'OHH', 'UUU', 'IEE', 'RRR', 'WWW', 'SSS', 'FFF', 'TTH', 'MBP', 'SSH', 'Schwa', 'GK', 'LNTD', 'COARTIC.LNTD', 'COARTIC.GK', 'COARTIC.MMM', 'COARTIC.FFF', 'COARTIC.WA_PEDAL', 'COARTIC.YA_PEDAL' 26 | ``` 27 | 28 | - maya_param.csv : the viseme annotation file for the face rig (not public available now) used in paper. 29 | 30 | -------------------------------------------------------------------------------- /data/ckpt/pretrain_biwi/copy_download_model_here: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzhou359/VisemeNet_tensorflow/67f34950a215dd8288c8b94ddf885933f37a155d/data/ckpt/pretrain_biwi/copy_download_model_here -------------------------------------------------------------------------------- /data/csv/visemenet_intro/err.txt: -------------------------------------------------------------------------------- 1 | data/test_audio/visemenet_intro.wavFPS: 25 2 | -------------------------------------------------------------------------------- /data/csv/visemenet_intro/test/clip_len.csv: -------------------------------------------------------------------------------- 1 | 1484 2 | -------------------------------------------------------------------------------- /data/csv/visemenet_intro/test/file_dir.csv: -------------------------------------------------------------------------------- 1 | tro 0 1484 2 | -------------------------------------------------------------------------------- /data/csv/visemenet_intro/wav_mean_std.csv: -------------------------------------------------------------------------------- 1 | 16.68886 2 | 3.97479 3 | -20.30682 4 | 5.80699 5 | -15.43429 6 | 5.92388 7 | -28.63684 8 | 4.14146 9 | -19.75630 10 | -1.72316 11 | -10.24419 12 | 5.74067 13 | -3.86981 14 | 9.38637 15 | 10.30149 16 | 11.62886 17 | 12.49301 18 | 12.46678 19 | 11.90186 20 | 11.84886 21 | 11.95487 22 
| 12.28070 23 | 12.54658 24 | 12.54819 25 | 12.67694 26 | 12.96691 27 | 12.98765 28 | 12.97866 29 | 12.62430 30 | 12.24909 31 | 11.83247 32 | 11.93958 33 | 11.98562 34 | 12.08022 35 | 11.97421 36 | 11.62867 37 | 10.93464 38 | 8.95501 39 | 7.04081 40 | 87.12891 41 | 173.25781 42 | 292.47222 43 | 440.17648 44 | 596.93069 45 | 769.05987 46 | 974.09208 47 | 1206.45156 48 | 1504.64536 49 | 1807.83956 50 | 2177.74605 51 | 2595.55055 52 | 3045.21528 53 | 3550.92647 54 | 4109.56849 55 | 4749.31868 56 | 5456.19422 57 | 6408.52870 58 | 7440.71526 59 | 8554.42553 60 | 9794.58080 61 | 11161.64710 62 | 12757.65047 63 | 14405.03992 64 | 15984.00054 65 | 19289.26037 66 | 2.63713 67 | 15.46299 68 | 13.12569 69 | 17.44463 70 | 16.31907 71 | 16.44532 72 | 17.10880 73 | 17.89937 74 | 16.92003 75 | 15.19639 76 | 14.47684 77 | 13.30815 78 | 12.08762 79 | 2.62696 80 | 2.75437 81 | 2.83862 82 | 3.02088 83 | 3.12501 84 | 3.06590 85 | 2.99671 86 | 2.98436 87 | 2.99794 88 | 3.01999 89 | 2.97766 90 | 2.96631 91 | 3.07834 92 | 2.99264 93 | 2.93484 94 | 2.91424 95 | 2.81031 96 | 2.71607 97 | 2.81370 98 | 2.88491 99 | 2.96294 100 | 3.01507 101 | 3.03082 102 | 2.86959 103 | 2.59370 104 | 2.74709 105 | 1000.00000 106 | 1000.00000 107 | 26.24620 108 | 36.81842 109 | 38.78619 110 | 35.94704 111 | 41.87735 112 | 51.41814 113 | 68.31984 114 | 78.18270 115 | 85.86550 116 | 94.36517 117 | 99.69723 118 | 118.70094 119 | 136.14025 120 | 141.08837 121 | 179.08942 122 | 178.79265 123 | 180.67445 124 | 197.02229 125 | 204.96015 126 | 266.20027 127 | 254.49708 128 | 270.17589 129 | 540.48172 130 | 192.19627 131 | -------------------------------------------------------------------------------- /data/test_audio/saved_param/maya_close_face.txt: -------------------------------------------------------------------------------- 1 | -0.63522 -0.63037 -0.43071 -0.80431 -0.21347 -0.95468 -0.00000 -0.98242 0.21347 -0.95468 0.43071 -0.80456 0.63522 -0.63037 -0.20890 0.05236 -0.11252 0.02298 0.00000 0.00000 0.11252 
0.02211 0.20890 0.05198 -0.05739 -0.20821 -0.16401 -0.21268 -0.22784 -0.23076 -0.32045 -0.27540 -0.22764 -0.42029 -0.10126 -0.46787 -0.00000 -0.48066 0.10126 -0.46797 0.22764 -0.42056 0.33245 -0.27810 0.22784 -0.23076 0.16401 -0.21282 0.05739 -0.20839 0.00000 -0.23967 -0.17054 -0.30394 -0.28709 -0.28996 -0.20255 -0.30883 -0.08933 -0.32174 -0.00000 -0.32644 0.08933 -0.32183 0.20255 -0.30948 0.29165 -0.28881 0.17054 -0.30394 0.06859 -0.31126 -0.00000 -0.32164 -0.06859 -0.31113 2 | -------------------------------------------------------------------------------- /data/test_audio/saved_param/wav_mean_std.csv: -------------------------------------------------------------------------------- 1 | 16.68886 2 | 3.97479 3 | -20.30682 4 | 5.80699 5 | -15.43429 6 | 5.92388 7 | -28.63684 8 | 4.14146 9 | -19.75630 10 | -1.72316 11 | -10.24419 12 | 5.74067 13 | -3.86981 14 | 9.38637 15 | 10.30149 16 | 11.62886 17 | 12.49301 18 | 12.46678 19 | 11.90186 20 | 11.84886 21 | 11.95487 22 | 12.28070 23 | 12.54658 24 | 12.54819 25 | 12.67694 26 | 12.96691 27 | 12.98765 28 | 12.97866 29 | 12.62430 30 | 12.24909 31 | 11.83247 32 | 11.93958 33 | 11.98562 34 | 12.08022 35 | 11.97421 36 | 11.62867 37 | 10.93464 38 | 8.95501 39 | 7.04081 40 | 87.12891 41 | 173.25781 42 | 292.47222 43 | 440.17648 44 | 596.93069 45 | 769.05987 46 | 974.09208 47 | 1206.45156 48 | 1504.64536 49 | 1807.83956 50 | 2177.74605 51 | 2595.55055 52 | 3045.21528 53 | 3550.92647 54 | 4109.56849 55 | 4749.31868 56 | 5456.19422 57 | 6408.52870 58 | 7440.71526 59 | 8554.42553 60 | 9794.58080 61 | 11161.64710 62 | 12757.65047 63 | 14405.03992 64 | 15984.00054 65 | 19289.26037 66 | 2.63713 67 | 15.46299 68 | 13.12569 69 | 17.44463 70 | 16.31907 71 | 16.44532 72 | 17.10880 73 | 17.89937 74 | 16.92003 75 | 15.19639 76 | 14.47684 77 | 13.30815 78 | 12.08762 79 | 2.62696 80 | 2.75437 81 | 2.83862 82 | 3.02088 83 | 3.12501 84 | 3.06590 85 | 2.99671 86 | 2.98436 87 | 2.99794 88 | 3.01999 89 | 2.97766 90 | 2.96631 91 | 3.07834 92 | 
2.99264 93 | 2.93484 94 | 2.91424 95 | 2.81031 96 | 2.71607 97 | 2.81370 98 | 2.88491 99 | 2.96294 100 | 3.01507 101 | 3.03082 102 | 2.86959 103 | 2.59370 104 | 2.74709 105 | 1000.00000 106 | 1000.00000 107 | 26.24620 108 | 36.81842 109 | 38.78619 110 | 35.94704 111 | 41.87735 112 | 51.41814 113 | 68.31984 114 | 78.18270 115 | 85.86550 116 | 94.36517 117 | 99.69723 118 | 118.70094 119 | 136.14025 120 | 141.08837 121 | 179.08942 122 | 178.79265 123 | 180.67445 124 | 197.02229 125 | 204.96015 126 | 266.20027 127 | 254.49708 128 | 270.17589 129 | 540.48172 130 | 192.19627 131 | -------------------------------------------------------------------------------- /data/test_audio/visemenet_intro.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzhou359/VisemeNet_tensorflow/67f34950a215dd8288c8b94ddf885933f37a155d/data/test_audio/visemenet_intro.wav -------------------------------------------------------------------------------- /main_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from src.train_visemenet import test 3 | from src.create_dataset_csv import create_dataset_csv 4 | from src.utl.load_param import * 5 | from src.eval_viseme import eval_viseme 6 | 7 | test_audio_name = 'visemenet_intro.wav' 8 | 9 | 10 | # convert audio wav to network input format 11 | create_dataset_csv(csv_dir, test_audio_name=test_audio_name) 12 | 13 | # feedforward testing 14 | test(model_name='pretrain_biwi', test_audio_name=test_audio_name[:-4]) 15 | print('Finish forward testing.') 16 | 17 | # output viseme parameter 18 | eval_viseme(test_audio_name[:-4]) 19 | print('Done.') -------------------------------------------------------------------------------- /maya_animation.py: -------------------------------------------------------------------------------- 1 | import maya.cmds as cmds 2 | 3 | # change your test audio file name here 4 | test_audio_name = 
'visemenet_intro' 5 | 6 | # change to absolute root directory if necessary 7 | f_pred = '/Users/yangzhou/Documents/git/yzhou359/VisemeNet_tensorflow/data/output_viseme/' + test_audio_name + '/mayaparam_viseme.txt' 8 | 9 | params = ['Jaw', 'Lip', 'Ah', 'Aa', 'Eh', 'Ee', 'Ih', 'Oh', 'Uh', 'U', 'Eu', 'Schwa', 'R', 'S', 'Sh Ch Zh', 'Th', 'JY', 'LNTD', 'GK', 'MBP', 'FV', 'WA_PEDAL'] 10 | 11 | param_public_model = ['translateX', 'translateY', 'AAA', 'Eh', 'AHH', 'OHH', 'UUU', 'IEE', 'RRR', 'WWW', 'SSS', 'FFF', 'TTH', 'MBP', 'SSH', 'Schwa', 'GK', 'LNTD', 'COARTIC.LNTD', 'COARTIC.GK', 'COARTIC.MMM', 'COARTIC.FFF', 'COARTIC.WA_PEDAL', 'COARTIC.YA_PEDAL'] 12 | 13 | map_i = p_map = [0, 1, 3, 4, 2, [7, 8], 9, [5, 6], 12, 9, 13, 20, 15, 19, 14, 11, 18, 17, 17, 18, 19, 20, 21] 14 | 15 | 16 | pred = open(f_pred, 'r') 17 | y_pred = [] 18 | for line in pred: 19 | y_pred.append([float(f) for f in line.split()]) 20 | pred.close() 21 | 22 | print(len(y_pred)) 23 | 24 | for sample_idx in range(0, len(y_pred), 4): 25 | print(sample_idx) 26 | cmds.currentTime(sample_idx/4) 27 | #cmds.currentTime(sample_idx) 28 | sample = y_pred[sample_idx] 29 | #print sample 30 | for i in range(len(sample)): 31 | if sample[i] < 0: 32 | sample[i] = 0 33 | elif sample[i] > 1: 34 | sample[i] = 1 35 | 36 | for i in range(len(map_i)): 37 | if i in [0, 1]: 38 | cmds.setAttr("CNT_JaLi."+param_public_model[i], sample[map_i[i]]*10) 39 | cmds.setKeyframe("CNT_JaLi."+param_public_model[i]) 40 | 41 | elif i in range(2, 18): 42 | if(type(map_i[i]) == list): 43 | tmp = max(sample[map_i[i][0]], sample[map_i[i][1]]) 44 | else: 45 | tmp = sample[map_i[i]] 46 | cmds.setAttr("CNT_PHONEMES."+param_public_model[i], tmp*10) 47 | cmds.setKeyframe("CNT_PHONEMES."+param_public_model[i]) 48 | else: 49 | cmds.setAttr("CNT_"+param_public_model[i], sample[map_i[i]]*10) 50 | cmds.setKeyframe("CNT_"+param_public_model[i]) 51 | -------------------------------------------------------------------------------- /src/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzhou359/VisemeNet_tensorflow/67f34950a215dd8288c8b94ddf885933f37a155d/src/__init__.py -------------------------------------------------------------------------------- /src/create_dataset_csv.py: -------------------------------------------------------------------------------- 1 | from src.utl.load_param import * 2 | from src.utl.utl import try_mkdir 3 | import numpy as np 4 | import os, math 5 | import scipy.io.wavfile as wav 6 | from python_speech_features import logfbank, mfcc, ssc 7 | 8 | 9 | 10 | 11 | def create_dataset_csv(csv_dir, test_audio_name='test_audio.wav'): 12 | loaded_data = dict() 13 | loaded_data['wav'] = [] 14 | loaded_data['phoneme'] = [] 15 | loaded_data['landmark'] = [] 16 | loaded_data['maya_pos'] = [] 17 | loaded_data['maya_param'] = [] 18 | loaded_data['face_close'] = [] 19 | loaded_data['face_open'] = [] 20 | loaded_data['pose'] = [] 21 | loaded_data['file_len'] = {'train':0, 'test':0} 22 | loaded_data['clip_len'] = {'train':[], 'test':[]} 23 | loaded_data['file_dir'] = {'train':[], 'test':[]} 24 | dataset_type_order = ['test'] 25 | 26 | csv_dir += test_audio_name[:-4] + '/' 27 | try_mkdir(csv_dir) 28 | try_mkdir(csv_dir + 'test/') 29 | errf = open(csv_dir + 'err.txt', 'w') 30 | 31 | for dataset_type_i in range(0,1): # all from train file list 32 | dataset_type = dataset_type_order[dataset_type_i] 33 | 34 | file_list = {'n':1, 'wav':[lpw_dir+test_audio_name]} 35 | 36 | for nClip in range(0, file_list['n']): 37 | 38 | print('\n==================== Processing file {:} ===================='.format(file_list["wav"][nClip])) 39 | if (not os.path.isfile(file_list["wav"][nClip])): 40 | print('# ' + str(nClip) + ' None existing file: ' + file_list["wav"][nClip]) 41 | errf.write('# ' + str(nClip) + ' None existing file: ' + file_list["wav"][nClip] + '\n') 42 | continue 43 | 44 | # WAV 45 | (rate, sig) = wav.read(file_list["wav"][nClip]) 
46 | if (sig.ndim > 1): 47 | sig = sig[:, 0] # pick mono-acoustic track 48 | else: 49 | print('Notice: ' + file_list["wav"][nClip] + ' is mono-track') 50 | 51 | # fps = (nLandmark + 1) / (sig.shape[0] / rate) 52 | fps = 25 53 | errf.write(file_list["wav"][nClip] + 'FPS: {:} \n'.format(fps)) 54 | print('FPS: {:}'.format(fps)) 55 | winstep = 1.0 / fps / mfcc_win_step_per_frame / up_sample_rate 56 | mfcc_feat = mfcc(sig, samplerate=rate, winlen=0.025, winstep=winstep, numcep=13) 57 | logfbank_feat = logfbank(sig, samplerate=rate, winlen=0.025, winstep=winstep, nfilt=26) 58 | ssc_feat = ssc(sig, samplerate=rate, winlen=0.025, winstep=winstep, nfilt=26) 59 | full_feat = np.concatenate([mfcc_feat, logfbank_feat, ssc_feat], axis=1) 60 | # full_feat = logfbank_feat 61 | 62 | nFrames_represented_by_wav = math.floor(full_feat.shape[0] / mfcc_win_step_per_frame / up_sample_rate) 63 | mfcc_lines = full_feat[0: nFrames_represented_by_wav * mfcc_win_step_per_frame * up_sample_rate, :].reshape( 64 | int(nFrames_represented_by_wav * up_sample_rate), 65 | int(full_feat.shape[1] * mfcc_win_step_per_frame)) 66 | 67 | ''' 68 | # ==================== cut the tail of lpw to make sure they are in same length ==================== # 69 | ''' 70 | # print("Original length of lpw + maya_param/pos: " + str(nFrames_represented_by_wav)) 71 | aligned_length_wav = mfcc_lines 72 | 73 | ''' 74 | # ==================== process each lpw file ==================== # 75 | ''' 76 | 77 | npWav = np.array(aligned_length_wav) 78 | print("Load #Clip {:d}/{:}, wav {:}".format(nClip, file_list['n'], npWav.shape)) 79 | loaded_data['wav'].append(npWav) 80 | 81 | # length of each dataset_type 82 | loaded_data['file_len'][dataset_type] += npWav.shape[0] 83 | loaded_data['clip_len'][dataset_type].append(npWav.shape[0]) 84 | loaded_data['file_dir'][dataset_type].append(file_list["wav"][nClip][28:-4] 85 | + ' ' + str(loaded_data['file_len'][dataset_type] - npWav.shape[0]) 86 | + ' ' + str(npWav.shape[0])) 87 | # end 
for nClip loop 88 | # break 89 | 90 | # end for dataset_type loop 91 | # break 92 | 93 | ''' 94 | # ==================== save file ==================== # 95 | ''' 96 | key_order = ['wav'] 97 | for key_i in range(0, 1): 98 | key = key_order[key_i] 99 | # print(key) 100 | # ==================== wav normalize file ==================== # 101 | npKey = loaded_data[key][0] 102 | for i in range(1, len(loaded_data[key])): 103 | npKey = np.concatenate((npKey, loaded_data[key][i]), axis=0) 104 | 105 | # Use saved std & mean 106 | mean_std = np.loadtxt(lpw_dir + 'saved_param/wav_mean_std.csv') 107 | npKey_mean = mean_std[0:65] 108 | npKey_std = mean_std[65:130] 109 | 110 | def normal_data(loaded_data, mean, std): 111 | normed = (loaded_data - mean) / std 112 | return normed 113 | 114 | npKey = normal_data(npKey, npKey_mean, npKey_std) 115 | np.savetxt(csv_dir + key + '_mean_std.csv', np.append(npKey_mean, npKey_std), fmt='%.5f', delimiter=' ') 116 | np.savetxt(csv_dir + key + '_raw.csv', npKey, fmt='%.5f', delimiter=' ') 117 | del npKey 118 | 119 | def reshape_based_on_win_size(loaded_data, i, win_size, start_idx): 120 | npWav = (loaded_data[i] - npKey_mean) / npKey_std 121 | listWav = list(range(start_idx, start_idx + npWav.shape[0])) 122 | half_win_size = int(win_size / 2) 123 | pad_head = [start_idx for _ in range(half_win_size)] 124 | pad_tail = [listWav[-1] for _ in range(half_win_size)] 125 | pad_npWav = np.array(pad_head + listWav + pad_tail) 126 | npKey = np.zeros(shape=(npWav.shape[0], win_size)) 127 | for np_i in range(0, npWav.shape[0]): 128 | npKey[np_i] = pad_npWav[np_i:np_i + win_size].reshape(1, win_size) 129 | return npKey 130 | 131 | npKey = reshape_based_on_win_size(loaded_data['wav'], 0, win_size, 0) 132 | 133 | for i in range(1, len(loaded_data[key])): 134 | npKeytmp = reshape_based_on_win_size(loaded_data['wav'], i, win_size, npKey.shape[0]) 135 | npKey = np.concatenate((npKey, npKeytmp), axis=0) 136 | 137 | idx = 0 138 | for dataset_type_i in range(0, 
1): 139 | dataset_type = dataset_type_order[dataset_type_i] 140 | dataset_type_data_len = loaded_data['file_len'][dataset_type] 141 | cur_npKey = npKey[idx:idx+dataset_type_data_len] 142 | print('Save {:} - {:} file as shape of {:}'.format(dataset_type, key, cur_npKey.shape)) 143 | np.savetxt(csv_dir + dataset_type + '/' + key + '.csv', cur_npKey, fmt='%d', delimiter=' ') 144 | idx += dataset_type_data_len 145 | 146 | for dataset_type in {'test'}: 147 | npLen = np.array(loaded_data['clip_len'][dataset_type]) 148 | np.savetxt(csv_dir + dataset_type + '/clip_len.csv', npLen, fmt='%d', delimiter=' ') 149 | # print("Saved clip length file to " + dataset_type + '/clip_len.csv') 150 | npLen = np.array(loaded_data['file_dir'][dataset_type]) 151 | np.savetxt(csv_dir + dataset_type + '/file_dir.csv', npLen, fmt='%s', delimiter=' ') 152 | # print("Saved file dir file to " + dataset_type + '/file_dir.csv') 153 | 154 | -------------------------------------------------------------------------------- /src/eval_viseme.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy as sp 3 | from scipy import signal 4 | import matplotlib.pyplot as plt 5 | import copy 6 | from src.utl.load_param import * 7 | 8 | def eval_viseme(test_audio_name): 9 | 10 | def smooth(x, window_len=21, window='hanning'): 11 | 12 | if window_len < 3: 13 | return x 14 | 15 | if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: 16 | raise(ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'") 17 | 18 | s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]] 19 | # print(len(s)) 20 | if window == 'flat': # moving average 21 | w = np.ones(window_len, 'd') 22 | else: 23 | w = eval('np.' 
+ window + '(window_len)') 24 | 25 | y = np.convolve(w / w.sum(), s, mode='valid') 26 | return y 27 | 28 | src = pred_dir + test_audio_name + '/mayaparam' 29 | 30 | ac = np.loadtxt(src + '_pred_cls.txt') 31 | ar = np.loadtxt(src + '_pred_reg.txt') 32 | 33 | for i in range(2, ac.shape[1]): 34 | ac[2:-3, i] = sp.signal.medfilt(ac[2:-3, i], kernel_size=[9]) 35 | ac[:, i] = smooth(ac[:, i], 9)[4:-4] 36 | ar[:, i] = sp.signal.medfilt(ar[:, i], kernel_size=[9]) 37 | ar[:, i] = smooth(ar[:, i], 9)[4:-4] 38 | 39 | name_list = ['Ah', 'Aa', 'Eh', 'Ee', 'Ih', 'Oh', 'Uh', 'U', 'Eu', 'Schwa', 'R', 'S', 'ShChZh', 'Th', 40 | 'JY', 'LNTD', 'GK', 'MBP', 'FV', 'W'] 41 | 42 | #pho_thd = np.array([0.35, 0.23, 0.18, 0.17, 10, 0.19, 0.18, 0.19, 10, 0.16, 43 | # 0.18, 0.29, 0.29, 0.27, 10, 10, 10, 0.004, 0.29, 0.16]) # perfect 44 | 45 | pho_thd = np.array([0.12, 0.23, 0.18, 0.02, 10, 0.19, 0.18, 0.05, 10, 0.16, 46 | 0.18, 0.29, 0.29, 0.27, 10, 10, 10, 0.004, 0.29, 0.16]) 47 | 48 | nb = np.zeros_like(ac) 49 | nb[:, 0] = smooth(ac[:, 0], 15)[7:-7] 50 | nb[:, 1] = smooth(ac[:, 1], 15)[7:-7] 51 | 52 | for i in range (2, ac.shape[1]): 53 | # times ac and ar 54 | tmp = ac[:, i] * ar[:, i] 55 | # print(pho_thd[i-2]) 56 | l_idx = tmp > pho_thd[i-2] 57 | nb[l_idx, i] = ar[l_idx, i] 58 | 59 | nb[:, i] = smooth(nb[:, i], 15)[7:-7] 60 | 61 | r = 0 62 | while r < nb.shape[0]: 63 | if nb[r, i] > 0.1: 64 | active_begin = r 65 | for r2 in range(r, nb.shape[0]): 66 | if nb[r2, i] < 0.1 or r2 == nb.shape[0] - 1: 67 | active_end = r2 68 | r = r2 69 | break 70 | # print(active_begin, active_end) 71 | if (active_begin == active_end): 72 | break 73 | max_reg = np.max(ar[active_begin:active_end, i]) 74 | max_pred = np.max(nb[active_begin:active_end, i]) 75 | rate = max_reg / max_pred 76 | nb[active_begin:active_end, i] = nb[active_begin:active_end, i] * rate 77 | r += 1 78 | nb[:, i] = smooth(nb[:, i], 15)[7:-7] 79 | 80 | r = 0 81 | while r < nb.shape[0]: 82 | if nb[r, i] > 0.1: 83 | active_begin = r 84 | for 
r2 in range(r, nb.shape[0]): 85 | if nb[r2, i] < 0.1 or r2 == nb.shape[0] - 1: 86 | active_end = r2 87 | r = r2 88 | break 89 | # print(active_begin, active_end) 90 | max_reg = np.max(ar[active_begin:active_end, i]) 91 | if(i==19 or i==20 or i==21): 92 | if(max_reg>0.7): 93 | max_reg = 1 94 | max_pred = np.max(nb[active_begin:active_end, i]) 95 | rate = max_reg / max_pred 96 | nb[active_begin:active_end, i] = nb[active_begin:active_end, i] * rate 97 | r += 1 98 | 99 | np.savetxt(src + '_viseme.txt',nb, '%.4f') 100 | 101 | print('Create Viseme parameter in ' + pred_dir + test_audio_name[:-4] + '/mayaparam_viseme.txt') -------------------------------------------------------------------------------- /src/model.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | from src.utl.load_param import * 4 | import numpy as np 5 | import numpy.random 6 | 7 | 8 | def model(): 9 | 10 | # tf graph input 11 | with tf.name_scope('input'): 12 | batch_size_placeholder = tf.placeholder("float32") 13 | # net 1 14 | x = tf.placeholder("float32", [None, n_steps, n_input]) 15 | x_face_id = tf.placeholder("float32", [None, n_face_id]) 16 | y_landmark = tf.placeholder("float32", [None, n_landmark]) 17 | y_phoneme = tf.placeholder("float32", [None, n_phoneme]) 18 | y_lipS = tf.placeholder("float32", [None, 1]) 19 | phase = tf.placeholder(tf.bool, name='phase') 20 | # net 2 21 | y_maya_param = tf.placeholder("float32", [None, n_maya_param]) 22 | 23 | 24 | # fully connected layer weights and bias 25 | with tf.name_scope('net1_fc'): 26 | n_out_landmark_fc2 = n_landmark 27 | n_out_phoneme_fc2 = n_phoneme 28 | 29 | w1_land = tf.Variable(tf.concat( 30 | [tf.truncated_normal([n_hidden, n_out_fc1], stddev=2 / (n_hidden + n_out_fc1), dtype=tf.float32), 31 | tf.truncated_normal([n_face_id, n_out_fc1], stddev=2 / (n_face_id + n_out_fc1), dtype=tf.float32)], 32 | axis=0), name='net1_w1_land') 33 | w1_pho = tf.Variable( 34 | 
tf.truncated_normal([n_hidden + n_face_id, n_out_fc1], stddev=2 / (n_hidden + n_face_id + n_out_fc1), 35 | dtype=tf.float32), name='net1_w1_pho') 36 | 37 | w2_land = tf.Variable( 38 | tf.truncated_normal([n_out_fc1, n_out_landmark_fc2], stddev=2 / (n_out_fc1 + n_out_landmark_fc2), 39 | dtype=tf.float32), name='net1_w2_land') 40 | w2_pho = tf.Variable( 41 | tf.truncated_normal([n_out_fc1, n_out_phoneme_fc2], stddev=2 / (n_out_fc1 + n_out_phoneme_fc2), 42 | dtype=tf.float32), name='net1_w2_pho') 43 | 44 | b1_land = tf.Variable(tf.ones([n_out_fc1], dtype=tf.float32) * 0.1, name='net1_b1_land') 45 | b2_land = tf.Variable(tf.zeros([n_out_landmark_fc2], dtype=tf.float32), name='net1_b2_land') 46 | b1_pho = tf.Variable(tf.ones([n_out_fc1], dtype=tf.float32) * 0.1, name='net1_b1_pho') 47 | b2_pho = tf.Variable(tf.zeros([n_out_phoneme_fc2], dtype=tf.float32), name='net1_b2_pho') 48 | 49 | 50 | # LSTM model 51 | with tf.name_scope('net1_shared_rnn'): 52 | dropout = tf.placeholder("float32") 53 | 54 | if (kernel_type == 'rnn'): 55 | cell_func = tf.contrib.rnn.BasicRNNCell 56 | elif (kernel_type == 'lstm'): 57 | # cell_func = tf.contrib.rnn.BasicLSTMCell 58 | cell_func = tf.contrib.rnn.LSTMCell 59 | elif (kernel_type == 'gru'): 60 | cell_func = tf.contrib.rnn.GRUCell 61 | 62 | def one_layer_lstm_kernel(x, dropout, n_hidden): 63 | lstm_cell = cell_func(n_hidden, initializer=tf.glorot_normal_initializer()) 64 | cell = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=1.0 - dropout) 65 | return tf.nn.dynamic_rnn(cell, x, dtype=tf.float32) 66 | 67 | def n_layer_rnn_kernel(x, dropout, n_layers, n_hidden): 68 | cells = [] 69 | for _ in range(n_layers): 70 | lstm_cell = cell_func(n_hidden) 71 | lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=1.0 - dropout) 72 | cells.append(lstm_cell) 73 | cell = tf.contrib.rnn.MultiRNNCell(cells) 74 | return tf.nn.dynamic_rnn(cell, x, dtype=tf.float32, scope='net1_rnn') 75 | 76 | net1_rnn_output, states = 
n_layer_rnn_kernel(x=x, dropout=dropout, n_layers=n_layers, 77 | n_hidden=n_hidden) # x in [n_batch x n_step x n_feature] 78 | # outputs = net1_rnn_output 79 | 80 | with tf.name_scope('net1_output'): 81 | outputs = net1_rnn_output[:, -1, :] 82 | outputs = tf.concat([outputs, x_face_id], axis=1) 83 | 84 | pred = dict() 85 | l1 = tf.matmul(outputs, w1_land) + b1_land 86 | l2 = tf.contrib.layers.batch_norm(l1, center=True, scale=True, is_training=phase, scope='net1_land_bn') 87 | l3 = tf.nn.relu(l2, name='net1_land_relu') 88 | l4 = tf.matmul(l3, w2_land) + b2_land 89 | pred['net1_land'] = l4 + x_face_id 90 | 91 | p1 = tf.matmul(outputs, w1_pho) + b1_pho 92 | p2 = tf.contrib.layers.batch_norm(p1, center=True, scale=True, is_training=phase, scope='net1_pho_bn') 93 | p3 = tf.nn.relu(p2, name='net1_pho_relu') 94 | p4 = tf.matmul(p3, w2_pho) + b2_pho 95 | pred['net1_pho'] = p4 96 | 97 | # error 98 | with tf.name_scope('net1_pho_err'): 99 | mistakes = tf.not_equal(tf.argmax(y_phoneme, 1), tf.argmax(pred['net1_pho'], 1)) 100 | net1_pho_err = tf.reduce_mean(tf.cast(mistakes, tf.float32)) 101 | 102 | # ========================================= NET 2 ============================================================= 103 | with tf.name_scope('net2_rnn'): 104 | 105 | net2_input = tf.concat([pred['net1_land'], p3], axis=1) 106 | n_net2_input = n_landmark + n_out_fc1 107 | 108 | net2_input_concat = tf.concat([net2_input, x[:, int(n_steps/2), :]], axis=1) 109 | n_net2_input = n_net2_input + n_input 110 | 111 | 112 | net2_input_concat = tf.concat([tf.zeros(shape=(win_size_2/2, n_net2_input), dtype=tf.float32), 113 | net2_input_concat], axis=0) 114 | 115 | net2_input = tf.map_fn( 116 | lambda i: tf.reshape(net2_input_concat[i:i + win_size_2], (n_steps, int(n_net2_input * win_size_2 / n_steps))), 117 | tf.range(tf.shape(net2_input_concat)[0] - int(win_size_2)), 118 | dtype=tf.float32) 119 | net2_input = tf.cast(net2_input, tf.float32) 120 | y_maya_param_in = y_maya_param[0 : 
tf.shape(net2_input)[0]] 121 | 122 | # Net2 LSTM 123 | cells_jali = [] 124 | cells_cls = [] 125 | cells_reg = [] 126 | for _ in range(3): 127 | lstm_cell_jali = tf.contrib.rnn.LSTMCell(n_hidden_net2_jali) 128 | lstm_cell_jali = tf.contrib.rnn.DropoutWrapper(lstm_cell_jali, output_keep_prob=0.5) 129 | cells_jali.append(lstm_cell_jali) 130 | for _ in range(1): 131 | lstm_cell_cls = tf.contrib.rnn.LSTMCell(n_hidden_net2_cls) 132 | lstm_cell_cls = tf.contrib.rnn.DropoutWrapper(lstm_cell_cls, output_keep_prob=0.5) 133 | cells_cls.append(lstm_cell_cls) 134 | for _ in range(3): 135 | lstm_cell_reg = tf.contrib.rnn.LSTMCell(n_hidden_net2_reg) 136 | lstm_cell_reg = tf.contrib.rnn.DropoutWrapper(lstm_cell_reg, output_keep_prob=0.5) 137 | cells_reg.append(lstm_cell_reg) 138 | cell_jali = tf.contrib.rnn.MultiRNNCell(cells_jali) 139 | cell_cls = tf.contrib.rnn.MultiRNNCell(cells_cls) 140 | cell_reg = tf.contrib.rnn.MultiRNNCell(cells_reg) 141 | output_jali, _ = tf.nn.dynamic_rnn(cell_jali, net2_input, dtype=tf.float32, scope='net2_rnn_jali') 142 | output_cls, _ = tf.nn.dynamic_rnn(cell_cls, net2_input, dtype=tf.float32, scope='net2_rnn_cls') 143 | output_reg, _ = tf.nn.dynamic_rnn(cell_reg, net2_input, dtype=tf.float32, scope='net2_rnn_reg') 144 | output_jali = output_jali[:, -1, :] 145 | output_cls = output_cls[:, -1, :] 146 | output_reg = output_reg[:, -1, :] 147 | 148 | with tf.name_scope('net2_fc'): 149 | w1_cls = tf.Variable(tf.truncated_normal([n_hidden_net2_cls, n_cls_fc1], stddev=2 / (n_hidden_net2_cls + n_cls_fc1)), 150 | dtype=tf.float32, name='net2_w1_cls') 151 | b1_cls = tf.Variable(tf.constant(0.1, shape=[n_cls_fc1]), dtype=tf.float32, name='net2_b1_cls') 152 | w2_cls = tf.Variable( 153 | tf.truncated_normal([n_cls_fc1, n_maya_param-2], stddev=2 / (n_cls_fc1 + n_maya_param-2)), 154 | dtype=tf.float32, name='net2_w2_cls') 155 | b2_cls = tf.Variable(tf.constant(0.1, shape=[n_maya_param-2]), dtype=tf.float32, name='net2_b2_cls') 156 | 157 | w1_reg = 
tf.Variable(tf.truncated_normal([n_hidden_net2_reg, n_reg_fc1], stddev=2 / (n_hidden_net2_reg + n_reg_fc1)), 158 | dtype=tf.float32, name='net2_w1_reg') 159 | b1_reg = tf.Variable(tf.constant(0.1, shape=[n_reg_fc1]), dtype=tf.float32, name='net2_b1_reg') 160 | w2_reg = tf.Variable( 161 | tf.truncated_normal([n_reg_fc1, 100], stddev=2 / (n_reg_fc1 + 100)), 162 | dtype=tf.float32, name='net2_w2_reg') 163 | b2_reg = tf.Variable(tf.constant(0.1, shape=[100]), dtype=tf.float32, name='net2_b2_reg') 164 | w3_reg = tf.Variable( 165 | tf.truncated_normal([100, n_maya_param-2], stddev=2 / (100 + n_maya_param-2)), 166 | dtype=tf.float32, name='net2_w3_reg') 167 | b3_reg = tf.Variable(tf.constant(0.1, shape=[n_maya_param-2]), dtype=tf.float32, name='net2_b3_reg') 168 | 169 | w1_jali = tf.Variable(tf.truncated_normal([n_hidden_net2_jali, n_jali_fc1], stddev=2 / (n_hidden_net2_jali + n_jali_fc1)), 170 | dtype=tf.float32, name='net2_w1_jali') 171 | b1_jali = tf.Variable(tf.constant(0.1, shape=[n_jali_fc1]), dtype=tf.float32, name='net2_b1_jali') 172 | w2_jali = tf.Variable(tf.truncated_normal([n_jali_fc1, 2], stddev=2 / (n_jali_fc1 + 2)), 173 | dtype=tf.float32, name='net2_w2_jali') 174 | b2_jali = tf.Variable(tf.constant(0.1, shape=[2]), dtype=tf.float32, name='net2_b2_jali') 175 | 176 | with tf.name_scope('net2_output'): 177 | v1_cls = tf.matmul(output_cls, w1_cls) + b1_cls 178 | v2_cls = tf.contrib.layers.batch_norm(v1_cls, center=True, scale=True, is_training=phase, scope='net2_v_cls_bn') 179 | v3_cls = tf.nn.relu(v2_cls, name='net2_v_cls_relu') 180 | pred['v_cls'] = tf.matmul(v3_cls, w2_cls) + b2_cls 181 | 182 | v1_reg = tf.matmul(output_reg, w1_reg) + b1_reg 183 | v2_reg = tf.contrib.layers.batch_norm(v1_reg, center=True, scale=True, is_training=phase, scope='net2_v_reg_bn') 184 | v3_reg = tf.nn.relu(v2_reg, name='net2_v_reg_relu') 185 | v4_reg = tf.matmul(v3_reg, w2_reg) + b2_reg 186 | v5_reg = tf.contrib.layers.batch_norm(v4_reg, center=True, scale=True, 
is_training=phase, scope='net2_v_reg_bn2') 187 | v6_reg = tf.nn.relu(v5_reg, name='net2_v_reg_relu2') 188 | pred['v_reg'] = tf.matmul(v6_reg, w3_reg) + b3_reg 189 | 190 | j1 = tf.matmul(output_jali, w1_jali) + b1_jali 191 | j2 = tf.contrib.layers.batch_norm(j1, center=True, scale=True, is_training=phase, scope='net2_jali_bn') 192 | j3 = tf.nn.relu(j2, name='net2_jali_relu') 193 | pred['jali'] = tf.matmul(j3, w2_jali) + b2_jali 194 | 195 | 196 | # loss 197 | with tf.name_scope('net1_loss'): 198 | cost = dict() 199 | # LipS weight 200 | pred_diff = pred['net1_land'] - y_landmark 201 | tile_lipS = tf.tile(y_lipS, [1, n_landmark]) 202 | weighted_pred_diff = tf.multiply(pred_diff, tile_lipS) 203 | 204 | odd_land_pred = tf.multiply(pred['net1_land'], tile_lipS)[1::2,:] 205 | even_land_pred = tf.multiply(pred['net1_land'], tile_lipS)[0::2,:] 206 | m_land_pred = odd_land_pred - even_land_pred 207 | 208 | cost['net1_motion'] = tf.reduce_mean(tf.abs(m_land_pred)) * 1000 209 | cost['net1_land'] = tf.reduce_mean(tf.abs(weighted_pred_diff)) * 1000 210 | cost['net1_pho'] = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred['net1_pho'], labels=y_phoneme)) 211 | cost['net1_pho_err'] = net1_pho_err 212 | 213 | t_vars = tf.trainable_variables() 214 | reg_losses_1 = [tf.reduce_sum(tf.nn.l2_loss(var)) for var in t_vars if ('net1_' in var.name)] 215 | cost['net1_regularization'] = sum(reg_losses_1) / len(reg_losses_1) 216 | 217 | cost['net1'] = cost['net1_land'] * 0.25 + cost['net1_pho'] * p_alpha + 0.01 * cost['net1_regularization'] + 0.01 * cost['net1_motion'] 218 | 219 | with tf.name_scope('net2_loss'): 220 | # cost = dict() 221 | 222 | cond = tf.less(y_maya_param_in[:, 2:n_maya_param], 0.01 * tf.ones(tf.shape(y_maya_param_in[:, 2:n_maya_param]))) 223 | mask = tf.where(cond, tf.zeros(tf.shape(y_maya_param_in[:, 2:n_maya_param])), 224 | tf.ones(tf.shape(y_maya_param_in[:, 2:n_maya_param]))) 225 | 226 | cost['net2_v_cls'] = tf.reduce_sum( 227 | 
tf.nn.sigmoid_cross_entropy_with_logits(labels=mask, logits=pred['v_cls'])) / batch_size_placeholder 228 | cost['net2_v_reg'] = tf.reduce_sum( 229 | tf.abs(pred['v_reg'] * mask - y_maya_param_in[:, 2:n_maya_param] * mask)) / batch_size_placeholder 230 | 231 | cost['net2_jali'] = tf.reduce_sum(tf.abs(pred['jali'] - y_maya_param_in[:, 0:2])) / batch_size_placeholder 232 | 233 | odd_frame_y = y_maya_param_in[1::2, :] 234 | even_frame_y = y_maya_param_in[0::2, :] 235 | m_y = odd_frame_y - even_frame_y 236 | 237 | cls_sig = tf.sigmoid(pred['v_cls'], name='net2_v_cls_sigmoid') 238 | # pvv = cls_sig*pred['v_reg'] 239 | pvv = cls_sig 240 | odd_frame_pred_phone = pvv[1::2, :] 241 | even_frame_pred_phone = pvv[0::2, :] 242 | m_pred_phone = odd_frame_pred_phone - even_frame_pred_phone 243 | 244 | odd_frame_pred_jali = pred['jali'][1::2, :] 245 | even_frame_pred_jali = pred['jali'][0::2, :] 246 | m_pred_jali = odd_frame_pred_jali - even_frame_pred_jali 247 | 248 | cost['net2_1st_deriv'] = tf.reduce_mean(tf.abs(m_pred_phone - m_y[:, 2:])) + tf.reduce_mean(tf.abs(m_pred_jali - m_y[:, 0:2])) 249 | cost['net2_motion'] = tf.reduce_mean(tf.abs(pred['v_reg'][1::2,:]-pred['v_reg'][0::2,:])) + \ 250 | tf.reduce_mean(tf.abs(pred['v_cls'][1::2, :] - pred['v_cls'][0::2, :])) 251 | 252 | pred['viseme'] = tf.cast((cls_sig > 0.5), dtype=tf.float32) * pred['v_reg'] 253 | 254 | t_vars = tf.trainable_variables() 255 | reg_losses = [tf.reduce_sum(tf.abs(var)) for var in t_vars if ('net2_' in var.name and '_b' not in var.name)] 256 | 257 | cost['net2'] = 0.35 * cost['net2_v_cls'] * p_alpha + 0.2 * cost['net2_v_reg'] + 0.2 * cost['net2_jali'] \ 258 | + cost['net2_motion'] * 0.015 + cost['net2_1st_deriv'] * 0.1 + 0.01 * sum(reg_losses) / len(reg_losses) 259 | 260 | # optimizer 261 | update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 262 | with tf.control_dependencies(update_ops): 263 | t_vars = tf.trainable_variables() 264 | net1_vars = [var for var in t_vars if 'net1_' in var.name] 265 | 
net2_vars = [var for var in t_vars if 'net2_' in var.name] 266 | 267 | net2_reg_vars = [var for var in t_vars if ('net2_' in var.name and 'reg' in var.name)] 268 | net1_pho_vars = [var for var in t_vars if ('net1_' in var.name and 'pho' in var.name)] 269 | 270 | net1_optim = tf.train.AdamOptimizer(learning_rate).minimize(cost['net1'], var_list=net1_vars) 271 | net2_optim = tf.train.AdamOptimizer(learning_rate).minimize(cost['net2'], var_list=net2_vars) 272 | all_optim = tf.train.AdamOptimizer(learning_rate=0.0005).minimize(cost['net2_v_reg'], var_list=net2_reg_vars) 273 | 274 | # Initializing the variables 275 | init = tf.global_variables_initializer() 276 | 277 | # cv error for avg 278 | with tf.name_scope('err_avg'): 279 | sum_val = dict() 280 | clear_op = dict() 281 | inc_op = dict() 282 | avg = dict() 283 | sum_val['batch'] = tf.Variable(0.) 284 | clear_op['batch'] = tf.assign(sum_val['batch'], 0.) 285 | inc_op['batch'] = tf.assign_add(sum_val['batch'], batch_size_placeholder) 286 | for key in ['net1_land', 'net1_pho', 'net1', 'net1_pho_err', 'net1_regularization', 'net2_v_cls', 'net2_v_reg', 'net2_jali', 'net2', 'net1_motion', 'net2_motion', 'net2_1st_deriv']: 287 | sum_val[key] = tf.Variable(0.) 288 | clear_op[key] = tf.assign(sum_val[key], 0.) 
def test(model_name, test_audio_name):
    """Run VisemeNet inference on one pre-processed audio clip.

    Restores the pretrained checkpoint ``model_name`` from ``model_dir``,
    streams the CSV features previously generated for ``test_audio_name``
    through the two-stage network, and writes the predicted JALI/viseme
    parameters as text files under ``pred_dir``.

    NOTE(review): relies on module-level globals pulled in by
    ``from src.utl.load_param import *`` (csv_dir, model_dir, pred_dir,
    lpw_dir, n_steps, n_input, n_landmark, n_phoneme, n_face_id,
    batch_per_chunk_size) and helpers from ``src.utl.utl``.

    Args:
        model_name: checkpoint stem; expects
            <model_dir>/<model_name>/<model_name>.ckpt to exist.
        test_audio_name: folder name of the clip's CSVs under ``csv_dir``.
    """

    csv_test_audio = csv_dir + test_audio_name + '/'

    # Build the full training graph; at test time only the placeholders,
    # `pred`, `cost` and the error-averaging ops are actually used — the
    # three optimizers are ignored.
    init, net1_optim, net2_optim, all_optim, x, x_face_id, y_landmark, y_phoneme, y_lipS, y_maya_param, dropout, cost, \
    tensorboard_op, pred, clear_op, inc_op, avg, batch_size_placeholder, phase = model()

    # start tf graph (allow_growth avoids grabbing all GPU memory up front)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    max_to_keep = 20
    saver = tf.train.Saver(max_to_keep=max_to_keep)


    try_mkdir(pred_dir)

    # Test sess, load ckpt
    OLD_CHECKPOINT_FILE = model_dir + model_name + '/' + model_name +'.ckpt'

    saver.restore(sess, OLD_CHECKPOINT_FILE)
    print("Model loaded: " + model_dir + model_name)

    # Shadows the load_param global of the same name: inference makes
    # exactly one pass over the data.
    total_epoch_num = 1
    print(csv_test_audio)

    # File handles are kept open and re-seek()ed per epoch; only the
    # 'test' split is populated here ('train' stays empty).
    data_dir = {'train': {}, 'test': {}}
    data_dir['test']['wav'] = open(csv_test_audio + "test/wav.csv", 'r')
    data_dir['test']['clip_len'] = open(csv_test_audio + "test/clip_len.csv", 'r')
    cv_file_len = simple_read_clip_len(data_dir['test']['clip_len'])
    print('Loading wav_raw.txt file in {:}'.format(csv_test_audio))

    # wav_raw.csv holds the raw MFCC feature rows; wav.csv holds per-frame
    # row indices into it (see read_next_batch_easy_from_raw).
    train_wav_raw = np.loadtxt(csv_test_audio + 'wav_raw.csv')
    test_wav_raw = train_wav_raw


    for epoch in range(0, total_epoch_num):
        # clear data file header

        # ============================== TRAIN SET CHUNK ITERATION ============================== #

        # Reset the running error accumulators and rewind every CSV stream.
        sess.run(clear_op)
        for key in ['train', 'test']:
            for lpw_key in data_dir[key].keys():
                data_dir[key][lpw_key].seek(0)

        print("===================== TEST/CV CHUNK - {:} ======================".format(csv_test_audio))
        eof = False
        chunk_num = 0
        chunk_size_sum = 0

        # One batch = the whole clip (all frames), so a chunk is the clip
        # repeated batch_per_chunk_size times (1 by default).
        batch_size = test_wav_raw.shape[0]
        chunk_size = batch_size * batch_per_chunk_size

        while (not eof):
            cv_data, eof = read_chunk_data(data_dir, 'test', chunk_size)
            chunk_num += 1
            chunk_size_sum += len(cv_data['wav'])

            print('Load Chunk {:d}, size {:d}, total_size {:d} ({:2.2f})'
                  .format(chunk_num, len(cv_data['wav']), chunk_size_sum, chunk_size_sum / cv_file_len))

            full_idx_array = np.arange(len(cv_data['wav']))
            # np.random.shuffle(full_idx_array)
            for next_idx in range(0, int(np.floor(len(cv_data['wav']) / batch_size))):
                batch_idx_array = full_idx_array[next_idx * batch_size: (next_idx + 1) * batch_size]
                # Only batch_x carries real data at test time; the y_* arrays
                # are zero placeholders (see read_next_batch_easy_from_raw).
                batch_x, batch_x_face_id, batch_x_pose, batch_y_landmark, batch_y_phoneme, batch_y_lipS, batch_y_maya_param = \
                    read_next_batch_easy_from_raw(test_wav_raw, cv_data, 'face_close', batch_idx_array, batch_size, n_steps, n_input, n_landmark,
                                                  n_phoneme, n_face_id)
                # Replace the (zero) face-id features with the saved neutral
                # "closed face" landmarks, tiled over the batch.
                npClose = np.loadtxt(lpw_dir + 'saved_param/maya_close_face.txt')
                batch_x_face_id = np.tile(npClose, (batch_x_face_id.shape[0], 1))


                # dropout=0 and phase=0 put the net in inference mode
                # (no dropout, batch-norm in eval behaviour).
                test_pred, loss, _ = sess.run([pred, cost, inc_op],
                                              feed_dict={x: batch_x,
                                                         x_face_id: batch_x_face_id,
                                                         y_landmark: batch_y_landmark,
                                                         y_phoneme: batch_y_phoneme,
                                                         y_lipS: batch_y_lipS,
                                                         dropout: 0,
                                                         batch_size_placeholder: batch_x.shape[0],
                                                         phase: 0,
                                                         y_maya_param: batch_y_maya_param})

                # NOTE(review): the indentation of this output-saving section
                # was reconstructed from a flattened source dump; it is placed
                # in the innermost batch loop (where `test_pred` is defined) —
                # confirm against the upstream repository.
                def save_output(filename, npTxt, fmt):
                    # 'wb' because np.savetxt writes bytes to an open handle.
                    f = open(filename, 'wb')
                    np.savetxt(f, npTxt, fmt=fmt)
                    f.close()

                try_mkdir(pred_dir + test_audio_name)

                def sigmoid(x):
                    return 1/(1+np.exp(-x))
                # cls output is stored as sigmoid probabilities, reg as raw
                # values; both are prefixed with the 2 JALI columns.
                save_output(pred_dir + test_audio_name + "/mayaparam_pred_cls.txt",
                            np.concatenate([test_pred['jali'], sigmoid(test_pred['v_cls'])], axis=1), '%.4f')
                save_output(pred_dir + test_audio_name + "/mayaparam_pred_reg.txt",
                            np.concatenate([test_pred['jali'], test_pred['v_reg']], axis=1), '%.4f')
def try_mkdir(dir, warning=True):
    """Create directory *dir* (and parents), tolerating pre-existing dirs.

    Prints a warning (unless ``warning=False``) when the directory already
    exists; any other OS-level failure is fatal and terminates the program.

    Args:
        dir: path to create.
        warning: whether to print a notice when *dir* already exists.
    """
    try:
        os.makedirs(dir)
    except FileExistsError:
        if(warning):
            print("Warning: dir " + dir + " already exist! Continue program...")
    except OSError:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). A real failure here (permissions,
        # invalid path) must not report success: exit non-zero instead of
        # the original exit(0).
        print("Cannot make dir: " + dir)
        print(sys.exc_info()[0])
        sys.exit(1)


def simple_read_clip_len(filehead):
    """Return the total clip length: the sum of the integer on each line.

    Blank lines (e.g. a trailing newline) are skipped — the original
    crashed on them with ``int('')``. Leaves the handle at EOF.
    """
    return sum(int(line) for line in filehead if line.strip())


def read_chunk_as_float_from_file(file_head, chunk_size):
    """Read up to *chunk_size* whitespace-separated float rows.

    Returns:
        (rows, eof): ``rows`` is a list of float lists; ``eof`` is True when
        a blank line or end-of-file was hit before *chunk_size* rows.
    """
    chunk_float = []
    eof = False
    for _ in range(chunk_size):
        number_float = [float(tok) for tok in file_head.readline().split()]
        if not number_float:
            eof = True
            break
        chunk_float.append(number_float)
    return chunk_float, eof


def read_chunk_data(data_dir, data_type, chunk_size):
    """Read one chunk from every stream registered under data_dir[data_type].

    End-of-file is reported only for the 'wav' stream, which drives the
    caller's main loop; EOF on auxiliary streams is ignored.
    """
    data = dict()
    eof = False
    for lpw_key in data_dir[data_type].keys():
        data[lpw_key], e = read_chunk_as_float_from_file(data_dir[data_type][lpw_key], chunk_size)
        if e and lpw_key == 'wav':  # dropped redundant `not eof` guard
            eof = True

    return data, eof


def read_next_batch_easy_from_raw(wav_raw, data, face_type, batch_idx_array, batch_size, n_steps, n_input, n_landmark,
                                  n_phoneme, n_face_id):
    """Assemble one inference batch from raw MFCC rows.

    Each entry of ``data['wav']`` is a list of row indices into *wav_raw*;
    the selected rows are reshaped to (n_steps, n_input). Only ``batch_x``
    carries real data — the remaining arrays are zero placeholders so the
    feed_dict can still be populated at test time. ``face_type`` is accepted
    for interface compatibility but unused.

    Returns:
        (batch_x, batch_x_face_id, batch_x_pose, batch_y_landmark,
         batch_y_phoneme, batch_y_lipS, batch_y_maya_param)
    """
    batch_x = np.zeros((batch_size, n_steps, n_input))
    batch_x_face_id = np.zeros((batch_size, n_face_id))
    batch_x_pose = np.zeros((batch_size, 3))
    batch_y_landmark = np.zeros((batch_size, n_landmark))
    batch_y_phoneme = np.zeros((batch_size, n_phoneme))
    batch_y_lipS = np.zeros((batch_size, 1))
    # 22 = n_maya_param (2 JALI + 20 viseme params), kept hard-coded as in
    # the original to stay self-contained.
    batch_y_maya_param = np.zeros((batch_size, 22))

    for i in range(0, batch_size):
        idx = batch_idx_array[i]
        # renamed comprehension variable: the original reused `i`, shadowing
        # the batch loop index inside the comprehension scope
        wav_idx = [int(j) for j in data['wav'][idx]]
        batch_x[i] = wav_raw[wav_idx].reshape((1, n_steps, n_input))

    return batch_x, batch_x_face_id, batch_x_pose, batch_y_landmark, batch_y_phoneme, batch_y_lipS, batch_y_maya_param