├── .gitignore ├── eval_data ├── labels_map.txt └── panda.jpg ├── g3doc ├── flops.png └── params.png ├── keras_efficientnet ├── __init__.py ├── efficientnet_builder.py ├── efficientnet_layers.py ├── efficientnet_model.py ├── extract_weights.py ├── keras_model.py ├── load_weights.py ├── models │ └── efficientnet_b0_weights_tf_dim_ordering_tf_kernels.h5 ├── preprocessing.py └── utils.py ├── main.py ├── readme.md └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/* 2 | keras_efficientnet.egg-info/* 3 | dist/* 4 | build/* 5 | .pyc -------------------------------------------------------------------------------- /eval_data/labels_map.txt: -------------------------------------------------------------------------------- 1 | {"0": "tench, Tinca tinca", "1": "goldfish, Carassius auratus", "2": "great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias", "3": "tiger shark, Galeocerdo cuvieri", "4": "hammerhead, hammerhead shark", "5": "electric ray, crampfish, numbfish, torpedo", "6": "stingray", "7": "cock", "8": "hen", "9": "ostrich, Struthio camelus", "10": "brambling, Fringilla montifringilla", "11": "goldfinch, Carduelis carduelis", "12": "house finch, linnet, Carpodacus mexicanus", "13": "junco, snowbird", "14": "indigo bunting, indigo finch, indigo bird, Passerina cyanea", "15": "robin, American robin, Turdus migratorius", "16": "bulbul", "17": "jay", "18": "magpie", "19": "chickadee", "20": "water ouzel, dipper", "21": "kite", "22": "bald eagle, American eagle, Haliaeetus leucocephalus", "23": "vulture", "24": "great grey owl, great gray owl, Strix nebulosa", "25": "European fire salamander, Salamandra salamandra", "26": "common newt, Triturus vulgaris", "27": "eft", "28": "spotted salamander, Ambystoma maculatum", "29": "axolotl, mud puppy, Ambystoma mexicanum", "30": "bullfrog, Rana catesbeiana", "31": "tree frog, tree-frog", "32": "tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui", "33": "loggerhead, loggerhead turtle, Caretta caretta", "34": "leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea", "35": "mud turtle", "36": "terrapin", "37": "box turtle, box tortoise", "38": "banded gecko", "39": "common iguana, iguana, Iguana iguana", "40": "American chameleon, anole, Anolis carolinensis", "41": "whiptail, whiptail lizard", "42": "agama", "43": "frilled lizard, Chlamydosaurus kingi", "44": "alligator lizard", "45": "Gila monster, Heloderma suspectum", "46": "green lizard, Lacerta viridis", "47": "African chameleon, Chamaeleo chamaeleon", "48": "Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis", "49": "African crocodile, Nile crocodile, Crocodylus niloticus", "50": "American alligator, Alligator mississipiensis", "51": "triceratops", "52": "thunder snake, worm snake, Carphophis amoenus", "53": "ringneck snake, ring-necked snake, ring snake", "54": "hognose snake, puff adder, sand viper", "55": "green snake, grass snake", "56": "king snake, kingsnake", "57": "garter snake, grass snake", "58": "water snake", "59": "vine snake", "60": "night snake, Hypsiglena torquata", "61": "boa constrictor, Constrictor constrictor", "62": "rock python, rock snake, Python sebae", "63": "Indian cobra, Naja naja", "64": "green mamba", "65": "sea snake", "66": "horned viper, cerastes, sand viper, horned asp, Cerastes cornutus", "67": "diamondback, diamondback rattlesnake, Crotalus adamanteus", "68": "sidewinder, horned rattlesnake, Crotalus cerastes", "69": 
"trilobite", "70": "harvestman, daddy longlegs, Phalangium opilio", "71": "scorpion", "72": "black and gold garden spider, Argiope aurantia", "73": "barn spider, Araneus cavaticus", "74": "garden spider, Aranea diademata", "75": "black widow, Latrodectus mactans", "76": "tarantula", "77": "wolf spider, hunting spider", "78": "tick", "79": "centipede", "80": "black grouse", "81": "ptarmigan", "82": "ruffed grouse, partridge, Bonasa umbellus", "83": "prairie chicken, prairie grouse, prairie fowl", "84": "peacock", "85": "quail", "86": "partridge", "87": "African grey, African gray, Psittacus erithacus", "88": "macaw", "89": "sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita", "90": "lorikeet", "91": "coucal", "92": "bee eater", "93": "hornbill", "94": "hummingbird", "95": "jacamar", "96": "toucan", "97": "drake", "98": "red-breasted merganser, Mergus serrator", "99": "goose", "100": "black swan, Cygnus atratus", "101": "tusker", "102": "echidna, spiny anteater, anteater", "103": "platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus", "104": "wallaby, brush kangaroo", "105": "koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus", "106": "wombat", "107": "jellyfish", "108": "sea anemone, anemone", "109": "brain coral", "110": "flatworm, platyhelminth", "111": "nematode, nematode worm, roundworm", "112": "conch", "113": "snail", "114": "slug", "115": "sea slug, nudibranch", "116": "chiton, coat-of-mail shell, sea cradle, polyplacophore", "117": "chambered nautilus, pearly nautilus, nautilus", "118": "Dungeness crab, Cancer magister", "119": "rock crab, Cancer irroratus", "120": "fiddler crab", "121": "king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica", "122": "American lobster, Northern lobster, Maine lobster, Homarus americanus", "123": "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "124": "crayfish, crawfish, crawdad, crawdaddy", "125": "hermit crab", "126": "isopod", "127": "white stork, Ciconia ciconia", "128": "black stork, Ciconia nigra", "129": "spoonbill", "130": "flamingo", "131": "little blue heron, Egretta caerulea", "132": "American egret, great white heron, Egretta albus", "133": "bittern", "134": "crane", "135": "limpkin, Aramus pictus", "136": "European gallinule, Porphyrio porphyrio", "137": "American coot, marsh hen, mud hen, water hen, Fulica americana", "138": "bustard", "139": "ruddy turnstone, Arenaria interpres", "140": "red-backed sandpiper, dunlin, Erolia alpina", "141": "redshank, Tringa totanus", "142": "dowitcher", "143": "oystercatcher, oyster catcher", "144": "pelican", "145": "king penguin, Aptenodytes patagonica", "146": "albatross, mollymawk", "147": "grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus", "148": "killer whale, killer, orca, grampus, sea wolf, Orcinus orca", "149": "dugong, Dugong dugon", "150": "sea lion", "151": "Chihuahua", "152": "Japanese spaniel", "153": "Maltese dog, Maltese terrier, Maltese", "154": "Pekinese, Pekingese, Peke", "155": "Shih-Tzu", "156": "Blenheim spaniel", "157": "papillon", "158": "toy terrier", "159": "Rhodesian ridgeback", "160": "Afghan hound, Afghan", "161": "basset, basset hound", "162": "beagle", "163": "bloodhound, sleuthhound", "164": "bluetick", "165": "black-and-tan coonhound", "166": "Walker hound, Walker foxhound", "167": "English foxhound", "168": "redbone", "169": "borzoi, Russian wolfhound", "170": "Irish wolfhound", "171": "Italian greyhound", "172": 
"whippet", "173": "Ibizan hound, Ibizan Podenco", "174": "Norwegian elkhound, elkhound", "175": "otterhound, otter hound", "176": "Saluki, gazelle hound", "177": "Scottish deerhound, deerhound", "178": "Weimaraner", "179": "Staffordshire bullterrier, Staffordshire bull terrier", "180": "American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier", "181": "Bedlington terrier", "182": "Border terrier", "183": "Kerry blue terrier", "184": "Irish terrier", "185": "Norfolk terrier", "186": "Norwich terrier", "187": "Yorkshire terrier", "188": "wire-haired fox terrier", "189": "Lakeland terrier", "190": "Sealyham terrier, Sealyham", "191": "Airedale, Airedale terrier", "192": "cairn, cairn terrier", "193": "Australian terrier", "194": "Dandie Dinmont, Dandie Dinmont terrier", "195": "Boston bull, Boston terrier", "196": "miniature schnauzer", "197": "giant schnauzer", "198": "standard schnauzer", "199": "Scotch terrier, Scottish terrier, Scottie", "200": "Tibetan terrier, chrysanthemum dog", "201": "silky terrier, Sydney silky", "202": "soft-coated wheaten terrier", "203": "West Highland white terrier", "204": "Lhasa, Lhasa apso", "205": "flat-coated retriever", "206": "curly-coated retriever", "207": "golden retriever", "208": "Labrador retriever", "209": "Chesapeake Bay retriever", "210": "German short-haired pointer", "211": "vizsla, Hungarian pointer", "212": "English setter", "213": "Irish setter, red setter", "214": "Gordon setter", "215": "Brittany spaniel", "216": "clumber, clumber spaniel", "217": "English springer, English springer spaniel", "218": "Welsh springer spaniel", "219": "cocker spaniel, English cocker spaniel, cocker", "220": "Sussex spaniel", "221": "Irish water spaniel", "222": "kuvasz", "223": "schipperke", "224": "groenendael", "225": "malinois", "226": "briard", "227": "kelpie", "228": "komondor", "229": "Old English sheepdog, bobtail", "230": "Shetland sheepdog, Shetland sheep dog, Shetland", "231": "collie", "232": "Border collie", "233": "Bouvier des Flandres, Bouviers des Flandres", "234": "Rottweiler", "235": "German shepherd, German shepherd dog, German police dog, alsatian", "236": "Doberman, Doberman pinscher", "237": "miniature pinscher", "238": "Greater Swiss Mountain dog", "239": "Bernese mountain dog", "240": "Appenzeller", "241": "EntleBucher", "242": "boxer", "243": "bull mastiff", "244": "Tibetan mastiff", "245": "French bulldog", "246": "Great Dane", "247": "Saint Bernard, St Bernard", "248": "Eskimo dog, husky", "249": "malamute, malemute, Alaskan malamute", "250": "Siberian husky", "251": "dalmatian, coach dog, carriage dog", "252": "affenpinscher, monkey pinscher, monkey dog", "253": "basenji", "254": "pug, pug-dog", "255": "Leonberg", "256": "Newfoundland, Newfoundland dog", "257": "Great Pyrenees", "258": "Samoyed, Samoyede", "259": "Pomeranian", "260": "chow, chow chow", "261": "keeshond", "262": "Brabancon griffon", "263": "Pembroke, Pembroke Welsh corgi", "264": "Cardigan, Cardigan Welsh corgi", "265": "toy poodle", "266": "miniature poodle", "267": "standard poodle", "268": "Mexican hairless", "269": "timber wolf, grey wolf, gray wolf, Canis lupus", "270": "white wolf, Arctic wolf, Canis lupus tundrarum", "271": "red wolf, maned wolf, Canis rufus, Canis niger", "272": "coyote, prairie wolf, brush wolf, Canis latrans", "273": "dingo, warrigal, warragal, Canis dingo", "274": "dhole, Cuon alpinus", "275": "African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus", "276": "hyena, hyaena", "277": "red 
fox, Vulpes vulpes", "278": "kit fox, Vulpes macrotis", "279": "Arctic fox, white fox, Alopex lagopus", "280": "grey fox, gray fox, Urocyon cinereoargenteus", "281": "tabby, tabby cat", "282": "tiger cat", "283": "Persian cat", "284": "Siamese cat, Siamese", "285": "Egyptian cat", "286": "cougar, puma, catamount, mountain lion, painter, panther, Felis concolor", "287": "lynx, catamount", "288": "leopard, Panthera pardus", "289": "snow leopard, ounce, Panthera uncia", "290": "jaguar, panther, Panthera onca, Felis onca", "291": "lion, king of beasts, Panthera leo", "292": "tiger, Panthera tigris", "293": "cheetah, chetah, Acinonyx jubatus", "294": "brown bear, bruin, Ursus arctos", "295": "American black bear, black bear, Ursus americanus, Euarctos americanus", "296": "ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus", "297": "sloth bear, Melursus ursinus, Ursus ursinus", "298": "mongoose", "299": "meerkat, mierkat", "300": "tiger beetle", "301": "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "302": "ground beetle, carabid beetle", "303": "long-horned beetle, longicorn, longicorn beetle", "304": "leaf beetle, chrysomelid", "305": "dung beetle", "306": "rhinoceros beetle", "307": "weevil", "308": "fly", "309": "bee", "310": "ant, emmet, pismire", "311": "grasshopper, hopper", "312": "cricket", "313": "walking stick, walkingstick, stick insect", "314": "cockroach, roach", "315": "mantis, mantid", "316": "cicada, cicala", "317": "leafhopper", "318": "lacewing, lacewing fly", "319": "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "320": "damselfly", "321": "admiral", "322": "ringlet, ringlet butterfly", "323": "monarch, monarch butterfly, milkweed butterfly, Danaus plexippus", "324": "cabbage butterfly", "325": "sulphur butterfly, sulfur butterfly", "326": "lycaenid, lycaenid butterfly", "327": "starfish, sea star", "328": "sea urchin", "329": "sea cucumber, holothurian", "330": "wood rabbit, cottontail, cottontail rabbit", "331": "hare", "332": "Angora, Angora rabbit", "333": "hamster", "334": "porcupine, hedgehog", "335": "fox squirrel, eastern fox squirrel, Sciurus niger", "336": "marmot", "337": "beaver", "338": "guinea pig, Cavia cobaya", "339": "sorrel", "340": "zebra", "341": "hog, pig, grunter, squealer, Sus scrofa", "342": "wild boar, boar, Sus scrofa", "343": "warthog", "344": "hippopotamus, hippo, river horse, Hippopotamus amphibius", "345": "ox", "346": "water buffalo, water ox, Asiatic buffalo, Bubalus bubalis", "347": "bison", "348": "ram, tup", "349": "bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis", "350": "ibex, Capra ibex", "351": "hartebeest", "352": "impala, Aepyceros melampus", "353": "gazelle", "354": "Arabian camel, dromedary, Camelus dromedarius", "355": "llama", "356": "weasel", "357": "mink", "358": "polecat, fitch, foulmart, foumart, Mustela putorius", "359": "black-footed ferret, ferret, Mustela nigripes", "360": "otter", "361": "skunk, polecat, wood pussy", "362": "badger", "363": "armadillo", "364": "three-toed sloth, ai, Bradypus tridactylus", "365": "orangutan, orang, orangutang, Pongo pygmaeus", "366": "gorilla, Gorilla gorilla", "367": "chimpanzee, chimp, Pan troglodytes", "368": "gibbon, Hylobates lar", "369": "siamang, Hylobates syndactylus, Symphalangus syndactylus", "370": "guenon, guenon monkey", "371": "patas, hussar monkey, Erythrocebus patas", "372": "baboon", "373": "macaque", "374": "langur", "375": 
"colobus, colobus monkey", "376": "proboscis monkey, Nasalis larvatus", "377": "marmoset", "378": "capuchin, ringtail, Cebus capucinus", "379": "howler monkey, howler", "380": "titi, titi monkey", "381": "spider monkey, Ateles geoffroyi", "382": "squirrel monkey, Saimiri sciureus", "383": "Madagascar cat, ring-tailed lemur, Lemur catta", "384": "indri, indris, Indri indri, Indri brevicaudatus", "385": "Indian elephant, Elephas maximus", "386": "African elephant, Loxodonta africana", "387": "lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens", "388": "giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca", "389": "barracouta, snoek", "390": "eel", "391": "coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch", "392": "rock beauty, Holocanthus tricolor", "393": "anemone fish", "394": "sturgeon", "395": "gar, garfish, garpike, billfish, Lepisosteus osseus", "396": "lionfish", "397": "puffer, pufferfish, blowfish, globefish", "398": "abacus", "399": "abaya", "400": "academic gown, academic robe, judge's robe", "401": "accordion, piano accordion, squeeze box", "402": "acoustic guitar", "403": "aircraft carrier, carrier, flattop, attack aircraft carrier", "404": "airliner", "405": "airship, dirigible", "406": "altar", "407": "ambulance", "408": "amphibian, amphibious vehicle", "409": "analog clock", "410": "apiary, bee house", "411": "apron", "412": "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "413": "assault rifle, assault gun", "414": "backpack, back pack, knapsack, packsack, rucksack, haversack", "415": "bakery, bakeshop, bakehouse", "416": "balance beam, beam", "417": "balloon", "418": "ballpoint, ballpoint pen, ballpen, Biro", "419": "Band Aid", "420": "banjo", "421": "bannister, banister, balustrade, balusters, handrail", "422": "barbell", "423": "barber chair", "424": "barbershop", "425": "barn", "426": "barometer", "427": "barrel, cask", "428": "barrow, garden cart, lawn cart, wheelbarrow", "429": "baseball", "430": "basketball", "431": "bassinet", "432": "bassoon", "433": "bathing cap, swimming cap", "434": "bath towel", "435": "bathtub, bathing tub, bath, tub", "436": "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "437": "beacon, lighthouse, beacon light, pharos", "438": "beaker", "439": "bearskin, busby, shako", "440": "beer bottle", "441": "beer glass", "442": "bell cote, bell cot", "443": "bib", "444": "bicycle-built-for-two, tandem bicycle, tandem", "445": "bikini, two-piece", "446": "binder, ring-binder", "447": "binoculars, field glasses, opera glasses", "448": "birdhouse", "449": "boathouse", "450": "bobsled, bobsleigh, bob", "451": "bolo tie, bolo, bola tie, bola", "452": "bonnet, poke bonnet", "453": "bookcase", "454": "bookshop, bookstore, bookstall", "455": "bottlecap", "456": "bow", "457": "bow tie, bow-tie, bowtie", "458": "brass, memorial tablet, plaque", "459": "brassiere, bra, bandeau", "460": "breakwater, groin, groyne, mole, bulwark, seawall, jetty", "461": "breastplate, aegis, egis", "462": "broom", "463": "bucket, pail", "464": "buckle", "465": "bulletproof vest", "466": "bullet train, bullet", "467": "butcher shop, meat market", "468": "cab, hack, taxi, taxicab", "469": "caldron, cauldron", "470": "candle, taper, wax light", "471": "cannon", "472": "canoe", "473": "can opener, tin opener", "474": "cardigan", "475": "car mirror", "476": "carousel, carrousel, merry-go-round, roundabout, whirligig", "477": "carpenter's kit, 
tool kit", "478": "carton", "479": "car wheel", "480": "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM", "481": "cassette", "482": "cassette player", "483": "castle", "484": "catamaran", "485": "CD player", "486": "cello, violoncello", "487": "cellular telephone, cellular phone, cellphone, cell, mobile phone", "488": "chain", "489": "chainlink fence", "490": "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "491": "chain saw, chainsaw", "492": "chest", "493": "chiffonier, commode", "494": "chime, bell, gong", "495": "china cabinet, china closet", "496": "Christmas stocking", "497": "church, church building", "498": "cinema, movie theater, movie theatre, movie house, picture palace", "499": "cleaver, meat cleaver, chopper", "500": "cliff dwelling", "501": "cloak", "502": "clog, geta, patten, sabot", "503": "cocktail shaker", "504": "coffee mug", "505": "coffeepot", "506": "coil, spiral, volute, whorl, helix", "507": "combination lock", "508": "computer keyboard, keypad", "509": "confectionery, confectionary, candy store", "510": "container ship, containership, container vessel", "511": "convertible", "512": "corkscrew, bottle screw", "513": "cornet, horn, trumpet, trump", "514": "cowboy boot", "515": "cowboy hat, ten-gallon hat", "516": "cradle", "517": "crane", "518": "crash helmet", "519": "crate", "520": "crib, cot", "521": "Crock Pot", "522": "croquet ball", "523": "crutch", "524": "cuirass", "525": "dam, dike, dyke", "526": "desk", "527": "desktop computer", "528": "dial telephone, dial phone", "529": "diaper, nappy, napkin", "530": "digital clock", "531": "digital watch", "532": "dining table, board", "533": "dishrag, dishcloth", "534": "dishwasher, dish washer, dishwashing machine", "535": "disk brake, disc brake", "536": "dock, dockage, docking facility", "537": "dogsled, dog sled, dog sleigh", "538": "dome", "539": "doormat, welcome mat", "540": "drilling platform, offshore rig", "541": "drum, membranophone, tympan", "542": "drumstick", "543": "dumbbell", "544": "Dutch oven", "545": "electric fan, blower", "546": "electric guitar", "547": "electric locomotive", "548": "entertainment center", "549": "envelope", "550": "espresso maker", "551": "face powder", "552": "feather boa, boa", "553": "file, file cabinet, filing cabinet", "554": "fireboat", "555": "fire engine, fire truck", "556": "fire screen, fireguard", "557": "flagpole, flagstaff", "558": "flute, transverse flute", "559": "folding chair", "560": "football helmet", "561": "forklift", "562": "fountain", "563": "fountain pen", "564": "four-poster", "565": "freight car", "566": "French horn, horn", "567": "frying pan, frypan, skillet", "568": "fur coat", "569": "garbage truck, dustcart", "570": "gasmask, respirator, gas helmet", "571": "gas pump, gasoline pump, petrol pump, island dispenser", "572": "goblet", "573": "go-kart", "574": "golf ball", "575": "golfcart, golf cart", "576": "gondola", "577": "gong, tam-tam", "578": "gown", "579": "grand piano, grand", "580": "greenhouse, nursery, glasshouse", "581": "grille, radiator grille", "582": "grocery store, grocery, food market, market", "583": "guillotine", "584": "hair slide", "585": "hair spray", "586": "half track", "587": "hammer", "588": "hamper", "589": "hand blower, blow dryer, blow drier, hair dryer, hair drier", "590": "hand-held computer, hand-held microcomputer", "591": "handkerchief, hankie, hanky, hankey", "592": "hard disc, hard disk, fixed disk", "593": 
"harmonica, mouth organ, harp, mouth harp", "594": "harp", "595": "harvester, reaper", "596": "hatchet", "597": "holster", "598": "home theater, home theatre", "599": "honeycomb", "600": "hook, claw", "601": "hoopskirt, crinoline", "602": "horizontal bar, high bar", "603": "horse cart, horse-cart", "604": "hourglass", "605": "iPod", "606": "iron, smoothing iron", "607": "jack-o'-lantern", "608": "jean, blue jean, denim", "609": "jeep, landrover", "610": "jersey, T-shirt, tee shirt", "611": "jigsaw puzzle", "612": "jinrikisha, ricksha, rickshaw", "613": "joystick", "614": "kimono", "615": "knee pad", "616": "knot", "617": "lab coat, laboratory coat", "618": "ladle", "619": "lampshade, lamp shade", "620": "laptop, laptop computer", "621": "lawn mower, mower", "622": "lens cap, lens cover", "623": "letter opener, paper knife, paperknife", "624": "library", "625": "lifeboat", "626": "lighter, light, igniter, ignitor", "627": "limousine, limo", "628": "liner, ocean liner", "629": "lipstick, lip rouge", "630": "Loafer", "631": "lotion", "632": "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "633": "loupe, jeweler's loupe", "634": "lumbermill, sawmill", "635": "magnetic compass", "636": "mailbag, postbag", "637": "mailbox, letter box", "638": "maillot", "639": "maillot, tank suit", "640": "manhole cover", "641": "maraca", "642": "marimba, xylophone", "643": "mask", "644": "matchstick", "645": "maypole", "646": "maze, labyrinth", "647": "measuring cup", "648": "medicine chest, medicine cabinet", "649": "megalith, megalithic structure", "650": "microphone, mike", "651": "microwave, microwave oven", "652": "military uniform", "653": "milk can", "654": "minibus", "655": "miniskirt, mini", "656": "minivan", "657": "missile", "658": "mitten", "659": "mixing bowl", "660": "mobile home, manufactured home", "661": "Model T", "662": "modem", "663": "monastery", "664": "monitor", "665": "moped", "666": "mortar", "667": "mortarboard", "668": "mosque", "669": "mosquito net", "670": "motor scooter, scooter", "671": "mountain bike, all-terrain bike, off-roader", "672": "mountain tent", "673": "mouse, computer mouse", "674": "mousetrap", "675": "moving van", "676": "muzzle", "677": "nail", "678": "neck brace", "679": "necklace", "680": "nipple", "681": "notebook, notebook computer", "682": "obelisk", "683": "oboe, hautboy, hautbois", "684": "ocarina, sweet potato", "685": "odometer, hodometer, mileometer, milometer", "686": "oil filter", "687": "organ, pipe organ", "688": "oscilloscope, scope, cathode-ray oscilloscope, CRO", "689": "overskirt", "690": "oxcart", "691": "oxygen mask", "692": "packet", "693": "paddle, boat paddle", "694": "paddlewheel, paddle wheel", "695": "padlock", "696": "paintbrush", "697": "pajama, pyjama, pj's, jammies", "698": "palace", "699": "panpipe, pandean pipe, syrinx", "700": "paper towel", "701": "parachute, chute", "702": "parallel bars, bars", "703": "park bench", "704": "parking meter", "705": "passenger car, coach, carriage", "706": "patio, terrace", "707": "pay-phone, pay-station", "708": "pedestal, plinth, footstall", "709": "pencil box, pencil case", "710": "pencil sharpener", "711": "perfume, essence", "712": "Petri dish", "713": "photocopier", "714": "pick, plectrum, plectron", "715": "pickelhaube", "716": "picket fence, paling", "717": "pickup, pickup truck", "718": "pier", "719": "piggy bank, penny bank", "720": "pill bottle", "721": "pillow", "722": "ping-pong ball", "723": "pinwheel", "724": "pirate, pirate ship", "725": "pitcher, ewer", "726": 
"plane, carpenter's plane, woodworking plane", "727": "planetarium", "728": "plastic bag", "729": "plate rack", "730": "plow, plough", "731": "plunger, plumber's helper", "732": "Polaroid camera, Polaroid Land camera", "733": "pole", "734": "police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria", "735": "poncho", "736": "pool table, billiard table, snooker table", "737": "pop bottle, soda bottle", "738": "pot, flowerpot", "739": "potter's wheel", "740": "power drill", "741": "prayer rug, prayer mat", "742": "printer", "743": "prison, prison house", "744": "projectile, missile", "745": "projector", "746": "puck, hockey puck", "747": "punching bag, punch bag, punching ball, punchball", "748": "purse", "749": "quill, quill pen", "750": "quilt, comforter, comfort, puff", "751": "racer, race car, racing car", "752": "racket, racquet", "753": "radiator", "754": "radio, wireless", "755": "radio telescope, radio reflector", "756": "rain barrel", "757": "recreational vehicle, RV, R.V.", "758": "reel", "759": "reflex camera", "760": "refrigerator, icebox", "761": "remote control, remote", "762": "restaurant, eating house, eating place, eatery", "763": "revolver, six-gun, six-shooter", "764": "rifle", "765": "rocking chair, rocker", "766": "rotisserie", "767": "rubber eraser, rubber, pencil eraser", "768": "rugby ball", "769": "rule, ruler", "770": "running shoe", "771": "safe", "772": "safety pin", "773": "saltshaker, salt shaker", "774": "sandal", "775": "sarong", "776": "sax, saxophone", "777": "scabbard", "778": "scale, weighing machine", "779": "school bus", "780": "schooner", "781": "scoreboard", "782": "screen, CRT screen", "783": "screw", "784": "screwdriver", "785": "seat belt, seatbelt", "786": "sewing machine", "787": "shield, buckler", "788": "shoe shop, shoe-shop, shoe store", "789": "shoji", "790": "shopping basket", "791": "shopping cart", "792": "shovel", "793": "shower cap", "794": "shower curtain", "795": "ski", "796": "ski mask", "797": "sleeping bag", "798": "slide rule, slipstick", "799": "sliding door", "800": "slot, one-armed bandit", "801": "snorkel", "802": "snowmobile", "803": "snowplow, snowplough", "804": "soap dispenser", "805": "soccer ball", "806": "sock", "807": "solar dish, solar collector, solar furnace", "808": "sombrero", "809": "soup bowl", "810": "space bar", "811": "space heater", "812": "space shuttle", "813": "spatula", "814": "speedboat", "815": "spider web, spider's web", "816": "spindle", "817": "sports car, sport car", "818": "spotlight, spot", "819": "stage", "820": "steam locomotive", "821": "steel arch bridge", "822": "steel drum", "823": "stethoscope", "824": "stole", "825": "stone wall", "826": "stopwatch, stop watch", "827": "stove", "828": "strainer", "829": "streetcar, tram, tramcar, trolley, trolley car", "830": "stretcher", "831": "studio couch, day bed", "832": "stupa, tope", "833": "submarine, pigboat, sub, U-boat", "834": "suit, suit of clothes", "835": "sundial", "836": "sunglass", "837": "sunglasses, dark glasses, shades", "838": "sunscreen, sunblock, sun blocker", "839": "suspension bridge", "840": "swab, swob, mop", "841": "sweatshirt", "842": "swimming trunks, bathing trunks", "843": "swing", "844": "switch, electric switch, electrical switch", "845": "syringe", "846": "table lamp", "847": "tank, army tank, armored combat vehicle, armoured combat vehicle", "848": "tape player", "849": "teapot", "850": "teddy, teddy bear", "851": "television, television system", "852": "tennis ball", "853": "thatch, thatched roof", "854": 
"theater curtain, theatre curtain", "855": "thimble", "856": "thresher, thrasher, threshing machine", "857": "throne", "858": "tile roof", "859": "toaster", "860": "tobacco shop, tobacconist shop, tobacconist", "861": "toilet seat", "862": "torch", "863": "totem pole", "864": "tow truck, tow car, wrecker", "865": "toyshop", "866": "tractor", "867": "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "868": "tray", "869": "trench coat", "870": "tricycle, trike, velocipede", "871": "trimaran", "872": "tripod", "873": "triumphal arch", "874": "trolleybus, trolley coach, trackless trolley", "875": "trombone", "876": "tub, vat", "877": "turnstile", "878": "typewriter keyboard", "879": "umbrella", "880": "unicycle, monocycle", "881": "upright, upright piano", "882": "vacuum, vacuum cleaner", "883": "vase", "884": "vault", "885": "velvet", "886": "vending machine", "887": "vestment", "888": "viaduct", "889": "violin, fiddle", "890": "volleyball", "891": "waffle iron", "892": "wall clock", "893": "wallet, billfold, notecase, pocketbook", "894": "wardrobe, closet, press", "895": "warplane, military plane", "896": "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "897": "washer, automatic washer, washing machine", "898": "water bottle", "899": "water jug", "900": "water tower", "901": "whiskey jug", "902": "whistle", "903": "wig", "904": "window screen", "905": "window shade", "906": "Windsor tie", "907": "wine bottle", "908": "wing", "909": "wok", "910": "wooden spoon", "911": "wool, woolen, woollen", "912": "worm fence, snake fence, snake-rail fence, Virginia fence", "913": "wreck", "914": "yawl", "915": "yurt", "916": "web site, website, internet site, site", "917": "comic book", "918": "crossword puzzle, crossword", "919": "street sign", "920": "traffic light, traffic signal, stoplight", "921": "book jacket, dust cover, dust jacket, dust wrapper", "922": "menu", "923": "plate", "924": "guacamole", "925": "consomme", "926": "hot pot, hotpot", "927": "trifle", "928": "ice cream, icecream", "929": "ice lolly, lolly, lollipop, popsicle", "930": "French loaf", "931": "bagel, beigel", "932": "pretzel", "933": "cheeseburger", "934": "hotdog, hot dog, red hot", "935": "mashed potato", "936": "head cabbage", "937": "broccoli", "938": "cauliflower", "939": "zucchini, courgette", "940": "spaghetti squash", "941": "acorn squash", "942": "butternut squash", "943": "cucumber, cuke", "944": "artichoke, globe artichoke", "945": "bell pepper", "946": "cardoon", "947": "mushroom", "948": "Granny Smith", "949": "strawberry", "950": "orange", "951": "lemon", "952": "fig", "953": "pineapple, ananas", "954": "banana", "955": "jackfruit, jak, jack", "956": "custard apple", "957": "pomegranate", "958": "hay", "959": "carbonara", "960": "chocolate sauce, chocolate syrup", "961": "dough", "962": "meat loaf, meatloaf", "963": "pizza, pizza pie", "964": "potpie", "965": "burrito", "966": "red wine", "967": "espresso", "968": "cup", "969": "eggnog", "970": "alp", "971": "bubble", "972": "cliff, drop, drop-off", "973": "coral reef", "974": "geyser", "975": "lakeside, lakeshore", "976": "promontory, headland, head, foreland", "977": "sandbar, sand bar", "978": "seashore, coast, seacoast, sea-coast", "979": "valley, vale", "980": "volcano", "981": "ballplayer, baseball player", "982": "groom, bridegroom", "983": "scuba diver", "984": "rapeseed", "985": "daisy", "986": "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", "987": "corn", "988": "acorn", 
"989": "hip, rose hip, rosehip", "990": "buckeye, horse chestnut, conker", "991": "coral fungus", "992": "agaric", "993": "gyromitra", "994": "stinkhorn, carrion fungus", "995": "earthstar", "996": "hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa", "997": "bolete", "998": "ear, spike, capitulum", "999": "toilet tissue, toilet paper, bathroom tissue"} -------------------------------------------------------------------------------- /eval_data/panda.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DableUTeeF/keras-efficientnet/f67a5c365e2e96a4029fd5f27de67a027e8dc7ac/eval_data/panda.jpg -------------------------------------------------------------------------------- /g3doc/flops.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DableUTeeF/keras-efficientnet/f67a5c365e2e96a4029fd5f27de67a027e8dc7ac/g3doc/flops.png -------------------------------------------------------------------------------- /g3doc/params.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DableUTeeF/keras-efficientnet/f67a5c365e2e96a4029fd5f27de67a027e8dc7ac/g3doc/params.png -------------------------------------------------------------------------------- /keras_efficientnet/__init__.py: -------------------------------------------------------------------------------- 1 | from .efficientnet_model import * 2 | from .efficientnet_builder import * 3 | from .efficientnet_layers import * 4 | from .keras_model import * 5 | from .load_weights import * 6 | from .extract_weights import * 7 | from .utils import * 8 | from .preprocessing import * 9 | -------------------------------------------------------------------------------- /keras_efficientnet/efficientnet_builder.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Model Builder for EfficientNet.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import os 22 | import re 23 | import tensorflow as tf 24 | 25 | from . 
import efficientnet_model 26 | 27 | 28 | def efficientnet_params(model_name): 29 | """Get efficientnet params based on model name.""" 30 | params_dict = { 31 | # (width_coefficient, depth_coefficient, resolution, dropout_rate) 32 | 'efficientnet-b0': (1.0, 1.0, 224, 0.2), 33 | 'efficientnet-b1': (1.0, 1.1, 240, 0.2), 34 | 'efficientnet-b2': (1.1, 1.2, 260, 0.3), 35 | 'efficientnet-b3': (1.2, 1.4, 300, 0.3), 36 | 'efficientnet-b4': (1.4, 1.8, 380, 0.4), 37 | 'efficientnet-b5': (1.6, 2.2, 456, 0.4), 38 | 'efficientnet-b6': (1.8, 2.6, 528, 0.5), 39 | 'efficientnet-b7': (2.0, 3.1, 600, 0.5), 40 | } 41 | return params_dict[model_name] 42 | 43 | 44 | class BlockDecoder(object): 45 | """Block Decoder for readability.""" 46 | 47 | def _decode_block_string(self, block_string): 48 | """Gets a block through a string notation of arguments.""" 49 | assert isinstance(block_string, str) 50 | ops = block_string.split('_') 51 | options = {} 52 | for op in ops: 53 | splits = re.split(r'(\d.*)', op) 54 | if len(splits) >= 2: 55 | key, value = splits[:2] 56 | options[key] = value 57 | 58 | if 's' not in options or len(options['s']) != 2: 59 | raise ValueError('Strides options should be a pair of integers.') 60 | 61 | return efficientnet_model.BlockArgs( 62 | kernel_size=int(options['k']), 63 | num_repeat=int(options['r']), 64 | input_filters=int(options['i']), 65 | output_filters=int(options['o']), 66 | expand_ratio=int(options['e']), 67 | id_skip=('noskip' not in block_string), 68 | se_ratio=float(options['se']) if 'se' in options else None, 69 | strides=[int(options['s'][0]), int(options['s'][1])]) 70 | 71 | def _encode_block_string(self, block): 72 | """Encodes a block to a string.""" 73 | args = [ 74 | 'r%d' % block.num_repeat, 75 | 'k%d' % block.kernel_size, 76 | 's%d%d' % (block.strides[0], block.strides[1]), 77 | 'e%s' % block.expand_ratio, 78 | 'i%d' % block.input_filters, 79 | 'o%d' % block.output_filters 80 | ] 81 | if block.se_ratio > 0 and block.se_ratio <= 1: 82 | args.append('se%s' % block.se_ratio) 83 | if block.id_skip is False: 84 | args.append('noskip') 85 | return '_'.join(args) 86 | 87 | def decode(self, string_list): 88 | """Decodes a list of string notations to specify blocks inside the network. 89 | 90 | Args: 91 | string_list: a list of strings, each string is a notation of block. 92 | 93 | Returns: 94 | A list of namedtuples to represent blocks arguments. 95 | """ 96 | assert isinstance(string_list, list) 97 | blocks_args = [] 98 | for block_string in string_list: 99 | blocks_args.append(self._decode_block_string(block_string)) 100 | return blocks_args 101 | 102 | def encode(self, blocks_args): 103 | """Encodes a list of Blocks to a list of strings. 104 | 105 | Args: 106 | blocks_args: A list of namedtuples to represent blocks arguments. 107 | Returns: 108 | a list of strings, each string is a notation of block. 
109 | """ 110 | block_strings = [] 111 | for block in blocks_args: 112 | block_strings.append(self._encode_block_string(block)) 113 | return block_strings 114 | 115 | 116 | def efficientnet(width_coefficient=None, 117 | depth_coefficient=None, 118 | dropout_rate=0.2, 119 | drop_connect_rate=0.2): 120 | """Creates a efficientnet model.""" 121 | blocks_args = [ 122 | 'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25', 123 | 'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25', 124 | 'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25', 125 | 'r1_k3_s11_e6_i192_o320_se0.25', 126 | ] 127 | global_params = efficientnet_model.GlobalParams( 128 | batch_norm_momentum=0.99, 129 | batch_norm_epsilon=1e-3, 130 | dropout_rate=dropout_rate, 131 | drop_connect_rate=drop_connect_rate, 132 | data_format='channels_last', 133 | num_classes=1000, 134 | width_coefficient=width_coefficient, 135 | depth_coefficient=depth_coefficient, 136 | depth_divisor=8, 137 | min_depth=None) 138 | decoder = BlockDecoder() 139 | return decoder.decode(blocks_args), global_params 140 | 141 | 142 | def get_model_params(model_name, override_params): 143 | """Get the block args and global params for a given model.""" 144 | if model_name.startswith('efficientnet'): 145 | width_coefficient, depth_coefficient, _, dropout_rate = ( 146 | efficientnet_params(model_name)) 147 | blocks_args, global_params = efficientnet( 148 | width_coefficient, depth_coefficient, dropout_rate) 149 | else: 150 | raise NotImplementedError('model name is not pre-defined: %s' % model_name) 151 | 152 | if override_params: 153 | # ValueError will be raised here if override_params has fields not included 154 | # in global_params. 155 | global_params = global_params._replace(**override_params) 156 | 157 | tf.logging.info('global_params= %s', global_params) 158 | tf.logging.info('blocks_args= %s', blocks_args) 159 | return blocks_args, global_params 160 | 161 | 162 | def build_model(images, 163 | model_name, 164 | training, 165 | override_params=None, 166 | model_dir=None): 167 | """A helper functiion to creates a model and returns predicted logits. 168 | 169 | Args: 170 | images: input images tensor. 171 | model_name: string, the predefined model name. 172 | training: boolean, whether the model is constructed for training. 173 | override_params: A dictionary of params for overriding. Fields must exist in 174 | efficientnet_model.GlobalParams. 175 | model_dir: string, optional model dir for saving configs. 176 | 177 | Returns: 178 | logits: the logits tensor of classes. 179 | endpoints: the endpoints for each layer. 180 | 181 | Raises: 182 | When model_name specified an undefined model, raises NotImplementedError. 183 | When override_params has invalid fields, raises ValueError. 
184 | """ 185 | assert isinstance(images, tf.Tensor) 186 | blocks_args, global_params = get_model_params(model_name, override_params) 187 | 188 | if model_dir: 189 | param_file = os.path.join(model_dir, 'model_params.txt') 190 | if not tf.gfile.Exists(param_file): 191 | with tf.gfile.GFile(param_file, 'w') as f: 192 | tf.logging.info('writing to %s' % param_file) 193 | f.write('model_name= %s\n\n' % model_name) 194 | f.write('global_params= %s\n\n' % str(global_params)) 195 | f.write('blocks_args= %s\n\n' % str(blocks_args)) 196 | 197 | with tf.variable_scope(model_name): 198 | model = efficientnet_model.Model(blocks_args, global_params) 199 | logits = model(images, training=training) 200 | 201 | logits = tf.identity(logits, 'logits') 202 | return logits, model.endpoints 203 | 204 | 205 | def build_model_base(images, model_name, training, override_params=None): 206 | """A helper functiion to create a base model and return global_pool. 207 | 208 | Args: 209 | images: input images tensor. 210 | model_name: string, the model name of a pre-defined MnasNet. 211 | training: boolean, whether the model is constructed for training. 212 | override_params: A dictionary of params for overriding. Fields must exist in 213 | mnasnet_model.GlobalParams. 214 | 215 | Returns: 216 | features: global pool features. 217 | endpoints: the endpoints for each layer. 218 | 219 | Raises: 220 | When model_name specified an undefined model, raises NotImplementedError. 221 | When override_params has invalid fields, raises ValueError. 222 | """ 223 | assert isinstance(images, tf.Tensor) 224 | blocks_args, global_params = get_model_params(model_name, override_params) 225 | 226 | with tf.variable_scope(model_name): 227 | model = efficientnet_model.Model(blocks_args, global_params) 228 | features = model(images, training=training, features_only=True) 229 | 230 | features = tf.identity(features, 'global_pool') 231 | return features, model.endpoints 232 | -------------------------------------------------------------------------------- /keras_efficientnet/efficientnet_layers.py: -------------------------------------------------------------------------------- 1 | from keras import layers 2 | # from . import efficientnet_model as em 3 | import tensorflow as tf 4 | from . 
import utils 5 | relu_fn = tf.nn.swish 6 | 7 | 8 | class ReduceMean(layers.Layer): 9 | def call(self, ip): 10 | spatial_dims = [1, 2] 11 | x = ip 12 | return tf.keras.backend.mean(x, spatial_dims, keepdims=True) 13 | 14 | def compute_output_shape(self, input_shape): 15 | return input_shape 16 | 17 | 18 | class SigmoidMul(layers.Layer): 19 | def call(self, ip): 20 | x, se_expand = ip 21 | return tf.sigmoid(se_expand) * x 22 | 23 | def compute_output_shape(self, input_shape): 24 | return input_shape 25 | 26 | 27 | def mbConvBlock(inputs, block_args, global_params, idx, training=True, drop_connect_rate=None): 28 | filters = block_args.input_filters * block_args.expand_ratio 29 | batch_norm_momentum = global_params.batch_norm_momentum 30 | batch_norm_epsilon = global_params.batch_norm_epsilon 31 | has_se = (block_args.se_ratio is not None) and (block_args.se_ratio > 0) and (block_args.se_ratio <= 1) 32 | x = inputs 33 | # block_name = 'efficientnet-b0_' + 'blocks_' + str(idx) + '_' 34 | block_name = 'blocks_' + str(idx) + '_' 35 | project_conv_name = block_name + 'conv2d' 36 | project_bn_name = block_name + 'tpu_batch_normalization_1' 37 | ndbn_name = block_name + 'tpu_batch_normalization' 38 | if block_args.expand_ratio != 1: 39 | # Expansion phase: 40 | expand_conv = layers.Conv2D(filters, 41 | kernel_size=[1, 1], 42 | strides=[1, 1], 43 | # kernel_initializer=em.conv_kernel_initializer, 44 | padding='same', 45 | use_bias=False, 46 | name=project_conv_name 47 | )(x) 48 | bn0 = layers.BatchNormalization(momentum=batch_norm_momentum, 49 | epsilon=batch_norm_epsilon, 50 | name=ndbn_name)(expand_conv) 51 | project_conv_name = block_name + 'conv2d_1' 52 | ndbn_name = block_name + 'tpu_batch_normalization_1' 53 | project_bn_name = block_name + 'tpu_batch_normalization_2' 54 | 55 | x = layers.Lambda(lambda x: relu_fn(x))(bn0) 56 | 57 | kernel_size = block_args.kernel_size 58 | # Depth-wise convolution phase: 59 | 60 | depthwise_conv = layers.DepthwiseConv2D( 61 | [kernel_size, kernel_size], 62 | strides=block_args.strides, 63 | # depthwise_initializer=em.conv_kernel_initializer, 64 | padding='same', 65 | use_bias=False, 66 | name=block_name + 'depthwise_conv2d' 67 | )(x) 68 | bn1 = layers.BatchNormalization(momentum=batch_norm_momentum, 69 | epsilon=batch_norm_epsilon, 70 | name=ndbn_name 71 | )(depthwise_conv) 72 | x = layers.Lambda(lambda x: relu_fn(x))(bn1) 73 | 74 | if has_se: 75 | num_reduced_filters = max( 76 | 1, int(block_args.input_filters * block_args.se_ratio)) 77 | # Squeeze and Excitation layer. 
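# (How the SE branch below works: ReduceMean global-average-pools the
# H x W x C tensor down to 1 x 1 x C; a 1x1 conv squeezes it to
# max(1, int(input_filters * se_ratio)) channels; swish is applied; a second
# 1x1 conv expands back to C channels; SigmoidMul then gates the original
# tensor elementwise, computing x * sigmoid(se_expand).)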
78 | se_tensor = ReduceMean()(x) 79 | 80 | se_reduce = layers.Conv2D( 81 | num_reduced_filters, 82 | kernel_size=[1, 1], 83 | strides=[1, 1], 84 | # kernel_initializer=em.conv_kernel_initializer, 85 | padding='same', 86 | name=block_name + 'se_' + 'conv2d', 87 | use_bias=True)(se_tensor) 88 | se_reduce = layers.Lambda(lambda x: relu_fn(x))(se_reduce) 89 | se_expand = layers.Conv2D( 90 | filters, 91 | kernel_size=[1, 1], 92 | strides=[1, 1], 93 | # kernel_initializer=em.conv_kernel_initializer, 94 | padding='same', 95 | name=block_name + 'se_' + 'conv2d_1', 96 | use_bias=True)(se_reduce) 97 | x = SigmoidMul()([x, se_expand]) 98 | 99 | # Output phase: 100 | filters = block_args.output_filters 101 | project_conv = layers.Conv2D( 102 | filters, 103 | kernel_size=[1, 1], 104 | strides=[1, 1], 105 | # kernel_initializer=em.conv_kernel_initializer, 106 | padding='same', 107 | name=project_conv_name, 108 | use_bias=False)(x) 109 | x = layers.BatchNormalization(momentum=batch_norm_momentum, 110 | epsilon=batch_norm_epsilon, 111 | name=project_bn_name 112 | )(project_conv) 113 | # x = layers.Lambda(lambda x: em.relu_fn(x))(bn2) 114 | if block_args.id_skip: 115 | if all( 116 | s == 1 for s in block_args.strides 117 | ) and block_args.input_filters == block_args.output_filters: 118 | # only apply drop_connect if a skip connection is present. 119 | if drop_connect_rate: 120 | x = utils.drop_connect(x, training, drop_connect_rate) 121 | x = layers.add([x, inputs]) 122 | return x 123 | -------------------------------------------------------------------------------- /keras_efficientnet/efficientnet_model.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Contains definitions for EfficientNet model. 16 | 17 | [1] Mingxing Tan, Quoc V. Le 18 | EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. 19 | ICML'19, https://arxiv.org/abs/1905.11946 20 | """ 21 | 22 | from __future__ import absolute_import 23 | from __future__ import division 24 | from __future__ import print_function 25 | 26 | import collections 27 | import math 28 | import numpy as np 29 | import six 30 | from six.moves import xrange # pylint: disable=redefined-builtin 31 | import tensorflow as tf 32 | 33 | from . import utils 34 | 35 | GlobalParams = collections.namedtuple('GlobalParams', [ 36 | 'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'data_format', 37 | 'num_classes', 'width_coefficient', 'depth_coefficient', 38 | 'depth_divisor', 'min_depth', 'drop_connect_rate', 39 | ]) 40 | GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields) 41 | 42 | # batchnorm = tf.layers.BatchNormalization 43 | batchnorm = utils.TpuBatchNormalization # TPU-specific requirement.
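# (Note on the two aliases here: `batchnorm` is bound once so that every
# normalization layer in the model can be swapped in one place (the plain
# tf.layers.BatchNormalization alternative is kept commented out above), and
# `relu_fn` just below is actually swish, swish(x) = x * sigmoid(x), the
# activation EfficientNet uses throughout despite the ReLU-style name.)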
44 | relu_fn = tf.nn.swish 45 | 46 | BlockArgs = collections.namedtuple('BlockArgs', [ 47 | 'kernel_size', 'num_repeat', 'input_filters', 'output_filters', 48 | 'expand_ratio', 'id_skip', 'strides', 'se_ratio' 49 | ]) 50 | # defaults will be a public argument for namedtuple in Python 3.7 51 | # https://docs.python.org/3/library/collections.html#collections.namedtuple 52 | BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields) 53 | 54 | 55 | def conv_kernel_initializer(shape, dtype=None, partition_info=None): 56 | """Initialization for convolutional kernels. 57 | 58 | The main difference from tf.variance_scaling_initializer is that 59 | tf.variance_scaling_initializer uses a truncated normal with an uncorrected 60 | standard deviation, whereas here we use a normal distribution. Similarly, 61 | tf.contrib.layers.variance_scaling_initializer uses a truncated normal with 62 | a corrected standard deviation. 63 | 64 | Args: 65 | shape: shape of variable 66 | dtype: dtype of variable 67 | partition_info: unused 68 | 69 | Returns: 70 | an initialization for the variable 71 | """ 72 | del partition_info 73 | kernel_height, kernel_width, _, out_filters = shape 74 | fan_out = int(kernel_height * kernel_width * out_filters) 75 | return tf.random_normal( 76 | shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype) 77 | 78 | 79 | def dense_kernel_initializer(shape, dtype=None, partition_info=None): 80 | """Initialization for dense kernels. 81 | 82 | This initialization is equal to 83 | tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out', 84 | distribution='uniform'). 85 | It is written out explicitly here for clarity. 86 | 87 | Args: 88 | shape: shape of variable 89 | dtype: dtype of variable 90 | partition_info: unused 91 | 92 | Returns: 93 | an initialization for the variable 94 | """ 95 | del partition_info 96 | init_range = 1.0 / np.sqrt(shape[1]) 97 | return tf.random_uniform(shape, -init_range, init_range, dtype=dtype) 98 | 99 | 100 | def round_filters(filters, global_params): 101 | """Round number of filters based on width multiplier.""" 102 | orig_f = filters 103 | multiplier = global_params.width_coefficient 104 | divisor = global_params.depth_divisor 105 | min_depth = global_params.min_depth 106 | if not multiplier: 107 | return filters 108 | 109 | filters *= multiplier 110 | min_depth = min_depth or divisor 111 | new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor) 112 | # Make sure that round down does not go down by more than 10%. 113 | if new_filters < 0.9 * filters: 114 | new_filters += divisor 115 | tf.logging.info('round_filter input={} output={}'.format(orig_f, new_filters)) 116 | return int(new_filters) 117 | 118 | 119 | def round_repeats(repeats, global_params): 120 | """Round number of repeats based on depth multiplier.""" 121 | multiplier = global_params.depth_coefficient 122 | if not multiplier: 123 | return repeats 124 | return int(math.ceil(multiplier * repeats)) 125 | 126 | 127 | class MBConvBlock(object): 128 | """A class of MBConv: Mobile Inverted Residual Bottleneck. 129 | 130 | Attributes: 131 | has_se: boolean. Whether the block contains a Squeeze and Excitation layer 132 | inside. 133 | endpoints: dict. A dict of internal tensors. 134 | """ 135 | 136 | def __init__(self, block_args, global_params): 137 | """Initializes an MBConv block. 138 | 139 | Args: 140 | block_args: BlockArgs, arguments to create a Block. 141 | global_params: GlobalParams, a set of global parameters.
142 | """ 143 | self._block_args = block_args 144 | self._batch_norm_momentum = global_params.batch_norm_momentum 145 | self._batch_norm_epsilon = global_params.batch_norm_epsilon 146 | if global_params.data_format == 'channels_first': 147 | self._channel_axis = 1 148 | self._spatial_dims = [2, 3] 149 | else: 150 | self._channel_axis = -1 151 | self._spatial_dims = [1, 2] 152 | self.has_se = (self._block_args.se_ratio is not None) and ( 153 | self._block_args.se_ratio > 0) and (self._block_args.se_ratio <= 1) 154 | 155 | self.endpoints = None 156 | 157 | # Builds the block accordings to arguments. 158 | self._build() 159 | 160 | def block_args(self): 161 | return self._block_args 162 | 163 | def _build(self): 164 | """Builds block according to the arguments.""" 165 | filters = self._block_args.input_filters * self._block_args.expand_ratio 166 | if self._block_args.expand_ratio != 1: 167 | # Expansion phase: 168 | self._expand_conv = tf.layers.Conv2D( 169 | filters, 170 | kernel_size=[1, 1], 171 | strides=[1, 1], 172 | kernel_initializer=conv_kernel_initializer, 173 | padding='same', 174 | use_bias=False) 175 | self._bn0 = batchnorm( 176 | axis=self._channel_axis, 177 | momentum=self._batch_norm_momentum, 178 | epsilon=self._batch_norm_epsilon) 179 | 180 | kernel_size = self._block_args.kernel_size 181 | # Depth-wise convolution phase: 182 | self._depthwise_conv = utils.DepthwiseConv2D( 183 | [kernel_size, kernel_size], 184 | strides=self._block_args.strides, 185 | depthwise_initializer=conv_kernel_initializer, 186 | padding='same', 187 | use_bias=False) 188 | self._bn1 = batchnorm( 189 | axis=self._channel_axis, 190 | momentum=self._batch_norm_momentum, 191 | epsilon=self._batch_norm_epsilon) 192 | 193 | if self.has_se: 194 | num_reduced_filters = max( 195 | 1, int(self._block_args.input_filters * self._block_args.se_ratio)) 196 | # Squeeze and Excitation layer. 197 | self._se_reduce = tf.layers.Conv2D( 198 | num_reduced_filters, 199 | kernel_size=[1, 1], 200 | strides=[1, 1], 201 | kernel_initializer=conv_kernel_initializer, 202 | padding='same', 203 | use_bias=True) 204 | self._se_expand = tf.layers.Conv2D( 205 | filters, 206 | kernel_size=[1, 1], 207 | strides=[1, 1], 208 | kernel_initializer=conv_kernel_initializer, 209 | padding='same', 210 | use_bias=True) 211 | 212 | # Output phase: 213 | filters = self._block_args.output_filters 214 | self._project_conv = tf.layers.Conv2D( 215 | filters, 216 | kernel_size=[1, 1], 217 | strides=[1, 1], 218 | kernel_initializer=conv_kernel_initializer, 219 | padding='same', 220 | use_bias=False) 221 | self._bn2 = batchnorm( 222 | axis=self._channel_axis, 223 | momentum=self._batch_norm_momentum, 224 | epsilon=self._batch_norm_epsilon) 225 | 226 | def _call_se(self, input_tensor): 227 | """Call Squeeze and Excitation layer. 228 | 229 | Args: 230 | input_tensor: Tensor, a single input tensor for Squeeze/Excitation layer. 231 | 232 | Returns: 233 | A output tensor, which should have the same shape as input. 234 | """ 235 | se_tensor = tf.reduce_mean(input_tensor, self._spatial_dims, keepdims=True) 236 | se_tensor = self._se_expand(relu_fn(self._se_reduce(se_tensor))) 237 | tf.logging.info('Built Squeeze and Excitation with tensor shape: %s' % 238 | se_tensor.shape) 239 | return tf.sigmoid(se_tensor) * input_tensor 240 | 241 | def call(self, inputs, training=True, drop_connect_rate=None): 242 | """Implementation of call(). 243 | 244 | Args: 245 | inputs: the inputs tensor. 246 | training: boolean, whether the model is constructed for training. 
247 | drop_connect_rate: float, between 0 and 1, drop connect rate. 248 | 249 | Returns: 250 | An output tensor. 251 | """ 252 | tf.logging.info('Block input: %s shape: %s' % (inputs.name, inputs.shape)) 253 | if self._block_args.expand_ratio != 1: 254 | x = relu_fn(self._bn0(self._expand_conv(inputs), training=training)) 255 | else: 256 | x = inputs 257 | tf.logging.info('Expand: %s shape: %s' % (x.name, x.shape)) 258 | 259 | x = relu_fn(self._bn1(self._depthwise_conv(x), training=training)) 260 | tf.logging.info('DWConv: %s shape: %s' % (x.name, x.shape)) 261 | 262 | if self.has_se: 263 | with tf.variable_scope('se'): 264 | x = self._call_se(x) 265 | 266 | self.endpoints = {'expansion_output': x} 267 | 268 | x = self._bn2(self._project_conv(x), training=training) 269 | if self._block_args.id_skip: 270 | if all( 271 | s == 1 for s in self._block_args.strides 272 | ) and self._block_args.input_filters == self._block_args.output_filters: 273 | # only apply drop_connect if a skip connection is present. 274 | if drop_connect_rate: 275 | x = utils.drop_connect(x, training, drop_connect_rate) 276 | x = tf.add(x, inputs) 277 | tf.logging.info('Project: %s shape: %s' % (x.name, x.shape)) 278 | return x 279 | 280 | 281 | class Model(tf.keras.Model): 282 | """A class that implements tf.keras.Model for an MNAS-like model. 283 | 284 | Reference: https://arxiv.org/abs/1807.11626 285 | """ 286 | 287 | def __init__(self, blocks_args=None, global_params=None): 288 | """Initializes a `Model` instance. 289 | 290 | Args: 291 | blocks_args: A list of BlockArgs to construct block modules. 292 | global_params: GlobalParams, a set of global parameters. 293 | 294 | Raises: 295 | ValueError: when blocks_args is not specified as a list. 296 | """ 297 | super(Model, self).__init__() 298 | if not isinstance(blocks_args, list): 299 | raise ValueError('blocks_args should be a list.') 300 | self._global_params = global_params 301 | self._blocks_args = blocks_args 302 | self.endpoints = None 303 | self._build() 304 | 305 | def _build(self): 306 | """Builds a model.""" 307 | self._blocks = [] 308 | # Builds blocks. 309 | for block_args in self._blocks_args: 310 | assert block_args.num_repeat > 0 311 | # Update block input/output filters and repeats based on the width and depth multipliers. 312 | block_args = block_args._replace( 313 | input_filters=round_filters(block_args.input_filters, 314 | self._global_params), 315 | output_filters=round_filters(block_args.output_filters, 316 | self._global_params), 317 | num_repeat=round_repeats(block_args.num_repeat, self._global_params)) 318 | 319 | # The first block needs to take care of stride and filter size increase. 320 | self._blocks.append(MBConvBlock(block_args, self._global_params)) 321 | if block_args.num_repeat > 1: 322 | # pylint: disable=protected-access 323 | block_args = block_args._replace( 324 | input_filters=block_args.output_filters, strides=[1, 1]) 325 | # pylint: enable=protected-access 326 | for _ in xrange(block_args.num_repeat - 1): 327 | self._blocks.append(MBConvBlock(block_args, self._global_params)) 328 | 329 | batch_norm_momentum = self._global_params.batch_norm_momentum 330 | batch_norm_epsilon = self._global_params.batch_norm_epsilon 331 | if self._global_params.data_format == 'channels_first': 332 | channel_axis = 1 333 | else: 334 | channel_axis = -1 335 | 336 | # Stem part.
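# (A hand-worked example of round_filters for the stem below, assuming
# depth_divisor=8 as set in efficientnet_builder: width_coefficient=1.0 keeps
# the stem at 32 filters, while 1.4 (b4) gives 32 * 1.4 = 44.8, which rounds
# to int(44.8 + 4) // 8 * 8 = 48; since 48 >= 0.9 * 44.8, no correction is
# needed and the stem widens to 48 filters.)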
337 | self._conv_stem = tf.layers.Conv2D( 338 | filters=round_filters(32, self._global_params), 339 | kernel_size=[3, 3], 340 | strides=[2, 2], 341 | kernel_initializer=conv_kernel_initializer, 342 | padding='same', 343 | use_bias=False) 344 | self._bn0 = batchnorm( 345 | axis=channel_axis, 346 | momentum=batch_norm_momentum, 347 | epsilon=batch_norm_epsilon) 348 | 349 | # Head part. 350 | self._conv_head = tf.layers.Conv2D( 351 | filters=round_filters(1280, self._global_params), 352 | kernel_size=[1, 1], 353 | strides=[1, 1], 354 | kernel_initializer=conv_kernel_initializer, 355 | padding='same', 356 | use_bias=False) 357 | self._bn1 = batchnorm( 358 | axis=channel_axis, 359 | momentum=batch_norm_momentum, 360 | epsilon=batch_norm_epsilon) 361 | 362 | self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D( 363 | data_format=self._global_params.data_format) 364 | self._fc = tf.layers.Dense( 365 | self._global_params.num_classes, 366 | kernel_initializer=dense_kernel_initializer) 367 | 368 | if self._global_params.dropout_rate > 0: 369 | self._dropout = tf.keras.layers.Dropout(self._global_params.dropout_rate) 370 | else: 371 | self._dropout = None 372 | 373 | def call(self, inputs, training=True, features_only=None): 374 | """Implementation of call(). 375 | 376 | Args: 377 | inputs: input tensors. 378 | training: boolean, whether the model is constructed for training. 379 | features_only: build the base feature network only. 380 | 381 | Returns: 382 | output tensors. 383 | """ 384 | self.endpoints = {} 385 | # Calls stem layers. 386 | with tf.variable_scope('stem'): 387 | outputs = relu_fn(self._bn0(self._conv_stem(inputs), training=training)) 388 | tf.logging.info('Built stem layers with output shape: %s' % outputs.shape) 389 | self.endpoints['stem'] = outputs 390 | 391 | # Calls blocks. 392 | reduction_idx = 0 393 | for idx, block in enumerate(self._blocks): 394 | is_reduction = False 395 | if ((idx == len(self._blocks) - 1) or 396 | self._blocks[idx + 1].block_args().strides[0] > 1): 397 | is_reduction = True 398 | reduction_idx += 1 399 | 400 | with tf.variable_scope('blocks_%s' % idx): 401 | drop_rate = self._global_params.drop_connect_rate 402 | if drop_rate: 403 | drop_rate *= float(idx) / len(self._blocks)  # scale linearly with depth: later blocks drop more 404 | tf.logging.info('block_%s drop_connect_rate: %s' % (idx, drop_rate)) 405 | outputs = block.call(outputs, training=training, drop_connect_rate=drop_rate)  # pass the scaled rate so it is actually applied 406 | self.endpoints['block_%s' % idx] = outputs 407 | if is_reduction: 408 | self.endpoints['reduction_%s' % reduction_idx] = outputs 409 | if block.endpoints: 410 | for k, v in six.iteritems(block.endpoints): 411 | self.endpoints['block_%s/%s' % (idx, k)] = v 412 | if is_reduction: 413 | self.endpoints['reduction_%s/%s' % (reduction_idx, k)] = v 414 | self.endpoints['global_pool'] = outputs 415 | 416 | if not features_only: 417 | # Calls final layers and returns logits.
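# (Editor's note: the head that follows is a 1x1 convolution up to
# round_filters(1280, self._global_params) channels with batch norm and
# relu_fn, then global average pooling, optional dropout, and a dense layer
# that produces the logits.)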
418 | with tf.variable_scope('head'): 419 | outputs = relu_fn(self._bn1(self._conv_head(outputs), training=training)) 420 | outputs = self._avg_pooling(outputs) 421 | if self._dropout: 422 | outputs = self._dropout(outputs, training=training) 423 | outputs = self._fc(outputs) 424 | self.endpoints['head'] = outputs 425 | return outputs 426 | -------------------------------------------------------------------------------- /keras_efficientnet/extract_weights.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | import re 5 | from glob import glob 6 | import numpy as np 7 | import tensorflow as tf 8 | from keras.utils.data_utils import get_file 9 | 10 | 11 | # regexes for renaming the tensors to their corresponding Keras counterparts (the Repeat/Block8 rules are inherited from the Inception-ResNet-v2 extractor this script was adapted from; see the readme credits) 12 | re_repeat = re.compile(r'Repeat_[0-9_]*b') 13 | re_block8 = re.compile(r'Block8_[A-Za-z]') 14 | 15 | 16 | def get_filename(key): 17 | """Rename a tensor name to the corresponding Keras layer weight name. 18 | # Arguments 19 | key: tensor name in TF (determined by tf.variable_scope) 20 | """ 21 | filename = str(key) 22 | filename = filename.replace('/', '_') 23 | filename = filename.replace('efficientnet-b0_', '') 24 | 25 | # remove "Repeat" scope from filename 26 | filename = re_repeat.sub('B', filename) 27 | 28 | if re_block8.match(filename): 29 | # the last Block8 has a different name from the previous nine occurrences 30 | filename = filename.replace('Block8', 'Block8_10') 31 | elif filename.startswith('Logits'): 32 | # remove duplicate "Logits" scope 33 | filename = filename.replace('Logits_', '', 1) 34 | 35 | # from TF to Keras naming 36 | filename = filename.replace('_weights', '_kernel') 37 | filename = filename.replace('_biases', '_bias') 38 | 39 | return filename + '.npy' 40 | 41 | 42 | def extract_tensors_from_checkpoint_file(filename, output_folder='weights'): 43 | """Extract tensors from a TF checkpoint file. 44 | # Arguments 45 | filename: TF checkpoint file 46 | output_folder: where to save the output numpy array files 47 | """ 48 | if not os.path.exists(output_folder): 49 | os.makedirs(output_folder) 50 | 51 | reader = tf.train.NewCheckpointReader(filename) 52 | 53 | for key in reader.get_variable_to_shape_map(): 54 | # not saving the following tensors 55 | if key == 'global_step': 56 | continue 57 | if 'AuxLogit' in key: 58 | continue 59 | if 'stem' in key:  # debug output: inspect the stem tensors as they are read 60 | print(key) 61 | print(reader.get_tensor(key)) 62 | 63 | # convert tensor name into the corresponding Keras layer weight name and save 64 | path = os.path.join(output_folder, get_filename(key)) 65 | arr = reader.get_tensor(key) 66 | np.save(path, arr) 67 | # print("tensor_name: ", key) 68 | 69 | 70 | # extract_tensors_from_checkpoint_file('efficientnet-b0/model.ckpt-109400') 71 | -------------------------------------------------------------------------------- /keras_efficientnet/keras_model.py: -------------------------------------------------------------------------------- 1 | from .
import efficientnet_layers as el 2 | from keras import models, layers 3 | 4 | 5 | def keras_efficientnet(blocks_args, global_params, training=False, input_image=None): 6 | inp = input_image if input_image is not None else layers.Input((224, 224, 3)) 7 | x = layers.Conv2D(32, 3, padding='same', strides=2, name='stem_conv2d', use_bias=False)(inp) 8 | x = layers.BatchNormalization(name='stem_tpu_batch_normalization')(x) 9 | x = layers.Lambda(lambda x: el.relu_fn(x))(x) 10 | # x = layers.Activation('swish')(x) 11 | idx = 0 12 | for block in blocks_args: 13 | x = el.mbConvBlock(x, block, global_params, idx, training=training) 14 | # x = MBConvBlock(block, global_params, idx)(x, training=training) 15 | idx += 1 16 | if block.num_repeat > 1: 17 | block = block._replace( 18 | input_filters=block.output_filters, strides=[1, 1]) 19 | for _ in range(block.num_repeat - 1): 20 | x = el.mbConvBlock(x, block, global_params, idx, training=training) 21 | idx += 1 22 | x = layers.Conv2D(1280, 1, name='head_conv2d', use_bias=False)(x) 23 | x = layers.BatchNormalization(name='head_tpu_batch_normalization')(x) 24 | x = layers.Lambda(lambda x: el.relu_fn(x))(x) 25 | x = layers.GlobalAveragePooling2D()(x) 26 | x = layers.Dense(1000, activation='softmax', name='head_dense')(x) 27 | model = models.Model(inp, x, name='efficientnet-b0') 28 | return model 29 | -------------------------------------------------------------------------------- /keras_efficientnet/load_weights.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from tqdm import tqdm 4 | from . import utils 5 | from . import efficientnet_builder as eb 6 | from .keras_model import keras_efficientnet 7 | 8 | 9 | def load_weights(): 10 | WEIGHTS_DIR = '/home/palm/tpu/models/official/efficientnet/weights' 11 | MODEL_DIR = './models' 12 | OUTPUT_WEIGHT_FILENAME = 'efficientnet_b0_weights_tf_dim_ordering_tf_kernels.h5' 13 | model_name = 'efficientnet-b0' 14 | 15 | BatchNormalization = utils.TpuBatchNormalization  # assigned but not used below 16 | print('Instantiating an empty efficientnet model...') 17 | 18 | blocks_args, global_params = eb.get_model_params(model_name, None) 19 | model = keras_efficientnet(blocks_args, global_params) 20 | 21 | print('Loading weights from', WEIGHTS_DIR) 22 | for layer in tqdm(model.layers): 23 | if layer.weights: 24 | weights = [] 25 | for w in layer.weights: 26 | weight_name = os.path.basename(w.name).replace(':0', '') 27 | weight_file = layer.name + '_' + weight_name + '.npy' 28 | weight_arr = np.load(os.path.join(WEIGHTS_DIR, weight_file)) 29 | 30 | # remove the "background" class: the TF checkpoint's logits have 1001 outputs, with index 0 as background 31 | if weight_file.startswith('Logits_bias'): 32 | weight_arr = weight_arr[1:] 33 | elif weight_file.startswith('Logits_kernel'): 34 | weight_arr = weight_arr[:, 1:] 35 | 36 | weights.append(weight_arr) 37 | layer.set_weights(weights) 38 | 39 | print('Saving model weights...') 40 | if not os.path.exists(MODEL_DIR): 41 | os.makedirs(MODEL_DIR) 42 | model.save_weights(os.path.join(MODEL_DIR, OUTPUT_WEIGHT_FILENAME)) 43 | -------------------------------------------------------------------------------- /keras_efficientnet/models/efficientnet_b0_weights_tf_dim_ordering_tf_kernels.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DableUTeeF/keras-efficientnet/f67a5c365e2e96a4029fd5f27de67a027e8dc7ac/keras_efficientnet/models/efficientnet_b0_weights_tf_dim_ordering_tf_kernels.h5
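A note on the naming contract that `load_weights.py` relies on: for every Keras layer, each weight tensor named `<layer>/<weight>:0` is looked up as a file `<layer>_<weight>.npy` in `WEIGHTS_DIR`, which is the format `extract_weights.py` writes. A minimal sketch of that lookup (the layer and weight names below are illustrative examples):

```python
import os


def npy_filename(layer_name, tf_weight_name):
    """Mirrors the filename construction in load_weights.py."""
    # '<layer>/<weight>:0' -> '<weight>', then '<layer>_<weight>.npy'
    weight_name = os.path.basename(tf_weight_name).replace(':0', '')
    return layer_name + '_' + weight_name + '.npy'


# e.g. the stem convolution kernel saved by extract_weights.py:
assert npy_filename('stem_conv2d', 'stem_conv2d/kernel:0') == 'stem_conv2d_kernel.npy'
```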
-------------------------------------------------------------------------------- /keras_efficientnet/preprocessing.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """ImageNet preprocessing.""" 16 | from __future__ import absolute_import 17 | from __future__ import division 18 | from __future__ import print_function 19 | 20 | import tensorflow as tf 21 | 22 | IMAGE_SIZE = 224 23 | CROP_PADDING = 32 24 | 25 | 26 | def distorted_bounding_box_crop(image_bytes, 27 | bbox, 28 | min_object_covered=0.1, 29 | aspect_ratio_range=(0.75, 1.33), 30 | area_range=(0.05, 1.0), 31 | max_attempts=100, 32 | scope=None): 33 | """Generates cropped_image using one of the bboxes randomly distorted. 34 | 35 | See `tf.image.sample_distorted_bounding_box` for more documentation. 36 | 37 | Args: 38 | image_bytes: `Tensor` of binary image data. 39 | bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` 40 | where each coordinate is [0, 1) and the coordinates are arranged 41 | as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole 42 | image. 43 | min_object_covered: An optional `float`. Defaults to `0.1`. The cropped 44 | area of the image must contain at least this fraction of any bounding 45 | box supplied. 46 | aspect_ratio_range: An optional list of `float`s. The cropped area of the 47 | image must have an aspect ratio = width / height within this range. 48 | area_range: An optional list of `float`s. The cropped area of the image 49 | must contain a fraction of the supplied image within this range. 50 | max_attempts: An optional `int`. Number of attempts at generating a cropped 51 | region of the image of the specified constraints. After `max_attempts` 52 | failures, return the entire image. 53 | scope: Optional `str` for name scope. 54 | Returns: 55 | cropped image `Tensor` 56 | """ 57 | with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]): 58 | shape = tf.image.extract_jpeg_shape(image_bytes) 59 | sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( 60 | shape, 61 | bounding_boxes=bbox, 62 | min_object_covered=min_object_covered, 63 | aspect_ratio_range=aspect_ratio_range, 64 | area_range=area_range, 65 | max_attempts=max_attempts, 66 | use_image_if_no_bounding_boxes=True) 67 | bbox_begin, bbox_size, _ = sample_distorted_bounding_box 68 | 69 | # Crop the image to the specified bounding box.
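# (Editor's note: tf.image.decode_and_crop_jpeg expects the crop window as
# [offset_y, offset_x, target_height, target_width], which is exactly what
# the tf.stack below assembles from the sampled bounding box. For example,
# a window of [10, 20, 100, 150] crops a 100x150 patch whose top-left corner
# sits at row 10, column 20 of the decoded image.)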
70 | offset_y, offset_x, _ = tf.unstack(bbox_begin) 71 | target_height, target_width, _ = tf.unstack(bbox_size) 72 | crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) 73 | image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) 74 | 75 | return image 76 | 77 | 78 | def _at_least_x_are_equal(a, b, x): 79 | """At least `x` of `a` and `b` `Tensors` are equal.""" 80 | match = tf.equal(a, b) 81 | match = tf.cast(match, tf.int32) 82 | return tf.greater_equal(tf.reduce_sum(match), x) 83 | 84 | 85 | def _decode_and_random_crop(image_bytes, image_size): 86 | """Make a random crop of image_size.""" 87 | bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) 88 | image = distorted_bounding_box_crop( 89 | image_bytes, 90 | bbox, 91 | min_object_covered=0.1, 92 | aspect_ratio_range=(3. / 4, 4. / 3.), 93 | area_range=(0.08, 1.0), 94 | max_attempts=10, 95 | scope=None) 96 | original_shape = tf.image.extract_jpeg_shape(image_bytes) 97 | bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3) 98 | 99 | image = tf.cond( 100 | bad, 101 | lambda: _decode_and_center_crop(image_bytes, image_size), 102 | lambda: tf.image.resize_bicubic([image], # pylint: disable=g-long-lambda 103 | [image_size, image_size])[0]) 104 | 105 | return image 106 | 107 | 108 | def _decode_and_center_crop(image_bytes, image_size): 109 | """Crops to center of image with padding then scales image_size.""" 110 | shape = tf.image.extract_jpeg_shape(image_bytes) 111 | image_height = shape[0] 112 | image_width = shape[1] 113 | 114 | padded_center_crop_size = tf.cast( 115 | ((image_size / (image_size + CROP_PADDING)) * 116 | tf.cast(tf.minimum(image_height, image_width), tf.float32)), 117 | tf.int32) 118 | 119 | offset_height = ((image_height - padded_center_crop_size) + 1) // 2 120 | offset_width = ((image_width - padded_center_crop_size) + 1) // 2 121 | crop_window = tf.stack([offset_height, offset_width, 122 | padded_center_crop_size, padded_center_crop_size]) 123 | image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) 124 | image = tf.image.resize_bicubic([image], [image_size, image_size])[0] 125 | 126 | return image 127 | 128 | 129 | def _flip(image): 130 | """Random horizontal image flip.""" 131 | image = tf.image.random_flip_left_right(image) 132 | return image 133 | 134 | 135 | def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE): 136 | """Preprocesses the given image for training. 137 | 138 | Args: 139 | image_bytes: `Tensor` representing an image binary of arbitrary size. 140 | use_bfloat16: `bool` for whether to use bfloat16. 141 | image_size: image size. 142 | 143 | Returns: 144 | A preprocessed image `Tensor`. 145 | """ 146 | image = _decode_and_random_crop(image_bytes, image_size) 147 | image = _flip(image) 148 | image = tf.reshape(image, [image_size, image_size, 3]) 149 | image = tf.image.convert_image_dtype( 150 | image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) 151 | return image 152 | 153 | 154 | def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE): 155 | """Preprocesses the given image for evaluation. 156 | 157 | Args: 158 | image_bytes: `Tensor` representing an image binary of arbitrary size. 159 | use_bfloat16: `bool` for whether to use bfloat16. 160 | image_size: image size. 161 | 162 | Returns: 163 | A preprocessed image `Tensor`.
164 | """ 165 | image = _decode_and_center_crop(image_bytes, image_size) 166 | image = tf.reshape(image, [image_size, image_size, 3]) 167 | image = tf.image.convert_image_dtype( 168 | image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) 169 | return image 170 | 171 | 172 | def preprocess_image(image_bytes, 173 | is_training=False, 174 | use_bfloat16=False, 175 | image_size=IMAGE_SIZE): 176 | """Preprocesses the given image. 177 | 178 | Args: 179 | image_bytes: `Tensor` representing an image binary of arbitrary size. 180 | is_training: `bool` for whether the preprocessing is for training. 181 | use_bfloat16: `bool` for whether to use bfloat16. 182 | image_size: image size. 183 | 184 | Returns: 185 | A preprocessed image `Tensor` with value range of [0, 255]. 186 | """ 187 | if is_training: 188 | return preprocess_for_train(image_bytes, use_bfloat16, image_size) 189 | else: 190 | return preprocess_for_eval(image_bytes, use_bfloat16, image_size) 191 | -------------------------------------------------------------------------------- /keras_efficientnet/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | """Model utilities.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import os 22 | import numpy as np 23 | import tensorflow as tf 24 | 25 | from tensorflow.contrib.tpu.python.ops import tpu_ops 26 | from tensorflow.contrib.tpu.python.tpu import tpu_function 27 | 28 | 29 | def build_learning_rate(initial_lr, 30 | global_step, 31 | steps_per_epoch=None, 32 | lr_decay_type='exponential', 33 | decay_factor=0.97, 34 | decay_epochs=2.4, 35 | total_steps=None, 36 | warmup_epochs=5): 37 | """Build learning rate.""" 38 | if lr_decay_type == 'exponential': 39 | assert steps_per_epoch is not None 40 | decay_steps = steps_per_epoch * decay_epochs 41 | lr = tf.train.exponential_decay( 42 | initial_lr, global_step, decay_steps, decay_factor, staircase=True) 43 | elif lr_decay_type == 'cosine': 44 | assert total_steps is not None 45 | lr = 0.5 * initial_lr * ( 46 | 1 + tf.cos(np.pi * tf.cast(global_step, tf.float32) / total_steps)) 47 | elif lr_decay_type == 'constant': 48 | lr = initial_lr 49 | else: 50 | assert False, 'Unknown lr_decay_type: %s' % lr_decay_type 51 | 52 | if warmup_epochs: 53 | tf.logging.info('Learning rate warmup_epochs: %d' % warmup_epochs) 54 | warmup_steps = int(warmup_epochs * steps_per_epoch) 55 | warmup_lr = ( 56 | initial_lr * tf.cast(global_step, tf.float32) / tf.cast( 57 | warmup_steps, tf.float32)) 58 | lr = tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr) 59 | 60 | return lr 61 | 62 | 63 | def build_optimizer(learning_rate, 64 | optimizer_name='rmsprop', 65 | decay=0.9, 66 | epsilon=0.001, 67 | momentum=0.9): 68 | """Build optimizer.""" 69 | if optimizer_name == 'sgd': 70 | tf.logging.info('Using SGD optimizer') 71 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 72 | elif optimizer_name == 'momentum': 73 | tf.logging.info('Using Momentum optimizer') 74 | optimizer = tf.train.MomentumOptimizer( 75 | learning_rate=learning_rate, momentum=momentum) 76 | elif optimizer_name == 'rmsprop': 77 | tf.logging.info('Using RMSProp optimizer') 78 | optimizer = tf.train.RMSPropOptimizer(learning_rate, decay, momentum, 79 | epsilon) 80 | else: 81 | tf.logging.fatal('Unknown optimizer: %s', optimizer_name) 82 | 83 | return optimizer 84 | 85 | 86 | class TpuBatchNormalization(tf.layers.BatchNormalization): 87 | 88 | """Cross replica batch normalization.""" 89 | 90 | def __init__(self, fused=False, **kwargs): 91 | if fused in (True, None): 92 | raise ValueError('TpuBatchNormalization does not support fused=True.') 93 | super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs) 94 | 95 | def _cross_replica_average(self, t, num_shards_per_group): 96 | """Calculates the average value of input tensor across TPU replicas.""" 97 | num_shards = tpu_function.get_tpu_context().number_of_shards 98 | group_assignment = None 99 | if num_shards_per_group > 1: 100 | if num_shards % num_shards_per_group != 0: 101 | raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0' 102 | % (num_shards, num_shards_per_group)) 103 | num_groups = num_shards // num_shards_per_group 104 | group_assignment = [[ 105 | x for x in range(num_shards) if x // num_shards_per_group == y 106 | ] for y in range(num_groups)] 107 | return tpu_ops.cross_replica_sum(t, group_assignment) / tf.cast( 108 | num_shards_per_group,
t.dtype) 109 | 110 | def _moments(self, inputs, reduction_axes, keep_dims): 111 | """Compute the mean and variance: it overrides the original _moments.""" 112 | shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments( 113 | inputs, reduction_axes, keep_dims=keep_dims) 114 | 115 | num_shards = tpu_function.get_tpu_context().number_of_shards or 1 116 | if num_shards <= 8: # Skip cross_replica for 2x2 or smaller slices. 117 | num_shards_per_group = 1 118 | else: 119 | num_shards_per_group = max(8, num_shards // 4) 120 | tf.logging.info('TpuBatchNormalization with num_shards_per_group %s', 121 | num_shards_per_group) 122 | if num_shards_per_group > 1: 123 | # Each group has multiple replicas: here we compute group mean/variance by 124 | # aggregating per-replica mean/variance. 125 | group_mean = self._cross_replica_average(shard_mean, num_shards_per_group) 126 | group_variance = self._cross_replica_average(shard_variance, 127 | num_shards_per_group) 128 | 129 | # Group variance needs to also include the difference between shard_mean 130 | # and group_mean. 131 | mean_distance = tf.square(group_mean - shard_mean) 132 | group_variance += self._cross_replica_average(mean_distance, 133 | num_shards_per_group) 134 | return (group_mean, group_variance) 135 | else: 136 | return (shard_mean, shard_variance) 137 | 138 | 139 | def drop_connect(inputs, is_training, drop_connect_rate): 140 | """Apply drop connect.""" 141 | if not is_training: 142 | return inputs 143 | 144 | # Compute keep_prob 145 | # TODO(tanmingxing): add support for training progress. 146 | keep_prob = 1.0 - drop_connect_rate 147 | 148 | # Compute drop_connect tensor: keep_prob + uniform[0, 1) floors to 1 with probability keep_prob, else 0 149 | batch_size = tf.shape(inputs)[0] 150 | random_tensor = keep_prob 151 | random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype) 152 | binary_tensor = tf.floor(random_tensor) 153 | output = tf.div(inputs, keep_prob) * binary_tensor 154 | return output 155 | 156 | 157 | def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path): 158 | """Archive a checkpoint if the metric is better.""" 159 | ckpt_dir, ckpt_name = os.path.split(ckpt_path) 160 | 161 | saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt') 162 | saved_objective = float('-inf') 163 | if tf.gfile.Exists(saved_objective_path): 164 | with tf.gfile.GFile(saved_objective_path, 'r') as f: 165 | saved_objective = float(f.read()) 166 | if saved_objective > ckpt_objective: 167 | tf.logging.info('Ckpt %s is worse than %s', ckpt_objective, saved_objective) 168 | return False 169 | 170 | filenames = tf.gfile.Glob(ckpt_path + '.*') 171 | if not filenames:  # tf.gfile.Glob returns a list; guard against no matches 172 | tf.logging.info('No files to copy for checkpoint %s', ckpt_path) 173 | return False 174 | 175 | # Clear the old folder. 176 | dst_dir = os.path.join(ckpt_dir, 'archive') 177 | if tf.gfile.Exists(dst_dir): 178 | tf.gfile.DeleteRecursively(dst_dir) 179 | tf.gfile.MakeDirs(dst_dir) 180 | 181 | # Write checkpoints. 182 | for f in filenames: 183 | dest = os.path.join(dst_dir, os.path.basename(f)) 184 | tf.gfile.Copy(f, dest, overwrite=True) 185 | ckpt_state = tf.train.generate_checkpoint_state_proto( 186 | dst_dir, 187 | model_checkpoint_path=ckpt_name, 188 | all_model_checkpoint_paths=[ckpt_name]) 189 | with tf.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f: 190 | f.write(str(ckpt_state)) 191 | with tf.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f: 192 | f.write('%s' % ckpt_eval) 193 | 194 | # Update the best objective.
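# (Editor's note: best_objective.txt persists the best metric seen so far;
# it is what the early-return check at the top of archive_ckpt compares
# against, so the archive directory is only rewritten when the new
# checkpoint improves on the stored objective.)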
195 | with tf.gfile.GFile(saved_objective_path, 'w') as f: 196 | f.write('%f' % ckpt_objective) 197 | 198 | tf.logging.info('Copying checkpoint %s to %s', ckpt_path, dst_dir) 199 | return True 200 | 201 | 202 | # TODO(hongkuny): Consolidate this as a common library cross models. 203 | class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, tf.layers.Layer): 204 | """Wrap keras DepthwiseConv2D to tf.layers.""" 205 | 206 | pass 207 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from keras_efficientnet.keras_model import keras_efficientnet 2 | from keras_efficientnet import efficientnet_builder as eb 3 | from keras_efficientnet import preprocessing 4 | import tensorflow as tf 5 | import numpy as np 6 | import json 7 | import cv2 8 | import os 9 | os.environ['CUDA_VISIBLE_DEVICES'] = ""  # run on CPU 10 | 11 | if __name__ == '__main__': 12 | model_name = 'efficientnet-b0' 13 | labels_map_file = 'eval_data/labels_map.txt' 14 | image_file = 'eval_data/panda.jpg' 15 | training = False 16 | blocks_args, global_params = eb.get_model_params(model_name, None) 17 | model = keras_efficientnet(blocks_args, global_params, training) 18 | model.load_weights('models/efficientnet_b0_weights_tf_dim_ordering_tf_kernels.h5') 19 | model.summary() 20 | MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255] 21 | STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255] 22 | 23 | image_string = tf.read_file(image_file) 24 | image_decoded = preprocessing.preprocess_image(image_string, training, image_size=224)  # pass image_size by keyword so 224 is not consumed by use_bfloat16 25 | image = tf.cast(image_decoded, tf.float32) 26 | image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype) 27 | image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype) 28 | y = model.predict(tf.expand_dims(image, 0), steps=1)[0] 29 | 30 | label_map = json.loads(open(labels_map_file).read()) 31 | pred_idx = np.argsort(y)[::-1] 32 | image = cv2.imread(image_file) 33 | for i in range(5): 34 | cv2.putText(image, 35 | ' -> top_{} ({:4.2f}%): {} '.format(i + 1, y[pred_idx[i]] * 100, label_map[str(pred_idx[i])]), 36 | (2, 14+(i*14)), 37 | cv2.FONT_HERSHEY_SIMPLEX, 38 | 0.5, 39 | (0, 255, 0), 2) 40 | print(' -> top_{} ({:4.2f}%): {} '.format(i+1, y[pred_idx[i]] * 100, label_map[str(pred_idx[i])])) 41 | cv2.imshow('image', image) 42 | cv2.waitKey() 43 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # A Keras implementation of EfficientNet 2 | # EfficientNets 3 | 4 | [1] Mingxing Tan and Quoc V. Le. EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. ICML 2019. 5 | Arxiv link: https://arxiv.org/abs/1905.11946. 6 | 7 | 8 | ## 1. About EfficientNet Models 9 | 10 | EfficientNets are a family of image classification models that achieve state-of-the-art accuracy while being an order of magnitude smaller and faster than previous models. 11 | 12 | We develop EfficientNets based on AutoML and Compound Scaling. In particular, we first use the [AutoML Mobile framework](https://ai.googleblog.com/2018/08/mnasnet-towards-automating-design-of.html) to develop a mobile-size baseline network, named EfficientNet-B0; then we use the compound scaling method to scale up this baseline to obtain EfficientNet-B1 to B7. 13 | 14 | *(Figure: ImageNet accuracy versus number of parameters (g3doc/params.png) and versus FLOPS (g3doc/flops.png).)*
24 | 25 | EfficientNets achieve state-of-the-art accuracy on ImageNet with an order of magnitude better efficiency: 26 | 27 | 28 | * In the high-accuracy regime, our EfficientNet-B7 achieves state-of-the-art 84.4% top-1 / 97.1% top-5 accuracy on ImageNet with 66M parameters and 37B FLOPS, while being 8.4x smaller and 6.1x faster on CPU inference than the previous best, [Gpipe](https://arxiv.org/abs/1811.06965). 29 | 30 | * In the middle-accuracy regime, our EfficientNet-B1 is 7.6x smaller and 5.7x faster on CPU inference than [ResNet-152](https://arxiv.org/abs/1512.03385), with similar ImageNet accuracy. 31 | 32 | * Compared with the widely used [ResNet-50](https://arxiv.org/abs/1512.03385), our EfficientNet-B4 improves top-1 accuracy from ResNet-50's 76.3% to 82.6% (+6.3%) under a similar FLOPS constraint. 33 | 34 | ## 2. Get the weights 35 | To get the official TF weights: 36 | * Create a Colab file in your Google Drive. 37 | * Mount the drive with 38 | ``` 39 | from google.colab import drive 40 | drive.mount('/content/drive/') 41 | ``` 42 | * Run this command in the Colab you created: 43 | 44 | ```!gsutil cp -r gs://cloud-tpu-checkpoints/efficientnet "/content/drive/My Drive/effnets/"``` 45 | * Download the effnets folder from your drive and extract it somewhere. 46 | 47 | * Extract the weights from each layer to a directory by running: 48 | ```python 49 | from extract_weights import extract_tensors_from_checkpoint_file 50 | 51 | extract_tensors_from_checkpoint_file('efficientnet-b0/model.ckpt-109400') # change this line to your extracted directory 52 | ``` 53 | 54 | * Each TF weights directory should look like this: 55 | ``` 56 | best_eval.txt 57 | checkpoint 58 | model.ckpt-12345.data-00000-of-00001 59 | model.ckpt-12345.index 60 | model.ckpt-12345.meta 61 | ``` 62 | Use `model.ckpt-12345` in this case. 63 | 64 | * Create the HDF5 weights by running `load_weights.py`; make sure to change `WEIGHTS_DIR` and `model_name` first. 65 | 66 | * An example that tests a panda image is in `main.py`. 67 | 68 | # Credit 69 | ## Layers and utils from 70 | https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet 71 | 72 | ## load_weights and extract_weights implemented from 73 | https://github.com/yuyang-huang/keras-inception-resnet-v2 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | 4 | setuptools.setup( 5 | name="keras_efficientnet", 6 | version="0.1.4", 7 | author="Natthaphon Hongcharoen", 8 | author_email="palm22180@gmail.com", 9 | description="A Keras implementation of EfficientNet", 10 | # long_description="A Keras implementation of EfficientNet", 11 | # long_description_content_type="text/markdown", 12 | url="https://github.com/DableUTeeF/keras_efficientnet", 13 | packages=setuptools.find_packages(), 14 | classifiers=[ 15 | "Programming Language :: Python :: 3", 16 | "License :: OSI Approved :: MIT License", 17 | "Operating System :: OS Independent", 18 | ], 19 | ) 20 | --------------------------------------------------------------------------------
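For completeness, a sketch of using the package that this `setup.py` defines, assuming the HDF5 weights were already produced by `load_weights.py`; the weight path below is illustrative:

```python
# pip install .
from keras_efficientnet import efficientnet_builder as eb
from keras_efficientnet.keras_model import keras_efficientnet

# Build the B0 architecture and load the converted Keras weights.
blocks_args, global_params = eb.get_model_params('efficientnet-b0', None)
model = keras_efficientnet(blocks_args, global_params)
model.load_weights(
    'keras_efficientnet/models/efficientnet_b0_weights_tf_dim_ordering_tf_kernels.h5')
model.summary()
```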