├── .gitattributes ├── LICENSE ├── README.md ├── examples ├── caffe │ ├── inference │ │ ├── intel-caffe │ │ │ ├── ctpn │ │ │ │ ├── README.md │ │ │ │ ├── ctpn.Dockerfile │ │ │ │ ├── ctpn.conf │ │ │ │ └── demo_service.py │ │ │ └── rfcn │ │ │ │ ├── README.md │ │ │ │ ├── demo_service.py │ │ │ │ ├── rfcn.Dockerfile │ │ │ │ └── rfcn.conf │ │ ├── mnist │ │ │ ├── 2.jpg │ │ │ ├── README.md │ │ │ ├── caffe_mnist.conf │ │ │ ├── checkpoint_dir │ │ │ │ ├── mnist_model.caffemodel │ │ │ │ └── mnist_model.prototxt │ │ │ ├── mnist-cpu.Dockerfile │ │ │ ├── mnist-gpu.Dockerfile │ │ │ └── mnist_inference.py │ │ ├── mtcnn │ │ │ ├── README.md │ │ │ ├── code │ │ │ │ ├── mtcnn.py │ │ │ │ └── mtcnn_inference.py │ │ │ ├── mtcnn-caffe-cpu.Dockerfile │ │ │ ├── mtcnn-intel-caffe-cpu.Dockerfile │ │ │ └── mtcnn.conf │ │ ├── nsfw │ │ │ ├── README.md │ │ │ ├── caffe_nsfw.conf │ │ │ ├── code │ │ │ │ └── nsfw_inference.py │ │ │ └── nsfw-cpu.Dockerfile │ │ └── rfcn │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── rfcn-gpu.Dockerfile │ │ │ ├── rfcn-nms-gpu.Dockerfile │ │ │ ├── rfcn-nms.conf │ │ │ ├── rfcn.conf │ │ │ └── rfcn_inference.py │ └── train │ │ ├── faster-rcnn │ │ ├── README.md │ │ ├── code │ │ │ ├── lib │ │ │ │ ├── Makefile │ │ │ │ ├── datasets │ │ │ │ │ ├── VOCdevkit-matlab-wrapper │ │ │ │ │ │ ├── get_voc_opts.m │ │ │ │ │ │ ├── voc_eval.m │ │ │ │ │ │ └── xVOCap.m │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── coco.py │ │ │ │ │ ├── ds_utils.py │ │ │ │ │ ├── factory.py │ │ │ │ │ ├── imdb.py │ │ │ │ │ ├── pascal_voc.py │ │ │ │ │ ├── tools │ │ │ │ │ │ └── mcg_munge.py │ │ │ │ │ └── voc_eval.py │ │ │ │ ├── fast_rcnn │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── bbox_transform.py │ │ │ │ │ ├── config.py │ │ │ │ │ ├── nms_wrapper.py │ │ │ │ │ ├── test.py │ │ │ │ │ └── train.py │ │ │ │ ├── nms │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── cpu_nms.pyx │ │ │ │ │ ├── gpu_nms.hpp │ │ │ │ │ ├── gpu_nms.pyx │ │ │ │ │ ├── nms_kernel.cu │ │ │ │ │ └── py_cpu_nms.py │ │ │ │ ├── pycocotools │ │ │ │ │ ├── UPSTREAM_REV │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── _mask.c │ │ │ │ │ ├── _mask.pyx │ │ │ │ │ ├── _mask.so │ │ │ │ │ ├── coco.py │ │ │ │ │ ├── cocoeval.py │ │ │ │ │ ├── license.txt │ │ │ │ │ ├── mask.py │ │ │ │ │ ├── maskApi.c │ │ │ │ │ └── maskApi.h │ │ │ │ ├── roi_data_layer │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── layer.py │ │ │ │ │ ├── minibatch.py │ │ │ │ │ └── roidb.py │ │ │ │ ├── rpn │ │ │ │ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── anchor_target_layer.py │ │ │ │ │ ├── generate.py │ │ │ │ │ ├── generate_anchors.py │ │ │ │ │ ├── proposal_layer.py │ │ │ │ │ └── proposal_target_layer.py │ │ │ │ ├── setup.py │ │ │ │ ├── transform │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── torch_image_transform_layer.py │ │ │ │ └── utils │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── bbox.pyx │ │ │ │ │ ├── blob.py │ │ │ │ │ └── timer.py │ │ │ └── tools │ │ │ │ ├── .train_faster_rcnn_alt_opt.py.swp │ │ │ │ ├── README.md │ │ │ │ ├── _init_paths.py │ │ │ │ ├── compress_net.py │ │ │ │ ├── demo.py │ │ │ │ ├── eval_recall.py │ │ │ │ ├── reval.py │ │ │ │ ├── rpn_generate.py │ │ │ │ ├── test_net.py │ │ │ │ ├── train_faster_rcnn_alt_opt.py │ │ │ │ ├── train_net.py │ │ │ │ └── train_svms.py │ │ └── docker │ │ │ ├── Makefile.config │ │ │ ├── README.md │ │ │ └── gpu_ubuntu-14.04_python-2.7.6_caffe-py-faster-rcnn.Dockerfile │ │ ├── imagenet │ │ └── code │ │ │ ├── README.md │ │ │ ├── resnet_101.prototxt │ │ │ ├── resnet_101_solver.prototxt │ │ │ └── train.py │ │ ├── mnist │ │ └── code │ │ │ ├── lenet_solver.prototxt │ │ │ ├── lenet_train_test.prototxt │ │ │ └── train.py │ │ 
└── rfcn │ │ ├── README.md │ │ └── code │ │ └── tools │ │ ├── README.md │ │ ├── _init_paths.py │ │ ├── compress_net.py │ │ ├── demo.py │ │ ├── demo_rfcn.py │ │ ├── eval_recall.py │ │ ├── reval.py │ │ ├── rpn_generate.py │ │ ├── test_net.py │ │ ├── train_faster_rcnn_alt_opt.py │ │ ├── train_net.py │ │ ├── train_rfcn_alt_opt_5stage.py │ │ └── train_svms.py ├── caffe2 │ └── train │ │ └── detectron │ │ ├── README.md │ │ ├── dataset_catalog.py │ │ ├── detectron_voc_example.Dockerfile │ │ ├── uai_tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml │ │ └── uai_tutorial_1gpu_e2e_faster_rcnn_coco_R-50-FPN.yaml ├── case-study │ ├── face-compare │ │ ├── README.md │ │ ├── code │ │ │ ├── facecompare_service.py │ │ │ ├── facenet_json_inference.py │ │ │ └── gen_example_json.py │ │ ├── face-service.Dockerfile │ │ ├── face-service.conf │ │ ├── facenet-compare-json-cpu.Dockerfile │ │ ├── facenet-compare-json.conf │ │ ├── facenet-mtcnn-json-cpu.Dockerfile │ │ └── facenet-mtcnn-json.conf │ └── nsfw-vedio │ │ ├── README.md │ │ ├── code │ │ ├── nsfw_stream_inference.py │ │ └── nsfw_video_inference.py │ │ ├── config │ │ ├── caffe_nsfw_stream.conf │ │ ├── caffe_nsfw_video.conf │ │ └── gunicorn.conf.py │ │ ├── nsfw-stream-cpu.Dockerfile │ │ └── nsfw-video-cpu.Dockerfile ├── keras │ ├── inference │ │ └── mnist │ │ │ ├── 2.jpg │ │ │ ├── README.md │ │ │ ├── checkpoint_dir │ │ │ ├── mnist_model.h5 │ │ │ └── mnist_model.json │ │ │ ├── keras_mnist.conf │ │ │ ├── mnist-cpu.Dockerfile │ │ │ └── mnist_inference.py │ └── train │ │ └── mnist │ │ ├── code │ │ ├── mnist_cnn.py │ │ └── mnist_datasets.py │ │ └── data │ │ └── mnist.npz ├── mxnet │ ├── inference │ │ └── mnist │ │ │ ├── 2.jpg │ │ │ ├── README.md │ │ │ ├── checkpoint_dir │ │ │ ├── mnist-model-0010.params │ │ │ └── mnist-model-symbol.json │ │ │ ├── mnist-cpu.Dockerfile │ │ │ ├── mnist_inference.py │ │ │ └── mxnet_mnist.conf │ ├── insightface │ │ ├── inference │ │ │ ├── README.md │ │ │ ├── code │ │ │ │ └── insightface_infer.py │ │ │ ├── insightface_infer.conf │ │ │ ├── insightface_infer_cpu.Dockerfile │ │ │ └── insightface_infer_gpu.Dockerfile │ │ └── train │ │ │ ├── README.md │ │ │ ├── code │ │ │ ├── train_softmax_dist.py │ │ │ └── train_softmax_single.py │ │ │ ├── data │ │ │ ├── README.md │ │ │ └── dir2lmk.py │ │ │ └── insightface.Dockerfile │ └── train │ │ ├── cifar │ │ ├── README.md │ │ └── code │ │ │ ├── common │ │ │ ├── __init__.py │ │ │ ├── data.py │ │ │ ├── find_mxnet.py │ │ │ ├── fit.py │ │ │ ├── modelzoo.py │ │ │ └── util.py │ │ │ ├── symbols │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── alexnet.py │ │ │ ├── googlenet.py │ │ │ ├── inception-bn.py │ │ │ ├── inception-resnet-v2.py │ │ │ ├── inception-v3.py │ │ │ ├── inception-v4.py │ │ │ ├── lenet.py │ │ │ ├── mlp.py │ │ │ ├── mobilenet.py │ │ │ ├── resnet-v1.py │ │ │ ├── resnet.py │ │ │ ├── resnext.py │ │ │ └── vgg.py │ │ │ └── train_cifar10.py │ │ ├── imagenet │ │ ├── README.md │ │ └── code │ │ │ ├── common │ │ │ ├── __init__.py │ │ │ ├── data.py │ │ │ ├── find_mxnet.py │ │ │ ├── fit.py │ │ │ ├── modelzoo.py │ │ │ └── util.py │ │ │ ├── symbols │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── alexnet.py │ │ │ ├── googlenet.py │ │ │ ├── inception-bn.py │ │ │ ├── inception-resnet-v2.py │ │ │ ├── inception-v3.py │ │ │ ├── inception-v4.py │ │ │ ├── lenet.py │ │ │ ├── mlp.py │ │ │ ├── mobilenet.py │ │ │ ├── resnet-v1.py │ │ │ ├── resnet.py │ │ │ ├── resnext.py │ │ │ └── vgg.py │ │ │ └── train_imagenet.py │ │ └── mnist │ │ ├── code │ │ ├── common │ │ │ ├── __init__.py │ │ │ ├── find_mxnet.py │ │ │ └── fit.py │ │ 
├── symbols │ │ │ ├── __init__.py │ │ │ ├── lenet.py │ │ │ └── mlp.py │ │ └── train_mnist.py │ │ └── data │ │ ├── t10k-images-idx3-ubyte.gz │ │ ├── t10k-labels-idx1-ubyte.gz │ │ ├── train-images-idx3-ubyte.gz │ │ └── train-labels-idx1-ubyte.gz ├── pytorch │ └── train │ │ ├── imagenet │ │ └── code │ │ │ ├── README.md │ │ │ └── main_ddp.py │ │ └── mnist │ │ ├── code │ │ └── mnist.py │ │ └── data │ │ └── processed │ │ ├── test.pt │ │ └── training.pt ├── tensorflow-2.0 │ ├── imagenet │ │ └── train │ │ │ ├── README.md │ │ │ ├── code │ │ │ ├── imagenet.py │ │ │ ├── imagenet_main.py │ │ │ ├── imagenet_utils.py │ │ │ ├── resnet_model.py │ │ │ └── vgg_preprocessing.py │ │ │ └── imagenet_tf2.0.Dockerfile │ └── mnist │ │ ├── inference │ │ ├── mnist_inference.py │ │ └── tf_mnist.conf │ │ └── train │ │ ├── README.md │ │ ├── mnist.Dockerfile │ │ ├── mnist.py │ │ ├── mnist_data.py │ │ └── mnist_dist.py ├── tensorflow │ ├── inference │ │ ├── cifar_simple │ │ │ ├── README.md │ │ │ ├── cifar.conf │ │ │ ├── cifar_infer.Dockerfile │ │ │ ├── code │ │ │ │ ├── __init__.py │ │ │ │ ├── cifar10.py │ │ │ │ └── cifar10_input.py │ │ │ ├── inference │ │ │ │ ├── checkpoint_dir │ │ │ │ │ └── intro.txt │ │ │ │ └── cifar_infer.py │ │ │ └── test_images │ │ │ │ ├── bird.png │ │ │ │ └── deer.png │ │ ├── crnn │ │ │ ├── CRNN_Tensorflow │ │ │ │ ├── crnn_model │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── cnn_basenet.py │ │ │ │ │ └── crnn_model.py │ │ │ │ ├── data │ │ │ │ │ ├── char_dict │ │ │ │ │ │ ├── char_dict.json │ │ │ │ │ │ └── ord_map.json │ │ │ │ │ └── test_images │ │ │ │ │ │ ├── test_01.jpg │ │ │ │ │ │ ├── test_02.jpg │ │ │ │ │ │ └── test_03.jpg │ │ │ │ ├── data_provider │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_data_provider.py │ │ │ │ │ └── data_provider.py │ │ │ │ ├── global_configuration │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── config.py │ │ │ │ └── local_utils │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── data_utils.py │ │ │ │ │ ├── establish_char_dict.py │ │ │ │ │ └── log_utils.py │ │ │ ├── README.md │ │ │ ├── inference │ │ │ │ ├── checkpoint_dir │ │ │ │ │ └── intro.txt │ │ │ │ └── ocr_inference.py │ │ │ ├── ocr.Dockerfile │ │ │ └── ocr.conf │ │ ├── crnn_chinese │ │ │ ├── README.md │ │ │ ├── code │ │ │ │ ├── crnn_model │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── cnn_basenet.py │ │ │ │ │ └── crnn_model.py │ │ │ │ ├── data_provider │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_data_provider.py │ │ │ │ │ └── data_provider.py │ │ │ │ ├── global_configuration │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── config.py │ │ │ │ ├── local_utils │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── data_utils.py │ │ │ │ │ ├── establish_char_dict.py │ │ │ │ │ ├── log_utils.py │ │ │ │ │ └── tensorboard_vis_summary.py │ │ │ │ └── tools │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── inference.py │ │ │ │ │ └── train_shadownet.py │ │ │ ├── crnn_multi.conf │ │ │ ├── crnn_multi_infer.Dockerfile │ │ │ ├── data │ │ │ │ ├── char_dict.json │ │ │ │ ├── index_2_ord_map.json │ │ │ │ └── ord_2_index_map.json │ │ │ ├── inference │ │ │ │ └── ocr_inference.py │ │ │ ├── inference_multi │ │ │ │ ├── crnn_multi_infer.py │ │ │ │ └── crnn_multi_inference.py │ │ │ ├── ocr.Dockerfile │ │ │ ├── ocr.conf │ │ │ └── test_images │ │ │ │ └── test_02.jpg │ │ ├── east │ │ │ ├── README.md │ │ │ ├── code │ │ │ │ ├── data_util.py │ │ │ │ ├── east_inference.py │ │ │ │ ├── east_multi_infer.py │ │ │ │ ├── east_multi_inference.py │ │ │ │ ├── icdar.py │ │ │ │ ├── lanms │ │ │ │ │ ├── .gitignore │ │ │ │ │ ├── .ycm_extra_conf.py │ │ │ │ │ ├── Makefile │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── __main__.py │ │ │ │ │ 
├── adaptor.cpp │ │ │ │ │ ├── include │ │ │ │ │ │ ├── clipper │ │ │ │ │ │ │ ├── clipper.cpp │ │ │ │ │ │ │ └── clipper.hpp │ │ │ │ │ │ └── pybind11 │ │ │ │ │ │ │ ├── attr.h │ │ │ │ │ │ │ ├── buffer_info.h │ │ │ │ │ │ │ ├── cast.h │ │ │ │ │ │ │ ├── chrono.h │ │ │ │ │ │ │ ├── class_support.h │ │ │ │ │ │ │ ├── common.h │ │ │ │ │ │ │ ├── complex.h │ │ │ │ │ │ │ ├── descr.h │ │ │ │ │ │ │ ├── eigen.h │ │ │ │ │ │ │ ├── embed.h │ │ │ │ │ │ │ ├── eval.h │ │ │ │ │ │ │ ├── functional.h │ │ │ │ │ │ │ ├── numpy.h │ │ │ │ │ │ │ ├── operators.h │ │ │ │ │ │ │ ├── options.h │ │ │ │ │ │ │ ├── pybind11.h │ │ │ │ │ │ │ ├── pytypes.h │ │ │ │ │ │ │ ├── stl.h │ │ │ │ │ │ │ ├── stl_bind.h │ │ │ │ │ │ │ └── typeid.h │ │ │ │ │ └── lanms.h │ │ │ │ ├── model.py │ │ │ │ └── nets │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── resnet_utils.py │ │ │ │ │ └── resnet_v1.py │ │ │ ├── east-cpu.Dockerfile │ │ │ ├── east-dist-gpu.Dockerfile │ │ │ ├── east-dist.conf │ │ │ ├── east-gpu.Dockerfile │ │ │ └── east.conf │ │ ├── facenet │ │ │ ├── README.md │ │ │ ├── code │ │ │ │ ├── align │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── det1.npy │ │ │ │ │ ├── det2.npy │ │ │ │ │ ├── det3.npy │ │ │ │ │ └── detect_face.py │ │ │ │ ├── facenet.py │ │ │ │ ├── facenet_inference.py │ │ │ │ ├── gen_example_json.py │ │ │ │ └── gen_img_json.py │ │ │ ├── facenet-compare-cpu.Dockerfile │ │ │ ├── facenet-compare.conf │ │ │ ├── facenet-embed-cpu.Dockerfile │ │ │ ├── facenet-embed.conf │ │ │ ├── facenet-mtcnn-cpu.Dockerfile │ │ │ └── facenet-mtcnn.conf │ │ ├── im2txt │ │ │ ├── README.md │ │ │ ├── code │ │ │ │ ├── checkpoint_dir │ │ │ │ │ └── word_counts.txt │ │ │ │ ├── configuration.py │ │ │ │ ├── im2txt_conf.py │ │ │ │ ├── im2txt_inference.py │ │ │ │ ├── inference_utils │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── caption_generator.py │ │ │ │ │ ├── caption_generator_test.py │ │ │ │ │ ├── inference_wrapper_base.py │ │ │ │ │ └── vocabulary.py │ │ │ │ ├── inference_wrapper.py │ │ │ │ ├── ops │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── image_embedding.py │ │ │ │ │ ├── image_embedding_test.py │ │ │ │ │ ├── image_processing.py │ │ │ │ │ └── inputs.py │ │ │ │ └── show_and_tell_model.py │ │ │ ├── im2txt-infer-cpu.Dockerfile │ │ │ └── im2txt.conf │ │ ├── mnist_0.11 │ │ │ ├── 2.jpg │ │ │ ├── 2.json │ │ │ ├── checkpoint_dir │ │ │ │ ├── checkpoint │ │ │ │ ├── mnist.mod │ │ │ │ └── mnist.mod.meta │ │ │ ├── mnist_inference.py │ │ │ ├── mnist_inference_json.py │ │ │ ├── tf_mnist.conf │ │ │ └── tf_mnist_json.conf │ │ ├── mnist_1.1 │ │ │ ├── 2.jpg │ │ │ ├── README.md │ │ │ ├── checkpoint_dir │ │ │ │ ├── checkpoint │ │ │ │ ├── mnist.mod.data-00000-of-00001 │ │ │ │ ├── mnist.mod.index │ │ │ │ └── mnist.mod.meta │ │ │ ├── mnist-cpu.Dockerfile │ │ │ ├── mnist-gpu.Dockerfile │ │ │ ├── mnist_inference.py │ │ │ └── tf_mnist.conf │ │ ├── object-detect │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── code │ │ │ │ ├── box_list.py │ │ │ │ ├── box_list_ops.py │ │ │ │ ├── label_map_util.py │ │ │ │ ├── object_detect_inference.py │ │ │ │ ├── ops.py │ │ │ │ ├── shape_utils.py │ │ │ │ ├── standard_fields.py │ │ │ │ ├── static_shape.py │ │ │ │ └── string_int_label_map_pb2.py │ │ │ ├── object-detect-cpu.Dockerfile │ │ │ └── object-detect.conf │ │ ├── retrain │ │ │ ├── README.md │ │ │ ├── code │ │ │ │ ├── retrain_conf.py │ │ │ │ └── retrain_inference.py │ │ │ ├── retrained-classification-cpu.Dockerfile │ │ │ └── retrained-classification.conf │ │ ├── text-classification-ch │ │ │ ├── README.md │ │ │ ├── code │ │ │ │ ├── cnews_loader.py │ │ │ │ ├── cnn_model.py │ │ │ │ ├── rnn_model.py │ │ │ │ └── 
txt_cnn_rnn_inference.py │ │ │ ├── test.txt │ │ │ ├── test2.txt │ │ │ ├── text-cnn-cpu.Dockerfile │ │ │ ├── text-rnn-cpu.Dockerfile │ │ │ ├── txt_class_cnn.conf │ │ │ └── txt_class_rnn.conf │ │ └── tf-serving │ │ │ ├── inception │ │ │ ├── README.md │ │ │ ├── conf.json │ │ │ ├── inception_saved_model.py │ │ │ ├── inference.py │ │ │ └── uaiservice.Dockerfile │ │ │ ├── mnist │ │ │ ├── README.md │ │ │ ├── checkpoint_dir │ │ │ │ ├── saved_model.pb │ │ │ │ └── variables │ │ │ │ │ ├── variables.data-00000-of-00001 │ │ │ │ │ └── variables.index │ │ │ ├── conf.json │ │ │ ├── inference.py │ │ │ ├── mnist_saved_model.py │ │ │ └── uaiservice.Dockerfile │ │ │ └── wide_deep │ │ │ ├── README.md │ │ │ ├── conf.json │ │ │ ├── inference.py │ │ │ ├── uaiservice.Dockerfile │ │ │ └── wide_deep.py │ └── train │ │ ├── __init__.py │ │ ├── bert │ │ ├── README.md │ │ ├── bert │ │ │ ├── __init__.py │ │ │ ├── create_pretraining_data.py │ │ │ ├── extract_features.py │ │ │ ├── modeling.py │ │ │ ├── modeling_test.py │ │ │ ├── optimization.py │ │ │ ├── optimization_test.py │ │ │ ├── run_classifier.py │ │ │ ├── run_classifier_with_tfhub.py │ │ │ ├── run_pretraining.py │ │ │ ├── run_squad.py │ │ │ ├── tokenization.py │ │ │ └── tokenization_test.py │ │ ├── bert_train.Dockerfile │ │ └── requirements.txt │ │ ├── cifar │ │ ├── README.md │ │ └── code │ │ │ ├── __init__.py │ │ │ ├── cifar10.py │ │ │ ├── cifar10_main.py │ │ │ ├── cifar10_model.py │ │ │ ├── cifar10_utils.py │ │ │ ├── model_base.py │ │ │ └── resnet_model.py │ │ ├── cifar_simple │ │ ├── README.md │ │ ├── cifar-cpu.Dockerfile │ │ ├── cifar-gpu.Dockerfile │ │ └── code │ │ │ ├── __init__.py │ │ │ ├── cifar10.py │ │ │ ├── cifar10_input.py │ │ │ └── cifar10_train.py │ │ ├── crnn │ │ ├── .gitignore │ │ ├── README.md │ │ ├── crnn-generate-tfrecords.Dockerfile │ │ ├── crnnmodel │ │ │ ├── __init__.py │ │ │ ├── cnn_basenet.py │ │ │ └── crnn_model.py │ │ ├── data │ │ │ ├── char_dict.json │ │ │ └── ord_map.json │ │ ├── data_provider │ │ │ ├── __init__.py │ │ │ ├── base_data_provider.py │ │ │ └── data_provider.py │ │ ├── global_configuration │ │ │ ├── __init__.py │ │ │ └── config.py │ │ ├── local_utils │ │ │ ├── __init__.py │ │ │ ├── data_utils.py │ │ │ ├── establish_char_dict.py │ │ │ └── log_utils.py │ │ ├── ocr-cpu.Dockerfile │ │ ├── ocr-gpu.Dockerfile │ │ ├── requirements.txt │ │ └── tools │ │ │ ├── train_shadownet.py │ │ │ └── write_text_features.py │ │ ├── crnn_chinese │ │ ├── README.md │ │ ├── code │ │ │ ├── crnn_model │ │ │ │ ├── __init__.py │ │ │ │ ├── cnn_basenet.py │ │ │ │ └── crnn_model.py │ │ │ ├── data_provider │ │ │ │ ├── __init__.py │ │ │ │ ├── base_data_provider.py │ │ │ │ └── data_provider.py │ │ │ ├── gen_data │ │ │ │ ├── MSYHL.TTC │ │ │ │ ├── THUOCL_poem.txt │ │ │ │ ├── gen_chinesetxt.py │ │ │ │ ├── gen_pic.py │ │ │ │ └── gen_sample.py │ │ │ ├── global_configuration │ │ │ │ ├── __init__.py │ │ │ │ └── config.py │ │ │ ├── local_utils │ │ │ │ ├── __init__.py │ │ │ │ ├── data_utils.py │ │ │ │ ├── establish_char_dict.py │ │ │ │ ├── log_utils.py │ │ │ │ └── tensorboard_vis_summary.py │ │ │ └── tools │ │ │ │ ├── __init__.py │ │ │ │ ├── establish_char_dict.py │ │ │ │ ├── train_shadownet.py │ │ │ │ └── write_text_tfrecords.py │ │ ├── code_multi │ │ │ └── tools │ │ │ │ ├── shadownet.py │ │ │ │ └── train_shadownet_multi.py │ │ ├── crnn-cpu.Dockerfile │ │ ├── crnn-gpu.Dockerfile │ │ └── crnn-multi-gpu.Dockerfile │ │ ├── deep-speech │ │ ├── README.md │ │ ├── code │ │ │ └── DeepSpeech.py │ │ └── deepspeech.Dockerfile │ │ ├── east │ │ ├── README.md │ │ ├── code │ │ │ ├── 
distgpu_train.py │ │ │ ├── icdar.py │ │ │ ├── icdar_dataset.py │ │ │ ├── icdar_tfrecord.py │ │ │ ├── icdar_tfrecord_dataset.py │ │ │ └── multigpu_train.py │ │ ├── east-dist.Dockerfile │ │ └── east.Dockerfile │ │ ├── facenet │ │ ├── README.md │ │ ├── code │ │ │ ├── __init__.py │ │ │ ├── align │ │ │ │ ├── __init__.py │ │ │ │ ├── align_dataset_mtcnn.py │ │ │ │ ├── det1.npy │ │ │ │ ├── det2.npy │ │ │ │ ├── det3.npy │ │ │ │ └── detect_face.py │ │ │ ├── calculate_filtering_metrics.py │ │ │ ├── classifier.py │ │ │ ├── compare.py │ │ │ ├── decode_msceleb_dataset.py │ │ │ ├── download_and_extract.py │ │ │ ├── facenet.py │ │ │ ├── freeze_graph.py │ │ │ ├── lfw.py │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ ├── dummy.py │ │ │ │ ├── inception_resnet_v1.py │ │ │ │ ├── inception_resnet_v2.py │ │ │ │ └── squeezenet.py │ │ │ ├── train_softmax.py │ │ │ ├── train_tripletloss.py │ │ │ └── validate_on_lfw.py │ │ ├── facenet.Dockerfile │ │ └── lr-data │ │ │ ├── learning_rate_retrain_tripletloss.txt │ │ │ ├── learning_rate_schedule_classifier_casia.txt │ │ │ ├── learning_rate_schedule_classifier_msceleb.txt │ │ │ ├── learning_rate_schedule_classifier_vggface2.txt │ │ │ └── pairs.txt │ │ ├── im2txt │ │ ├── README.md │ │ ├── build_mscoco_data.py │ │ ├── im2txt │ │ │ ├── __init__.py │ │ │ ├── configuration.py │ │ │ ├── ops │ │ │ │ ├── __init__.py │ │ │ │ ├── image_embedding.py │ │ │ │ ├── image_embedding_test.py │ │ │ │ ├── image_processing.py │ │ │ │ └── inputs.py │ │ │ └── show_and_tell_model.py │ │ ├── train.py │ │ ├── uaitrain.Dockerfile │ │ └── word_counts.txt │ │ ├── imagenet │ │ ├── README.md │ │ └── code │ │ │ ├── __init__.py │ │ │ ├── imagenet.py │ │ │ ├── imagenet_main.py │ │ │ ├── imagenet_utils.py │ │ │ ├── resnet_model.py │ │ │ └── vgg_preprocessing.py │ │ ├── mnist_summary_1.1 │ │ ├── code │ │ │ └── mnist_summary.py │ │ └── data │ │ │ ├── t10k-images-idx3-ubyte.gz │ │ │ ├── t10k-labels-idx1-ubyte.gz │ │ │ ├── train-images-idx3-ubyte.gz │ │ │ └── train-labels-idx1-ubyte.gz │ │ ├── object-detection │ │ ├── README.md │ │ ├── faster_rcnn_resnet101.config │ │ └── uaitrain.Dockerfile │ │ ├── retrain │ │ ├── README.md │ │ ├── code │ │ │ ├── BUILD │ │ │ ├── __init__.py │ │ │ └── retrain_v2.py │ │ └── uaitrain_v2.Dockerfile │ │ ├── slim │ │ ├── inference │ │ │ ├── README.md │ │ │ ├── slim.conf │ │ │ ├── slim_infer.Dockerfile │ │ │ └── sliminfer.py │ │ └── train │ │ │ ├── README.md │ │ │ ├── code │ │ │ ├── datasets │ │ │ │ ├── download_and_convert_fer.py │ │ │ │ └── fer.py │ │ │ ├── deployment │ │ │ │ └── model_deploy.py │ │ │ ├── download_and_convert_data.py │ │ │ └── train_image_classifier.py │ │ │ └── slim.Dockerfile │ │ ├── text-classification-ch │ │ ├── README.md │ │ ├── code │ │ │ ├── cnews_loader.py │ │ │ ├── cnn_model.py │ │ │ ├── helper │ │ │ │ ├── __init__.py │ │ │ │ ├── cnews_group.py │ │ │ │ └── copy_data.sh │ │ │ ├── predict.py │ │ │ ├── rnn_model.py │ │ │ ├── run_cnn.py │ │ │ └── run_rnn.py │ │ └── text-cnn-rnn.Dockerfile │ │ └── wide-deep │ │ ├── README.md │ │ └── code │ │ ├── wide_deep.py │ │ └── wide_deep_dist.py └── xgboost │ └── inference │ └── binary_classification │ ├── 0002.model │ ├── README.md │ ├── binary.Dockerfile │ ├── binary.py │ └── xgboost_binary.conf ├── setup.py ├── uai ├── __init__.py ├── api │ ├── __init__.py │ ├── auth_uai_service_client.py │ ├── base_api.py │ ├── check_uai_base_img_exist.py │ ├── check_uai_deploy_progress.py │ ├── create_uai_exclusive_service.py │ ├── create_uai_service.py │ ├── delete_uai_service.py │ ├── delete_uai_srv_client.py │ ├── 
deploy_uai_service.py │ ├── deploy_uai_service_by_docker.py │ ├── describe_uai_metric.py │ ├── get_uai_available_base_img.py │ ├── get_uai_available_env_pkg.py │ ├── get_uai_available_srv_type.py │ ├── get_uai_metric.py │ ├── get_uai_service_list.py │ ├── get_uai_srv_available_resource.py │ ├── get_uai_srv_bill_info.py │ ├── get_uai_srv_bill_unitprice.py │ ├── get_uai_srv_client_list.py │ ├── get_uai_srv_real_time_metric.py │ ├── get_uai_srv_scale_rule.py │ ├── get_uai_srv_version_list.py │ ├── modify_uai_srv_name.py │ ├── modify_uai_srv_version_memo.py │ ├── modify_uai_srv_version_node_range.py │ ├── modify_uai_srv_version_weight.py │ ├── start_uai_service.py │ ├── stop_uai_service.py │ └── update_uai_srv_scale_rule.py ├── arch │ ├── __init__.py │ ├── base_model.py │ ├── caffe_model.py │ ├── keras_model.py │ ├── mxnet_model.py │ ├── tf_model.py │ ├── tf_serving.py │ └── xgboost_model.py ├── arch_conf │ ├── __init__.py │ ├── base_conf.py │ ├── caffe_conf.py │ ├── keras_conf.py │ ├── mxnet_conf.py │ ├── tf_conf.py │ └── xgboost_conf.py ├── contrib │ ├── README.md │ ├── __init__.py │ ├── image │ │ └── img_utils.py │ └── media │ │ ├── README.md │ │ ├── __init__.py │ │ ├── ffmpeg.py │ │ └── opencv_rtsp.py ├── operation │ ├── __init__.py │ ├── base_operation.py │ ├── create_uaiservice │ │ ├── __init__.py │ │ └── create_uaiservice.py │ ├── delete_uaiservice │ │ ├── __init__.py │ │ └── delete_uaiservice.py │ ├── deploy_uaiservice │ │ ├── __init__.py │ │ └── deploy_uaiservice.py │ ├── deploy_uaiservice_docker │ │ ├── __init__.py │ │ └── deploy_uaiservice_docker.py │ ├── get_real_time_metric │ │ ├── __init__.py │ │ └── get_real_time_metric.py │ ├── list_uaiservice │ │ ├── __init__.py │ │ └── list_uaiservice.py │ ├── list_uaiversion │ │ ├── __init__.py │ │ └── list_uaiversion.py │ ├── modify_node_count │ │ ├── __init__.py │ │ └── modify_node_count.py │ ├── modify_node_range │ │ ├── __init__.py │ │ └── modify_node_range.py │ ├── modify_service_name │ │ ├── __init__.py │ │ └── modify_service_name.py │ ├── modify_version_memo │ │ ├── __init__.py │ │ └── modify_version_memo.py │ ├── modify_version_weight │ │ ├── __init__.py │ │ └── modify_version_weight.py │ ├── pack │ │ ├── __init__.py │ │ ├── base_pack_op.py │ │ ├── caffe_pack_op.py │ │ ├── keras_pack_op.py │ │ ├── mxnet_pack_op.py │ │ └── tf_pack_op.py │ ├── packdocker │ │ ├── __init__.py │ │ ├── base_packdocker_op.py │ │ ├── caffe_packdocker_op.py │ │ ├── keras_packdocker_op.py │ │ ├── mxnet_packdocker_op.py │ │ ├── self_define_packdokcer_op.py │ │ └── tf_packdocker_op.py │ ├── start_uaiservice │ │ ├── __init__.py │ │ └── start_uaiservice.py │ ├── stop_uaiservice │ │ ├── __init__.py │ │ └── stop_uaiservice.py │ └── tar │ │ ├── __init__.py │ │ ├── base_tar_op.py │ │ ├── caffe_tar_op.py │ │ ├── keras_tar_op.py │ │ ├── mxnet_tar_op.py │ │ └── tf_tar_op.py └── utils │ ├── __init__.py │ ├── common_handler.py │ ├── databackend_utils.py │ ├── logger.py │ ├── retcode_checker.py │ ├── super_large_file.py │ ├── tar_util.py │ ├── utils.py │ └── utils_ufs.py ├── uai_tools ├── __init__.py └── uai_tool.py ├── uaitrain ├── __init__.py ├── api │ ├── __init__.py │ ├── base_op.py │ ├── check_and_get_base_image_op.py │ ├── create_train_job.py │ ├── get_env_pkg.py │ ├── get_train_available_backend.py │ ├── get_train_available_dist_aiframe.py │ ├── get_train_available_resource.py │ ├── get_train_available_train_mode.py │ ├── get_train_dist_work_amount_range.py │ ├── get_train_job_bill_info.py │ ├── get_train_job_list.py │ ├── get_train_job_predict_start_time.py │ ├── 
get_train_job_running_info.py │ ├── get_train_job_running_log.py │ ├── get_train_log_topic_list.py │ ├── get_train_max_exec_time_range.py │ ├── get_train_tensorboard_url.py │ ├── modify_train_job_memo.py │ ├── modify_train_job_name.py │ ├── remove_train_job.py │ └── stop_train_job.py ├── arch │ ├── __init__.py │ ├── caffe │ │ ├── __init__.py │ │ ├── train.py │ │ └── train_large_file.py │ ├── keras │ │ └── __init__.py │ ├── mxnet │ │ ├── README.md │ │ ├── __init__.py │ │ └── uargs.py │ ├── pytorch │ │ ├── __init__.py │ │ └── uargs.py │ └── tensorflow │ │ ├── README.md │ │ ├── __init__.py │ │ ├── uai_dist.py │ │ └── uflag.py ├── arch_conf │ ├── __init__.py │ ├── base_conf.py │ └── tf_conf.py ├── cmd │ ├── __init__.py │ └── base_cmd.py └── operation │ ├── __init__.py │ ├── base_op.py │ ├── create_train_job │ ├── __init__.py │ └── base_create_op.py │ ├── delete_train_job │ ├── __init__.py │ └── base_delete_op.py │ ├── get_log_topic │ ├── __init__.py │ └── get_log_topic.py │ ├── get_realtime_log │ ├── __init__.py │ └── base_log_op.py │ ├── get_tensorboard_url │ ├── __init__.py │ └── get_tensorboard_url.py │ ├── get_train_job_conf │ ├── __init__.py │ └── base_conf_op.py │ ├── info_train_job │ ├── __init__.py │ └── info_train_op.py │ ├── list_bill_info │ ├── __init__.py │ └── base_bill_op.py │ ├── list_train_job │ ├── __init__.py │ └── base_list_job_op.py │ ├── pack_docker_image │ ├── __init__.py │ ├── base_pack_op.py │ ├── caffe_pack_op.py │ ├── keras_pack_op.py │ ├── mxnet_pack_op.py │ ├── pytorch_pack_op.py │ ├── self_def_pack_op.py │ └── tf_pack_op.py │ ├── predict_train_job │ ├── __init__.py │ └── base_predict_op.py │ ├── rename_train_job │ ├── __init__.py │ └── base_rename_op.py │ └── stop_train_job │ ├── __init__.py │ └── base_stop_op.py ├── uaitrain_tool ├── README.md ├── __init__.py ├── base_tool.py ├── caffe │ └── caffe_tool.py ├── keras │ └── keras_tool.py ├── mxnet │ ├── mpi_tool │ │ ├── README.md │ │ └── mpi_wrapper.py │ └── mxnet_tool.py ├── pytorch │ └── pytorch_tool.py ├── split_tool.py └── tf │ └── tf_tool.py └── ufile ├── __init__.py ├── api ├── __init__.py └── ufile_api.py ├── operation ├── __init__.py ├── download_batch_op.py ├── download_single_op.py ├── operation.py ├── upload_batch_op.py └── upload_single_op.py └── ufile_tool.py /.gitattributes: -------------------------------------------------------------------------------- 1 | *mdb filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /examples/caffe/inference/intel-caffe/ctpn/README.md: -------------------------------------------------------------------------------- 1 | For how to use these example code with UCloud AI Service Images, Please see: 2 | 3 | https://docs.ucloud.cn/ai/uai-service/solution/intel-caffe 4 | -------------------------------------------------------------------------------- /examples/caffe/inference/intel-caffe/ctpn/ctpn.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mkl_uaiservice_ubuntu-14.04_python-2.7.6_intel_caffe-ctpn:v1.0 2 | 3 | COPY ctpn_trained_model.caffemodel /ai-ucloud-client-django/models/ctpn_trained_model.caffemodel 4 | COPY ./demo_service.py /ai-ucloud-client-django/tools/demo_service.py 5 | COPY ./ctpn.conf /ai-ucloud-client-django/ufile.json 6 | 7 | EXPOSE 8080 8 | 9 | ENV UAI_SERVICE_CONFIG "ufile.json" 10 | 11 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 12 | 
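The ctpn.Dockerfile above packages the trained CTPN model, the `demo_service.py` handler and `ctpn.conf` into a gunicorn-served image that listens on port 8080. A minimal client sketch for exercising a service built from it is shown below; the host and the `/service` route are assumptions for illustration (they are not defined in this repository), so adjust them to whatever the UAI Service documentation specifies for your deployment.

```python
# Hypothetical client sketch -- not part of the repository.
# Assumes the container built from ctpn.Dockerfile is running locally and that
# the Django HTTP server accepts a raw image body via POST on port 8080.
# The "/service" path is an assumed placeholder route.
import requests

def detect_text(image_path, url="http://127.0.0.1:8080/service"):
    with open(image_path, "rb") as f:
        # demo_service.py reads the upload with data[i].read(), i.e. the image
        # is sent as the raw request body rather than as multipart form data.
        resp = requests.post(url, data=f.read())
    resp.raise_for_status()
    # CTPNModel.execute() returns str(text_lines) for each request.
    return resp.text

if __name__ == "__main__":
    print(detect_text("example.jpg"))
```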
-------------------------------------------------------------------------------- /examples/caffe/inference/intel-caffe/ctpn/ctpn.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "CTPNModel", 5 | "main_file": "tools.demo_service" 6 | }, 7 | "caffe" : { 8 | "model_dir" : "", 9 | "model_name" : "" 10 | } 11 | } 12 | } 13 | 14 | -------------------------------------------------------------------------------- /examples/caffe/inference/intel-caffe/ctpn/demo_service.py: -------------------------------------------------------------------------------- 1 | from cfg import Config as cfg 2 | from other import draw_boxes, resize_im, CaffeModel 3 | import cv2, os, caffe, sys 4 | from detectors import TextProposalDetector, TextDetector 5 | import numpy as np 6 | 7 | from uai.arch.caffe_model import CaffeAiUcloudModel 8 | 9 | NET_DEF_FILE="models/deploy.prototxt" 10 | MODEL_FILE="models/ctpn_trained_model.caffemodel" 11 | 12 | class CTPNModel(CaffeAiUcloudModel): 13 | """ CTPN text detection example model 14 | """ 15 | def __init__(self, conf): 16 | super(CTPNModel, self).__init__(conf) 17 | 18 | def load_model(self): 19 | caffe.set_mode_cpu() 20 | text_proposals_detector=TextProposalDetector(CaffeModel(NET_DEF_FILE, MODEL_FILE)) 21 | self.text_detector=TextDetector(text_proposals_detector) 22 | 23 | def execute(self, data, batch_size): 24 | ret = [] 25 | for i in range(batch_size): 26 | img_array = np.asarray(bytearray(data[i].read()), dtype=np.uint8) 27 | im = cv2.imdecode(img_array, -1) 28 | 29 | im, f=resize_im(im, cfg.SCALE, cfg.MAX_SCALE) 30 | text_lines=self.text_detector.detect(im) 31 | 32 | ret_val=str(text_lines) + '\n' 33 | ret.append(ret_val) 34 | return ret 35 | -------------------------------------------------------------------------------- /examples/caffe/inference/intel-caffe/rfcn/README.md: -------------------------------------------------------------------------------- 1 | For how to use these example code with UCloud AI Service Images, Please see: 2 | 3 | https://docs.ucloud.cn/ai/uai-service/solution/intel-caffe 4 | -------------------------------------------------------------------------------- /examples/caffe/inference/intel-caffe/rfcn/demo_service.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # -------------------------------------------------------- 4 | # R-FCN 5 | # Copyright (c) 2016 Yuwen Xiong 6 | # Licensed under The MIT License [see LICENSE for details] 7 | # Written by Yuwen Xiong 8 | # -------------------------------------------------------- 9 | 10 | """ 11 | Demo script showing detections in sample images. 12 | 13 | See README.md for installation instructions before running.
14 | """ 15 | 16 | import _init_paths 17 | from fast_rcnn.config import cfg 18 | from fast_rcnn.test import im_detect 19 | from fast_rcnn.nms_wrapper import nms 20 | import numpy as np 21 | import caffe, os, sys, cv2 22 | 23 | from uai.arch.caffe_model import CaffeAiUcloudModel 24 | 25 | class RFCNModel(CaffeAiUcloudModel): 26 | """ Mnist example model 27 | """ 28 | def __init__(self, conf): 29 | super(RFCNModel, self).__init__(conf) 30 | 31 | def load_model(self): 32 | caffe.set_mode_cpu() 33 | prototxt = 'models/test_agnostic.prototxt' 34 | caffemodel = "models/resnet101_rfcn_final.caffemodel" 35 | cfg.TEST.HAS_RPN = True 36 | self.net = caffe.Net(prototxt, caffemodel, caffe.TEST) 37 | 38 | def execute(self, data, batch_size): 39 | ret = [] 40 | for i in range(batch_size): 41 | img_array = np.asarray(bytearray(data[i].read()), dtype=np.uint8) 42 | im = cv2.imdecode(img_array, -1) 43 | 44 | scores, boxes = im_detect(self.net, im) 45 | 46 | ret_val=str(scores) + '\n' + str(boxes) + "\n" 47 | ret.append(ret_val) 48 | return ret 49 | -------------------------------------------------------------------------------- /examples/caffe/inference/intel-caffe/rfcn/rfcn.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mkl_uaiservice_ubuntu-14.04_python-2.7.6_intel_caffe-rfcn:v1.0 2 | 3 | ADD ./demo_service.py /ai-ucloud-client-django/tools/demo_service.py 4 | ADD ./docker_opt/py-R-FCN/models/pascal_voc/ResNet-101/rfcn_end2end/test_agnostic.prototxt /ai-ucloud-client-django/models/test_agnostic.prototxt 5 | ADD ./rfcn_models/ /ai-ucloud-client-django/models/ 6 | 7 | COPY ./rfcn.conf /ai-ucloud-client-django/ufile.json 8 | 9 | EXPOSE 8080 10 | 11 | ENV UAI_SERVICE_CONFIG "ufile.json" 12 | 13 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 14 | 15 | 16 | -------------------------------------------------------------------------------- /examples/caffe/inference/intel-caffe/rfcn/rfcn.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "RFCNModel", 5 | "main_file": "tools.demo_service" 6 | }, 7 | "caffe" : { 8 | "model_dir" : "", 9 | "model_name" : "" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /examples/caffe/inference/mnist/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/caffe/inference/mnist/2.jpg -------------------------------------------------------------------------------- /examples/caffe/inference/mnist/caffe_mnist.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "MnistModel", 5 | "main_file": "mnist_inference" 6 | }, 7 | "caffe" : { 8 | "model_dir" : "./checkpoint_dir", 9 | "model_name" : "mnist_model" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /examples/caffe/inference/mnist/checkpoint_dir/mnist_model.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/caffe/inference/mnist/checkpoint_dir/mnist_model.caffemodel -------------------------------------------------------------------------------- 
/examples/caffe/inference/mnist/mnist-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_caffe-1.0.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./caffe_mnist.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG "conf.json" 7 | 8 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/caffe/inference/mnist/mnist-gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaiservice_ubuntu-14.04_python-2.7.6_caffe-1.0.0:v1.0 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./caffe_mnist.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG "conf.json" 7 | 8 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/caffe/inference/mtcnn/mtcnn-caffe-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_caffe-1.0.0:v1.2 2 | 3 | COPY code/ /ai-ucloud-client-django/ 4 | COPY ./mtcnn.conf /ai-ucloud-client-django/ufile.json 5 | 6 | EXPOSE 8080 7 | 8 | ENV UAI_SERVICE_CONFIG "ufile.json" 9 | 10 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/caffe/inference/mtcnn/mtcnn-intel-caffe-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/mkl_uaiservice_ubuntu-14.04_python-2.7.6_intel_caffe-1.0.0:v1.1 2 | 3 | COPY code/ /ai-ucloud-client-django/ 4 | COPY ./mtcnn.conf /ai-ucloud-client-django/ufile.json 5 | 6 | EXPOSE 8080 7 | 8 | ENV UAI_SERVICE_CONFIG "ufile.json" 9 | 10 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/caffe/inference/mtcnn/mtcnn.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "MTCNNCpuModel", 5 | "main_file": "mtcnn_inference" 6 | }, 7 | "caffe" : { 8 | "model_dir" : "", 9 | "model_name" : "" 10 | } 11 | } 12 | } -------------------------------------------------------------------------------- /examples/caffe/inference/nsfw/caffe_nsfw.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "NsfwModel", 5 | "main_file": "nsfw_inference" 6 | }, 7 | "caffe" : { 8 | "model_dir" : "./checkpoint_dir", 9 | "model_name" : "resnet" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /examples/caffe/inference/nsfw/nsfw-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_caffe-1.0.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./caffe_nsfw.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG 
"/ai-ucloud-client-django/conf.json" 7 | 8 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 9 | -------------------------------------------------------------------------------- /examples/caffe/inference/rfcn/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/caffe/inference/rfcn/__init__.py -------------------------------------------------------------------------------- /examples/caffe/inference/rfcn/rfcn-gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaiservice_ubuntu-14.04_python-2.7.6_caffe-py-rfcn:v1.0 2 | 3 | EXPOSE 8080 4 | 5 | ADD ./rfcn_inference.py /ai-ucloud-client-django/tools/rfcn_inference.py 6 | ADD ./__init__.py /ai-ucloud-client-django/tools/__init__.py 7 | ADD ./rfcn_models/ /ai-ucloud-client-django/models/ 8 | COPY ./rfcn.conf /ai-ucloud-client-django/conf.json 9 | 10 | RUN cp -fr /root/caffe-py-rfcn/lib /ai-ucloud-client-django/lib/ 11 | RUN cp /root/caffe-py-rfcn/tools/_init_paths.py /ai-ucloud-client-django/tools/ 12 | 13 | ENV UAI_SERVICE_CONFIG "conf.json" 14 | 15 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 16 | 17 | 18 | -------------------------------------------------------------------------------- /examples/caffe/inference/rfcn/rfcn-nms-gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaiservice_ubuntu-14.04_python-2.7.6_caffe-py-rfcn:v1.0 2 | 3 | EXPOSE 8080 4 | 5 | ADD ./rfcn_inference.py /ai-ucloud-client-django/tools/rfcn_inference.py 6 | ADD ./__init__.py /ai-ucloud-client-django/tools/__init__.py 7 | ADD ./rfcn_models/ /ai-ucloud-client-django/models/ 8 | COPY ./rfcn-nms.conf /ai-ucloud-client-django/conf.json 9 | 10 | RUN cp -fr /root/caffe-py-rfcn/lib /ai-ucloud-client-django/lib/ 11 | RUN cp /root/caffe-py-rfcn/tools/_init_paths.py /ai-ucloud-client-django/tools/ 12 | 13 | ENV UAI_SERVICE_CONFIG "conf.json" 14 | 15 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 16 | 17 | 18 | -------------------------------------------------------------------------------- /examples/caffe/inference/rfcn/rfcn-nms.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "RFCNNmsModel", 5 | "main_file": "tools.rfcn_inference" 6 | }, 7 | "caffe" : { 8 | "model_dir" : "", 9 | "model_name" : "" 10 | } 11 | } 12 | } -------------------------------------------------------------------------------- /examples/caffe/inference/rfcn/rfcn.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "RFCNModel", 5 | "main_file": "tools.rfcn_inference" 6 | }, 7 | "caffe" : { 8 | "model_dir" : "", 9 | "model_name" : "" 10 | } 11 | } 12 | } -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | python setup.py build_ext --inplace 3 | rm -rf build 4 | -------------------------------------------------------------------------------- 
/examples/caffe/train/faster-rcnn/code/lib/datasets/VOCdevkit-matlab-wrapper/get_voc_opts.m: -------------------------------------------------------------------------------- 1 | function VOCopts = get_voc_opts(path) 2 | 3 | tmp = pwd; 4 | cd(path); 5 | try 6 | addpath('VOCcode'); 7 | VOCinit; 8 | catch 9 | rmpath('VOCcode'); 10 | cd(tmp); 11 | error(sprintf('VOCcode directory not found under %s', path)); 12 | end 13 | rmpath('VOCcode'); 14 | cd(tmp); 15 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/datasets/VOCdevkit-matlab-wrapper/voc_eval.m: -------------------------------------------------------------------------------- 1 | function res = voc_eval(path, comp_id, test_set, output_dir) 2 | 3 | VOCopts = get_voc_opts(path); 4 | VOCopts.testset = test_set; 5 | 6 | for i = 1:length(VOCopts.classes) 7 | cls = VOCopts.classes{i}; 8 | res(i) = voc_eval_cls(cls, VOCopts, comp_id, output_dir); 9 | end 10 | 11 | fprintf('\n~~~~~~~~~~~~~~~~~~~~\n'); 12 | fprintf('Results:\n'); 13 | aps = [res(:).ap]'; 14 | fprintf('%.1f\n', aps * 100); 15 | fprintf('%.1f\n', mean(aps) * 100); 16 | fprintf('~~~~~~~~~~~~~~~~~~~~\n'); 17 | 18 | function res = voc_eval_cls(cls, VOCopts, comp_id, output_dir) 19 | 20 | test_set = VOCopts.testset; 21 | year = VOCopts.dataset(4:end); 22 | 23 | addpath(fullfile(VOCopts.datadir, 'VOCcode')); 24 | 25 | res_fn = sprintf(VOCopts.detrespath, comp_id, cls); 26 | 27 | recall = []; 28 | prec = []; 29 | ap = 0; 30 | ap_auc = 0; 31 | 32 | do_eval = (str2num(year) <= 2007) | ~strcmp(test_set, 'test'); 33 | if do_eval 34 | % Bug in VOCevaldet requires that tic has been called first 35 | tic; 36 | [recall, prec, ap] = VOCevaldet(VOCopts, comp_id, cls, true); 37 | ap_auc = xVOCap(recall, prec); 38 | 39 | % force plot limits 40 | ylim([0 1]); 41 | xlim([0 1]); 42 | 43 | print(gcf, '-djpeg', '-r0', ... 44 | [output_dir '/' cls '_pr.jpg']); 45 | end 46 | fprintf('!!! %s : %.4f %.4f\n', cls, ap, ap_auc); 47 | 48 | res.recall = recall; 49 | res.prec = prec; 50 | res.ap = ap; 51 | res.ap_auc = ap_auc; 52 | 53 | save([output_dir '/' cls '_pr.mat'], ... 
54 | 'res', 'recall', 'prec', 'ap', 'ap_auc'); 55 | 56 | rmpath(fullfile(VOCopts.datadir, 'VOCcode')); 57 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/datasets/VOCdevkit-matlab-wrapper/xVOCap.m: -------------------------------------------------------------------------------- 1 | function ap = xVOCap(rec,prec) 2 | % From the PASCAL VOC 2011 devkit 3 | 4 | mrec=[0 ; rec ; 1]; 5 | mpre=[0 ; prec ; 0]; 6 | for i=numel(mpre)-1:-1:1 7 | mpre(i)=max(mpre(i),mpre(i+1)); 8 | end 9 | i=find(mrec(2:end)~=mrec(1:end-1))+1; 10 | ap=sum((mrec(i)-mrec(i-1)).*mpre(i)); 11 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/datasets/ds_utils.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast/er R-CNN 3 | # Licensed under The MIT License [see LICENSE for details] 4 | # Written by Ross Girshick 5 | # -------------------------------------------------------- 6 | 7 | import numpy as np 8 | 9 | def unique_boxes(boxes, scale=1.0): 10 | """Return indices of unique boxes.""" 11 | v = np.array([1, 1e3, 1e6, 1e9]) 12 | hashes = np.round(boxes * scale).dot(v) 13 | _, index = np.unique(hashes, return_index=True) 14 | return np.sort(index) 15 | 16 | def xywh_to_xyxy(boxes): 17 | """Convert [x y w h] box format to [x1 y1 x2 y2] format.""" 18 | return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1)) 19 | 20 | def xyxy_to_xywh(boxes): 21 | """Convert [x1 y1 x2 y2] box format to [x y w h] format.""" 22 | return np.hstack((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1)) 23 | 24 | def validate_boxes(boxes, width=0, height=0): 25 | """Check that a set of boxes are valid.""" 26 | x1 = boxes[:, 0] 27 | y1 = boxes[:, 1] 28 | x2 = boxes[:, 2] 29 | y2 = boxes[:, 3] 30 | assert (x1 >= 0).all() 31 | assert (y1 >= 0).all() 32 | assert (x2 >= x1).all() 33 | assert (y2 >= y1).all() 34 | assert (x2 < width).all() 35 | assert (y2 < height).all() 36 | 37 | def filter_small_boxes(boxes, min_size): 38 | w = boxes[:, 2] - boxes[:, 0] 39 | h = boxes[:, 3] - boxes[:, 1] 40 | keep = np.where((w >= min_size) & (h > min_size))[0] 41 | return keep 42 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/datasets/factory.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | """Factory method for easily getting imdbs by name.""" 9 | 10 | __sets = {} 11 | 12 | from datasets.pascal_voc import pascal_voc 13 | from datasets.coco import coco 14 | import numpy as np 15 | 16 | # Set up voc__ using selective search 
"fast" mode 17 | for year in ['2007', '2012']: 18 | for split in ['train', 'val', 'trainval', 'test']: 19 | name = 'voc_{}_{}'.format(year, split) 20 | __sets[name] = (lambda split=split, year=year: pascal_voc(split, year)) 21 | 22 | # Set up coco_2014_ 23 | for year in ['2014']: 24 | for split in ['train', 'val', 'minival', 'valminusminival']: 25 | name = 'coco_{}_{}'.format(year, split) 26 | __sets[name] = (lambda split=split, year=year: coco(split, year)) 27 | 28 | # Set up coco_2015_ 29 | for year in ['2015']: 30 | for split in ['test', 'test-dev']: 31 | name = 'coco_{}_{}'.format(year, split) 32 | __sets[name] = (lambda split=split, year=year: coco(split, year)) 33 | 34 | def get_imdb(name): 35 | """Get an imdb (image database) by name.""" 36 | if not __sets.has_key(name): 37 | raise KeyError('Unknown dataset: {}'.format(name)) 38 | return __sets[name]() 39 | 40 | def list_imdbs(): 41 | """List all registered imdbs.""" 42 | return __sets.keys() 43 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/datasets/tools/mcg_munge.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | """Hacky tool to convert file system layout of MCG boxes downloaded from 5 | http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/mcg/ 6 | so that it's consistent with those computed by Jan Hosang (see: 7 | http://www.mpi-inf.mpg.de/departments/computer-vision-and-multimodal- 8 | computing/research/object-recognition-and-scene-understanding/how- 9 | good-are-detection-proposals-really/) 10 | 11 | NB: Boxes from the MCG website are in (y1, x1, y2, x2) order. 12 | Boxes from Hosang et al. are in (x1, y1, x2, y2) order. 13 | """ 14 | 15 | def munge(src_dir): 16 | # stored as: ./MCG-COCO-val2014-boxes/COCO_val2014_000000193401.mat 17 | # want: ./MCG/mat/COCO_val2014_0/COCO_val2014_000000141/COCO_val2014_000000141334.mat 18 | 19 | files = os.listdir(src_dir) 20 | for fn in files: 21 | base, ext = os.path.splitext(fn) 22 | # first 14 chars / first 22 chars / all chars + .mat 23 | # COCO_val2014_0/COCO_val2014_000000447/COCO_val2014_000000447991.mat 24 | first = base[:14] 25 | second = base[:22] 26 | dst_dir = os.path.join('MCG', 'mat', first, second) 27 | if not os.path.exists(dst_dir): 28 | os.makedirs(dst_dir) 29 | src = os.path.join(src_dir, fn) 30 | dst = os.path.join(dst_dir, fn) 31 | print 'MV: {} -> {}'.format(src, dst) 32 | os.rename(src, dst) 33 | 34 | if __name__ == '__main__': 35 | # src_dir should look something like: 36 | # src_dir = 'MCG-COCO-val2014-boxes' 37 | src_dir = sys.argv[1] 38 | munge(src_dir) 39 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/fast_rcnn/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/fast_rcnn/nms_wrapper.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # 
Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | from fast_rcnn.config import cfg 9 | from nms.gpu_nms import gpu_nms 10 | from nms.cpu_nms import cpu_nms 11 | 12 | def nms(dets, thresh, force_cpu=False): 13 | """Dispatch to either CPU or GPU NMS implementations.""" 14 | 15 | if dets.shape[0] == 0: 16 | return [] 17 | if cfg.USE_GPU_NMS and not force_cpu: 18 | return gpu_nms(dets, thresh, device_id=cfg.GPU_ID) 19 | else: 20 | return cpu_nms(dets, thresh) 21 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/nms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/caffe/train/faster-rcnn/code/lib/nms/__init__.py -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/nms/gpu_nms.hpp: -------------------------------------------------------------------------------- 1 | void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, 2 | int boxes_dim, float nms_overlap_thresh, int device_id); 3 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/nms/gpu_nms.pyx: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Faster R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import numpy as np 9 | cimport numpy as np 10 | 11 | assert sizeof(int) == sizeof(np.int32_t) 12 | 13 | cdef extern from "gpu_nms.hpp": 14 | void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) 15 | 16 | def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, 17 | np.int32_t device_id=0): 18 | cdef int boxes_num = dets.shape[0] 19 | cdef int boxes_dim = dets.shape[1] 20 | cdef int num_out 21 | cdef np.ndarray[np.int32_t, ndim=1] \ 22 | keep = np.zeros(boxes_num, dtype=np.int32) 23 | cdef np.ndarray[np.float32_t, ndim=1] \ 24 | scores = dets[:, 4] 25 | cdef np.ndarray[np.int_t, ndim=1] \ 26 | order = scores.argsort()[::-1] 27 | cdef np.ndarray[np.float32_t, ndim=2] \ 28 | sorted_dets = dets[order, :] 29 | _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) 30 | keep = keep[:num_out] 31 | return list(order[keep]) 32 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/nms/py_cpu_nms.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import numpy as np 9 | 10 | def py_cpu_nms(dets, thresh): 11 | """Pure Python NMS baseline.""" 12 | x1 = dets[:, 0] 13 | y1 = dets[:, 1] 14 | x2 = dets[:, 2] 15 | y2 = dets[:, 3] 16 | scores = dets[:, 4] 17 | 18 | areas = (x2 - x1 + 1) * (y2 - y1 + 1) 19 | order = scores.argsort()[::-1] 20 | 21 | keep = [] 22 | while order.size > 0: 23 | i = 
order[0] 24 | keep.append(i) 25 | xx1 = np.maximum(x1[i], x1[order[1:]]) 26 | yy1 = np.maximum(y1[i], y1[order[1:]]) 27 | xx2 = np.minimum(x2[i], x2[order[1:]]) 28 | yy2 = np.minimum(y2[i], y2[order[1:]]) 29 | 30 | w = np.maximum(0.0, xx2 - xx1 + 1) 31 | h = np.maximum(0.0, yy2 - yy1 + 1) 32 | inter = w * h 33 | ovr = inter / (areas[i] + areas[order[1:]] - inter) 34 | 35 | inds = np.where(ovr <= thresh)[0] 36 | order = order[inds + 1] 37 | 38 | return keep 39 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/pycocotools/UPSTREAM_REV: -------------------------------------------------------------------------------- 1 | https://github.com/pdollar/coco/commit/3ac47c77ebd5a1ed4254a98b7fbf2ef4765a3574 2 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/pycocotools/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'tylin' 2 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/pycocotools/_mask.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/caffe/train/faster-rcnn/code/lib/pycocotools/_mask.so -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/pycocotools/license.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014, Piotr Dollar and Tsung-Yi Lin 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 17 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 18 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 20 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 22 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those 25 | of the authors and should not be interpreted as representing official policies, 26 | either expressed or implied, of the FreeBSD Project. 
27 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/roi_data_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/rpn/README.md: -------------------------------------------------------------------------------- 1 | ### `rpn` module overview 2 | 3 | ##### `generate_anchors.py` 4 | 5 | Generates a regular grid of multi-scale, multi-aspect anchor boxes. 6 | 7 | ##### `proposal_layer.py` 8 | 9 | Converts RPN outputs (per-anchor scores and bbox regression estimates) into object proposals. 10 | 11 | ##### `anchor_target_layer.py` 12 | 13 | Generates training targets/labels for each anchor. Classification labels are 1 (object), 0 (not object) or -1 (ignore). 14 | Bbox regression targets are specified when the classification label is > 0. 15 | 16 | ##### `proposal_target_layer.py` 17 | 18 | Generates training targets/labels for each object proposal: classification labels 0 - K (bg or object class 1, ... , K) 19 | and bbox regression targets in the case that the label is > 0. 20 | 21 | ##### `generate.py` 22 | 23 | Generates object detection proposals from an imdb using an RPN. 24 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/rpn/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick and Sean Bell 6 | # -------------------------------------------------------- 7 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/transform/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/caffe/train/faster-rcnn/code/lib/transform/__init__.py -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/lib/utils/timer.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import time 9 | 10 | class Timer(object): 11
| """A simple timer.""" 12 | def __init__(self): 13 | self.total_time = 0. 14 | self.calls = 0 15 | self.start_time = 0. 16 | self.diff = 0. 17 | self.average_time = 0. 18 | 19 | def tic(self): 20 | # using time.time instead of time.clock because time.clock 21 | # does not normalize for multithreading 22 | self.start_time = time.time() 23 | 24 | def toc(self, average=True): 25 | self.diff = time.time() - self.start_time 26 | self.total_time += self.diff 27 | self.calls += 1 28 | self.average_time = self.total_time / self.calls 29 | if average: 30 | return self.average_time 31 | else: 32 | return self.diff 33 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/tools/.train_faster_rcnn_alt_opt.py.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/caffe/train/faster-rcnn/code/tools/.train_faster_rcnn_alt_opt.py.swp -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/tools/README.md: -------------------------------------------------------------------------------- 1 | Tools for training, testing, and compressing Fast R-CNN networks. 2 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/code/tools/_init_paths.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | """Set up paths for Fast R-CNN.""" 9 | 10 | import os.path as osp 11 | import sys 12 | 13 | def add_path(path): 14 | if path not in sys.path: 15 | sys.path.insert(0, path) 16 | 17 | this_dir = osp.dirname(__file__) 18 | 19 | # Add caffe to PYTHONPATH 20 | caffe_path = osp.join(this_dir, '..', 'caffe-fast-rcnn', 'python') 21 | add_path(caffe_path) 22 | 23 | # Add lib to PYTHONPATH 24 | lib_path = osp.join(this_dir, '..', 'lib') 25 | add_path(lib_path) 26 | -------------------------------------------------------------------------------- /examples/caffe/train/faster-rcnn/docker/gpu_ubuntu-14.04_python-2.7.6_caffe-py-faster-rcnn.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/nvidia-cudnn6.0.21-cuda8.0:v1.0 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y libprotobuf-dev \ 5 | libleveldb-dev \ 6 | libsnappy-dev \ 7 | libopencv-dev \ 8 | libhdf5-serial-dev \ 9 | protobuf-compiler \ 10 | libgflags-dev \ 11 | libgoogle-glog-dev \ 12 | liblmdb-dev \ 13 | libopenblas-dev 14 | 15 | RUN apt-get update 16 | 17 | RUN apt-get install -y --no-install-recommends libboost-all-dev libatlas-base-dev libhdf5-dev python-tk \ 18 | && \ 19 | apt-get clean && \ 20 | rm -rf /var/lib/apt/lists/* 21 | 22 | RUN pip install numpy cython opencv-python easydict -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com 23 | 24 | ADD ./docker_file/py-faster-rcnn/ /root/py-faster-rcnn 25 | 26 | RUN cd /root/py-faster-rcnn/caffe-fast-rcnn/python && for req in $(cat requirements.txt); do pip install $req -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com; done 27 | 28 | RUN cd /root/py-faster-rcnn/caffe-fast-rcnn/ && make
all -j8 && make pycaffe 29 | RUN cd /root/py-faster-rcnn/lib && make 30 | 31 | ADD ./docker_file/uai-sdk /uai-sdk 32 | RUN cd /uai-sdk && /usr/bin/python setup.py install 33 | 34 | RUN ln -s /dev/null /dev/raw1394 35 | ENV PYTHONPATH=/root/py-faster-rcnn/caffe-fast-rcnn/python -------------------------------------------------------------------------------- /examples/caffe/train/imagenet/code/README.md: -------------------------------------------------------------------------------- 1 | # Resnet101 for imagenet lmdb 2 | The resnet prototxt is from https://github.com/yihui-he/resnet-imagenet-caffe 3 | 4 | ## Data Preparation 5 | To generate the lmdb data, please see https://github.com/BVLC/caffe/tree/master/examples/imagenet. You can use examples/imagenet/create_imagenet.sh to generate the imagenet lmdb files. 6 | 7 | This will generate: 8 | 9 | ilsvrc12_train_lmdb/data.mdb 10 | ilsvrc12_train_lmdb/lock.mdb 11 | ilsvrc12_val_lmdb/data.mdb 12 | ilsvrc12_val_lmdb/lock.mdb 13 | 14 | ### Work with ufile 15 | To run training on the UAI Train Platform with ufile as the data backend, split both the train data.mdb and the val data.mdb into chunks with uaitrain_tools/split_tool.py. 16 | 17 | Also use uaitrain/arch/caffe/train_large_file.py as the training entry point. 18 | -------------------------------------------------------------------------------- /examples/caffe/train/imagenet/code/resnet_101_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "/data/data/resnet_101.prototxt" 2 | #test_iter: 6000 3 | #test_interval: 50000 4 | #test_initialization: true 5 | display: 300 6 | average_loss: 3000 7 | base_lr: 0.01 8 | lr_policy: "step" 9 | iter_size: 2 10 | stepsize: 400000 11 | gamma: 0.1 12 | max_iter: 1600000 13 | momentum: 0.9 14 | weight_decay: 0.0001 15 | snapshot: 100000 16 | snapshot_prefix: "/data/output/resnet_101" 17 | solver_mode: GPU 18 | -------------------------------------------------------------------------------- /examples/caffe/train/mnist/code/lenet_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "/data/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "/data/output/" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /examples/caffe/train/rfcn/code/tools/README.md: -------------------------------------------------------------------------------- 1 | Tools for training, testing, and compressing Fast R-CNN networks.
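As a side note, the solver prototxt files above are normally driven through the train.py entry points in these examples; the snippet below is only a minimal pycaffe sketch, assuming pycaffe is built and importable, of what such a solver file configures.
```python
# Minimal pycaffe sketch (assumes pycaffe is built and importable); the real
# examples drive the solver through their train.py entry points.
import caffe

caffe.set_device(0)
caffe.set_mode_gpu()  # matches "solver_mode: GPU" in the solver files above

solver = caffe.SGDSolver('/data/lenet_solver.prototxt')  # path is illustrative
solver.solve()  # runs max_iter iterations, snapshotting to snapshot_prefix
```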
2 | -------------------------------------------------------------------------------- /examples/caffe2/train/detectron/detectron_voc_example.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-2.7.6_caffe2-detectron:v1.0 2 | 3 | COPY dataset_catalog.py /data/detectron/lib/datasets/dataset_catalog.py 4 | COPY weights/ /data/weights/ 5 | COPY conf/ /data/conf/ -------------------------------------------------------------------------------- /examples/caffe2/train/detectron/uai_tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: generalized_rcnn 3 | CONV_BODY: FPN.add_fpn_ResNet50_conv5_body 4 | NUM_CLASSES: 81 5 | FASTER_RCNN: True 6 | NUM_GPUS: 1 7 | SOLVER: 8 | WEIGHT_DECAY: 0.0001 9 | LR_POLICY: steps_with_decay 10 | BASE_LR: 0.0025 11 | GAMMA: 0.1 12 | MAX_ITER: 60000 13 | STEPS: [0, 30000, 40000] 14 | # Equivalent schedules with... 15 | # 1 GPU: 16 | # BASE_LR: 0.0025 17 | # MAX_ITER: 60000 18 | # STEPS: [0, 30000, 40000] 19 | # 2 GPUs: 20 | # BASE_LR: 0.005 21 | # MAX_ITER: 30000 22 | # STEPS: [0, 15000, 20000] 23 | # 4 GPUs: 24 | # BASE_LR: 0.01 25 | # MAX_ITER: 15000 26 | # STEPS: [0, 7500, 10000] 27 | # 8 GPUs: 28 | # BASE_LR: 0.02 29 | # MAX_ITER: 7500 30 | # STEPS: [0, 3750, 5000] 31 | FPN: 32 | FPN_ON: True 33 | MULTILEVEL_ROIS: True 34 | MULTILEVEL_RPN: True 35 | FAST_RCNN: 36 | ROI_BOX_HEAD: fast_rcnn_heads.add_roi_2mlp_head 37 | ROI_XFORM_METHOD: RoIAlign 38 | ROI_XFORM_RESOLUTION: 7 39 | ROI_XFORM_SAMPLING_RATIO: 2 40 | TRAIN: 41 | WEIGHTS: "/data/weights/R-50.pkl" 42 | DATASETS: ('uai_voc_2007_trainval',) 43 | SCALES: (500,) 44 | MAX_SIZE: 833 45 | BATCH_SIZE_PER_IM: 256 46 | RPN_PRE_NMS_TOP_N: 2000 # Per FPN level 47 | TEST: 48 | DATASETS: ('uai_voc_2007_test',) 49 | SCALE: 500 50 | MAX_SIZE: 833 51 | NMS: 0.5 52 | RPN_PRE_NMS_TOP_N: 1000 # Per FPN level 53 | RPN_POST_NMS_TOP_N: 1000 54 | OUTPUT_DIR: "/data/output/" 55 | -------------------------------------------------------------------------------- /examples/caffe2/train/detectron/uai_tutorial_1gpu_e2e_faster_rcnn_coco_R-50-FPN.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: generalized_rcnn 3 | CONV_BODY: FPN.add_fpn_ResNet50_conv5_body 4 | NUM_CLASSES: 81 5 | FASTER_RCNN: True 6 | NUM_GPUS: 1 7 | SOLVER: 8 | WEIGHT_DECAY: 0.0001 9 | LR_POLICY: steps_with_decay 10 | BASE_LR: 0.0025 11 | GAMMA: 0.1 12 | MAX_ITER: 60000 13 | STEPS: [0, 30000, 40000] 14 | # Equivalent schedules with... 
15 | # 1 GPU: 16 | # BASE_LR: 0.0025 17 | # MAX_ITER: 60000 18 | # STEPS: [0, 30000, 40000] 19 | # 2 GPUs: 20 | # BASE_LR: 0.005 21 | # MAX_ITER: 30000 22 | # STEPS: [0, 15000, 20000] 23 | # 4 GPUs: 24 | # BASE_LR: 0.01 25 | # MAX_ITER: 15000 26 | # STEPS: [0, 7500, 10000] 27 | # 8 GPUs: 28 | # BASE_LR: 0.02 29 | # MAX_ITER: 7500 30 | # STEPS: [0, 3750, 5000] 31 | FPN: 32 | FPN_ON: True 33 | MULTILEVEL_ROIS: True 34 | MULTILEVEL_RPN: True 35 | FAST_RCNN: 36 | ROI_BOX_HEAD: fast_rcnn_heads.add_roi_2mlp_head 37 | ROI_XFORM_METHOD: RoIAlign 38 | ROI_XFORM_RESOLUTION: 7 39 | ROI_XFORM_SAMPLING_RATIO: 2 40 | TRAIN: 41 | WEIGHTS: "/data/weights/R-50.pkl" 42 | DATASETS: ('uai_coco_2017_train',) 43 | SCALES: (500,) 44 | MAX_SIZE: 833 45 | BATCH_SIZE_PER_IM: 256 46 | RPN_PRE_NMS_TOP_N: 2000 # Per FPN level 47 | TEST: 48 | DATASETS: ('uai_coco_2017_test',) 49 | SCALE: 500 50 | MAX_SIZE: 833 51 | NMS: 0.5 52 | RPN_PRE_NMS_TOP_N: 1000 # Per FPN level 53 | RPN_POST_NMS_TOP_N: 1000 54 | OUTPUT_DIR: "/data/output/" 55 | -------------------------------------------------------------------------------- /examples/case-study/face-compare/face-service.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7_tensorflow-1.9.0:v1.2 2 | 3 | RUN pip install requests 4 | 5 | COPY uai-sdk /uai-sdk 6 | RUN cd uai-sdk && python setup.py install 7 | 8 | EXPOSE 8080 9 | ADD ./code/ /ai-ucloud-client-django/ 10 | ADD ./face-service.conf /ai-ucloud-client-django/conf.json 11 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 12 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/case-study/face-compare/face-service.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "FaceCompareService", 5 | "main_file": "facecompare_service" 6 | } 7 | }, 8 | "backend" : { 9 | "mtcnn" : "http://localhost:8081/service", 10 | "compare" : "http://localhost:8082/service" 11 | } 12 | } -------------------------------------------------------------------------------- /examples/case-study/face-compare/facenet-compare-json-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7_tensorflow-1.9.0:v1.2 2 | 3 | COPY uai-sdk /uai-sdk 4 | RUN cd uai-sdk && python setup.py install 5 | 6 | EXPOSE 8080 7 | ADD ./code/ /ai-ucloud-client-django/ 8 | ADD ./facenet-compare-json.conf /ai-ucloud-client-django/conf.json 9 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 10 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/case-study/face-compare/facenet-compare-json.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "FaceCompareJsonModel", 5 | "main_file": "facenet_json_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /examples/case-study/face-compare/facenet-mtcnn-json-cpu.Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7_tensorflow-1.9.0:v1.2 2 | 3 | COPY uai-sdk /uai-sdk 4 | RUN cd uai-sdk && python setup.py install 5 | 6 | EXPOSE 8080 7 | ADD ./code/ /ai-ucloud-client-django/ 8 | ADD ./facenet-mtcnn-json.conf /ai-ucloud-client-django/conf.json 9 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 10 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/case-study/face-compare/facenet-mtcnn-json.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "FaceDetectionJsonModel", 5 | "main_file": "facenet_json_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "" 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /examples/case-study/nsfw-vedio/config/caffe_nsfw_stream.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "NsfwStreamModel", 5 | "main_file": "nsfw_stream_inference" 6 | }, 7 | "caffe" : { 8 | "model_dir" : "./checkpoint_dir", 9 | "model_name" : "resnet" 10 | } 11 | } 12 | } -------------------------------------------------------------------------------- /examples/case-study/nsfw-vedio/config/caffe_nsfw_video.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "NsfwModel", 5 | "main_file": "nsfw_video_inference" 6 | }, 7 | "caffe" : { 8 | "model_dir" : "./checkpoint_dir", 9 | "model_name" : "resnet" 10 | } 11 | } 12 | } -------------------------------------------------------------------------------- /examples/case-study/nsfw-vedio/config/gunicorn.conf.py: -------------------------------------------------------------------------------- 1 | bind = '[::]:8080' 2 | workers = 1 #use 1 worker, easy to debug 3 | worker_class = 'gthread' 4 | threads = 2 5 | timeout = 60 6 | graceful_timeout = 60 7 | keep_alive = 5 8 | limit_request_line = 2048 9 | limit_request_fields = 50 10 | limit_request_field_site = 8190 11 | forwarded_allow_ips = '::' 12 | #accesslog = '' 13 | #access_log_format = '' 14 | loglevel = 'debug' 15 | capture_output = True -------------------------------------------------------------------------------- /examples/case-study/nsfw-vedio/nsfw-stream-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_caffe-1.0.0:v1.2 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y software-properties-common python-software-properties 5 | RUN add-apt-repository ppa:mc3man/trusty-media 6 | RUN apt-get update 7 | RUN apt-get install -y ffmpeg 8 | 9 | COPY uai-sdk /uai-sdk 10 | RUN cd uai-sdk && python setup.py install 11 | 12 | EXPOSE 8080 13 | ADD ./code/ /ai-ucloud-client-django/ 14 | ADD ./config/caffe_nsfw_stream.conf /ai-ucloud-client-django/conf.json 15 | ENV UAI_SERVICE_CONFIG "/ai-ucloud-client-django/conf.json" 16 | RUN echo "DATA_UPLOAD_MAX_MEMORY_SIZE = 536870912" >> /ai-ucloud-client-django/httpserver/settings.py 17 | 18 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 19 | 
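Once a container built from a Dockerfile like the one above is deployed, gunicorn listens on port 8080 inside the container; the client below is a hypothetical sketch: the host and file name are placeholders, and the /service path mirrors the backend URLs used in face-service.conf above.
```python
# Hypothetical client for a deployed UAI inference service; the host and the
# file name are placeholders, and /service mirrors the backend URLs in
# face-service.conf above.
import requests

SERVICE_URL = 'http://127.0.0.1:8080/service'  # gunicorn listens on 8080 in the container

with open('sample.mp4', 'rb') as f:
    resp = requests.post(SERVICE_URL, data=f.read(), timeout=120)

print(resp.status_code, resp.text)
```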
-------------------------------------------------------------------------------- /examples/case-study/nsfw-vedio/nsfw-video-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_caffe-1.0.0:v1.2 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y software-properties-common python-software-properties 5 | RUN add-apt-repository ppa:mc3man/trusty-media 6 | RUN apt-get update 7 | RUN apt-get install -y ffmpeg 8 | 9 | COPY uai-sdk /uai-sdk 10 | RUN cd uai-sdk && python setup.py install 11 | 12 | EXPOSE 8080 13 | ADD ./code/ /ai-ucloud-client-django/ 14 | ADD ./config/caffe_nsfw_video.conf /ai-ucloud-client-django/conf.json 15 | ENV UAI_SERVICE_CONFIG "/ai-ucloud-client-django/conf.json" 16 | RUN echo "DATA_UPLOAD_MAX_MEMORY_SIZE = 536870912" >> /ai-ucloud-client-django/httpserver/settings.py 17 | 18 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/keras/inference/mnist/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/keras/inference/mnist/2.jpg -------------------------------------------------------------------------------- /examples/keras/inference/mnist/checkpoint_dir/mnist_model.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/keras/inference/mnist/checkpoint_dir/mnist_model.h5 -------------------------------------------------------------------------------- /examples/keras/inference/mnist/checkpoint_dir/mnist_model.json: -------------------------------------------------------------------------------- 1 | {"class_name": "Sequential", "keras_version": "1.2.2", "config": [{"class_name": "Dense", "config": {"W_constraint": null, "b_constraint": null, "name": "dense_1", "output_dim": 512, "activity_regularizer": null, "trainable": true, "init": "glorot_uniform", "bias": true, "input_dtype": "float32", "input_dim": 784, "b_regularizer": null, "W_regularizer": null, "activation": "linear", "batch_input_shape": [null, 784]}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_1"}}, {"class_name": "Dropout", "config": {"p": 0.2, "trainable": true, "name": "dropout_1"}}, {"class_name": "Dense", "config": {"W_constraint": null, "b_constraint": null, "name": "dense_2", "activity_regularizer": null, "trainable": true, "init": "glorot_uniform", "bias": true, "input_dim": 512, "b_regularizer": null, "W_regularizer": null, "activation": "linear", "output_dim": 512}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_2"}}, {"class_name": "Dropout", "config": {"p": 0.2, "trainable": true, "name": "dropout_2"}}, {"class_name": "Dense", "config": {"W_constraint": null, "b_constraint": null, "name": "dense_3", "activity_regularizer": null, "trainable": true, "init": "glorot_uniform", "bias": true, "input_dim": 512, "b_regularizer": null, "W_regularizer": null, "activation": "linear", "output_dim": 10}}, {"class_name": "Activation", "config": {"activation": "softmax", "trainable": true, "name": "activation_3"}}]} -------------------------------------------------------------------------------- 
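The keras_mnist.conf that follows points the serving code at this architecture JSON plus a separate H5 weights file ("all_one_file": false, "model_arch_type": "json"); the snippet below is only a sketch, assuming a plain Keras environment, of how such a pair is reassembled outside the serving framework.
```python
# Sketch of loading an architecture JSON + separate H5 weights pair like the
# one above (assumes a plain Keras install; not the repository's serving code).
import numpy as np
from keras.models import model_from_json

with open('checkpoint_dir/mnist_model.json') as f:
    model = model_from_json(f.read())                 # rebuild the Sequential model

model.load_weights('checkpoint_dir/mnist_model.h5')   # attach the trained weights

x = np.zeros((1, 784), dtype='float32')               # placeholder flattened 28x28 input
print(model.predict(x))                               # 10-way softmax probabilities
```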
/examples/keras/inference/mnist/keras_mnist.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "MnistModel", 5 | "main_file": "mnist_inference" 6 | }, 7 | "keras" : { 8 | "model_dir" : "./checkpoint_dir", 9 | "model_name" : "mnist_model", 10 | "all_one_file" : false, 11 | "model_arch_type" : "json" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /examples/keras/inference/mnist/mnist-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_keras-1.2.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./keras_mnist.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/keras/train/mnist/code/mnist_datasets.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def load_data(dataDir): 5 | """Loads the MNIST dataset. 6 | # Arguments 7 | path: path where to cache the dataset locally 8 | (relative to ~/.keras/datasets). 9 | # Returns 10 | Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. 11 | """ 12 | path = dataDir+'/mnist.npz' 13 | f = np.load(path) 14 | x_train, y_train = f['x_train'], f['y_train'] 15 | x_test, y_test = f['x_test'], f['y_test'] 16 | f.close() 17 | return (x_train, y_train), (x_test, y_test) -------------------------------------------------------------------------------- /examples/keras/train/mnist/data/mnist.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/keras/train/mnist/data/mnist.npz -------------------------------------------------------------------------------- /examples/mxnet/inference/mnist/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/inference/mnist/2.jpg -------------------------------------------------------------------------------- /examples/mxnet/inference/mnist/checkpoint_dir/mnist-model-0010.params: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/inference/mnist/checkpoint_dir/mnist-model-0010.params -------------------------------------------------------------------------------- /examples/mxnet/inference/mnist/mnist-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_mxnet-0.9.5:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./mxnet_mnist.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/mxnet/inference/mnist/mxnet_mnist.conf: 
-------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "MnistModel", 5 | "main_file": "mnist_inference" 6 | }, 7 | "mxnet" : { 8 | "model_dir" : "./checkpoint_dir", 9 | "model_name" : "mnist-model", 10 | "num_epoch" : 10 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/mxnet/insightface/inference/insightface_infer.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "InsightFaceModel", 5 | "main_file": "insightface_infer" 6 | }, 7 | "mxnet" : { 8 | "model_dir" : "./checkpoint_dir", 9 | "model_name" : "model-r100-aceFace", 10 | "num_epoch" : 3 11 | } 12 | } 13 | } -------------------------------------------------------------------------------- /examples/mxnet/insightface/inference/insightface_infer_cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_mxnet-1.0.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./insightface/deploy/mtcnn_detector.py /ai-ucloud-client-django/ 6 | ADD ./insightface/deploy/helper.py /ai-ucloud-client-django/ 7 | ADD ./insightface/deploy/mtcnn-model/ /ai-ucloud-client-django/mtcnn-model/ 8 | ADD ./insightface/src/common/ /ai-ucloud-client-django/common/ 9 | ADD ./insightface_infer.conf /ai-ucloud-client-django/conf.json 10 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 11 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/mxnet/insightface/inference/insightface_infer_gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaiservice_ubuntu-14.04_python-2.7.6_mxnet-1.0.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./insightface/deploy/mtcnn_detector.py /ai-ucloud-client-django/ 6 | ADD ./insightface/deploy/helper.py /ai-ucloud-client-django/ 7 | ADD ./insightface/deploy/mtcnn-model/ /ai-ucloud-client-django/mtcnn-model/ 8 | ADD ./insightface/src/common/ /ai-ucloud-client-django/common/ 9 | ADD ./insightface_infer.conf /ai-ucloud-client-django/conf.json 10 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 11 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/mxnet/insightface/train/insightface.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-2.7_mxnet-1.2.0:v1.0 2 | 3 | COPY ./insightface/ /data/ -------------------------------------------------------------------------------- /examples/mxnet/train/cifar/code/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/train/cifar/code/common/__init__.py -------------------------------------------------------------------------------- /examples/mxnet/train/cifar/code/common/find_mxnet.py: -------------------------------------------------------------------------------- 1 | # Licensed to 
the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 17 | 18 | import os, sys 19 | try: 20 | import mxnet as mx 21 | except ImportError: 22 | curr_path = os.path.abspath(os.path.dirname(__file__)) 23 | sys.path.append(os.path.join(curr_path, "../../../python")) 24 | import mxnet as mx 25 | -------------------------------------------------------------------------------- /examples/mxnet/train/cifar/code/symbols/README.md: -------------------------------------------------------------------------------- 1 | # Symbol 2 | 3 | This fold contains definition of various networks. To add a new network, please 4 | use the following format. 5 | 6 | ## Python 7 | 8 | - A file implements one network proposed in a paper, with the network name as the 9 | filename. 10 | - Mention the paper and the modifications made if any at the beginning 11 | of the file. 12 | - Indicate how to reproduce the accuracy numbers in the paper if it is not straightforward 13 | - Provide a function `get_symbol()` that return the network 14 | -------------------------------------------------------------------------------- /examples/mxnet/train/cifar/code/symbols/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/train/cifar/code/symbols/__init__.py -------------------------------------------------------------------------------- /examples/mxnet/train/cifar/code/symbols/mlp.py: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 
17 | 18 | """ 19 | a simple multilayer perceptron 20 | """ 21 | import mxnet as mx 22 | 23 | def get_symbol(num_classes=10, **kwargs): 24 | data = mx.symbol.Variable('data') 25 | data = mx.sym.Flatten(data=data) 26 | fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128) 27 | act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu") 28 | fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64) 29 | act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu") 30 | fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=num_classes) 31 | mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax') 32 | return mlp 33 | -------------------------------------------------------------------------------- /examples/mxnet/train/imagenet/code/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/train/imagenet/code/common/__init__.py -------------------------------------------------------------------------------- /examples/mxnet/train/imagenet/code/common/find_mxnet.py: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 17 | 18 | import os, sys 19 | try: 20 | import mxnet as mx 21 | except ImportError: 22 | curr_path = os.path.abspath(os.path.dirname(__file__)) 23 | sys.path.append(os.path.join(curr_path, "../../../python")) 24 | import mxnet as mx 25 | -------------------------------------------------------------------------------- /examples/mxnet/train/imagenet/code/symbols/README.md: -------------------------------------------------------------------------------- 1 | # Symbol 2 | 3 | This fold contains definition of various networks. To add a new network, please 4 | use the following format. 5 | 6 | ## Python 7 | 8 | - A file implements one network proposed in a paper, with the network name as the 9 | filename. 10 | - Mention the paper and the modifications made if any at the beginning 11 | of the file. 
12 | - Indicate how to reproduce the accuracy numbers in the paper if it is not straightforward 13 | - Provide a function `get_symbol()` that return the network 14 | -------------------------------------------------------------------------------- /examples/mxnet/train/imagenet/code/symbols/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/train/imagenet/code/symbols/__init__.py -------------------------------------------------------------------------------- /examples/mxnet/train/imagenet/code/symbols/mlp.py: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 17 | 18 | """ 19 | a simple multilayer perceptron 20 | """ 21 | import mxnet as mx 22 | 23 | def get_symbol(num_classes=10, **kwargs): 24 | data = mx.symbol.Variable('data') 25 | data = mx.sym.Flatten(data=data) 26 | fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128) 27 | act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu") 28 | fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64) 29 | act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu") 30 | fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=num_classes) 31 | mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax') 32 | return mlp 33 | -------------------------------------------------------------------------------- /examples/mxnet/train/mnist/code/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/train/mnist/code/common/__init__.py -------------------------------------------------------------------------------- /examples/mxnet/train/mnist/code/common/find_mxnet.py: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. 
You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 17 | 18 | import os, sys 19 | try: 20 | import mxnet as mx 21 | except ImportError: 22 | curr_path = os.path.abspath(os.path.dirname(__file__)) 23 | sys.path.append(os.path.join(curr_path, "../../../python")) 24 | import mxnet as mx 25 | -------------------------------------------------------------------------------- /examples/mxnet/train/mnist/code/symbols/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/train/mnist/code/symbols/__init__.py -------------------------------------------------------------------------------- /examples/mxnet/train/mnist/code/symbols/mlp.py: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 
17 | 18 | """ 19 | a simple multilayer perceptron 20 | """ 21 | import mxnet as mx 22 | 23 | def get_symbol(num_classes=10, **kwargs): 24 | data = mx.symbol.Variable('data') 25 | data = mx.sym.Flatten(data=data) 26 | fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128) 27 | act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu") 28 | fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64) 29 | act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu") 30 | fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=num_classes) 31 | mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax') 32 | return mlp 33 | -------------------------------------------------------------------------------- /examples/mxnet/train/mnist/data/t10k-images-idx3-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/train/mnist/data/t10k-images-idx3-ubyte.gz -------------------------------------------------------------------------------- /examples/mxnet/train/mnist/data/t10k-labels-idx1-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/train/mnist/data/t10k-labels-idx1-ubyte.gz -------------------------------------------------------------------------------- /examples/mxnet/train/mnist/data/train-images-idx3-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/train/mnist/data/train-images-idx3-ubyte.gz -------------------------------------------------------------------------------- /examples/mxnet/train/mnist/data/train-labels-idx1-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/mxnet/train/mnist/data/train-labels-idx1-ubyte.gz -------------------------------------------------------------------------------- /examples/pytorch/train/imagenet/code/README.md: -------------------------------------------------------------------------------- 1 | ### Usage workflow 2 | 1. Upload the docker image to Uhub 3 | 2. Upload the data to the file storage (ufs) or the object storage (us3) 4 | 3.
Create the distributed training job 5 | 6 | ### How to launch 7 | The UAI-Train platform injects the environment variables required to start distributed training into the user's job. Combined with these environment variables, a distributed training job is launched through the official PyTorch torch.distributed.launch or torch.multiprocessing. 8 | 9 | #### Key arguments 10 | * --**work_dir**, defaults to /data 11 | * --**data_dir**, the input directory where external storage is mounted into the container, defaults to /data/data 12 | * --**output_dir**, the output directory where external storage is mounted into the container, defaults to /data/output 13 | * --**num_gpus**, the number of GPUs on the current node 14 | 15 | #### Environment variables 16 | * **MASTER_ADDR**: address of the master (i.e. the worker with RANK=0) 17 | * **MASTER_PORT**: port of the master (i.e. the worker with RANK=0) 18 | * **RANK**: index of the current node; different nodes have different indices 19 | * **WORKER_NUM**: number of training nodes 20 | * **WORKER_GPU_NUM**: number of GPUs on a single node 21 | 22 | #### Starting the training 23 | Start a DDP distributed training job with the following command: 24 | ``` 25 | -m torch.distributed.launch --use_env --nproc_per_node $WORKER_GPU_NUM --master_addr $MASTER_ADDR --node_rank $RANK --master_port $MASTER_PORT --nnodes=$WORKER_NUM <absolute path of the code file plus its arguments> 26 | 27 | Note: the UAI-Train platform runs the command with python by default, so there is no need to add an explicit python prefix to the cmd here 28 | ``` 29 | 30 | For more information about torch.distributed.launch, see the official documentation or the [source file](https://github.com/pytorch/pytorch/blob/fc8404b5bc7e9721aa93021cdc27de818df64d8d/torch/distributed/launch.py) 31 | -------------------------------------------------------------------------------- /examples/pytorch/train/mnist/data/processed/test.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/pytorch/train/mnist/data/processed/test.pt -------------------------------------------------------------------------------- /examples/pytorch/train/mnist/data/processed/training.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/pytorch/train/mnist/data/processed/training.pt -------------------------------------------------------------------------------- /examples/tensorflow-2.0/imagenet/train/imagenet_tf2.0.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-3.5_tensorflow-2.0.0a:v1.0 2 | 3 | COPY code/ /data/ -------------------------------------------------------------------------------- /examples/tensorflow-2.0/mnist/inference/tf_mnist.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "MnistModel", 5 | "main_file": "mnist_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/tensorflow-2.0/mnist/train/mnist.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-2.7_tensorflow-2.0.0a:v1.0 2 | 3 | COPY ./code/ /data/ -------------------------------------------------------------------------------- /examples/tensorflow-2.0/mnist/train/mnist_data.py: -------------------------------------------------------------------------------- 1 | """Fashion-MNIST dataset. 2 | """ 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | 7 | import gzip 8 | import os 9 | 10 | import numpy as np 11 | from absl import flags 12 | 13 | import tensorflow as tf 14 | FLAGS = flags.FLAGS 15 | 16 | def load_data(): 17 | """Loads the Fashion-MNIST dataset. 18 | 19 | Returns: 20 | Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
21 | 22 | License: 23 | The copyright for Fashion-MNIST is held by Zalando SE. 24 | Fashion-MNIST is licensed under the [MIT license]( 25 | https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE). 26 | 27 | """ 28 | dirname = FLAGS.data_dir 29 | base = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' 30 | files = [ 31 | 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz', 32 | 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz' 33 | ] 34 | 35 | paths = [] 36 | for fname in files: 37 | paths.append(os.path.join(dirname, fname)) 38 | 39 | with gzip.open(paths[0], 'rb') as lbpath: 40 | y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8) 41 | 42 | with gzip.open(paths[1], 'rb') as imgpath: 43 | x_train = np.frombuffer( 44 | imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28) 45 | 46 | with gzip.open(paths[2], 'rb') as lbpath: 47 | y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8) 48 | 49 | with gzip.open(paths[3], 'rb') as imgpath: 50 | x_test = np.frombuffer( 51 | imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28) 52 | 53 | return (x_train, y_train), (x_test, y_test) -------------------------------------------------------------------------------- /examples/tensorflow/inference/cifar_simple/cifar.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "cifarModel", 5 | "main_file": "cifar_infer" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/cifar_simple/cifar_infer.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_tensorflow-1.4.0:v1.2 2 | EXPOSE 8080 3 | ADD ./inference/ /ai-ucloud-client-django/ 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./cifar.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 8 | 9 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/cifar_simple/code/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | """Makes helper libraries available in the cifar10 package.""" 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import cifar10 22 | import cifar10_input 23 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/cifar_simple/inference/checkpoint_dir/intro.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/cifar_simple/inference/checkpoint_dir/intro.txt -------------------------------------------------------------------------------- /examples/tensorflow/inference/cifar_simple/test_images/bird.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/cifar_simple/test_images/bird.png -------------------------------------------------------------------------------- /examples/tensorflow/inference/cifar_simple/test_images/deer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/cifar_simple/test_images/deer.png -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/CRNN_Tensorflow/crnn_model/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-21 下午6:37 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/CRNN_Tensorflow/data/test_images/test_01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/crnn/CRNN_Tensorflow/data/test_images/test_01.jpg -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/CRNN_Tensorflow/data/test_images/test_02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/crnn/CRNN_Tensorflow/data/test_images/test_02.jpg -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/CRNN_Tensorflow/data/test_images/test_03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/crnn/CRNN_Tensorflow/data/test_images/test_03.jpg -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/CRNN_Tensorflow/data_provider/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午1:39 4 | # @Author : Luo Yao 
5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/CRNN_Tensorflow/global_configuration/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午3:25 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/CRNN_Tensorflow/global_configuration/config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午3:25 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : config.py 7 | # @IDE: PyCharm Community Edition 8 | """ 9 | Set some global configuration 10 | """ 11 | from easydict import EasyDict as edict 12 | 13 | __C = edict() 14 | # Consumers can get config by: from config import cfg 15 | 16 | cfg = __C 17 | 18 | # Train options 19 | __C.TRAIN = edict() 20 | 21 | # Set the shadownet training epochs 22 | __C.TRAIN.EPOCHS = 5 23 | # Set the display step 24 | __C.TRAIN.DISPLAY_STEP = 1 25 | # Set the test display step during training process 26 | __C.TRAIN.TEST_DISPLAY_STEP = 100 27 | # Set the momentum parameter of the optimizer 28 | __C.TRAIN.MOMENTUM = 0.9 29 | # Set the initial learning rate 30 | __C.TRAIN.LEARNING_RATE = 0.1 31 | # Set the GPU resource used during training process 32 | __C.TRAIN.GPU_MEMORY_FRACTION = 0.85 33 | # Set the GPU allow growth parameter during tensorflow training process 34 | __C.TRAIN.TF_ALLOW_GROWTH = True 35 | # Set the shadownet training batch size 36 | __C.TRAIN.BATCH_SIZE = 32 37 | # Set the shadownet validation batch size 38 | __C.TRAIN.VAL_BATCH_SIZE = 32 39 | # Set the learning rate decay steps 40 | __C.TRAIN.LR_DECAY_STEPS = 10000 41 | # Set the learning rate decay rate 42 | __C.TRAIN.LR_DECAY_RATE = 0.1 43 | 44 | # Test options 45 | __C.TEST = edict() 46 | 47 | # Set the GPU resource used during testing process 48 | __C.TEST.GPU_MEMORY_FRACTION = 0.5 49 | # Set the GPU allow growth parameter during tensorflow testing process 50 | __C.TEST.TF_ALLOW_GROWTH = False 51 | # Set the test batch size 52 | __C.TEST.BATCH_SIZE = 32 53 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/CRNN_Tensorflow/local_utils/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午6:45 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/inference/checkpoint_dir/intro.txt: -------------------------------------------------------------------------------- 1 | You can download model at https://github.com/MaybeShewill-CV/CRNN_Tensorflow/tree/master/model/shadownet -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/ocr.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 
uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-3.6.2_tensorflow-1.3.0:v1.2 2 | EXPOSE 8080 3 | ADD ./inference/ /ai-ucloud-client-django/ 4 | COPY ./CRNN_Tensorflow/crnn_model/ /ai-ucloud-client-django/crnn_model/ 5 | COPY ./CRNN_Tensorflow/local_utils/ /ai-ucloud-client-django/local_utils/ 6 | COPY ./CRNN_Tensorflow/global_configuration/ /ai-ucloud-client-django/global_configuration/ 7 | ADD ./ocr.conf /ai-ucloud-client-django/conf.json 8 | ADD ./CRNN_Tensorflow/data/char_dict/ /data/data/ 9 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 10 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn/ocr.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "ocrModel", 5 | "main_file": "ocr_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/code/crnn_model/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-21 下午6:37 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/code/data_provider/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午1:39 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/code/global_configuration/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午3:25 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/code/local_utils/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午6:45 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/code/tools/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-21 下午6:37 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/crnn_multi.conf: -------------------------------------------------------------------------------- 1 | { 2 | 
"http_server" : { 3 | "exec" : { 4 | "main_class": "CrnnModel", 5 | "main_file": "crnn_multi_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/crnn_multi_infer.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-3.6_tensorflow-1.9.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./inference_multi/ /ai-ucloud-client-django/ 5 | ADD ./crnn_multi.conf /ai-ucloud-client-django/conf.json 6 | ADD ./code/ /data/crnn/ 7 | COPY ./data/ /data/data/char_dict/ 8 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 9 | ENV LANG C.UTF-8 10 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/inference_multi/crnn_multi_inference.py: -------------------------------------------------------------------------------- 1 | """ 2 | Use shadow net to recognize the scene text 3 | """ 4 | import tensorflow as tf 5 | import os.path as ops 6 | import numpy as np 7 | import cv2 8 | import argparse 9 | import sys 10 | from PIL import Image 11 | sys.path.append('/data/crnn') 12 | #import matplotlib.pyplot as plt 13 | try: 14 | from cv2 import cv2 15 | except ImportError: 16 | pass 17 | 18 | from crnn_model import crnn_model 19 | from global_configuration import config 20 | from local_utils import log_utils, data_utils 21 | from uai.arch.tf_model import TFAiUcloudModel 22 | import crnn_multi_infer 23 | 24 | #logger = log_utils.init_logger() 25 | class CrnnModel(TFAiUcloudModel): 26 | def __init__(self,conf): 27 | super(CrnnModel,self).__init__(conf) 28 | 29 | def load_model(self): 30 | predictor = crnn_multi_infer.crnnPredictor('./checkpoint_dir') 31 | predictor.load_serve_model() 32 | self._predictor = predictor 33 | 34 | def execute(self,data,batch_size): 35 | predictor = self._predictor 36 | 37 | images = [] 38 | for i in range(batch_size): 39 | image = Image.open(data[i]) 40 | images.append(image) 41 | 42 | word = predictor.do_serve_predict(images) 43 | return word 44 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/ocr.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-3.6_tensorflow-1.9.0:v1.2 2 | EXPOSE 8080 3 | ADD ./inference/ /ai-ucloud-client-django/ 4 | ADD ./ocr.conf /ai-ucloud-client-django/conf.json 5 | ADD ./code /data/code 6 | COPY ./data/ /data/data/char_dict/ 7 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 8 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 9 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/ocr.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "ocrModel", 5 | "main_file": "ocr_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/crnn_chinese/test_images/test_02.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/crnn_chinese/test_images/test_02.jpg -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/code/lanms/.gitignore: -------------------------------------------------------------------------------- 1 | adaptor.so 2 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/code/lanms/Makefile: -------------------------------------------------------------------------------- 1 | CXXFLAGS = -I include -std=c++11 -O3 $(shell python-config --cflags) 2 | LDFLAGS = $(shell python-config --ldflags) 3 | 4 | DEPS = lanms.h $(shell find include -xtype f) 5 | CXX_SOURCES = adaptor.cpp include/clipper/clipper.cpp 6 | 7 | LIB_SO = adaptor.so 8 | 9 | $(LIB_SO): $(CXX_SOURCES) $(DEPS) 10 | $(CXX) -o $@ $(CXXFLAGS) $(LDFLAGS) $(CXX_SOURCES) --shared -fPIC 11 | 12 | clean: 13 | rm -rf $(LIB_SO) 14 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/code/lanms/__init__.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import os 3 | import numpy as np 4 | 5 | BASE_DIR = os.path.dirname(os.path.realpath(__file__)) 6 | 7 | if subprocess.call(['make', '-C', BASE_DIR]) != 0: # return value 8 | raise RuntimeError('Cannot compile lanms: {}'.format(BASE_DIR)) 9 | 10 | 11 | def merge_quadrangle_n9(polys, thres=0.3, precision=10000): 12 | from .adaptor import merge_quadrangle_n9 as nms_impl 13 | if len(polys) == 0: 14 | return np.array([], dtype='float32') 15 | p = polys.copy() 16 | p[:,:8] *= precision 17 | ret = np.array(nms_impl(p, thres), dtype='float32') 18 | ret[:,:8] /= precision 19 | return ret 20 | 21 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/code/lanms/__main__.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | from . import merge_quadrangle_n9 5 | 6 | if __name__ == '__main__': 7 | # unit square with confidence 1 8 | q = np.array([0, 0, 0, 1, 1, 1, 1, 0, 1], dtype='float32') 9 | 10 | print(merge_quadrangle_n9(np.array([q, q + 0.1, q + 2]))) 11 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/code/lanms/include/clipper/clipper.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/east/code/lanms/include/clipper/clipper.cpp -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/code/lanms/include/pybind11/typeid.h: -------------------------------------------------------------------------------- 1 | /* 2 | pybind11/typeid.h: Compiler-independent access to type identifiers 3 | 4 | Copyright (c) 2016 Wenzel Jakob 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file. 
8 | */ 9 | 10 | #pragma once 11 | 12 | #include 13 | #include 14 | 15 | #if defined(__GNUG__) 16 | #include 17 | #endif 18 | 19 | NAMESPACE_BEGIN(pybind11) 20 | NAMESPACE_BEGIN(detail) 21 | /// Erase all occurrences of a substring 22 | inline void erase_all(std::string &string, const std::string &search) { 23 | for (size_t pos = 0;;) { 24 | pos = string.find(search, pos); 25 | if (pos == std::string::npos) break; 26 | string.erase(pos, search.length()); 27 | } 28 | } 29 | 30 | PYBIND11_NOINLINE inline void clean_type_id(std::string &name) { 31 | #if defined(__GNUG__) 32 | int status = 0; 33 | std::unique_ptr res { 34 | abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), std::free }; 35 | if (status == 0) 36 | name = res.get(); 37 | #else 38 | detail::erase_all(name, "class "); 39 | detail::erase_all(name, "struct "); 40 | detail::erase_all(name, "enum "); 41 | #endif 42 | detail::erase_all(name, "pybind11::"); 43 | } 44 | NAMESPACE_END(detail) 45 | 46 | /// Return a string representation of a C++ type 47 | template static std::string type_id() { 48 | std::string name(typeid(T).name()); 49 | detail::clean_type_id(name); 50 | return name; 51 | } 52 | 53 | NAMESPACE_END(pybind11) 54 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/code/nets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/east/code/nets/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/east-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7.6_tensorflow-1.6.0:v1.0 2 | 3 | RUN apt-get update && apt-get install -y --no-install-recommends python-dev python-tk 4 | 5 | RUN pip install shapely -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com 6 | 7 | EXPOSE 8080 8 | ADD ./code/ /ai-ucloud-client-django/ 9 | RUN cd /ai-ucloud-client-django/lanms/&&make 10 | ADD ./east.conf /ai-ucloud-client-django/conf.json 11 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 12 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/east-dist-gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaiservice_ubuntu-16.04_python-2.7.6_tensorflow-1.6.0:v1.0 2 | 3 | RUN apt-get update && apt-get install -y --no-install-recommends python-dev python-tk 4 | 5 | RUN pip install shapely typing -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com 6 | 7 | EXPOSE 8080 8 | ADD ./code/ /ai-ucloud-client-django/ 9 | RUN cd /ai-ucloud-client-django/lanms/&&make 10 | ADD ./east-dist.conf /ai-ucloud-client-django/conf.json 11 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 12 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/east-dist.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "EASTTextDetectModel", 5 
| "main_file": "east_multi_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/east-gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaiservice_ubuntu-16.04_python-2.7.6_tensorflow-1.6.0:v1.0 2 | 3 | RUN apt-get update && apt-get install -y --no-install-recommends python-dev python-tk 4 | 5 | RUN pip install shapely -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com 6 | 7 | EXPOSE 8080 8 | ADD ./code/ /ai-ucloud-client-django/ 9 | RUN cd /ai-ucloud-client-django/lanms/&&make 10 | ADD ./east.conf /ai-ucloud-client-django/conf.json 11 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 12 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/east/east.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "EASTTextDetectModel", 5 | "main_file": "east_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/code/align/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/facenet/code/align/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/code/align/det1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/facenet/code/align/det1.npy -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/code/align/det2.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/facenet/code/align/det2.npy -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/code/align/det3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/facenet/code/align/det3.npy -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/code/gen_example_json.py: -------------------------------------------------------------------------------- 1 | """ input a list of files and pack them into json file """ 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | 7 | from scipy import misc 8 | 9 | import sys 10 | import os 11 | import argparse 12 | import json 13 | import base64 14 | import StringIO 15 | 16 | def main(args): 17 | images = load_images(args.image_files, args.image_size) 18 | cnt = len(images) 19 | raw_images = [] 20 | for 
image in images: 21 | buf = StringIO.StringIO() 22 | image.save(buf, format='PNG') 23 | image = base64.b64encode(buf.getvalue()) 24 | raw_images.append(image) 25 | 26 | json_data = {'cnt': cnt, 'images': raw_images} 27 | 28 | with open('test.json', 'w') as f: 29 | json.dump(json_data, f) 30 | 31 | def load_images(tmp_image_paths, image_size): 32 | img_list = [] 33 | for image in tmp_image_paths: 34 | img = misc.imread(os.path.expanduser(image), mode='RGB') 35 | img = misc.imresize(img,(image_size, image_size), interp='bilinear') 36 | im = misc.toimage(img) 37 | img_list.append(im) 38 | 39 | return img_list 40 | 41 | def parse_arguments(argv): 42 | parser = argparse.ArgumentParser() 43 | 44 | parser.add_argument('--image_files', type=str, nargs='+', help='Images to compare') 45 | parser.add_argument('--image_size', type=int, 46 | help='Image size (height, width) in pixels.', default=160) 47 | return parser.parse_args(argv) 48 | 49 | if __name__ == '__main__': 50 | main(parse_arguments(sys.argv[1:])) -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/facenet-compare-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7.6_tensorflow-1.6.0:v1.0 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./facenet-compare.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/facenet-compare.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "FaceCompareModel", 5 | "main_file": "facenet_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/facenet-embed-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7.6_tensorflow-1.6.0:v1.0 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./facenet-embed.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/facenet-embed.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "FaceEmbedModel", 5 | "main_file": "facenet_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/facenet-mtcnn-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7.6_tensorflow-1.6.0:v1.0 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./facenet-mtcnn.conf /ai-ucloud-client-django/conf.json 6 | ENV 
UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/facenet/facenet-mtcnn.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "FaceDetectionModel", 5 | "main_file": "facenet_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "" 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /examples/tensorflow/inference/im2txt/code/im2txt_conf.py: -------------------------------------------------------------------------------- 1 | from uai.arch_conf.tf_conf import TFJsonConfLoader 2 | 3 | class Im2txtJsonConfLoader(TFJsonConfLoader): 4 | def __init__(self, conf): 5 | super(Im2txtJsonConfLoader, self).__init__(conf) 6 | 7 | def _load(self): 8 | super(Im2txtJsonConfLoader, self)._load() 9 | self.input_width = self.server_conf['tensorflow']['input_width'] 10 | self.input_height = self.server_conf['tensorflow']['input_height'] 11 | self.checkpoint = self.server_conf['tensorflow']['checkpoint'] 12 | 13 | 14 | def get_model_dir(self): 15 | return self.model_dir 16 | 17 | def get_input_width(self): 18 | return self.input_width 19 | 20 | def get_input_height(self): 21 | return self.input_height 22 | 23 | def get_checkpoint(self): 24 | return self.checkpoint 25 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/im2txt/code/inference_utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/im2txt/code/inference_utils/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/inference/im2txt/code/ops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/im2txt/code/ops/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/inference/im2txt/im2txt-infer-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7_tensorflow-1.7.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./im2txt.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 8 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/im2txt/im2txt.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "Im2txtModel", 5 | "main_file": "im2txt_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir", 9 | "checkpoint" : 6000, 10 | "input_width" : 299, 11 | "input_height" : 299 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_0.11/2.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/mnist_0.11/2.jpg -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_0.11/checkpoint_dir/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "mnist.mod" 2 | all_model_checkpoint_paths: "mnist.mod" 3 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_0.11/checkpoint_dir/mnist.mod: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/mnist_0.11/checkpoint_dir/mnist.mod -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_0.11/checkpoint_dir/mnist.mod.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/mnist_0.11/checkpoint_dir/mnist.mod.meta -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_0.11/tf_mnist.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "MnistModel", 5 | "main_file": "mnist_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_0.11/tf_mnist_json.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "MnistModel", 5 | "main_file": "mnist_inference_json" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_1.1/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/mnist_1.1/2.jpg -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_1.1/checkpoint_dir/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "mnist.mod" 2 | all_model_checkpoint_paths: "mnist.mod" 3 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_1.1/checkpoint_dir/mnist.mod.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/mnist_1.1/checkpoint_dir/mnist.mod.data-00000-of-00001 -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_1.1/checkpoint_dir/mnist.mod.index: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/mnist_1.1/checkpoint_dir/mnist.mod.index -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_1.1/checkpoint_dir/mnist.mod.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/mnist_1.1/checkpoint_dir/mnist.mod.meta -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_1.1/mnist-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_tensorflow-1.1.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./tf_mnist.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_1.1/mnist-gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaiservice_ubuntu-14.04_python-2.7.6_tensorflow-1.6.0:v1.0 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./tf_mnist.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/mnist_1.1/tf_mnist.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "MnistModel", 5 | "main_file": "mnist_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/object-detect/object-detect-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7_tensorflow-1.7.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./object-detect.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 8 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/object-detect/object-detect.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "ObjectDetectModel", 5 | "main_file": "object_detect_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /examples/tensorflow/inference/retrain/code/retrain_conf.py: -------------------------------------------------------------------------------- 1 | from uai.arch_conf.tf_conf import TFJsonConfLoader 2 | 3 | class RetrainJsonConfLoader(TFJsonConfLoader): 4 | 
def __init__(self, conf): 5 | super(RetrainJsonConfLoader, self).__init__(conf) 6 | 7 | def _load(self): 8 | super(RetrainJsonConfLoader, self)._load() 9 | self.input_width = eval(self.server_conf['tensorflow']['input_width']) 10 | self.input_height = eval(self.server_conf['tensorflow']['input_height']) 11 | 12 | def get_model_dir(self): 13 | return self.model_dir 14 | 15 | def get_input_width(self): 16 | return self.input_width 17 | 18 | def get_input_height(self): 19 | return self.input_height 20 | 21 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/retrain/retrained-classification-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7_tensorflow-1.7.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./retrained-classification.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 8 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/retrain/retrained-classification.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "RetrainedClassificationModel", 5 | "main_file": "retrain_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir", 9 | "input_width" : "224", 10 | "input_height" : "224" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/text-classification-ch/test.txt: -------------------------------------------------------------------------------- 1 | 专家预测最后9战6胜3负 火箭闯入前八概率只三成新浪体育讯北京时间3月29日消息,美国网站ESPN专家约翰-霍林格给出了他自己的季后赛出线预测,根据他的预测,主要竞争西部季后赛席位的几支球队晋级概率如下:开拓者96.3%、黄蜂93.0%、灰熊87.5%、火箭22.6%、太阳0.6%。换句话说,霍林格认为火箭晋级季后赛的希望已经不足三成了。霍林格的这项预测是基于各队目前的状况,以及随后的赛程。霍林格认为还有9场比赛没打的火箭,最佳战绩可能是9胜0负,而最差战绩有可能是1胜8负。最可能的战绩是44胜38负,也就是在这9场比赛中取得6胜3负的成绩。而霍林格预测火箭的竞争对手在常规赛结束后最可能出现的战绩分别是开拓者47胜35负,黄蜂46胜36负,灰熊46胜36负,火箭将以两场的差距无缘季后赛。应该说,这项分析还是合情合理的,除非有奇迹出现,否则火箭晋级季后赛的希望确实只有不到三成了。该项数据还给出了火箭打进总决赛的概率,是1.9%,而夺取总冠军的概率只有0.6%。当然,这些微小的概率只是理论上的,如果火箭连季后赛都打不进去,这些概率也只能成为大家饭后的谈资。再来看看其他球队的情况,根据这项数据预测,西部晋级总决赛概率最高的是湖人,达到23.7%,随后依次是掘金、马刺和小牛。东部晋级总决赛概率最高的是公牛,达到49.3%,随后依次是热火、凯尔特人和魔术。在夺冠概率上,公牛一马当先,达到了31.1%,湖人和热火平分秋色,都是11.6%。显然,在霍林格眼中,东部异军突起的公牛已经成为了夺冠第一热门,罗斯将带领他的球队走上芝加哥复兴之路,而热火和湖人依然是他们最强劲的竞争者。至于年纪稍大的凯尔特人、马刺,则不被霍林格所看好。(肥仔) 2 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/text-classification-ch/text-cnn-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7.6_tensorflow-1.6.0:v1.0 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./txt_class_cnn.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/text-classification-ch/text-rnn-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 
uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7.6_tensorflow-1.6.0:v1.0 2 | 3 | EXPOSE 8080 4 | ADD ./code/ /ai-ucloud-client-django/ 5 | ADD ./txt_class_rnn.conf /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/text-classification-ch/txt_class_cnn.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "TxtClassCNNModel", 5 | "main_file": "txt_cnn_rnn_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/text-classification-ch/txt_class_rnn.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "TxtClassRNNModel", 5 | "main_file": "txt_cnn_rnn_inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/tf-serving/inception/conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "InceptionModel", 5 | "main_file": "inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir/", 9 | "tag": ["serve"], 10 | "signature": "predict_images", 11 | "input": { 12 | "name": "images" 13 | }, 14 | "output": { 15 | "name": ["classes"] 16 | } 17 | } 18 | } 19 | } -------------------------------------------------------------------------------- /examples/tensorflow/inference/tf-serving/inception/uaiservice.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_tensorflow-1.4.0:v1.1 2 | 3 | EXPOSE 8080 4 | ADD ./inception /ai-ucloud-client-django/ 5 | ADD ./inception/conf.json /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/inference/tf-serving/mnist/checkpoint_dir/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/tf-serving/mnist/checkpoint_dir/saved_model.pb -------------------------------------------------------------------------------- /examples/tensorflow/inference/tf-serving/mnist/checkpoint_dir/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/tf-serving/mnist/checkpoint_dir/variables/variables.data-00000-of-00001 -------------------------------------------------------------------------------- /examples/tensorflow/inference/tf-serving/mnist/checkpoint_dir/variables/variables.index: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/inference/tf-serving/mnist/checkpoint_dir/variables/variables.index -------------------------------------------------------------------------------- /examples/tensorflow/inference/tf-serving/mnist/conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "MnistModel", 5 | "main_file": "inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir/", 9 | "tag": ["serve"], 10 | "signature": "predict_images", 11 | "input": { 12 | "name": "images" 13 | }, 14 | "output": { 15 | "name": ["scores"] 16 | } 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/tf-serving/mnist/uaiservice.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_tensorflow-1.4.0:v1.1 2 | 3 | EXPOSE 8080 4 | ADD ./mnist /ai-ucloud-client-django/ 5 | ADD ./mnist/conf.json /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 8 | -------------------------------------------------------------------------------- /examples/tensorflow/inference/tf-serving/wide_deep/conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "WideDeepModel", 5 | "main_file": "inference" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "./checkpoint_dir/", 9 | "tag": ["serve"], 10 | "signature": "predict", 11 | "input": { 12 | "name": "input" 13 | }, 14 | "output": { 15 | "name": ["classes", "logits"] 16 | } 17 | } 18 | } 19 | } -------------------------------------------------------------------------------- /examples/tensorflow/inference/tf-serving/wide_deep/uaiservice.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-14.04_python-2.7.6_tensorflow-1.4.0:v1.1 2 | 3 | EXPOSE 8080 4 | ADD ./wide_deep /ai-ucloud-client-django/ 5 | ADD ./wide_deep/conf.json /ai-ucloud-client-django/conf.json 6 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 7 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/tensorflow/train/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/train/bert/bert/__init__.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2018 The Google AI Language Team Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | -------------------------------------------------------------------------------- /examples/tensorflow/train/bert/bert_train.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-3.5_tensorflow-1.13.1:v1.0 2 | 3 | RUN apt-get install -y locales 4 | RUN locale-gen zh_CN.UTF-8 5 | RUN ln -sfn /usr/local/bin/python /usr/bin/python 6 | 7 | ENV LANG C.UTF-8 8 | 9 | COPY ./chinese_L-12_H-768_A-12 /data/chinese_L-12_H-768_A-12/ 10 | COPY ./bert/ /data/ -------------------------------------------------------------------------------- /examples/tensorflow/train/bert/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow >= 1.11.0 # CPU Version of TensorFlow. 2 | # tensorflow-gpu >= 1.11.0 # GPU version of TensorFlow. 3 | -------------------------------------------------------------------------------- /examples/tensorflow/train/cifar/code/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/cifar/code/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/train/cifar_simple/cifar-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaitrain_ubuntu-14.04_python-2.7.6_tensorflow-1.4.0:v1.0 2 | ADD ./code/ /data/ 3 | -------------------------------------------------------------------------------- /examples/tensorflow/train/cifar_simple/cifar-gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-14.04_python-2.7.6_tensorflow-1.4.0:v1.0 2 | ADD ./code/ /data/ 3 | -------------------------------------------------------------------------------- /examples/tensorflow/train/cifar_simple/code/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | """Makes helper libraries available in the cifar10 package.""" 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import cifar10 22 | import cifar10_input 23 | -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn/crnn-generate-tfrecords.Dockerfile: -------------------------------------------------------------------------------- 1 | From uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-3.6.2_tensorflow-1.3.0:v1.0 2 | ADD ./code/ /data/code/ 3 | 4 | -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn/crnnmodel/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-21 下午6:37 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn/data_provider/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午1:39 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- 
/examples/tensorflow/train/crnn/global_configuration/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午3:25 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn/global_configuration/config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午3:25 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : config.py 7 | # @IDE: PyCharm Community Edition 8 | """ 9 | Set some global configuration 10 | """ 11 | from easydict import EasyDict as edict 12 | 13 | __C = edict() 14 | # Consumers can get config by: from config import cfg 15 | 16 | cfg = __C 17 | 18 | # Train options 19 | __C.TRAIN = edict() 20 | 21 | # Set the shadownet training epochs 22 | __C.TRAIN.EPOCHS = 40000 23 | # Set the display step 24 | __C.TRAIN.DISPLAY_STEP = 1 25 | # Set the test display step during training process 26 | __C.TRAIN.TEST_DISPLAY_STEP = 100 27 | # Set the momentum parameter of the optimizer 28 | __C.TRAIN.MOMENTUM = 0.9 29 | # Set the initial learning rate 30 | __C.TRAIN.LEARNING_RATE = 0.1 31 | # Set the GPU resource used during training process 32 | __C.TRAIN.GPU_MEMORY_FRACTION = 0.85 33 | # Set the GPU allow growth parameter during tensorflow training process 34 | __C.TRAIN.TF_ALLOW_GROWTH = True 35 | # Set the shadownet training batch size 36 | __C.TRAIN.BATCH_SIZE = 32 37 | # Set the shadownet validation batch size 38 | __C.TRAIN.VAL_BATCH_SIZE = 32 39 | # Set the learning rate decay steps 40 | __C.TRAIN.LR_DECAY_STEPS = 10000 41 | # Set the learning rate decay rate 42 | __C.TRAIN.LR_DECAY_RATE = 0.1 43 | 44 | # Test options 45 | __C.TEST = edict() 46 | 47 | # Set the GPU resource used during testing process 48 | __C.TEST.GPU_MEMORY_FRACTION = 0.5 49 | # Set the GPU allow growth parameter during tensorflow testing process 50 | __C.TEST.TF_ALLOW_GROWTH = False 51 | # Set the test batch size 52 | __C.TEST.BATCH_SIZE = 32 53 | -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn/local_utils/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午6:45 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn/ocr-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | From uhub.service.ucloud.cn/uaishare/cpu_uaitrain_ubuntu-16.04_python-3.6.2_tensorflow-1.3.0:v1.0 2 | ADD ./ ./data/ 3 | -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn/ocr-gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | From uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-3.6.2_tensorflow-1.3.0:v1.0 2 | ADD ./ ./data/ 3 | -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn/requirements.txt: 
-------------------------------------------------------------------------------- 1 | tensorflow_gpu==1.6.0 2 | numpy==1.13.1 3 | opencv_python==3.2.0.7 4 | matplotlib==2.0.2 5 | easydict==1.6 6 | tensorflow==1.6.0 7 | -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn_chinese/code/crnn_model/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-21 下午6:37 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn_chinese/code/data_provider/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午1:39 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn_chinese/code/gen_data/MSYHL.TTC: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/crnn_chinese/code/gen_data/MSYHL.TTC -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn_chinese/code/gen_data/gen_chinesetxt.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import io 3 | import argparse 4 | parser = argparse.ArgumentParser() 5 | parser.add_argument('--path_poem_txt', type=str,default='/data/data/gen_data/THUOCL_poem.txt',help='path of chinese char txt') 6 | parser.add_argument('--txt_save_path', type=str, help='chinese char pic path',default='/data/data/chinese.txt') 7 | 8 | args = parser.parse_args() 9 | 10 | def writetxt(path_poem_txt,txt_save_path): 11 | with open(path_poem_txt,'r') as file_to_read: 12 | f = file_to_read.readlines() 13 | f = list(map(lambda line:line.split('\t')[0].strip(),f)) 14 | f = ''.join(f) 15 | f = list(set(f)) 16 | 17 | chinese=io.open(txt_save_path,'w',encoding='utf-8') 18 | for char in enumerate(f): 19 | #if char not in f[:index]: 20 | chinese.write(char[1]+'\n') 21 | chinese.close() 22 | file_to_read.close() 23 | 24 | writetxt(args.path_poem_txt,args.txt_save_path) 25 | -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn_chinese/code/global_configuration/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午3:25 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn_chinese/code/local_utils/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-22 下午6:45 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition 
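gen_chinesetxt.py above builds the character dictionary used by the crnn_chinese example: it reads a THUOCL-style vocabulary file (word and frequency separated by a tab), keeps the unique characters, and writes them one per line. A standalone sketch of the same logic follows — the paths are illustrative (they mirror the script's argparse defaults), and the sort is added here only to make the output deterministic; this is not the script itself.

```python
# Standalone sketch of gen_chinesetxt.py's core logic: collect the unique
# characters from a tab-separated vocabulary file and write one per line.
import io

def build_char_list(vocab_path, out_path):
    with io.open(vocab_path, encoding='utf-8') as f:
        words = [line.split('\t')[0].strip() for line in f]
    chars = sorted(set(''.join(words)))   # sorted only for deterministic output
    with io.open(out_path, 'w', encoding='utf-8') as out:
        for ch in chars:
            out.write(ch + u'\n')
    return chars

# Example (paths are assumptions matching the script's defaults):
# build_char_list('/data/data/gen_data/THUOCL_poem.txt', '/data/data/chinese.txt')
```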
-------------------------------------------------------------------------------- /examples/tensorflow/train/crnn_chinese/code/tools/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 17-9-21 下午6:37 4 | # @Author : Luo Yao 5 | # @Site : http://github.com/TJCVRS 6 | # @File : __init__.py.py 7 | # @IDE: PyCharm Community Edition -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn_chinese/crnn-cpu.Dockerfile: -------------------------------------------------------------------------------- 1 | From uhub.service.ucloud.cn/uaishare/cpu_uaitrain_ubuntu-16.04_python-3.6_tensorflow-1.9.0:v1.0 2 | RUN pip install tqdm 3 | ENV LANG C.UTF-8 4 | ADD ./code/ /data/ 5 | -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn_chinese/crnn-gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | From uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-3.6_tensorflow-1.9.0:v1.0 2 | RUN pip install tqdm 3 | ENV LANG C.UTF-8 4 | ADD ./code/ /data/ 5 | -------------------------------------------------------------------------------- /examples/tensorflow/train/crnn_chinese/crnn-multi-gpu.Dockerfile: -------------------------------------------------------------------------------- 1 | From uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-3.6_tensorflow-1.9.0:v1.0 2 | RUN pip install tqdm 3 | ENV LANG C.UTF-8 4 | ADD ./code/ /data/ 5 | ADD ./code_multi/tools/ /data/tools/ 6 | 7 | -------------------------------------------------------------------------------- /examples/tensorflow/train/deep-speech/deepspeech.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-14.04_python-2.7.6_tensorflow-1.4.0:v1.0 2 | 3 | RUN pip install progressbar2 pysftp sox python_speech_features pyxdg bs4 six -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com 4 | RUN pip install paramiko==2.1.1 5 | 6 | ADD ./DeepSpeech /data/DeepSpeech 7 | -------------------------------------------------------------------------------- /examples/tensorflow/train/east/east-dist.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-3.6_tensorflow-1.6.0:v1.0 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y python-opencv python-tk 5 | 6 | RUN pip install shapely tqdm -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com 7 | 8 | ADD ./EAST/ /data/ 9 | -------------------------------------------------------------------------------- /examples/tensorflow/train/east/east.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-2.7.6_tensorflow-1.5.0:v1.0 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y python-opencv python-tk 5 | 6 | RUN pip install shapely -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com 7 | 8 | ADD ./EAST/ /data/ -------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/code/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | 
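Both copies of global_configuration/config.py above (the inference one and the training one) expose their settings through an easydict instance, as the in-file comment notes ("Consumers can get config by: from config import cfg"). A minimal usage sketch, assuming the global_configuration package is on the import path (which is how the CRNN inference code imports it); the printed values are those of the training copy:

```python
# Minimal sketch: reading and overriding the easydict-based CRNN config.
# Assumes global_configuration is importable, as in crnn_multi_inference.py above.
from global_configuration.config import cfg

print(cfg.TRAIN.EPOCHS, cfg.TRAIN.LEARNING_RATE)   # 40000 0.1 in the training copy
print(cfg.TEST.BATCH_SIZE)                         # 32

# easydict allows attribute-style overrides before the values are used.
cfg.TRAIN.BATCH_SIZE = 64
```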
-------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/code/align/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/facenet/code/align/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/code/align/det1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/facenet/code/align/det1.npy -------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/code/align/det2.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/facenet/code/align/det2.npy -------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/code/align/det3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/facenet/code/align/det3.npy -------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/code/models/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | -------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/facenet.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-3.6_tensorflow-1.7.0:v1.0 2 | 3 | COPY code/ /data/ 4 | ADD lr-data/ /data/lr-data/ -------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/lr-data/learning_rate_retrain_tripletloss.txt: -------------------------------------------------------------------------------- 1 | # Learning rate schedule 2 | # Maps an epoch number to a learning rate 3 | 0: 0.1 4 | 300: 0.01 5 | 400: 0.001 6 | 1000: 0.0001 -------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/lr-data/learning_rate_schedule_classifier_casia.txt: -------------------------------------------------------------------------------- 1 | # Learning rate schedule 2 | # Maps an epoch number to a learning rate 3 | 0: 0.05 4 | 60: 0.005 5 | 80: 0.0005 6 | 91: -1 7 | -------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/lr-data/learning_rate_schedule_classifier_msceleb.txt: -------------------------------------------------------------------------------- 1 | # Learning rate schedule 2 | # Maps an epoch number to a learning rate 3 | 0: 0.1 4 | 150: 0.01 5 | 180: 0.001 6 | 251: 0.0001 -------------------------------------------------------------------------------- /examples/tensorflow/train/facenet/lr-data/learning_rate_schedule_classifier_vggface2.txt: -------------------------------------------------------------------------------- 1 | # Learning rate schedule 2 | # Maps an epoch number to a learning rate 3 | 0: 
0.05 4 | 100: 0.005 5 | 200: 0.0005 6 | 276: -1 -------------------------------------------------------------------------------- /examples/tensorflow/train/im2txt/im2txt/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/im2txt/im2txt/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/train/im2txt/im2txt/ops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/im2txt/im2txt/ops/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/train/im2txt/uaitrain.Dockerfile: -------------------------------------------------------------------------------- 1 | From uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-2.7_tensorflow-1.7.0:v1.0 2 | 3 | RUN pip install -U nltk 4 | CMD python -m nltk.downloader punkt 5 | 6 | ADD im2txt /data/im2txt/ 7 | ADD ./train.py /data/ 8 | 9 | -------------------------------------------------------------------------------- /examples/tensorflow/train/imagenet/code/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/imagenet/code/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/train/mnist_summary_1.1/data/t10k-images-idx3-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/mnist_summary_1.1/data/t10k-images-idx3-ubyte.gz -------------------------------------------------------------------------------- /examples/tensorflow/train/mnist_summary_1.1/data/t10k-labels-idx1-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/mnist_summary_1.1/data/t10k-labels-idx1-ubyte.gz -------------------------------------------------------------------------------- /examples/tensorflow/train/mnist_summary_1.1/data/train-images-idx3-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/mnist_summary_1.1/data/train-images-idx3-ubyte.gz -------------------------------------------------------------------------------- /examples/tensorflow/train/mnist_summary_1.1/data/train-labels-idx1-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/mnist_summary_1.1/data/train-labels-idx1-ubyte.gz -------------------------------------------------------------------------------- /examples/tensorflow/train/object-detection/uaitrain.Dockerfile: -------------------------------------------------------------------------------- 1 | From uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-2.7.6_tensorflow-1.6.0:v1.0 2 | 3 | RUN apt-get 
update 4 | RUN apt-get install python-tk -y 5 | 6 | ADD ./research /data/ 7 | 8 | RUN cd /data/ && python setup.py install && cd slim && python setup.py install 9 | -------------------------------------------------------------------------------- /examples/tensorflow/train/retrain/code/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/retrain/code/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/train/retrain/uaitrain_v2.Dockerfile: -------------------------------------------------------------------------------- 1 | From uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-2.7_tensorflow-1.7.0:v1.0 2 | 3 | RUN pip install tensorflow-hub 4 | 5 | ADD ./code/ /data/ 6 | -------------------------------------------------------------------------------- /examples/tensorflow/train/slim/inference/slim.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "slimModel", 5 | "main_file": "sliminfer" 6 | }, 7 | "tensorflow" : { 8 | "model_dir" : "/data/output" 9 | } 10 | }, 11 | "infor" : { 12 | "preprocessing_name" : "None", 13 | "model_name" : "vgg_19", 14 | "eval_image_size" : "None" 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /examples/tensorflow/train/slim/inference/slim_infer.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7.6_tensorflow-1.6.0:v1.2 2 | 3 | EXPOSE 8080 4 | ADD ./slim/. /ai-ucloud-client-django/. 
5 | ADD ./sliminfer.py /ai-ucloud-client-django/sliminfer.py 6 | ADD ./slim.conf /ai-ucloud-client-django/conf.json 7 | ADD ./checkpoint_dir/ /data/output/ 8 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/conf.json 9 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi 10 | 11 | -------------------------------------------------------------------------------- /examples/tensorflow/train/slim/train/slim.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-2.7.6_tensorflow-1.5_models:v1.8.0 2 | 3 | COPY ./slim/ /data/ 4 | -------------------------------------------------------------------------------- /examples/tensorflow/train/text-classification-ch/code/helper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/tensorflow/train/text-classification-ch/code/helper/__init__.py -------------------------------------------------------------------------------- /examples/tensorflow/train/text-classification-ch/code/helper/copy_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # copy MAXCOUNT files from each directory 4 | 5 | MAXCOUNT=6500 6 | 7 | for category in $( ls THUCNews); do 8 | echo item: $category 9 | 10 | dir=THUCNews/$category 11 | newdir=data/thucnews/$category 12 | if [ -d $newdir ]; then 13 | rm -rf $newdir 14 | mkdir $newdir 15 | fi 16 | 17 | COUNTER=1 18 | for i in $(ls $dir); do 19 | cp $dir/$i $newdir 20 | if [ $COUNTER -ge $MAXCOUNT ] 21 | then 22 | echo finished 23 | break 24 | fi 25 | let COUNTER=COUNTER+1 26 | done 27 | 28 | done 29 | -------------------------------------------------------------------------------- /examples/tensorflow/train/text-classification-ch/text-cnn-rnn.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/gpu_uaitrain_ubuntu-16.04_python-2.7_tensorflow-1.7.0:v1.0 2 | 3 | COPY code/ /data/ -------------------------------------------------------------------------------- /examples/xgboost/inference/binary_classification/0002.model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/examples/xgboost/inference/binary_classification/0002.model -------------------------------------------------------------------------------- /examples/xgboost/inference/binary_classification/binary.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM uhub.service.ucloud.cn/uaishare/cpu_uaiservice_ubuntu-16.04_python-2.7.6_xgboost-0.7:v1.0 2 | 3 | EXPOSE 8080 4 | ADD ./binary_classification/ "/ai-ucloud-client-django/" 5 | ENV UAI_SERVICE_CONFIG /ai-ucloud-client-django/xgboost_binary.conf 6 | CMD cd /ai-ucloud-client-django && gunicorn -c gunicorn.conf.py httpserver.wsgi -------------------------------------------------------------------------------- /examples/xgboost/inference/binary_classification/xgboost_binary.conf: -------------------------------------------------------------------------------- 1 | { 2 | "http_server" : { 3 | "exec" : { 4 | "main_class": "BinaryClassModel", 5 | "main_file": "binary" 6 | }, 7 | "xgboost" : { 8 | "model_name" : "0002.model" 9 | } 10 | } 11 | } 12 | 
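For illustration, here is a minimal sketch of the inference entry point that xgboost_binary.conf above points at ("main_file": "binary", "main_class": "BinaryClassModel"). The real binary.py is not reproduced in this excerpt; the sketch only assumes the XGBoostUcloudModel base class shown below under uai/arch/xgboost_model.py plus the public xgboost API, and the request-parsing details (file-like payloads carrying comma-separated feature vectors) are assumptions for illustration.

import numpy as np
import xgboost as xgb

from uai.arch.xgboost_model import XGBoostUcloudModel

class BinaryClassModel(XGBoostUcloudModel):
    """Hypothetical binary classifier matching "main_class" in xgboost_binary.conf."""

    def __init__(self, conf):
        super(BinaryClassModel, self).__init__(conf)

    def load_model(self):
        # self.model_name is parsed from the conf by XGBoostUcloudModel ("0002.model")
        self.bst = xgb.Booster(model_file=self.model_name)

    def execute(self, data, batch_size):
        # Assumption: each element of `data` is a file-like request payload holding
        # one comma-separated feature vector.
        results = []
        for i in range(batch_size):
            body = data[i].read()
            if isinstance(body, bytes):
                body = body.decode('utf-8')
            feats = np.array([[float(x) for x in body.strip().split(',')]])
            prob = self.bst.predict(xgb.DMatrix(feats))[0]
            results.append(str(prob))
        return results

Since binary.Dockerfile above ADDs the whole binary_classification/ directory into /ai-ucloud-client-django/, the 0002.model file named by "model_name" ends up next to the entry file in the serving container.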
-------------------------------------------------------------------------------- /uai/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | UCloud ai SDK for python 5 | """ 6 | 7 | __version__ = '1.0.0' -------------------------------------------------------------------------------- /uai/api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/api/__init__.py -------------------------------------------------------------------------------- /uai/api/auth_uai_service_client.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class AuthUAISrvServiceClientApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "AuthUAISrvServiceClient" 21 | 22 | def __init__(self, public_key, private_key, service_id, client_id, project_id='', region='', zone=''): 23 | super(AuthUAISrvServiceClientApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(AuthUAISrvServiceClientApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['ServiceID'] = service_id 26 | self.cmd_params['ClientID'] = client_id 27 | 28 | def _check_args(self, params): 29 | if params['ServiceID'] == '': 30 | return False 31 | if params['ClientID'] == '': 32 | return False 33 | return True 34 | 35 | def call_api(self): 36 | succ, self.rsp = super(AuthUAISrvServiceClientApiOp, self).call_api() 37 | return succ, self.rsp -------------------------------------------------------------------------------- /uai/api/delete_uai_service.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class DeleteUAIServiceApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "DeleteUAIService" 21 | 22 | def __init__(self, public_key, private_key, service_id, srv_version='', project_id='', region='', zone=''): 23 | super(DeleteUAIServiceApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(DeleteUAIServiceApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['ServiceID'] = service_id 26 | self.cmd_params['SrvVersion'] = srv_version 27 | 28 | def _check_args(self, params): 29 | if params['ServiceID'] == '': 30 | return False 31 | return True 32 | 33 | def call_api(self): 34 | succ, self.rsp = super(DeleteUAIServiceApiOp, self).call_api() 35 | return succ, self.rsp 36 | -------------------------------------------------------------------------------- /uai/api/delete_uai_srv_client.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class DeleteUAISrvServiceClientApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "DeleteUAISrvServiceClient" 21 | 22 | def __init__(self, public_key, private_key, service_id, client_id, project_id='', region='', zone=''): 23 | super(DeleteUAISrvServiceClientApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(DeleteUAISrvServiceClientApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['ServiceID'] = service_id 26 | self.cmd_params['ClientID'] = client_id 27 | 28 | def _check_args(self, params): 29 | if params['ServiceID'] == '': 30 | return False 31 | if params['ClientID'] == '': 32 | return False 33 | return True 34 | 35 | def call_api(self): 36 | succ, self.rsp = super(DeleteUAISrvServiceClientApiOp, self).call_api() 37 | return succ, self.rsp -------------------------------------------------------------------------------- /uai/api/get_uai_available_base_img.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class GetUAISrvAvailableBaseImageApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "GetUAISrvAvailableBaseImage" 21 | 22 | def __init__(self, public_key, private_key, project_id='', region='', zone='' ): 23 | super(GetUAISrvAvailableBaseImageApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(GetUAISrvAvailableBaseImageApiOp, self).PARAMS_DEFAULT_REGION 25 | 26 | def _check_args(self, params): 27 | return True 28 | 29 | def call_api(self): 30 | succ, self.rsp = super(GetUAISrvAvailableBaseImageApiOp, self).call_api() 31 | return succ, self.rsp 32 | -------------------------------------------------------------------------------- /uai/api/get_uai_available_env_pkg.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class GetUAIAvailableEnvPkgApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "GetUAIAvailableEnvPkg" 21 | 22 | def __init__(self, public_key, private_key, pkg_type='', project_id='', region='', zone=''): 23 | super(GetUAIAvailableEnvPkgApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(GetUAIAvailableEnvPkgApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['PkgType'] = pkg_type 26 | 27 | def _check_args(self, params): 28 | return True 29 | 30 | def call_api(self): 31 | succ, self.rsp = super(GetUAIAvailableEnvPkgApiOp, self).call_api() 32 | return succ, self.rsp 33 | -------------------------------------------------------------------------------- /uai/api/get_uai_available_srv_type.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class GetUAISrvAvailableServiceTypeApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "GetUAISrvAvailableServiceType" 21 | 22 | def __init__(self, public_key, private_key, project_id='', region='', zone='' ): 23 | super(GetUAISrvAvailableServiceTypeApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(GetUAISrvAvailableServiceTypeApiOp, self).PARAMS_DEFAULT_REGION 25 | 26 | def _check_args(self, params): 27 | return True 28 | 29 | def call_api(self): 30 | succ, self.rsp = super(GetUAISrvAvailableServiceTypeApiOp, self).call_api() 31 | return succ, self.rsp 32 | -------------------------------------------------------------------------------- /uai/api/get_uai_service_list.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class GetUAIServiceListApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "GetUAIServiceList" 21 | 22 | def __init__(self, public_key, private_key, project_id='', region='', zone='', service_id='',offset=0, limit=0): 23 | super(GetUAIServiceListApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(GetUAIServiceListApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['ServiceID'] = service_id 26 | self.cmd_params['Offset'] = offset 27 | self.cmd_params['Limit'] = limit 28 | 29 | def _check_args(self, params): 30 | return 31 | 32 | def call_api(self): 33 | succ, self.rsp = super(GetUAIServiceListApiOp, self).call_api() 34 | return succ, self.rsp 35 | -------------------------------------------------------------------------------- /uai/api/get_uai_srv_available_resource.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class GetUAISrvAvailableResourceApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "GetUAISrvAvailableResource" 21 | 22 | def __init__(self, public_key, private_key,service_type, project_id='', region='', zone=''): 23 | super(GetUAISrvAvailableResourceApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(GetUAISrvAvailableResourceApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['ServiceTypeId'] = service_type 26 | 27 | def _check_args(self, params): 28 | if params['ServiceTypeId'] == '': 29 | return False 30 | return True 31 | 32 | def call_api(self): 33 | succ, self.rsp = super(GetUAISrvAvailableResourceApiOp, self).call_api() 34 | return succ, self.rsp 35 | -------------------------------------------------------------------------------- /uai/api/get_uai_srv_client_list.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class GetUAISrvServiceClientListApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "GetUAISrvServiceClientList" 21 | 22 | def __init__(self, public_key, private_key, service_id, offset=0, limit=0, project_id='', region='', zone=''): 23 | super(GetUAISrvServiceClientListApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(GetUAISrvServiceClientListApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['ServiceID'] = service_id 26 | self.cmd_params['Offset'] = offset 27 | self.cmd_params['Limit'] = limit 28 | 29 | def _check_args(self, params): 30 | if params['ServiceID'] == '': 31 | return False 32 | return True 33 | 34 | def call_api(self): 35 | succ, self.rsp = super(GetUAISrvServiceClientListApiOp, self).call_api() 36 | return succ, self.rsp -------------------------------------------------------------------------------- /uai/api/get_uai_srv_real_time_metric.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class GetUAISrvRealTimeMetricApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "GetUAISrvRealTimeMetric" 21 | 22 | def __init__(self, public_key, private_key, service_id, srv_version='', project_id='', region='', zone=''): 23 | super(GetUAISrvRealTimeMetricApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(GetUAISrvRealTimeMetricApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['ServiceID'] = service_id 26 | self.cmd_params['SrvVersion'] = srv_version 27 | 28 | def _check_args(self, params): 29 | if params['ServiceID'] == '': 30 | return False 31 | return True 32 | 33 | def call_api(self): 34 | succ, self.rsp = super(GetUAISrvRealTimeMetricApiOp, self).call_api() 35 | return succ, self.rsp -------------------------------------------------------------------------------- /uai/api/modify_uai_srv_name.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class ModifyUAISrvNameApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "ModifyUAISrvName" 21 | 22 | def __init__(self, public_key, private_key, service_id, srv_name, project_id='', region='', zone=''): 23 | super(ModifyUAISrvNameApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(ModifyUAISrvNameApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['ServiceID'] = service_id 26 | self.cmd_params['SrvName'] = srv_name 27 | 28 | def _check_args(self, params): 29 | if params['ServiceID'] == '': 30 | return False 31 | if params['SrvName'] == '': 32 | return False 33 | return True 34 | 35 | def call_api(self): 36 | succ, self.rsp = super(ModifyUAISrvNameApiOp, self).call_api() 37 | return succ, self.rsp 38 | -------------------------------------------------------------------------------- /uai/api/start_uai_service.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class StartUAIServiceApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "StartUAIService" 21 | 22 | def __init__(self, public_key, private_key, service_id, srv_version, project_id='', region='', zone=''): 23 | super(StartUAIServiceApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(StartUAIServiceApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['ServiceID'] = service_id 26 | self.cmd_params['SrvVersion'] = srv_version 27 | 28 | def _check_args(self, params): 29 | if params['ServiceID'] == '': 30 | return False 31 | if params['SrvVersion'] == '': 32 | return False 33 | return True 34 | 35 | def call_api(self): 36 | succ, self.rsp = super(StartUAIServiceApiOp, self).call_api() 37 | return succ, self.rsp 38 | -------------------------------------------------------------------------------- /uai/api/stop_uai_service.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.api.base_api import BaseUaiServiceApiOp 17 | 18 | class StopUAIServiceApiOp(BaseUaiServiceApiOp): 19 | 20 | ACTION_NAME = "StopUAIService" 21 | 22 | def __init__(self, public_key, private_key, service_id, srv_version='', project_id='', region='', zone=''): 23 | super(StopUAIServiceApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone) 24 | self.cmd_params['Region'] = region if region != '' else super(StopUAIServiceApiOp, self).PARAMS_DEFAULT_REGION 25 | self.cmd_params['ServiceID'] = service_id 26 | self.cmd_params['SrvVersion'] = srv_version 27 | 28 | def _check_args(self, params): 29 | if params['ServiceID'] == '': 30 | return False 31 | return True 32 | 33 | def call_api(self): 34 | succ, self.rsp = super(StopUAIServiceApiOp, self).call_api() 35 | return succ, self.rsp 36 | -------------------------------------------------------------------------------- /uai/arch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/arch/__init__.py -------------------------------------------------------------------------------- /uai/arch/base_model.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | import os 17 | from uai.arch_conf.base_conf import ArchJsonConf 18 | from uai.arch_conf.base_conf import ArchJsonConfLoader 19 | 20 | class AiUcloudModel(object): 21 | """ 22 | Base class struct for user defined AI Model 23 | """ 24 | def __init__(self, conf, model_type): 25 | """ 26 | Args: 27 | conf: key/val object for AI architecture specific config 28 | model_type: str, the model type 29 | """ 30 | self.conf = conf 31 | self.model_type = model_type 32 | self._parse_conf(conf) 33 | 34 | def _parse_conf(self, conf): 35 | arch_json_conf_loader = ArchJsonConfLoader(conf) 36 | self.main_file = arch_json_conf_loader.get_main_file 37 | self.main_class = arch_json_conf_loader.get_main_class 38 | 39 | def load_model(self): 40 | pass 41 | 42 | def execute(self, data, batch_size): 43 | pass 44 | -------------------------------------------------------------------------------- /uai/arch/tf_model.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.arch.base_model import AiUcloudModel 17 | from uai.arch_conf.tf_conf import TFJsonConf 18 | from uai.arch_conf.tf_conf import TFJsonConfLoader 19 | 20 | class TFAiUcloudModel(AiUcloudModel): 21 | """ 22 | Base model class for user defined Tensorflow Model 23 | """ 24 | def __init__(self, conf=None, model_type='tensorflow'): 25 | super(TFAiUcloudModel, self).__init__(conf, model_type) 26 | self.output = {} 27 | self._parse_conf(conf) 28 | self.model = self.load_model() 29 | 30 | def _parse_conf(self, conf): 31 | """ 32 | Parse Tensorflow related config 33 | Args: 34 | conf: key/val object for AI architecture specific config 35 | """ 36 | tf_json_conf_loader = TFJsonConfLoader(conf) 37 | self.model_dir = tf_json_conf_loader.get_model_dir() 38 | 39 | def load_model(self): 40 | pass 41 | 42 | def execute(self, data, batch_size): 43 | pass 44 | -------------------------------------------------------------------------------- /uai/arch/xgboost_model.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | from uai.arch.base_model import AiUcloudModel 17 | from uai.arch_conf.xgboost_conf import XGBoostJsonConf 18 | from uai.arch_conf.xgboost_conf import XGBoostJsonConfLoader 19 | 20 | class XGBoostUcloudModel(AiUcloudModel): 21 | """ 22 | Base model class for user defined XGBoost Model 23 | """ 24 | def __init__(self, conf=None, model_type='xgboost'): 25 | super(XGBoostUcloudModel, self).__init__(conf, model_type) 26 | self.output = {} 27 | self._parse_conf(conf) 28 | self.load_model() 29 | 30 | def _parse_conf(self, conf): 31 | """ 32 | Parse XGBoost related config 33 | Args: 34 | conf: key/val object for AI architecture specific config 35 | """ 36 | json_conf_loader = XGBoostJsonConfLoader(conf) 37 | self.model_name = json_conf_loader.get_model_name() 38 | 39 | def load_model(self): 40 | pass 41 | 42 | def execute(self, data, batch_size): 43 | pass 44 | -------------------------------------------------------------------------------- /uai/arch_conf/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/arch_conf/__init__.py -------------------------------------------------------------------------------- /uai/contrib/README.md: -------------------------------------------------------------------------------- 1 | # UAI contrib 2 | 3 | The contrib directory contains project directories, each of which is maintained separately. It provides useful tools for different purposes. 4 | 5 | ## media 6 | Includes libs for video processing 7 | 8 | ## image 9 | Includes libs for image formatting, such as formatting a PIL image object into a JSON object 10 | -------------------------------------------------------------------------------- /uai/contrib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/contrib/__init__.py -------------------------------------------------------------------------------- /uai/contrib/media/README.md: -------------------------------------------------------------------------------- 1 | # UAI Contrib Media 2 | 3 | Provides basic media-related functions for AI components 4 | 5 | ## ffmpeg 6 | 7 | It provides basic operations: 8 | 9 | - Video frame extraction at a specified rate 10 | - Video stream frame extraction at a specified rate -------------------------------------------------------------------------------- /uai/contrib/media/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/contrib/media/__init__.py -------------------------------------------------------------------------------- /uai/operation/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /uai/operation/create_uaiservice/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/create_uaiservice/__init__.py -------------------------------------------------------------------------------- /uai/operation/delete_uaiservice/__init__.py:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/delete_uaiservice/__init__.py -------------------------------------------------------------------------------- /uai/operation/deploy_uaiservice/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/deploy_uaiservice/__init__.py -------------------------------------------------------------------------------- /uai/operation/deploy_uaiservice_docker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/deploy_uaiservice_docker/__init__.py -------------------------------------------------------------------------------- /uai/operation/get_real_time_metric/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/get_real_time_metric/__init__.py -------------------------------------------------------------------------------- /uai/operation/list_uaiservice/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/list_uaiservice/__init__.py -------------------------------------------------------------------------------- /uai/operation/list_uaiversion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/list_uaiversion/__init__.py -------------------------------------------------------------------------------- /uai/operation/modify_node_count/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/modify_node_count/__init__.py -------------------------------------------------------------------------------- /uai/operation/modify_node_range/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/modify_node_range/__init__.py -------------------------------------------------------------------------------- /uai/operation/modify_service_name/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/modify_service_name/__init__.py -------------------------------------------------------------------------------- /uai/operation/modify_version_memo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/modify_version_memo/__init__.py -------------------------------------------------------------------------------- /uai/operation/modify_version_weight/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/modify_version_weight/__init__.py -------------------------------------------------------------------------------- /uai/operation/pack/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/pack/__init__.py -------------------------------------------------------------------------------- /uai/operation/pack/caffe_pack_op.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.operation.pack.base_pack_op import UaiServicePackOp 17 | from uai.operation.tar.caffe_tar_op import UaiServiceCaffeTarOp 18 | 19 | class UaiServiceCaffePackOp(UaiServicePackOp, UaiServiceCaffeTarOp): 20 | """ Caffe Pack Tool Class 21 | """ 22 | def __init__(self, parser): 23 | self.platform = 'caffe' 24 | self.pack_source = True 25 | 26 | super(UaiServiceCaffePackOp, self).__init__(parser) 27 | 28 | def _add_args(self): 29 | super(UaiServiceCaffePackOp, self)._add_args() 30 | 31 | def _parse_args(self, args): 32 | super(UaiServiceCaffePackOp, self)._parse_args(args) 33 | -------------------------------------------------------------------------------- /uai/operation/pack/keras_pack_op.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | from uai.operation.pack.base_pack_op import UaiServicePackOp 17 | from uai.operation.tar.keras_tar_op import UaiServiceKerasTarOp 18 | 19 | class UaiServiceKerasPackOp(UaiServicePackOp, UaiServiceKerasTarOp): 20 | """ Keras Pack Tool Class 21 | """ 22 | def __init__(self, parser): 23 | self.platform = 'keras' 24 | self.pack_source = True 25 | 26 | super(UaiServiceKerasPackOp, self).__init__(parser) 27 | 28 | def _add_args(self): 29 | super(UaiServiceKerasPackOp, self)._add_args() 30 | 31 | def _parse_args(self, args): 32 | super(UaiServiceKerasPackOp, self)._parse_args(args) 33 | -------------------------------------------------------------------------------- /uai/operation/pack/mxnet_pack_op.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.operation.pack.base_pack_op import UaiServicePackOp 17 | from uai.operation.tar.mxnet_tar_op import UaiServiceMxnetTarOp 18 | 19 | class UaiServiceMxnetPackOp(UaiServicePackOp, UaiServiceMxnetTarOp): 20 | """ MXnet Pack Tool Class 21 | """ 22 | def __init__(self, parser): 23 | self.platform = 'mxnet' 24 | self.pack_source = True 25 | 26 | super(UaiServiceMxnetPackOp, self).__init__(parser) 27 | 28 | def _add_args(self): 29 | super(UaiServiceMxnetPackOp, self)._add_args() 30 | 31 | def _parse_args(self, args): 32 | super(UaiServiceMxnetPackOp, self)._parse_args(args) 33 | -------------------------------------------------------------------------------- /uai/operation/pack/tf_pack_op.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | from uai.operation.pack.base_pack_op import UaiServicePackOp 17 | from uai.operation.tar.tf_tar_op import UaiServiceTFTarOp 18 | 19 | class UaiServiceTFPackOp(UaiServicePackOp, UaiServiceTFTarOp): 20 | """ TF Pack Tool Class 21 | """ 22 | def __init__(self, parser): 23 | self.platform = 'tensorflow' 24 | self.pack_source = True 25 | 26 | super(UaiServiceTFPackOp, self).__init__(parser) 27 | 28 | def _add_args(self): 29 | super(UaiServiceTFPackOp, self)._add_args() 30 | 31 | def _parse_args(self, args): 32 | super(UaiServiceTFPackOp, self)._parse_args(args) 33 | -------------------------------------------------------------------------------- /uai/operation/packdocker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/packdocker/__init__.py -------------------------------------------------------------------------------- /uai/operation/packdocker/caffe_packdocker_op.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.operation.tar.caffe_tar_op import UaiServiceCaffeTarOp 17 | from uai.operation.packdocker.base_packdocker_op import UaiServiceDockerPackOp 18 | 19 | class UaiServiceCaffeDockerPackOp(UaiServiceDockerPackOp, UaiServiceCaffeTarOp): 20 | """ 21 | Caffe Docker Image Pack Tool Class 22 | """ 23 | def __init__(self, parser): 24 | super(UaiServiceCaffeDockerPackOp, self).__init__(parser) 25 | self.platform = 'caffe' 26 | 27 | def _add_args(self): 28 | super(UaiServiceCaffeDockerPackOp, self)._add_args() 29 | self._add_model_args(self.parser) 30 | 31 | def _parse_args(self, args): 32 | super(UaiServiceCaffeDockerPackOp, self)._parse_args(args) 33 | self._parse_model_args(args) 34 | -------------------------------------------------------------------------------- /uai/operation/packdocker/keras_packdocker_op.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | from uai.operation.tar.keras_tar_op import UaiServiceKerasTarOp 17 | from uai.operation.packdocker.base_packdocker_op import UaiServiceDockerPackOp 18 | 19 | class UaiServiceKerasDockerPackOp(UaiServiceDockerPackOp, UaiServiceKerasTarOp): 20 | """ 21 | Keras Docker Image Pack Tool Class 22 | """ 23 | 24 | def __init__(self, parser): 25 | super(UaiServiceKerasDockerPackOp, self).__init__(parser) 26 | self.platform = 'keras' 27 | 28 | def _add_args(self): 29 | super(UaiServiceKerasDockerPackOp, self)._add_args() 30 | self._add_model_args(self.parser) 31 | 32 | def _parse_args(self, args): 33 | super(UaiServiceKerasDockerPackOp, self)._parse_args(args) 34 | self._parse_model_args(args) 35 | -------------------------------------------------------------------------------- /uai/operation/packdocker/mxnet_packdocker_op.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uai.operation.tar.mxnet_tar_op import UaiServiceMxnetTarOp 17 | from uai.operation.packdocker.base_packdocker_op import UaiServiceDockerPackOp 18 | 19 | class UaiServiceMxnetDockerPackOp(UaiServiceDockerPackOp, UaiServiceMxnetTarOp): 20 | """ 21 | Mxnet Docker Image Pack Tool Class 22 | """ 23 | def __init__(self, parser): 24 | super(UaiServiceMxnetDockerPackOp, self).__init__(parser) 25 | self.platform = 'mxnet' 26 | 27 | def _add_args(self): 28 | super(UaiServiceMxnetDockerPackOp, self)._add_args() 29 | self._add_model_args(self.parser) 30 | 31 | def _parse_args(self, args): 32 | super(UaiServiceMxnetDockerPackOp, self)._parse_args(args) 33 | self._parse_model_args(args) 34 | -------------------------------------------------------------------------------- /uai/operation/packdocker/tf_packdocker_op.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | from uai.operation.tar.tf_tar_op import UaiServiceTFTarOp 17 | from uai.operation.packdocker.base_packdocker_op import UaiServiceDockerPackOp 18 | 19 | class UaiServiceTFDockerPackOp(UaiServiceDockerPackOp, UaiServiceTFTarOp): 20 | """ 21 | TF Docker Image Pack Tool Class 22 | """ 23 | def __init__(self, parser): 24 | super(UaiServiceTFDockerPackOp, self).__init__(parser) 25 | self.platform = 'tensorflow' 26 | 27 | def _add_args(self): 28 | super(UaiServiceTFDockerPackOp, self)._add_args() 29 | 30 | def _parse_args(self, args): 31 | super(UaiServiceTFDockerPackOp, self)._parse_args(args) 32 | self._parse_model_args(args) 33 | -------------------------------------------------------------------------------- /uai/operation/start_uaiservice/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/start_uaiservice/__init__.py -------------------------------------------------------------------------------- /uai/operation/stop_uaiservice/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/stop_uaiservice/__init__.py -------------------------------------------------------------------------------- /uai/operation/tar/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/operation/tar/__init__.py -------------------------------------------------------------------------------- /uai/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai/utils/__init__.py -------------------------------------------------------------------------------- /uai/utils/utils_ufs.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | import re 17 | 18 | UFS_MOUNT_POINT_FORMAT = r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\/ufs-\w+' 19 | UFS_PATH_FORMAT = r'(\w+\/)+' 20 | 21 | def concat_ufs_path(path, mount_point): 22 | mount_point_pattern = re.compile(UFS_MOUNT_POINT_FORMAT) 23 | path_pattern = re.compile(UFS_PATH_FORMAT) 24 | 25 | if mount_point_pattern.match(mount_point) is None: 26 | raise RuntimeError("UFS mount point should be in format x.x.x.x:/ufs-xxx") 27 | 28 | if path_pattern.match(path) is None: 29 | raise RuntimeError("UFS path should match xxx/xxx/") 30 | 31 | return mount_point + '/' + path 32 | -------------------------------------------------------------------------------- /uai_tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uai_tools/__init__.py -------------------------------------------------------------------------------- /uaitrain/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/__init__.py -------------------------------------------------------------------------------- /uaitrain/api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/api/__init__.py -------------------------------------------------------------------------------- /uaitrain/arch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/arch/__init__.py -------------------------------------------------------------------------------- /uaitrain/arch/caffe/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/arch/caffe/__init__.py -------------------------------------------------------------------------------- /uaitrain/arch/keras/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/arch/keras/__init__.py -------------------------------------------------------------------------------- /uaitrain/arch/mxnet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/arch/mxnet/__init__.py -------------------------------------------------------------------------------- /uaitrain/arch/pytorch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/arch/pytorch/__init__.py -------------------------------------------------------------------------------- /uaitrain/arch/tensorflow/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/arch/tensorflow/__init__.py 
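A quick illustration of concat_ufs_path from uai/utils/utils_ufs.py listed above. The mount point and path values below are made up for the example; the expected formats come straight from the error messages in that file (mount point "x.x.x.x:/ufs-xxx", path "xxx/xxx/"):

    # Example values only; any mount point/path matching the documented formats works.
    from uai.utils.utils_ufs import concat_ufs_path

    print(concat_ufs_path('imagenet/train/', '10.9.8.7:/ufs-abc123'))
    # -> 10.9.8.7:/ufs-abc123/imagenet/train/

    # A malformed mount point (or path) raises RuntimeError with a format hint.
    try:
        concat_ufs_path('imagenet/train/', 'ufs-abc123')
    except RuntimeError as e:
        print(e)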
-------------------------------------------------------------------------------- /uaitrain/arch_conf/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/arch_conf/__init__.py -------------------------------------------------------------------------------- /uaitrain/arch_conf/tf_conf.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/arch_conf/tf_conf.py -------------------------------------------------------------------------------- /uaitrain/cmd/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/cmd/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/create_train_job/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/create_train_job/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/delete_train_job/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/delete_train_job/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/get_log_topic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/get_log_topic/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/get_realtime_log/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/get_realtime_log/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/get_tensorboard_url/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/get_tensorboard_url/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/get_train_job_conf/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/get_train_job_conf/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/info_train_job/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/info_train_job/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/list_bill_info/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/list_bill_info/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/list_train_job/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/list_train_job/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/pack_docker_image/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/pack_docker_image/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/pack_docker_image/pytorch_pack_op.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uaitrain.operation.pack_docker_image.base_pack_op import BaseUAITrainDockerImagePackOp 17 | 18 | 19 | class PytorchUAITrainDockerImagePackOp(BaseUAITrainDockerImagePackOp): 20 | def __init__(self, parser): 21 | super(PytorchUAITrainDockerImagePackOp, self).__init__(parser) 22 | self.ai_arch = "pytorch" 23 | 24 | def _gen_gpu_docker_cmd(self, pycmd): 25 | gpu_docker_cmd = "sudo nvidia-docker run -it " + \ 26 | "-v " + self.test_data_path + ":" + "/data/data " + \ 27 | "-v " + self.test_output_path + ":" + "/data/output " + \ 28 | self.user_gpu_image + " " + "/bin/bash -c " + \ 29 | "\"cd /data && /usr/bin/python " + pycmd + " " + "--num_gpus=1 --work_dir=/data --data_dir=/data/data --output_dir=/data/output --log_dir=/data/output\"" 30 | return gpu_docker_cmd 31 | -------------------------------------------------------------------------------- /uaitrain/operation/pack_docker_image/tf_pack_op.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from uaitrain.operation.pack_docker_image.base_pack_op import BaseUAITrainDockerImagePackOp 17 | 18 | 19 | class TensorFlowUAITrainDockerImagePackOp(BaseUAITrainDockerImagePackOp): 20 | """TensorFlow UAI Train Docker Image Pack Tool Class""" 21 | def __init__(self, parser): 22 | super(TensorFlowUAITrainDockerImagePackOp, self).__init__(parser) 23 | self.ai_arch = "tensorflow" 24 | 25 | def _gen_gpu_docker_cmd(self, pycmd): 26 | gpu_docker_cmd = "sudo nvidia-docker run -it " + \ 27 | "-v " + self.test_data_path + ":" + "/data/data " + \ 28 | "-v " + self.test_output_path + ":" + "/data/output " + \ 29 | self.user_gpu_image + " " + "/bin/bash -c " + \ 30 | "\"cd /data && /usr/bin/python " + pycmd + " " + "--num_gpus=1 --work_dir=/data --data_dir=/data/data --output_dir=/data/output --log_dir=/data/output\"" 31 | return gpu_docker_cmd 32 | -------------------------------------------------------------------------------- /uaitrain/operation/predict_train_job/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/predict_train_job/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/rename_train_job/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/rename_train_job/__init__.py -------------------------------------------------------------------------------- /uaitrain/operation/stop_train_job/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain/operation/stop_train_job/__init__.py -------------------------------------------------------------------------------- /uaitrain_tool/README.md: -------------------------------------------------------------------------------- 1 | # Uaitrain tools 2 | ## Basic tools 3 | 1. base_tools.py general uaitrain tool 4 | 2. tf/tf_tools.py uaitrain tool for tensorflow only 5 | 3. caffe/caffe_tools.py uaitrain tool for caffe only 6 | 4. keras/keras_tools.py uaitrain tool for keras only 7 | 5. mxnet/mxnet_tools.py uaitrain tool for mxnet only 8 | 6. pytorch/pytorch_tools.py uaitrain tool for pytorch only 9 | 10 | For more details, please see: https://docs.ucloud.cn/ai/uai-train/guide/scripts 11 | 12 | ## Helper tools 13 | 1. split_tool.py, helps split a large file into small chunks, especially for caffe lmdb files. Since the UAI Train platform has to download large files from ufile, it is better to split such a file up front, upload the chunks to ufile, and merge them back before training (uploading and downloading a single 100GB+ file is fragile).
14 | 15 | How to split: 16 | python split_tool.py file-dir file-to-split target-dir (see the example invocation below) 17 | 18 | How to merge: 19 | See uaitrain/arch/caffe/train_large_file.py 20 | -------------------------------------------------------------------------------- /uaitrain_tool/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/uaitrain_tool/__init__.py -------------------------------------------------------------------------------- /uaitrain_tool/mxnet/mpi_tool/README.md: -------------------------------------------------------------------------------- 1 | # MPI Wrapper for MXNet Dist Train 2 | We provide mpi\_wrapper.py for users who want to run distributed training based on MPI. You do not need to configure all the nodes yourself or use the launch.py provided by the original MXNet to run an MXNet distributed training job. 3 | 4 | The UAI Train platform automatically configures the GPU nodes and launches worker/ps tasks on each node for you. (Please refer to https://docs.ucloud.cn/ai/uai-train/introduction/distructive-job/mxnet for more details.) For MPI tasks, i.e., jobs that must be launched with **mpirun**, we provide mpi\_wrapper.py to set up the MPI environment and run the job for you. The entrypoint of your task in UAI Train should therefore be mpi\_wrapper.py. A distributed job can be run as: 5 | 6 | /data/mpi_wrapper.py --command="python /data/train_imagenet.py --batch-size=128 --num-layers=50 --image-shape=3,299,299 --num-epochs=1 --kv-store=dist_sync --data-train=/data/data/train-rec --data-val=/data/data/val-rec --data-nthreads=8" 7 | 8 | mpi\_wrapper.py accepts a single argument, **command**, which is the actual entrypoint of your program, e.g., **python /data/train\_imagenet.py**; all following arguments are attached to your entrypoint. It also provides an extra argument, --uai-hosts=ip1,ip2,ip3,ip4, from which you can get all hosts in the cluster. 9 | -------------------------------------------------------------------------------- /uaitrain_tool/split_tool.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
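As a concrete illustration of the split step described in the Helper tools section above: on the command line this is python split_tool.py /data/lmdb/ train_lmdb.mdb /data/lmdb_parts/ (hypothetical paths), which boils down to the call sketched below; merging the chunks back is handled by uaitrain/arch/caffe/train_large_file.py, as the README notes. A minimal sketch, assuming the uai package is on PYTHONPATH:

    # Hypothetical paths; this mirrors what split_tool.py (listed below) does,
    # writing the source file out in 128MB chunks.
    from uai.utils.super_large_file import split

    parts = split('/data/lmdb/', 'train_lmdb.mdb', '/data/lmdb_parts/')
    print('Split finished: {0}, parts are in /data/lmdb_parts/'.format(parts))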
14 | # ============================================================================== 15 | 16 | import os 17 | import sys 18 | from uai.utils.super_large_file import split 19 | 20 | if __name__ == '__main__': 21 | if len(sys.argv) == 2 and sys.argv[1] == '--help': 22 | print('Usage: split_tool.py file-dir file-to-split target-dir') 23 | else: 24 | if len(sys.argv) != 4: 25 | print('Usage: split_tool.py file-dir file-to-split target-dir') 26 | sys.exit(-1) 27 | else: 28 | fromdir = sys.argv[1] # args in cmdline 29 | fromfile = sys.argv[2] 30 | todir = sys.argv[3] 31 | 32 | print('Splitting {0}{1} into {2} in 128MB chunks'.format(fromdir, fromfile, todir)) 33 | 34 | try: 35 | parts = split(fromdir, fromfile, todir) 36 | except Exception as e: 37 | print('Error during split: {0}'.format(e)) 38 | else: 39 | print('Split finished: {0}, parts are in {1}'.format(parts, todir)) 40 | 41 | -------------------------------------------------------------------------------- /ufile/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/ufile/__init__.py -------------------------------------------------------------------------------- /ufile/api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/ufile/api/__init__.py -------------------------------------------------------------------------------- /ufile/operation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/uai-sdk/0ccaf0dd3495a5d0a459c061f31a0aa6aee2307a/ufile/operation/__init__.py -------------------------------------------------------------------------------- /ufile/operation/operation.py: -------------------------------------------------------------------------------- 1 | class Operation(object): 2 | 3 | def __init__(self, parser): 4 | self.parser = parser 5 | self._add_args(self.parser) 6 | pass 7 | 8 | def _add_args(self, parser): 9 | pass 10 | #add other params in subclasses# 11 | 12 | def _parse_args(self): 13 | pass 14 | # add other params in subclasses# 15 | 16 | def cmd_run(self, params): 17 | self.params = params 18 | self._parse_args() 19 | pass 20 | # add other params in subclasses# -------------------------------------------------------------------------------- /ufile/ufile_tool.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from ufile.operation.upload_single_op import UploadUfileSingleOp 3 | from ufile.operation.download_single_op import DownloadUfileSingleOp 4 | from ufile.operation.download_batch_op import DownloadUfileBatchOp 5 | from ufile.operation.upload_batch_op import UploadUfileBatchOp 6 | 7 | def parse_param(subparsers): 8 | download_batch_parser = subparsers.add_parser('download_batch', help='Batch download from ufile') 9 | download_batch_op = DownloadUfileBatchOp(download_batch_parser) 10 | 11 | upload_batch_parser = subparsers.add_parser('upload_batch', help='Batch upload to ufile') 12 | upload_batch_op = UploadUfileBatchOp(upload_batch_parser) 13 | 14 | download_single_parser = subparsers.add_parser('download_single', help='Download single file from ufile') 15 | download_single_op = DownloadUfileSingleOp(download_single_parser) 16 | 17 | upload_single_parser = subparsers.add_parser('upload_single', help='Upload single file to ufile') 18 | upload_single_op =
UploadUfileSingleOp(upload_single_parser) 19 | 20 | cmd_op_dict = { 21 | 'download_batch': download_batch_op, 22 | 'upload_batch': upload_batch_op, 23 | 'download_single': download_single_op, 24 | 'upload_single': upload_single_op 25 | } 26 | 27 | return cmd_op_dict 28 | 29 | if __name__ == '__main__': 30 | parser = argparse.ArgumentParser( 31 | description='Ufile Commander', 32 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 33 | subparsers = parser.add_subparsers(dest='commands', help='commands') 34 | cmd_op_dict = parse_param(subparsers) 35 | 36 | params = vars(parser.parse_args()) 37 | cmd_op_dict.get(params['commands']).cmd_run(params) 38 | --------------------------------------------------------------------------------
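The Operation base class in ufile/operation/operation.py and the subcommand dispatch in ufile/ufile_tool.py above follow a simple pattern: each op registers its flags on the subparser it is given at construction time, and cmd_run() stores the parsed params so the op can pull out what it needs in _parse_args(). The sketch below shows how a new op would plug into that pattern; HelloOp and its --name flag are hypothetical and not part of the SDK (the real ops such as DownloadUfileSingleOp define their own flags, which are not shown in this listing):

    # A minimal sketch, assuming the ufile package is importable as shown above.
    import argparse
    from ufile.operation.operation import Operation

    class HelloOp(Operation):
        def _add_args(self, parser):
            # Register this op's own flags on the subparser passed to __init__.
            parser.add_argument('--name', type=str, default='ufile')

        def _parse_args(self):
            # Pull parsed values out of the params dict stored by cmd_run().
            self.name = self.params['name']

        def cmd_run(self, params):
            # Base cmd_run() saves params and calls _parse_args() before we act on them.
            super(HelloOp, self).cmd_run(params)
            print('hello, {0}'.format(self.name))

    if __name__ == '__main__':
        parser = argparse.ArgumentParser(description='Operation pattern demo')
        subparsers = parser.add_subparsers(dest='commands', help='commands')
        hello_op = HelloOp(subparsers.add_parser('hello', help='Toy subcommand'))

        params = vars(parser.parse_args())
        if params['commands'] == 'hello':
            hello_op.cmd_run(params)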