# Flattened repo dump reconstructed as clean Python: list_models.py followed
# by main.py.  (The inference_results.json payload that follows is data.)
# The original's unused `import requests` was dropped (main.py is fully
# visible here and never uses it).

# ---------------------------------------------------------------------------
# list_models.py — enumerate every Hugging Face model whose serverless
# inference state is "warm" and cache the raw API records to models_data.json.
# ---------------------------------------------------------------------------
import json
import time

MODELS_FILE = 'models_data.json'
RESULTS_FILE = 'inference_results.json'

# The only pipeline tags this script knows how to exercise via InferenceClient.
SUPPORTED_TASKS = ('text-generation', 'summarization', 'question-answering')


def fetch_warm_models():
    """Return one dict per "warm" model from the public HF models API.

    NOTE(review): `paginate` is imported from the *private* module
    ``huggingface_hub.utils._pagination`` and can break on any minor
    release — prefer ``HfApi.list_models(inference="warm")`` long term.
    The import is function-local so this module stays importable even
    where huggingface_hub is not installed.
    """
    from huggingface_hub.utils._pagination import paginate

    endpoint = "https://huggingface.co/api/models"
    params = {"inference": "warm"}
    # paginate() yields individual model records across all pages; the
    # original named each yielded item `page`, which was misleading.
    return list(paginate(endpoint, params=params, headers={}))


def save_models(models, path=MODELS_FILE):
    """Persist the fetched model records (indent=4, as in the original)."""
    with open(path, 'w') as f:
        json.dump(models, f, indent=4)


# ---------------------------------------------------------------------------
# main.py — run one canned request against each cached "warm" model and
# record success/failure plus the response to inference_results.json.
# ---------------------------------------------------------------------------

def load_models(path=MODELS_FILE):
    """Load the model records produced by list_models.py."""
    with open(path) as f:
        return json.load(f)


def _call_model(client, task, model_id):
    """Send one fixed sample request for *task* to *model_id*.

    Raises whatever the InferenceClient raises; the caller records it.
    """
    if task == 'text-generation':
        return client.text_generation("Hello, how are you?", model=model_id)
    if task == 'summarization':
        return client.summarization("Your long text to summarize here", model=model_id)
    # Only remaining supported task: question-answering.
    return client.question_answering(
        "What is the capital of France?",
        "France is a country in Europe.",
        model=model_id,
    )


def make_record(model_id, task, success, error_message, response):
    """Build one JSON-serializable result row.

    BUG FIX: summarization / question-answering calls return client
    objects rather than plain strings; the original passed them straight
    to ``json.dump``, which raises TypeError.  Anything that is not
    already a JSON-native type is stringified here.
    """
    if response is not None and not isinstance(response, (str, int, float, bool, list, dict)):
        response = str(response)
    return {
        'model_id': model_id,
        'task': task,
        'success': success,
        'error_message': error_message,
        'response': response,
    }


def run_inference(models_data):
    """Probe each supported model once and return the list of result rows."""
    from huggingface_hub import InferenceClient

    client = InferenceClient()
    results = []
    for model in models_data:
        # .get() instead of [] — fixes a KeyError on API records that
        # lack a pipeline_tag (common in the raw payload).
        model_id = model.get('id')
        task = model.get('pipeline_tag')
        print(f"Processing model: {model_id}")
        # BUG FIX: the original bound success / error_message / response
        # only inside the supported-task branch, so building the result
        # dict risked a NameError for other tasks.  Skip them explicitly.
        if task not in SUPPORTED_TASKS:
            continue
        try:
            response = _call_model(client, task, model_id)
            success, error_message = True, None
        except Exception as e:  # best-effort survey: record the failure, move on
            success, error_message, response = False, str(e), None
        results.append(make_record(model_id, task, success, error_message, response))
        time.sleep(1)  # crude throttle to avoid rate limiting
    return results


def main():
    """Fetch warm models, cache them, probe each one, save the results."""
    save_models(fetch_warm_models())
    results = run_inference(load_models())
    with open(RESULTS_FILE, 'w') as f:
        json.dump(results, f, indent=2)
    print("Inference completed. Results saved to inference_results.json")


if __name__ == "__main__":
    main()
I'm a big nerd" 22 | }, 23 | { 24 | "model_id": "EleutherAI/gpt-neo-2.7B", 25 | "task": "text-generation", 26 | "success": true, 27 | "error_message": null, 28 | "response": "" 29 | }, 30 | { 31 | "model_id": "microsoft/DialoGPT-medium", 32 | "task": "text-generation", 33 | "success": true, 34 | "error_message": null, 35 | "response": "" 36 | }, 37 | { 38 | "model_id": "Gustavosta/MagicPrompt-Stable-Diffusion", 39 | "task": "text-generation", 40 | "success": true, 41 | "error_message": null, 42 | "response": " Wearing uniform! intricate, elegant, highly detailed, digital painting, artstation, concept art, smooth, sharp focus, illustration, art by artgerm and greg rutkowski and alphonse mucha\n" 43 | }, 44 | { 45 | "model_id": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", 46 | "task": "text-generation", 47 | "success": true, 48 | "error_message": null, 49 | "response": "\n\nI am a large language model trained to answer questions and provide information. How can I help you today?\n\nI can answer questions on a wide range of topics, from science and technology to history and literature. I can also provide information on a variety of subjects, from current events to health and wellness.\n\nFeel free to ask me anything, and I will do my best to provide helpful and accurate answers.\n\nThank you for your interest in me, and I look" 50 | }, 51 | { 52 | "model_id": "bigcode/starcoder", 53 | "task": "text-generation", 54 | "success": true, 55 | "error_message": null, 56 | "response": " I am fine, thank you. And you?')\nprint(result)\n\n# 2. \u8bed\u97f3\u8f6c\u6587\u672c\nresult = aipSpeech.asr(audio_file, 'wav', 16000, {\n 'dev_pid': 1537,\n})\nprint(result)\n\n# 3. 
\u6587\u672c\u8f6c\u8bed\u97f3\nresult = aipSpeech.synthesis('\u4f60\u597d\uff0c\u6b22\u8fce\u4f7f\u7528\u8bed\u97f3\u5408\u6210" 57 | }, 58 | { 59 | "model_id": "tiiuae/falcon-7b-instruct", 60 | "task": "text-generation", 61 | "success": true, 62 | "error_message": null, 63 | "response": "\nI'm doing well, thank you. I'm glad to hear that you're doing well too.\nI'm doing well, thank you. I'm glad to hear that you're doing well too.\nI'm doing well, thank you. I'm glad to hear that you're doing well too.\nI'm doing well, thank you. I'm glad to hear that you're doing well too.\nI'm" 64 | }, 65 | { 66 | "model_id": "JackFram/llama-160m", 67 | "task": "text-generation", 68 | "success": true, 69 | "error_message": null, 70 | "response": " I am looking for a good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good good" 71 | }, 72 | { 73 | "model_id": "meta-llama/Llama-2-7b-chat-hf", 74 | "task": "text-generation", 75 | "success": true, 76 | "error_message": null, 77 | "response": " I'm doing well, thanks for asking! *smiles* It's great to see you here.\n\nI hope you're having a wonderful day and enjoying the beautiful weather. *glances around* It's such a lovely day outside, isn't it?\n\nI'm just here, enjoying the peace and quiet of this lovely park. 
*sighs contentedly* It's so nice to have a moment to myself," 78 | }, 79 | { 80 | "model_id": "codellama/CodeLlama-7b-hf", 81 | "task": "text-generation", 82 | "success": true, 83 | "error_message": null, 84 | "response": "\n\nI'm fine, thank you.\n\nI'm fine, thank you.\n\nI'm fine, thank you.\n\nI'm fine, thank you.\n\nI'm fine, thank you.\n\nI'm fine, thank you.\n\nI'm fine, thank you.\n\nI'm fine, thank you.\n\nI'm fine, thank you.\n\nI'm fine, thank you." 85 | }, 86 | { 87 | "model_id": "codellama/CodeLlama-13b-hf", 88 | "task": "text-generation", 89 | "success": true, 90 | "error_message": null, 91 | "response": "\n\nI am fine.\n\nHow is your day?\n\nIt is good.\n\nHow is your day?\n\nIt is good.\n\nHow is your day?\n\nIt is good.\n\nHow is your day?\n\nIt is good.\n\nHow is your day?\n\nIt is good.\n\nHow is your day?\n\nIt is good.\n\nHow is your day?\n\nIt is good.\n\nHow" 92 | }, 93 | { 94 | "model_id": "codellama/CodeLlama-34b-Instruct-hf", 95 | "task": "text-generation", 96 | "success": true, 97 | "error_message": null, 98 | "response": "\nI'm fine, thank you.\nI'm glad to hear that.\nI'm not very well.\nI'm sorry to hear that.\nI'm a little tired.\nI'm a little hungry.\nI'm a little thirsty.\nI'm a little cold.\nI'm a little hot.\nI'm a little tired.\nI'm a little bored.\nI'm a" 99 | }, 100 | { 101 | "model_id": "mistralai/Mistral-7B-v0.1", 102 | "task": "text-generation", 103 | "success": true, 104 | "error_message": null, 105 | "response": "\n\nI\u2019m doing well, thank you.\n\nI\u2019m writing this post from my bed, which is a rare occurrence. 
I\u2019m usually writing from my desk, but I\u2019m feeling a little under the weather today.\n\nI\u2019m not sure if it\u2019s the weather, or if I\u2019m coming down with something, but I\u2019m feeling a little under the weather.\n\nI\u2019m not sure if it\u2019s the weather," 106 | }, 107 | { 108 | "model_id": "mistralai/Mistral-7B-Instruct-v0.1", 109 | "task": "text-generation", 110 | "success": true, 111 | "error_message": null, 112 | "response": "\n\nI'm doing well, thank you for asking. I'm excited to be here and help you with any questions you may have. Is there anything specific you would like to know?" 113 | } 114 | ] --------------------------------------------------------------------------------