├── README.md ├── modelfile.sh ├── download.sh ├── hllama-gradio.py └── hub-hllama-server.py /README.md: -------------------------------------------------------------------------------- 1 | To run: 2 | 3 | ``` 4 | ./download.sh llama3.2:1b 5 | ``` 6 | 7 | You may need to run `chmod +x download.sh` to make the script executable. 8 | 9 | To import and create a model file with ollama: 10 | 11 | ``` 12 | ./modelfile.sh bartowski/Qwen2.5-0.5B-Instruct-GGUF Qwen2.5-0.5B-Instruct-IQ2_M.gguf q05 13 | ``` -------------------------------------------------------------------------------- /modelfile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the correct number of arguments are provided 4 | if [ "$#" -ne 3 ]; then 5 | echo "Usage: $0 " 6 | exit 1 7 | fi 8 | 9 | # Assign the input arguments to variables 10 | hf_repo="$1" 11 | hf_file="$2" 12 | llm_name="$3" 13 | 14 | # Download the file from the Hugging Face repository 15 | echo "Downloading file from Hugging Face repository..." 16 | huggingface-cli download "$hf_repo" "$hf_file" --local-dir . 17 | 18 | # Check if the download was successful 19 | if [ $? -ne 0 ]; then 20 | echo "Failed to download the file." 21 | exit 1 22 | fi 23 | 24 | # Create the Modelfile with the specified content 25 | echo "Creating Modelfile..." 26 | echo "FROM ./$hf_file" > Modelfile 27 | 28 | # Check if the file was created successfully 29 | if [ $? -ne 0 ]; then 30 | echo "Failed to create Modelfile." 31 | exit 1 32 | fi 33 | 34 | echo "Modelfile created successfully." 35 | 36 | # Create the LLM using ollama 37 | echo "Creating LLM with name $llm_name..." 38 | ollama create "$llm_name" -f Modelfile 39 | 40 | # Check if the LLM creation was successful 41 | if [ $? -ne 0 ]; then 42 | echo "Failed to create LLM." 43 | exit 1 44 | fi 45 | 46 | echo "LLM created successfully." -------------------------------------------------------------------------------- /download.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Function to download a file from the registry 4 | download_file() { 5 | local digest=$1 6 | local url="https://registry.ollama.ai/v2/library/${IMAGE}/blobs/${digest}" 7 | local file_name="blobs/${digest}" # Use the full digest as the file name 8 | 9 | # Create the directory if it doesn't exist 10 | mkdir -p $(dirname "$file_name") 11 | 12 | # Download the file 13 | echo "Downloading $url to $file_name" 14 | curl -L -o "$file_name" "$url" 15 | } 16 | 17 | # Check if an input is provided 18 | if [ -z "$1" ]; then 19 | echo "Usage: $0 image:tag" 20 | exit 1 21 | fi 22 | 23 | # Extract image and tag from the input 24 | IMAGE=$(echo "$1" | cut -d: -f1) 25 | TAG=$(echo "$1" | cut -d: -f2) 26 | 27 | # Construct the manifest URL 28 | MANIFEST_URL="https://registry.ollama.ai/v2/library/${IMAGE}/manifests/${TAG}" 29 | 30 | # Fetch the manifest JSON 31 | manifest_json=$(curl -s "$MANIFEST_URL") 32 | 33 | # Save the manifest JSON to a file 34 | echo "$manifest_json" > manifest 35 | 36 | # Check if the manifest JSON is empty or contains an error 37 | if [ -z "$manifest_json" ] || echo "$manifest_json" | grep -q '"errors":'; then 38 | echo "Failed to fetch the manifest for ${IMAGE}:${TAG}" 39 | exit 1 40 | fi 41 | 42 | # Extract the digest values from the JSON 43 | digests=$(echo "$manifest_json" | jq -r '.layers[].digest') 44 | 45 | # Download each file 46 | for digest in $digests; do 47 | download_file "$digest" 48 | done 49 | 50 | # Download the config file 51 | config_digest=$(echo "$manifest_json" | jq -r '.config.digest') 52 | download_file "$config_digest" -------------------------------------------------------------------------------- /hllama-gradio.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | import os 3 | import requests 4 | import json 5 | from huggingface_hub import HfApi 6 | 7 | def download_file(digest, image): 8 | url = f"https://registry.ollama.ai/v2/library/{image}/blobs/{digest}" 9 | file_name = f"blobs/{digest}" 10 | 11 | # Create the directory if it doesn't exist 12 | os.makedirs(os.path.dirname(file_name), exist_ok=True) 13 | 14 | # Download the file 15 | print(f"Downloading {url} to {file_name}") 16 | response = requests.get(url, allow_redirects=True) 17 | if response.status_code == 200: 18 | with open(file_name, 'wb') as f: 19 | f.write(response.content) 20 | else: 21 | print(f"Failed to download {url}") 22 | 23 | def fetch_manifest(image, tag): 24 | manifest_url = f"https://registry.ollama.ai/v2/library/{image}/manifests/{tag}" 25 | response = requests.get(manifest_url) 26 | if response.status_code == 200: 27 | return response.json() 28 | else: 29 | return None 30 | 31 | def upload_to_huggingface(repo_id, folder_path): 32 | api = HfApi() 33 | try: 34 | api.upload_folder( 35 | folder_path=folder_path, 36 | repo_id=repo_id, 37 | repo_type="model", 38 | ) 39 | return "Upload successful" 40 | except Exception as e: 41 | return f"Upload failed: {str(e)}" 42 | 43 | def process_image_tag(image_tag, repo_id): 44 | # Extract image and tag from the input 45 | image, tag = image_tag.split(':') 46 | 47 | # Fetch the manifest JSON 48 | manifest_json = fetch_manifest(image, tag) 49 | if not manifest_json or 'errors' in manifest_json: 50 | return f"Failed to fetch the manifest for {image}:{tag}" 51 | 52 | # Save the manifest JSON to the blobs folder 53 | manifest_file_path = "blobs/manifest.json" 54 | os.makedirs(os.path.dirname(manifest_file_path), exist_ok=True) 55 | with open(manifest_file_path, 'w') as f: 56 | json.dump(manifest_json, f) 57 | 58 | # Extract the digest values from the JSON 59 | digests = [layer['digest'] for layer in manifest_json.get('layers', [])] 60 | 61 | # Download each file 62 | for digest in digests: 63 | download_file(digest, image) 64 | 65 | # Download the config file 66 | config_digest = manifest_json.get('config', {}).get('digest') 67 | if config_digest: 68 | download_file(config_digest, image) 69 | 70 | # Upload to Hugging Face Hub 71 | upload_result = upload_to_huggingface(repo_id, 'blobs') 72 | 73 | # Delete the blobs folder 74 | try: 75 | os.rmtree('blobs') 76 | return f"Successfully fetched and downloaded files for {image}:{tag}\n{upload_result}\nBlobs folder deleted" 77 | except Exception as e: 78 | return f"Failed to delete blobs folder: {str(e)}" 79 | 80 | # Create the Gradio interface 81 | iface = gr.Interface( 82 | fn=process_image_tag, 83 | inputs=[ 84 | gr.Textbox(placeholder="Enter image:tag", label="Image and Tag"), 85 | gr.Textbox(placeholder="Enter Hugging Face repo ID", label="Hugging Face Repo ID") 86 | ], 87 | outputs=gr.Textbox(label="Result"), 88 | title="Registry File Downloader and Uploader", 89 | description="Enter the image and tag to download the corresponding files from the registry and upload them to the Hugging Face Hub." 90 | ) 91 | 92 | # Launch the Gradio app 93 | iface.launch() -------------------------------------------------------------------------------- /hub-hllama-server.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, redirect, request, Response 2 | import requests 3 | 4 | app = Flask(__name__) 5 | 6 | @app.route('/v2///blobs/', methods=['GET', "HEAD"]) 7 | def blobs(namespace, name, sha): 8 | oid = sha.split(':')[1] 9 | r = requests.get(f'https://huggingface.co/api/models/{namespace}/{name}/tree/main') 10 | result = r.json() 11 | 12 | resp = Response() 13 | for r in result: 14 | if oid == r['oid'] or ('lfs' in r and oid == r['lfs']['oid']): 15 | print(f'https://huggingface.co/{namespace}/{name}/resolve/main/{r["path"]}') 16 | resp.status_code = 307 17 | resp.headers['Location'] = f'https://huggingface.co/{namespace}/{name}/resolve/main/{r["path"]}' 18 | resp.headers['Content-Type'] = 'application/octet-stream' 19 | return resp 20 | 21 | @app.route('/v2///manifests/', methods=['GET']) 22 | def manifest(namespace, name, tag): 23 | r = requests.get(f'https://huggingface.co/api/models/{namespace}/{name}/tree/main') 24 | result = r.json() 25 | 26 | model = None 27 | model_size = 0 28 | config = None 29 | config_size = 0 30 | system = None 31 | system_size = 0 32 | template = None 33 | template_size = 0 34 | license = None 35 | license_size = 0 36 | 37 | for r in result: 38 | if r['path'] == 'config.json': 39 | config = r['oid'] 40 | config_size = r['size'] 41 | if r['path'] == 'system': 42 | system = r['oid'] 43 | system_size = r['size'] 44 | if r['path'].endswith('.gguf'): 45 | if 'lfs' in r: 46 | model = 'sha256:' + r['lfs']['oid'] 47 | model_size = r['lfs']['size'] 48 | else: 49 | model = 'sha1:' + r['oid'] 50 | model_size = r['size'] 51 | if r['path'] == 'template': 52 | template = r['oid'] 53 | template_size = r['size'] 54 | if r['path'] == 'license': 55 | license = r['oid'] 56 | license_size = r['size'] 57 | 58 | manifest = { 59 | "schemaVersion": 2, 60 | "mediaType": "application/vnd.docker.distribution.manifest.v2+json", 61 | "config": { 62 | "digest": f"sha1:{config}", 63 | "mediaType": "application/vnd.docker.container.image.v1+json", 64 | "size": config_size 65 | } if config else None, 66 | "layers": [] 67 | } 68 | 69 | if model: 70 | manifest["layers"].append({ 71 | "digest": model, 72 | "mediaType": "application/vnd.ollama.image.model", 73 | "size": model_size 74 | }) 75 | 76 | if system: 77 | manifest["layers"].append({ 78 | "digest": f"sha1:{system}", 79 | "mediaType": "application/vnd.ollama.image.system", 80 | "size": system_size 81 | }) 82 | 83 | if template: 84 | manifest["layers"].append({ 85 | "digest": f"sha1:{template}", 86 | "mediaType": "application/vnd.ollama.image.template", 87 | "size": template_size 88 | }) 89 | 90 | if license: 91 | manifest["layers"].append({ 92 | "digest": f"sha1:{license}", 93 | "mediaType": "application/vnd.ollama.image.license", 94 | "size": license_size 95 | }) 96 | print(manifest) 97 | return manifest 98 | 99 | app.run(debug=True, host='0.0.0.0', port=4242) --------------------------------------------------------------------------------