├── .gitignore ├── .vscode └── settings.json ├── README.md ├── blur ├── README.md ├── cog.yaml ├── examples │ ├── kodim24-blur.png │ └── kodim24.png └── predict.py ├── canary ├── README.md ├── cog.yaml └── predict.py ├── hello-concurrency ├── .dockerignore ├── .gitignore ├── README.md ├── cog.yaml └── predict.py ├── hello-context ├── .dockerignore ├── README.md ├── cog.yaml ├── predict.py └── requirements.txt ├── hello-image ├── README.md ├── cog.yaml ├── hello.webp └── predict.py ├── hello-replicate ├── .dockerignore ├── cog.yaml ├── main.py └── requirements.txt ├── hello-train ├── README.md ├── cog.yaml ├── predict.py └── train.py ├── hello-world ├── README.md ├── cog.yaml └── predict.py ├── notebook ├── README.md ├── cog.yaml ├── my_notebook.ipynb ├── my_notebook.py └── predict.py └── resnet ├── README.md ├── cat.png ├── cog.yaml └── predict.py /.gitignore: -------------------------------------------------------------------------------- 1 | blur/output.png 2 | .cog 3 | __pycache__ 4 | notebook/.ipynb_checkpoints 5 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.formatOnSave": true, 3 | "files.insertFinalNewline": true, 4 | "files.trimFinalNewlines": true, 5 | "[json]": { 6 | "editor.defaultFormatter": "esbenp.prettier-vscode" 7 | }, 8 | "[jsonc]": { 9 | "editor.defaultFormatter": "esbenp.prettier-vscode" 10 | }, 11 | "python.formatting.provider": "black", 12 | "python.languageServer": "Pylance", 13 | "[python]": { 14 | "editor.defaultFormatter": "ms-python.python" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Cog example models 2 | 3 | This repo contains example machine learning models you can use to try out [Cog](https://github.com/replicate/cog). 4 | 5 | Once you've got a working model and want to publish it so others can see it in action, check out [replicate.com/docs](https://replicate.com/docs). 6 | 7 | ## Examples in this repo 8 | 9 | - [blur](blur) 10 | - [canary](canary) 11 | - [hello-world](hello-world) 12 | - [notebook](notebook) 13 | - [resnet](resnet) 14 | - [hello-train](hello-train) 15 | 16 | ## Real world examples 17 | 18 | The models in this repo are small and contrived. Here are a few real-world examples: 19 | 20 | - https://github.com/andreasjansson/pretrained-gan-70s-scifi 21 | - https://github.com/minzwon/sota-music-tagging-models 22 | - https://github.com/orpatashnik/StyleCLIP 23 | - https://github.com/andreasjansson/InstColorization ([PR](https://github.com/ericsujw/InstColorization/pull/36)) 24 | - https://github.com/andreasjansson/SRResCGAN/tree/cog-config 25 | 26 | ## Support 27 | 28 | Having trouble getting a model working? Let us know and we'll help. If you encountered a problem with Cog, you can [file a GitHub issue](https://github.com/replicate/cog/issues). Otherwise [chat with us in Discord](https://discord.gg/replicate) or send us an email at [team@replicate.com](mailto:team@replicate.com). 29 | -------------------------------------------------------------------------------- /blur/README.md: -------------------------------------------------------------------------------- 1 | # Blur 2 | 3 | This model applies box blur to an input image. 
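Under the hood it is a thin wrapper around Pillow's `ImageFilter.BoxBlur` (see `predict.py` below). A minimal sketch of the same operation outside Cog — the file paths are only illustrative — looks like this:

```python
from PIL import Image, ImageFilter

im = Image.open("examples/kodim24.png")
blurred = im.filter(ImageFilter.BoxBlur(4))  # box blur with a 4-pixel radius
blurred.save("output.png")
```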
4 | 5 | ## Usage 6 | 7 | First, make sure you've got the [latest version of Cog](https://github.com/replicate/cog#install) installed. 8 | 9 | Build the image: 10 | 11 | ```sh 12 | cog build 13 | ``` 14 | 15 | Now you can run predictions on the model: 16 | 17 | ```sh 18 | cog predict -i image=@examples/kodim24.png -i blur=4 19 | 20 | cog predict -i image=@examples/kodim24.png -i blur=6 21 | ``` 22 | -------------------------------------------------------------------------------- /blur/cog.yaml: -------------------------------------------------------------------------------- 1 | build: 2 | python_version: "3.8" 3 | python_packages: 4 | - "pillow==8.2.0" 5 | system_packages: 6 | - "libpng-dev" 7 | - "libjpeg-dev" 8 | predict: "predict.py:Predictor" 9 | -------------------------------------------------------------------------------- /blur/examples/kodim24-blur.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/replicate/cog-examples/dd7699628fda562942f5b3891b2a9b8e0dc0f9c5/blur/examples/kodim24-blur.png -------------------------------------------------------------------------------- /blur/examples/kodim24.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/replicate/cog-examples/dd7699628fda562942f5b3891b2a9b8e0dc0f9c5/blur/examples/kodim24.png -------------------------------------------------------------------------------- /blur/predict.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | 3 | from cog import BasePredictor, Input, Path 4 | from PIL import Image, ImageFilter 5 | 6 | 7 | class Predictor(BasePredictor): 8 | def predict( 9 | self, 10 | image: Path = Input(description="Input image"), 11 | blur: float = Input(description="Blur radius", default=5), 12 | ) -> Path: 13 | if blur == 0: 14 | return input 15 | im = Image.open(str(image)) 16 | im = im.filter(ImageFilter.BoxBlur(blur)) 17 | out_path = Path(tempfile.mkdtemp()) / "out.png" 18 | im.save(str(out_path)) 19 | return out_path 20 | -------------------------------------------------------------------------------- /canary/README.md: -------------------------------------------------------------------------------- 1 | # Canary 2 | 3 | This simple model takes a string as input and returns a streaming string output. 4 | 5 | ## Usage 6 | 7 | First, make sure you've got the [latest version of Cog](https://github.com/replicate/cog#install) installed. 
8 | 9 | Build the container image: 10 | 11 | ```sh 12 | cog build 13 | ``` 14 | 15 | Now you can run predictions on the model: 16 | 17 | ```sh 18 | cog predict -i text=Athena 19 | 20 | cog predict -i text=Zeus 21 | ``` 22 | -------------------------------------------------------------------------------- /canary/cog.yaml: -------------------------------------------------------------------------------- 1 | build: 2 | python_version: "3.8" 3 | predict: "predict.py:Predictor" 4 | -------------------------------------------------------------------------------- /canary/predict.py: -------------------------------------------------------------------------------- 1 | from cog import BasePredictor, ConcatenateIterator, Input 2 | 3 | 4 | class Predictor(BasePredictor): 5 | def predict(self, text: str = Input(description="Text to prefix with 'hello there, '")) -> ConcatenateIterator[str]: 6 | yield "hello " 7 | yield "there, " 8 | yield text 9 | -------------------------------------------------------------------------------- /hello-concurrency/.dockerignore: -------------------------------------------------------------------------------- 1 | # The .dockerignore file excludes files from the container build process. 2 | # 3 | # https://docs.docker.com/engine/reference/builder/#dockerignore-file 4 | 5 | # Exclude Git files 6 | .git 7 | .github 8 | .gitignore 9 | 10 | # Exclude Python cache files 11 | __pycache__ 12 | .mypy_cache 13 | .pytest_cache 14 | .ruff_cache 15 | 16 | # Exclude Python virtual environment 17 | .venv 18 | venv 19 | -------------------------------------------------------------------------------- /hello-concurrency/.gitignore: -------------------------------------------------------------------------------- 1 | .venv 2 | honeycomb_token.key 3 | -------------------------------------------------------------------------------- /hello-concurrency/README.md: -------------------------------------------------------------------------------- 1 | # hello-concurrency 2 | 3 | This is an example Cog project that demonstrates the newly added concurrency support within 4 | cog >= 0.14.0. 5 | 6 | The key piece is the new `concurrency` field in the cog.yaml. 7 | 8 | ```yaml 9 | concurrency: 10 | max: 32 11 | ``` 12 | 13 | This combined with the async setup and predict methods in the predict.py allows Cog to run up to 14 | 32 concurrent predictions. If cog reaches the max concurrency threshold it will reject subsequent 15 | predictions with a `409 Conflict` response. 16 | 17 | ### Telemetry 18 | 19 | It also uses the open-telemetry package to demonstrate how to collect telemetry for your model. 20 | 21 | This requires a file named `honeycomb_token.key` to be included in the image build. 22 | 23 | It will then start sending events to the `cog-model` data source. You can configure this by 24 | editing the `OTEL_SERVICE_NAME`. If you use a custom endpoint this can be configured via `OTEL_EXPORTER_OTLP_ENDPOINT`. 25 | 26 | Lastly, there is a section in predict.py that can be uncommented to run telemetry locally and print events to the console for debugging. 
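Uncommented, that debugging section looks roughly like the sketch below (the collector endpoint is the example value used in `predict.py`; point it at your own OTLP collector if you have one):

```python
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "http://otel-collector.local-otel.orb.local:4318"
os.environ["OTEL_SDK_DISABLED"] = ""  # re-enable the SDK even when no Honeycomb token is present
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
```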
27 | -------------------------------------------------------------------------------- /hello-concurrency/cog.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for Cog ⚙️ 2 | # Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md 3 | build: 4 | gpu: false 5 | python_version: "3.12" 6 | python_packages: 7 | - "opentelemetry-api" 8 | - "opentelemetry-sdk" 9 | - "opentelemetry-exporter-otlp-proto-http" 10 | predict: "predict.py:Predictor" 11 | concurrency: 12 | max: 4 13 | -------------------------------------------------------------------------------- /hello-concurrency/predict.py: -------------------------------------------------------------------------------- 1 | # Prediction interface for Cog ⚙️ 2 | # https://github.com/replicate/cog/blob/main/docs/python.md 3 | 4 | import asyncio 5 | import logging 6 | import os 7 | import time 8 | import warnings 9 | 10 | from cog import ( 11 | AsyncConcatenateIterator, 12 | BasePredictor, 13 | ExperimentalFeatureWarning, 14 | Input, 15 | __version__, 16 | emit_metric, 17 | current_scope, 18 | ) 19 | from cog.types import Weights 20 | from opentelemetry import trace 21 | from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter 22 | from opentelemetry.sdk.resources import Resource 23 | from opentelemetry.sdk.trace import TracerProvider 24 | from opentelemetry.sdk.trace.export import ( 25 | BatchSpanProcessor, 26 | ) 27 | 28 | warnings.filterwarnings("ignore", category=ExperimentalFeatureWarning) 29 | warnings.filterwarnings("once", category=DeprecationWarning) 30 | 31 | logging.basicConfig( 32 | format="%(asctime)s %(levelname)-8s %(message)s", 33 | level=logging.INFO, 34 | datefmt="%Y-%m-%d %H:%M:%S", 35 | ) 36 | 37 | honeycomb_token = "" 38 | try: 39 | with open("./honeycomb_token.key", "r") as f: 40 | honeycomb_token = f.read().strip() 41 | except FileNotFoundError: 42 | pass 43 | 44 | if not honeycomb_token: 45 | os.environ["OTEL_SDK_DISABLED"] = "true" 46 | 47 | os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://api.honeycomb.io/" 48 | os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"x-honeycomb-team={honeycomb_token}" 49 | os.environ["OTEL_SERVICE_NAME"] = "cog-model" 50 | 51 | resource = Resource( 52 | attributes={"model.name": "replicate/hello-concurrency", "cog_version": __version__} 53 | ) 54 | provider = TracerProvider(resource=resource) 55 | provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter())) 56 | trace.set_tracer_provider(provider) 57 | tracer = trace.get_tracer("predict") 58 | 59 | # Local OTEL debugging 60 | # from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor 61 | 62 | # os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = http://otel-collector.local-otel.orb.local:4318 63 | # os.environ["OTEL_SDK_DISABLED"] = "" 64 | # provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) 65 | 66 | 67 | class Predictor(BasePredictor): 68 | async def setup(self, weights: Weights | None = None) -> None: 69 | with tracer.start_as_current_span("setup") as span: 70 | self._setup_context = span.get_span_context() 71 | span.set_attribute("model.weights", str(weights)) 72 | 73 | start_time = time.time() 74 | logging.info(f"starting setup: cog_version={__version__}") 75 | 76 | time.sleep(1) 77 | 78 | duration = time.time() - start_time 79 | logging.info(f"completed setup in {duration} seconds") 80 | span.set_attribute("model.setup_time_seconds", duration) 81 | 82 | async def predict( # pyright: ignore 83 | self, 
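# total: how many fruit names to stream back; interval: seconds to sleep between each one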
84 | total: int = Input(default=5), 85 | interval: int = Input(default=3), 86 | ) -> AsyncConcatenateIterator[str]: # pyright: ignore 87 | links = [] 88 | if setup_context := getattr(self, "_setup_context", None): 89 | links.append(trace.Link(setup_context)) 90 | 91 | with tracer.start_as_current_span("predict", links=links) as span: 92 | span.set_attribute("inputs.total", total) 93 | span.set_attribute("inputs.interval", interval) 94 | 95 | start_time = time.time() 96 | logging.info( 97 | f"starting prediction: cog_version={__version__} total={total} interval={interval}" 98 | ) 99 | 100 | """Run a single prediction on the model""" 101 | fruits = [ 102 | "Apple", 103 | "Banana", 104 | "Orange", 105 | "Grape", 106 | "Strawberry", 107 | "Mango", 108 | "Pineapple", 109 | "Blueberry", 110 | "Watermelon", 111 | "Peach", 112 | ][:total] 113 | 114 | for index, fruit in enumerate(fruits): 115 | if index + 1 == total: 116 | yield f"{fruit}" 117 | else: 118 | yield f"{fruit}\n" 119 | logging.info(f"output fruit: {fruit}") 120 | await asyncio.sleep(interval) 121 | 122 | logging.info(f"emit_metric: output_tokens={total}") 123 | emit_metric("output_tokens", total) 124 | current_scope().record_metric("output_tokens", total) 125 | span.set_attribute("metrics.output_tokens", total) 126 | 127 | duration = time.time() - start_time 128 | logging.info(f"completed prediction in {duration} seconds") 129 | span.set_attribute("model.predict_time_seconds", duration) 130 | -------------------------------------------------------------------------------- /hello-context/.dockerignore: -------------------------------------------------------------------------------- 1 | # The .dockerignore file excludes files from the container build process. 2 | # 3 | # https://docs.docker.com/engine/reference/builder/#dockerignore-file 4 | 5 | # Exclude Git files 6 | **/.git 7 | **/.github 8 | **/.gitignore 9 | 10 | # Exclude Python tooling 11 | .python-version 12 | 13 | # Exclude Python cache files 14 | __pycache__ 15 | .mypy_cache 16 | .pytest_cache 17 | .ruff_cache 18 | 19 | # Exclude Python virtual environment 20 | /venv 21 | -------------------------------------------------------------------------------- /hello-context/README.md: -------------------------------------------------------------------------------- 1 | hello-context 2 | ------------- 3 | 4 | A simple model that takes no inputs but will echo back any context provided with the prediction as the output. 5 | -------------------------------------------------------------------------------- /hello-context/cog.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for Cog ⚙️ 2 | # Reference: https://cog.run/yaml 3 | 4 | build: 5 | # set to true if your model requires a GPU 6 | gpu: false 7 | 8 | # a list of ubuntu apt packages to install 9 | # system_packages: 10 | # - "libgl1-mesa-glx" 11 | # - "libglib2.0-0" 12 | 13 | # python version in the form '3.11' or '3.11.4' 14 | python_version: "3.11" 15 | 16 | # path to a Python requirements.txt file 17 | python_requirements: requirements.txt 18 | 19 | # enable fast boots 20 | fast: false 21 | 22 | # commands run after the environment is setup 23 | # run: 24 | # - "echo env is ready!" 
25 | # - "echo another command if needed" 26 | 27 | # predict.py defines how predictions are run on your model 28 | predict: "predict.py:run" 29 | -------------------------------------------------------------------------------- /hello-context/predict.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | from cog import current_scope, Input, ExperimentalFeatureWarning 4 | 5 | warnings.filterwarnings(action="ignore", category=ExperimentalFeatureWarning) 6 | 7 | 8 | def run( 9 | text: str = Input(description="Example text input"), 10 | ) -> dict[str, dict[str, str]]: 11 | return {"inputs": {"text": text}, "context": current_scope().context} 12 | -------------------------------------------------------------------------------- /hello-context/requirements.txt: -------------------------------------------------------------------------------- 1 | # This is a normal Python requirements.txt file. 2 | 3 | # You can add dependencies directly from PyPI: 4 | # 5 | # numpy==1.26.4 6 | # torch==2.2.1 7 | # torchvision==0.17.1 8 | 9 | 10 | # You can also add Git repos as dependencies, but you'll need to add git to the system_packages list in cog.yaml: 11 | # 12 | # build: 13 | # system_packages: 14 | # - "git" 15 | # 16 | # Then you can use a URL like this: 17 | # 18 | # git+https://github.com/huggingface/transformers 19 | 20 | 21 | # You can also pin Git repos to a specific commit: 22 | # 23 | # git+https://github.com/huggingface/transformers@2d1602a 24 | -------------------------------------------------------------------------------- /hello-image/README.md: -------------------------------------------------------------------------------- 1 | # Hello image 2 | 3 | This simple model takes no input and returns a static image. 4 | 5 | ## Usage 6 | 7 | First, make sure you've got the [latest version of Cog](https://github.com/replicate/cog#install) installed. 8 | 9 | Build the container image: 10 | 11 | ```sh 12 | cog build 13 | ``` 14 | 15 | Now you can run predictions on the model: 16 | 17 | ```sh 18 | cog predict 19 | ``` 20 | -------------------------------------------------------------------------------- /hello-image/cog.yaml: -------------------------------------------------------------------------------- 1 | build: 2 | python_version: "3.11" 3 | predict: "predict.py:Predictor" 4 | -------------------------------------------------------------------------------- /hello-image/hello.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/replicate/cog-examples/dd7699628fda562942f5b3891b2a9b8e0dc0f9c5/hello-image/hello.webp -------------------------------------------------------------------------------- /hello-image/predict.py: -------------------------------------------------------------------------------- 1 | from cog import BasePredictor, Path 2 | 3 | class Predictor(BasePredictor): 4 | def predict(self) -> Path: 5 | return Path("hello.webp") 6 | -------------------------------------------------------------------------------- /hello-replicate/.dockerignore: -------------------------------------------------------------------------------- 1 | # The .dockerignore file excludes files from the container build process. 
2 | # 3 | # https://docs.docker.com/engine/reference/builder/#dockerignore-file 4 | 5 | # Exclude Git files 6 | **/.git 7 | **/.github 8 | **/.gitignore 9 | 10 | # Exclude Python tooling 11 | .python-version 12 | 13 | # Exclude Python cache files 14 | __pycache__ 15 | .mypy_cache 16 | .pytest_cache 17 | .ruff_cache 18 | 19 | # Exclude Python virtual environment 20 | /venv 21 | -------------------------------------------------------------------------------- /hello-replicate/cog.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for Cog ⚙️ 2 | # Reference: https://cog.run/yaml 3 | 4 | build: 5 | # set to true if your model requires a GPU 6 | gpu: false 7 | 8 | # a list of ubuntu apt packages to install 9 | # system_packages: 10 | # - "libgl1-mesa-glx" 11 | # - "libglib2.0-0" 12 | 13 | # python version in the form '3.11' or '3.11.4' 14 | python_version: "3.12" 15 | 16 | # path to a Python requirements.txt file 17 | python_requirements: requirements.txt 18 | 19 | # enable fast boots 20 | fast: false 21 | 22 | # commands run after the environment is setup 23 | # run: 24 | # - "echo env is ready!" 25 | # - "echo another command if needed" 26 | 27 | # predict.py defines how predictions are run on your model 28 | predict: "main.py:run" 29 | -------------------------------------------------------------------------------- /hello-replicate/main.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | import os 3 | import warnings 4 | from cog import Input, Path, ExperimentalFeatureWarning 5 | 6 | from replicate.client import Client 7 | 8 | warnings.filterwarnings("ignore", category=ExperimentalFeatureWarning) 9 | 10 | 11 | def run( 12 | image: Path = Input(description="Input image to test"), 13 | ) -> Path: 14 | replicate = Client() 15 | claude_prompt = """ 16 | You have been asked to generate a prompt for an image model that should re-create the 17 | image provided to you exactly. Please describe the provided image in great detail 18 | paying close attention to the contents, layout and style. 19 | """ 20 | prompt = replicate.run( 21 | "anthropic/claude-4-sonnet", input={"prompt": claude_prompt, "image": image} 22 | ) 23 | output = replicate.run( 24 | "black-forest-labs/flux-dev", input={"prompt": "".join(prompt)} 25 | ) 26 | 27 | with tempfile.TemporaryDirectory(delete=False) as tmpdir: 28 | dest_path = os.path.join(tmpdir, "output.webp") 29 | with open(dest_path, "wb") as file: 30 | file.write(output[0].read()) 31 | return Path(dest_path) 32 | -------------------------------------------------------------------------------- /hello-replicate/requirements.txt: -------------------------------------------------------------------------------- 1 | # This is a normal Python requirements.txt file. 
2 | 3 | # You can add dependencies directly from PyPI: 4 | # 5 | # numpy==1.26.4 6 | # torch==2.2.1 7 | # torchvision==0.17.1 8 | replicate>=1.0.7 9 | 10 | 11 | # You can also add Git repos as dependencies, but you'll need to add git to the system_packages list in cog.yaml: 12 | # 13 | # build: 14 | # system_packages: 15 | # - "git" 16 | # 17 | # Then you can use a URL like this: 18 | # 19 | # git+https://github.com/huggingface/transformers 20 | 21 | 22 | # You can also pin Git repos to a specific commit: 23 | # 24 | # git+https://github.com/huggingface/transformers@2d1602a 25 | -------------------------------------------------------------------------------- /hello-train/README.md: -------------------------------------------------------------------------------- 1 | # Hello, Train 🚂 2 | 3 | Cog's [training API](https://github.com/replicate/cog/blob/main/docs/training.md) allows you to define a fine-tuning interface for an existing Cog model, so users of the model can bring their own training data to create derivative fune-tuned models. Real-world examples of this API in use include [fine-tuning SDXL with images](https://replicate.com/blog/fine-tune-sdxl) or [fine-tuning Llama 2 with structured text](https://replicate.com/blog/fine-tune-llama-2). 4 | 5 | See the [Cog training reference docs](https://github.com/replicate/cog/blob/main/docs/training.md) for more details. 6 | 7 | This simple trainable model takes a string as input and returns a string as output. 8 | 9 | ## Training with Cog 10 | 11 | Then you can run it like this: 12 | 13 | ```console 14 | cog train -i prefix=hello 15 | ``` 16 | 17 | ## Creating new fine-tunes with Replicate's API 18 | 19 | Check out these guides to learn how to fine-tune models on Replicate: 20 | 21 | - [Fine-tune a language model](https://replicate.com/docs/guides/fine-tune-a-language-model) 22 | - [Fine-tune an image model](https://replicate.com/docs/guides/fine-tune-an-image-model) 23 | -------------------------------------------------------------------------------- /hello-train/cog.yaml: -------------------------------------------------------------------------------- 1 | build: 2 | python_version: "3.8" 3 | predict: "predict.py:Predictor" 4 | train: "train.py:train" 5 | -------------------------------------------------------------------------------- /hello-train/predict.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from cog import BasePredictor, Input, Path 3 | 4 | from typing import Optional 5 | 6 | 7 | class Predictor(BasePredictor): 8 | def setup(self, weights: Optional[Path] = None): 9 | if weights: 10 | self.prefix = requests.get(weights).text 11 | else: 12 | self.prefix = "hello" 13 | 14 | def predict( 15 | self, text: str = Input(description="Text to prefix with 'hello ' or weights") 16 | ) -> str: 17 | return self.prefix + " " + text 18 | -------------------------------------------------------------------------------- /hello-train/train.py: -------------------------------------------------------------------------------- 1 | from cog import BaseModel, Input, Path 2 | 3 | 4 | class TrainingOutput(BaseModel): 5 | weights: Path 6 | 7 | 8 | def train( 9 | prefix: str = Input(description="data you wish to save"), 10 | ) -> TrainingOutput: 11 | weights = Path("output.txt") 12 | with open(weights, "w") as f: 13 | f.write(prefix) 14 | 15 | return TrainingOutput(weights=weights) 16 | -------------------------------------------------------------------------------- /hello-world/README.md: 
-------------------------------------------------------------------------------- 1 | # Hello World 2 | 3 | This simple model takes a string as input and returns a string as output. 4 | 5 | ## Usage 6 | 7 | First, make sure you've got the [latest version of Cog](https://github.com/replicate/cog#install) installed. 8 | 9 | Build the container image: 10 | 11 | ```sh 12 | cog build 13 | ``` 14 | 15 | Now you can run predictions on the model: 16 | 17 | ```sh 18 | cog predict -i text=Athena 19 | 20 | cog predict -i text=Zeus 21 | ``` -------------------------------------------------------------------------------- /hello-world/cog.yaml: -------------------------------------------------------------------------------- 1 | build: 2 | python_version: "3.12" 3 | predict: "predict.py:Predictor" 4 | -------------------------------------------------------------------------------- /hello-world/predict.py: -------------------------------------------------------------------------------- 1 | from cog import BasePredictor, Input 2 | 3 | 4 | class Predictor(BasePredictor): 5 | def setup(self): 6 | self.prefix = "hello" 7 | 8 | def predict(self, text: str = Input(description="Text to prefix with 'hello '")) -> str: 9 | return self.prefix + " " + text 10 | -------------------------------------------------------------------------------- /notebook/README.md: -------------------------------------------------------------------------------- 1 | # notebook 2 | 3 | A simple example using a Jupyter Notebook with Cog 4 | 5 | ## Usage 6 | 7 | First, make sure you've got the [latest version of Cog](https://github.com/replicate/cog#install) installed. 8 | 9 | Build the image: 10 | 11 | ```sh 12 | cog build 13 | ``` 14 | 15 | Run the Jupyter Notebook server with Cog: 16 | 17 | ```sh 18 | cog run -p 8888 --debug jupyter notebook --allow-root --ip=0.0.0.0 --NotebookApp.token=mytoken 19 | ``` 20 | 21 | Copy the notebook URL to your browser (you can change the `mytoken` to your preferred token or have it autogenerated): 22 | 23 | ```sh 24 | http://127.0.0.1:8888/?token=mytoken 25 | ``` 26 | 27 | Save any changes you make to your notebook, then export it as a Python script: 28 | 29 | ```sh 30 | jupyter nbconvert --to script my_notebook.ipynb # creates my_notebook.py 31 | ``` 32 | 33 | Now you can run predictions on the model: 34 | 35 | ```sh 36 | cog predict -i name=Alice 37 | ``` 38 | -------------------------------------------------------------------------------- /notebook/cog.yaml: -------------------------------------------------------------------------------- 1 | build: 2 | gpu: false 3 | python_packages: 4 | - "jupyterlab==3.2.4" 5 | python_version: "3.9" 6 | 7 | predict: "predict.py:Predictor" 8 | -------------------------------------------------------------------------------- /notebook/my_notebook.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "4622e827", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "def say_hello(name):\n", 11 | " return f\"hello, {name}\"" 12 | ] 13 | } 14 | ], 15 | "metadata": { 16 | "kernelspec": { 17 | "display_name": "Python 3 (ipykernel)", 18 | "language": "python", 19 | "name": "python3" 20 | }, 21 | "language_info": { 22 | "codemirror_mode": { 23 | "name": "ipython", 24 | "version": 3 25 | }, 26 | "file_extension": ".py", 27 | "mimetype": "text/x-python", 28 | "name": "python", 29 | "nbconvert_exporter": "python", 30 | "pygments_lexer": "ipython3", 31 
| "version": "3.9.12" 32 | } 33 | }, 34 | "nbformat": 4, 35 | "nbformat_minor": 5 36 | } 37 | -------------------------------------------------------------------------------- /notebook/my_notebook.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # In[ ]: 5 | 6 | 7 | def say_hello(name): 8 | return f"hello, {name}" 9 | 10 | -------------------------------------------------------------------------------- /notebook/predict.py: -------------------------------------------------------------------------------- 1 | from cog import BasePredictor, Input 2 | 3 | import my_notebook 4 | 5 | 6 | class Predictor(BasePredictor): 7 | def setup(self): 8 | """Prepare the model so multiple predictions run efficiently (optional)""" 9 | 10 | def predict(self, name: str = Input(description="name of person to greet")) -> str: 11 | """Run a single prediction""" 12 | 13 | output = my_notebook.say_hello(name) 14 | return output 15 | -------------------------------------------------------------------------------- /resnet/README.md: -------------------------------------------------------------------------------- 1 | # resnet 2 | 3 | This model classifies images. 4 | 5 | ## Usage 6 | 7 | ✋ Note for M1 Mac users: This model uses TensorFlow, which does not currently work on M1 machines using Docker. See [replicate/cog#336](https://github.com/replicate/cog/issues/336) for more information. 8 | 9 | --- 10 | 11 | First, make sure you've got the [latest version of Cog](https://github.com/replicate/cog#install) installed. 12 | 13 | Download the pre-trained weights: 14 | 15 | ``` 16 | curl -O https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5 17 | ``` 18 | 19 | Build the image: 20 | 21 | ```sh 22 | cog build 23 | ``` 24 | 25 | Now you can run predictions on the model: 26 | 27 | ```sh 28 | cog predict -i image=@cat.png 29 | ``` -------------------------------------------------------------------------------- /resnet/cat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/replicate/cog-examples/dd7699628fda562942f5b3891b2a9b8e0dc0f9c5/resnet/cat.png -------------------------------------------------------------------------------- /resnet/cog.yaml: -------------------------------------------------------------------------------- 1 | build: 2 | python_version: "3.8" 3 | python_packages: 4 | - "pillow==9.1.0" 5 | - "tensorflow==2.8.0" 6 | predict: "predict.py:Predictor" 7 | -------------------------------------------------------------------------------- /resnet/predict.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | import numpy as np 4 | from cog import BasePredictor, Input, Path 5 | from tensorflow.keras.applications.resnet50 import ( 6 | ResNet50, 7 | decode_predictions, 8 | preprocess_input, 9 | ) 10 | from tensorflow.keras.preprocessing import image as keras_image 11 | 12 | 13 | class Predictor(BasePredictor): 14 | def setup(self): 15 | """Load the model into memory to make running multiple predictions efficient""" 16 | self.model = ResNet50(weights="resnet50_weights_tf_dim_ordering_tf_kernels.h5") 17 | 18 | # Define the arguments and types the model takes as input 19 | def predict(self, image: Path = Input(description="Image to classify")) -> Any: 20 | """Run a single prediction on the model""" 21 | # Preprocess the image 22 | img = 
keras_image.load_img(image, target_size=(224, 224)) 23 | x = keras_image.img_to_array(img) 24 | x = np.expand_dims(x, axis=0) 25 | x = preprocess_input(x) 26 | # Run the prediction 27 | preds = self.model.predict(x) 28 | # Return the top 3 predictions 29 | return decode_predictions(preds, top=3)[0] 30 | --------------------------------------------------------------------------------