├── README.md
├── daclip_uir_gradio_colab.ipynb
└── daclip_uir_colab.ipynb
/README.md:
--------------------------------------------------------------------------------
🐣 Please follow me for new updates https://twitter.com/camenduru
🔥 Please join our discord server https://discord.gg/k5BwmmvJJU
🥳 Please join my patreon community https://patreon.com/camenduru

## 🦒 Colab

| Colab | Info |
| --- | --- |
| [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/camenduru/daclip-uir-colab/blob/main/daclip_uir_gradio_colab.ipynb) | daclip_uir_gradio_colab |

## ⚠ Important
From https://github.com/Algolzw/daclip-uir#notice

#### Notice!!
In testing we found that the current pretrained model still struggles to process some real-world images 🙁 which might have distribution shifts relative to our training dataset (captured from different devices, or with different resolutions or degradations). We regard this as future work and will try to make our model more practical!

We also encourage users who are interested in our work to train their own models with larger datasets and more degradation types.

## Main Repo
https://github.com/Algolzw/daclip-uir

## Paper
https://arxiv.org/abs/2310.01018

## Page
https://algolzw.github.io/daclip-uir/

## Output
![Screenshot 2023-10-10](https://github.com/camenduru/daclip-uir-colab/assets/54370274/46de8c3b-3f6d-43a4-822a-f93bc34a8d2b)
--------------------------------------------------------------------------------
/daclip_uir_gradio_colab.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "view-in-github"
   },
   "source": [
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/camenduru/daclip-uir-colab/blob/main/daclip_uir_gradio_colab.ipynb)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "VjYy0F2gZIPR"
   },
   "outputs": [],
   "source": [
20 | "%cd /content\n",
21 | "!git clone -b gradio https://github.com/camenduru/daclip-uir\n",
22 | "%cd /content/daclip-uir\n",
23 | "\n",
24 | "!apt -y install -qq aria2\n",
25 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/daclip-uir/resolve/main/daclip_ViT-B-32.pt -d /content/daclip-uir/universal-image-restoration/config/daclip-sde/pretrained -o daclip_ViT-B-32.pt\n",
26 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/daclip-uir/resolve/main/universal-ir.pth -d /content/daclip-uir/universal-image-restoration/config/daclip-sde/pretrained -o universal-ir.pth\n",
27 | "\n",
28 | "!pip install -q einops ema-pytorch ftfy lmdb gradio\n",
29 | "\n",
30 | "%cd /content/daclip-uir/universal-image-restoration/config/daclip-sde\n",
31 | "!python app.py"
32 | ]
33 | }
34 | ],
35 | "metadata": {
36 | "accelerator": "GPU",
37 | "colab": {
38 | "gpuType": "T4",
39 | "provenance": []
40 | },
41 | "kernelspec": {
42 | "display_name": "Python 3",
43 | "name": "python3"
44 | },
45 | "language_info": {
46 | "name": "python"
47 | }
48 | },
49 | "nbformat": 4,
50 | "nbformat_minor": 0
51 | }
52 |
--------------------------------------------------------------------------------
/daclip_uir_colab.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "view-in-github"
   },
   "source": [
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/camenduru/daclip-uir-colab/blob/main/daclip_uir_colab.ipynb)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "VjYy0F2gZIPR"
   },
   "outputs": [],
   "source": [
20 | "%cd /content\n",
21 | "!git clone -b dev https://github.com/camenduru/daclip-uir\n",
22 | "%cd /content/daclip-uir\n",
23 | "\n",
24 | "!apt -y install -qq aria2\n",
25 | "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/daclip-uir/resolve/main/daclip_ViT-B-32-2023-09_b768x4_lr3e-5_e50_zeroadd.pt -d /content/daclip-uir/pretrained -o daclip_ViT-B-32-2023-09_b768x4_lr3e-5_e50_zeroadd.pt\n",
26 | "\n",
27 | "!pip install -q einops ema-pytorch ftfy lmdb"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "%cd /content/daclip-uir/universal-image-restoration\n",
37 | "\n",
38 | "import torch\n",
39 | "from PIL import Image\n",
40 | "import open_clip\n",
41 | "\n",
42 | "checkpoint = '/content/daclip-uir/pretrained/daclip_ViT-B-32-2023-09_b768x4_lr3e-5_e50_zeroadd.pt'\n",
43 | "model, preprocess = open_clip.create_model_from_pretrained('daclip_ViT-B-32', pretrained=checkpoint)\n",
44 | "tokenizer = open_clip.get_tokenizer('ViT-B-32')\n",
45 | "\n",
46 | "image = preprocess(Image.open(\"/content/test.jpg\")).unsqueeze(0)\n",
47 | "degradations = ['motion-blurry','hazy','jpeg-compressed','low-light','noisy','raindrop','rainy','shadowed','snowy','uncompleted']\n",
48 | "text = tokenizer(degradations)\n",
49 | "\n",
50 | "with torch.no_grad(), torch.cuda.amp.autocast():\n",
51 | " text_features = model.encode_text(text)\n",
52 | " image_features, degra_features = model.encode_image(image, control=True)\n",
53 | " degra_features /= degra_features.norm(dim=-1, keepdim=True)\n",
54 | " text_features /= text_features.norm(dim=-1, keepdim=True)\n",
55 | "\n",
56 | " text_probs = (100.0 * degra_features @ text_features.T).softmax(dim=-1)\n",
57 | " index = torch.argmax(text_probs[0])\n",
58 | "\n",
59 | "print(f\"Task: {degradations[index]} - {text_probs[0][index]}\")"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "gpuType": "T4",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
--------------------------------------------------------------------------------