├── components
│   ├── Footer.vue
│   ├── Header.vue
│   ├── t2i
│   │   └── T2iTimeline.vue
│   └── Timeline3.client.vue
├── server
│   └── tsconfig.json
├── public
│   ├── favicon.ico
│   ├── t2i
│   │   └── cover.jpg
│   └── locale
│       └── zh-cn.json
├── uno.config.ts
├── tsconfig.json
├── app
│   └── app.config.ts
├── .vscode
│   ├── settings.json
│   ├── timelineSlideSchema.json
│   └── timelineSchema.json
├── pages
│   ├── t2i.vue
│   └── index.vue
├── layouts
│   └── default.vue
├── .gitignore
├── content
│   └── t2i
│       ├── index.json
│       └── events
│           ├── vidu.json
│           ├── nuwa.json
│           ├── dreambooth.json
│           ├── make-a-video.json
│           ├── dalle-2.json
│           ├── cyclegan.json
│           ├── microsoft-coco.json
│           ├── muse.json
│           ├── nuwainfinity.json
│           ├── rudalle.json
│           ├── riffusion.json
│           ├── aligndraw.json
│           ├── pix2pix.json
│           ├── veo.json
│           ├── attngan.json
│           ├── lumiere.json
│           ├── animatediff.json
│           ├── clip.json
│           ├── nuwa-xl.json
│           ├── googlenet.json
│           ├── midjourney-v6-1.json
│           ├── omnihuman-1.json
│           ├── phenaki.json
│           ├── stylegan2.json
│           ├── general-adversarial-network-gan.json
│           ├── imagen-video.json
│           ├── cogview.json
│           ├── dalle.json
│           ├── wan2.2.json
│           ├── gen-4.json
│           ├── gen-2.json
│           ├── imagen.json
│           ├── laion-400m.json
│           ├── stackgan.json
│           ├── sora.json
│           ├── midjourney-v6.json
│           ├── midjourney.json
│           ├── photon.json
│           ├── gen-1.json
│           ├── magicanimate.json
│           ├── deepdream.json
│           ├── show-1.json
│           ├── imagenet.json
│           ├── dream-machine.json
│           ├── hailuo-ai.json
│           ├── aphantasia.json
│           ├── glide.json
│           ├── parti.json
│           ├── kling.json
│           ├── make-a-scene.json
│           ├── aurora.json
│           ├── dalle-3.json
│           ├── hunyuan.json
│           ├── seedance-1.json
│           ├── gaugan-2.json
│           ├── gen-3-alpha.json
│           ├── boximator.json
│           ├── gaugan.json
│           ├── instructpix2pix.json
│           ├── midjourney-v5.json
│           ├── lumina-image-2.json
│           ├── biggan.json
│           ├── ltx-video.json
│           ├── marey.json
│           ├── big-sleep.json
│           ├── cogview2.json
│           ├── artbreeder-ganbreeder.json
│           ├── pyramid-flow.json
│           ├── aleph2image.json
│           ├── imagen-3.json
│           ├── oasis.json
│           ├── jax-guided-diffusion.json
│           ├── pd12m.json
│           ├── sdxl.json
│           ├── snap-video.json
│           ├── controlnet.json
│           ├── niji-journey.json
│           ├── dalle-mini-craiyon.json
│           ├── deepdaze.json
│           ├── veo-2.json
│           ├── vqganclip.json
│           ├── wan.json
│           ├── cogvideox.json
│           ├── lenet-5.json
│           ├── pika-labs.json
│           ├── zeroscope.json
│           ├── cogvideo.json
│           ├── stable-diffusion-2.json
│           ├── recraft-v3.json
│           ├── sora-release.json
│           ├── stable-diffusion-3.json
│           ├── potat1.json
│           ├── wurstchen.json
│           ├── clip-guided-diffusion.json
│           ├── imagen-2.json
│           ├── meta-movie-gen.json
│           ├── latent-diffusion.json
│           ├── veo-3.json
│           ├── laion-5b.json
│           ├── tooncrafter.json
│           ├── stable-diffusion.json
│           ├── alexnet.json
│           ├── videojam.json
│           ├── latent-consistency-model.json
│           ├── stylegan.json
│           ├── styletransfer.json
│           ├── centipede-diffusion.json
│           ├── 4o-image-generation.json
│           ├── adobe-firefly.json
│           ├── flux1.json
│           ├── modelscope-text2video-synthesis.json
│           ├── firefly-video-model.json
│           ├── ernie-vilg-20.json
│           ├── pytti-5.json
│           ├── disco-diffusion.json
│           └── skyreels-v1.json
├── nuxt.config.ts
├── README.md
└── package.json
/components/Footer.vue: -------------------------------------------------------------------------------- 1 | 4 | -------------------------------------------------------------------------------- /server/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../.nuxt/tsconfig.server.json" 3 | } 4 | -------------------------------------------------------------------------------- /public/favicon.ico: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrrhq/ai-timeline/HEAD/public/favicon.ico -------------------------------------------------------------------------------- /public/t2i/cover.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mrrhq/ai-timeline/HEAD/public/t2i/cover.jpg -------------------------------------------------------------------------------- /uno.config.ts: -------------------------------------------------------------------------------- 1 | import config from "@mrrhq/shadcn-pro/uno.config"; 2 | 3 | export default config 4 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // https://nuxt.com/docs/guide/concepts/typescript 3 | "extends": "./.nuxt/tsconfig.json" 4 | } 5 | -------------------------------------------------------------------------------- /app/app.config.ts: -------------------------------------------------------------------------------- 1 | export default defineAppConfig({ 2 | shadcn: { 3 | color: "zinc", 4 | radius: 0.5, 5 | }, 6 | }); 7 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "json.schemas": [ 3 | { 4 | "fileMatch": ["/content/t2i/**/*.json"], 5 | "url": "./.vscode/timelineSlideSchema.json" 6 | } 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /components/Header.vue: -------------------------------------------------------------------------------- 1 | 9 | -------------------------------------------------------------------------------- /pages/t2i.vue: -------------------------------------------------------------------------------- 1 | 6 | 7 | 12 | -------------------------------------------------------------------------------- /layouts/default.vue: -------------------------------------------------------------------------------- 1 | 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Nuxt dev/build outputs 2 | .output 3 | .data 4 | .nuxt 5 | .nitro 6 | .cache 7 | dist 8 | 9 | # Node dependencies 10 | node_modules 11 | 12 | # Logs 13 | logs 14 | *.log 15 | 16 | # Misc 17 | .DS_Store 18 | .fleet 19 | .idea 20 | 21 | # Local env files 22 | .env 23 | .env.* 24 | !.env.example 25 | -------------------------------------------------------------------------------- /content/t2i/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "text": { 3 | "headline": "AI 生图模型发展史", 4 | "text": "一个记录了生成式 AI 中的文生图、文生视频模型等不同技术重要时间点的发展史。\n Fork 自 Fabian Mosele" 5 | }, 6 | "media": { 7 | "url": "/t2i/cover.jpg" 8 | }, 9 | "background": { 10 | "color": "#39167a" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /nuxt.config.ts: -------------------------------------------------------------------------------- 1 | // https://nuxt.com/docs/api/configuration/nuxt-config 2 | export default defineNuxtConfig({ 3 | compatibilityDate: "2024-04-03", 4 | extends: ["@mrrhq/shadcn-pro"], 5 | modules: ["@nuxt/content"], 6 | devtools: { enabled: true }, 7 | app: { 8 | head: { 9 | title: "AI 技术发展史 | 
探索 AI 技术发展历程", 10 | meta: [ 11 | { 12 | name: "description", 13 | content: 14 | "记录文生图、文生视频、大语言模型等技术在发展过程中的重要时间点", 15 | }, 16 | ], 17 | }, 18 | }, 19 | }); 20 | -------------------------------------------------------------------------------- /content/t2i/events/vidu.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 7, 5 | "day": 31 6 | }, 7 | "text": { 8 | "headline": "Vidu", 9 | "text": "生数科技携手清华大学研发出的视频生成模型,早在4月28号时就公布了演示,号称国内第一个类 Sora 模型,上线后免费用户支持生成4秒视频" 10 | }, 11 | "media": { 12 | "url": "https://www.vidu.studio/vidu.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://www.vidu.studio/vidu.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#416bfb" 20 | } 21 | } -------------------------------------------------------------------------------- /components/t2i/T2iTimeline.vue: -------------------------------------------------------------------------------- 1 | 12 | 13 | 20 | -------------------------------------------------------------------------------- /content/t2i/events/nuwa.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 11, 5 | "day": 24 6 | }, 7 | "text": { 8 | "headline": "NÜWA", 9 | "text": "微软的多模态文生图和文生视频模型。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/microsoft/NUWA/raw/main/assets/NUWA.gif", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://github.com/microsoft/NUWA/raw/main/assets/NUWA.gif" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#f6c137" 20 | } 21 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 
3 | [cover image: AI Timeline Cover]
4 | 
5 | 
6 | AI Timeline
7 | 
8 | 
9 | 记录文生图、文生视频、大语言模型等 AI 相关技术在发展过程中的重要时间点
10 | 
11 | 
12 | 
13 | [badge: Website]
14 | 
15 | 
16 | [badge: 三花 AI]
17 | 
18 | 

19 | -------------------------------------------------------------------------------- /content/t2i/events/dreambooth.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 8, 5 | "day": 25 6 | }, 7 | "text": { 8 | "headline": "DreamBooth", 9 | "text": "谷歌通过微调文生图模型输出特定连贯对象。" 10 | }, 11 | "media": { 12 | "url": "https://dreambooth.github.io/DreamBooth_files/teaser_static.jpg", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://dreambooth.github.io/DreamBooth_files/teaser_static.jpg" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/make-a-video.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 9, 5 | "day": 29 6 | }, 7 | "text": { 8 | "headline": "Make-A-Video", 9 | "text": "Meta的文生视频模型。" 10 | }, 11 | "media": { 12 | "url": "https://makeavideo.studio/assets/overview.webp", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 , 论文", 15 | "thumbnail": "https://makeavideo.studio/assets/overview.webp" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#285abd" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/dalle-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 4, 5 | "day": 7 6 | }, 7 | "text": { 8 | "headline": "DALL·E 2", 9 | "text": "OpenAI最大规模的文生图模型发布。可以通过付费积分系统生成。" 10 | }, 11 | "media": { 12 | "url": "https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 ", 15 | "thumbnail": "https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#4c46f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/cyclegan.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2017, 4 | "month": 3, 5 | "day": 30 6 | }, 7 | "text": { 8 | "headline": "CycleGAN", 9 | "text": "一种能够将图像内容更改为另一个类别的GAN类型。" 10 | }, 11 | "media": { 12 | "url": "https://miro.medium.com/max/896/1*dWd0lVTbnu80UZM641gCbw.gif", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://miro.medium.com/max/896/1*dWd0lVTbnu80UZM641gCbw.gif" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#92ac67" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/microsoft-coco.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2014, 4 | "month": 5, 5 | "day": 1 6 | }, 7 | "text": { 8 | "headline": "Microsoft COCO", 9 | "text": "大规模目标检测、分割和描述数据集,包含超过20万张标注图像。" 10 | }, 11 | "media": { 12 | "url": "https://cocodataset.org/images/coco-examples.jpg", 13 | "credit": "媒体来源", 14 | "caption": "论文, 网站", 15 | "thumbnail": "https://cocodataset.org/images/coco-examples.jpg" 16 | }, 17 | "group": "Dataset", 18 | "background": { 19 | "color": "#28b572" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/muse.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 1, 5 | "day": 2 6 | }, 7 | "text": { 8 | "headline": "Muse", 9 | "text": "一个使用LLM的令牌潜在空间而不是扩散模型的文生图Transformer模型。" 10 | }, 11 | "media": { 12 | "url": "https://muse-model.github.io/images/logo/muse_cake_1.jpg", 13 | "credit": "媒体来源", 14 | "caption": "论文, 网站", 15 | "thumbnail": "https://muse-model.github.io/images/logo/muse_cake_1.jpg" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/nuwainfinity.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 7, 5 | "day": 20 6 | }, 7 | "text": { 8 | "headline": "NUWA_Infinity", 9 | "text": "微软的自回归视觉合成预训练模型,用于文生图和文生视频。" 10 | }, 11 | "media": { 12 | "url": "https://www.msra.cn/wp-content/uploads/2022/07/nuwa-infinity-3.jpg", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://www.msra.cn/wp-content/uploads/2022/07/nuwa-infinity-3.jpg" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#f6c137" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/rudalle.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 11, 5 | "day": 17 6 | }, 7 | "text": { 8 | "headline": "RuDALLE", 9 | "text": "一个俄罗斯版的DALLE,在架构模型上有所不同。使用俄罗斯语言版本的CLIP,ruCLIP进行训练。" 10 | }, 11 | "media": { 12 | "url": "https://sberdevices.s3pd01.sbercloud.ru/rndml-nlp/dalle-landing/xl/first_fon.jpeg", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 ", 15 | "thumbnail": "https://sberdevices.s3pd01.sbercloud.ru/rndml-nlp/dalle-landing/xl/first_fon.jpeg" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#528cb0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/riffusion.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 12, 5 | "day": 15 6 | }, 7 | "text": { 8 | "headline": "Riffusion", 9 | "text": "一个微调在可翻译为音频文件的频谱图像上的文生图Stable Diffusion模型。" 10 | }, 11 | "media": { 12 | "url": "https://www.riffusion.com/_next/image?url=%2Fabout%2Ffunky_sax.gif&w=640&q=75", 13 | "credit": " 媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://www.riffusion.com/_next/image?url=%2Fabout%2Ffunky_sax.gif&w=640&q=75" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#112240" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/aligndraw.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2015, 4 | "month": 11, 5 | "day": 9 6 | }, 7 | "text": { 8 | "headline": "alignDRAW", 9 | "text": "最早的文生图模型之一,是DRAW网络(深度递归注意力写入器)的扩展。该模型在微软COCO数据集上训练。" 10 | }, 11 | "media": { 12 | "url": "https://nathanfradet.com/posts/text_to_image/alignDRAW_results.png", 13 | "credit": "媒体来源", 14 | "caption": "论文", 15 | "thumbnail": "https://nathanfradet.com/posts/text_to_image/alignDRAW_results.png" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#719dc9" 20 | } 21 | } -------------------------------------------------------------------------------- 
/content/t2i/events/pix2pix.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2016, 4 | "month": 11, 5 | "day": 21 6 | }, 7 | "text": { 8 | "headline": "Pix2Pix", 9 | "text": "一个条件对抗网络,可以从特定类别的标签图生成图像。" 10 | }, 11 | "media": { 12 | "url": "https://phillipi.github.io/pix2pix/images/edges2cats.jpg", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 , DEMO ", 15 | "thumbnail": "https://phillipi.github.io/pix2pix/images/edges2cats.jpg" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#f72672" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/veo.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 5, 5 | "day": 14 6 | }, 7 | "text": { 8 | "headline": "Veo", 9 | "text": "谷歌的文生视频模型,能够从文本、图像和视频输入生成视频。目前仅通过加入候补名单可用。" 10 | }, 11 | "media": { 12 | "url": "https://deepmind.google/api/blob/website/media/veo_example_043_alpacas.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://deepmind.google/api/blob/website/media/veo_example_043_alpacas.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/attngan.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2017, 4 | "month": 11, 5 | "day": 28 6 | }, 7 | "text": { 8 | "headline": "AttnGAN", 9 | "text": "最早基于生成对抗网络(GAN)的文生图模型之一。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/taoxugit/AttnGAN/raw/master/example_coco.png", 13 | "credit": "媒体来源", 14 | "caption": "论文, GITHUB", 15 | "thumbnail": "https://github.com/taoxugit/AttnGAN/raw/master/example_coco.png" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#f59b2d" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/lumiere.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 1, 5 | "day": 23 6 | }, 7 | "text": { 8 | "headline": "Lumiere", 9 | "text": "谷歌的生成视频扩散模型。" 10 | }, 11 | "media": { 12 | "url": "https://lumiere-video.github.io/videos/stylization/wooden_woman.mp4", 13 | "credit": "媒体来源", 14 | "caption": "论文, 网站", 15 | "thumbnail": "https://lumiere-video.github.io/images/Girl_with_a_Pearl_Earring.jpg" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/animatediff.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 7, 5 | "day": 10 6 | }, 7 | "text": { 8 | "headline": "AnimateDiff", 9 | "text": "通过Stable Diffusion模型生成视频的文生视频模型。" 10 | }, 11 | "media": { 12 | "url": "https://animatediff.github.io/animations/teaser/ani_01.mp4", 13 | "credit": "媒体来源", 14 | "caption": "论文, GITHUB", 15 | "thumbnail": "https://animatediff.github.io/animations/teaser/ani_01.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#fa98ae" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/clip.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 1, 5 | "day": 5 6 | }, 7 | "text": { 8 | "headline": "CLIP", 9 | "text": "对比语言-图像预训练是一个在图像和文本关系上训练的神经网络。这个模型对公众开放,开启了文生图模型的文艺复兴。" 10 | }, 11 | "media": { 12 | "url": "https://cdn.openai.com/research-covers/clip/2x-no-mark.jpg", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 , BLOG POST ", 15 | "thumbnail": "https://cdn.openai.com/research-covers/clip/2x-no-mark.jpg" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#4c46f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/nuwa-xl.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 3, 5 | "day": 22 6 | }, 7 | "text": { 8 | "headline": "NUWA-XL", 9 | "text": "一个多模态文生视频模型,通过不同的扩散模型架构生成长视频。" 10 | }, 11 | "media": { 12 | "url": "https://media.arxiv-vanity.com/render-output/7483960/x1.png", 13 | "credit": "媒体来源", 14 | "caption": "论文, 网站", 15 | "thumbnail": "https://media.arxiv-vanity.com/render-output/7483960/x1.png" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#f6c137" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/googlenet.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2014, 4 | "month": 9, 5 | "day": 17 6 | }, 7 | "text": { 8 | "headline": "GoogLeNet", 9 | "text": "谷歌在ILSVRC14上提出的卷积神经网络。" 10 | }, 11 | "media": { 12 | "url": "https://production-media.paperswithcode.com/methods/Screen_Shot_2020-06-22_at_3.28.59_PM.png", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://production-media.paperswithcode.com/methods/Screen_Shot_2020-06-22_at_3.28.59_PM.png" 16 | }, 17 | "group": "CNN Model", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/midjourney-v6-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 7, 5 | "day": 30 6 | }, 7 | "text": { 8 | "headline": "Midjourney v6.1", 9 | "text": "Midjourney 第 6 版的小升级,图像质量、处理速度和个性化体验的显著提升。" 10 | }, 11 | "media": { 12 | "url": "https://updates.midjourney.com/content/images/size/w960/2024/07/image-1.png", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://updates.midjourney.com/content/images/size/w960/2024/07/image-1.png" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#091331" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/omnihuman-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2025, 4 | "month": 2, 5 | "day": 3 6 | }, 7 | "text": { 8 | "headline": "OmniHuman-1", 9 | "text": "字节跳动开发的专门制作逼真角色唇同步和动作的视频生成模型。允许图像和音频输入,该模型致力于生成自然的人类唇同步和相应的身体动作。" 10 | }, 11 | "media": { 12 | "url": "https://omnihuman-lab.github.io/video/main2.mp4", 13 | "credit": "媒体来源", 14 | "caption": "论文, GitHub", 15 | "thumbnail": "https://omnihuman-lab.github.io/video/main2.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#3158B2" 20 | } 21 | } 22 | 
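[Note: .vscode/settings.json above maps every file under content/t2i/**/*.json to ./.vscode/timelineSlideSchema.json, but that schema file's body is not included in this dump. Below is only a sketch of what such a slide schema could look like, with every field inferred from the event JSON files shown here; the repository's actual timelineSlideSchema.json may differ.]

{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Timeline slide (sketch inferred from content/t2i/events/*.json, not the repository's actual schema)",
  "type": "object",
  "required": ["start_date", "text"],
  "properties": {
    "start_date": { "$ref": "#/definitions/date" },
    "end_date": { "$ref": "#/definitions/date" },
    "text": {
      "type": "object",
      "properties": {
        "headline": { "type": "string" },
        "text": { "type": "string" }
      }
    },
    "media": {
      "type": "object",
      "properties": {
        "url": { "type": "string" },
        "credit": { "type": "string" },
        "caption": { "type": "string" },
        "thumbnail": { "type": "string" }
      }
    },
    "group": { "type": "string" },
    "background": {
      "type": "object",
      "properties": {
        "color": { "type": "string" }
      }
    }
  },
  "definitions": {
    "date": {
      "type": "object",
      "required": ["year"],
      "properties": {
        "year": { "type": "integer" },
        "month": { "type": "integer" },
        "day": { "type": "integer" }
      }
    }
  }
}

[The slide layout (start_date/end_date, text, media, group, background) matches the slide objects consumed by TimelineJS, which the Timeline3.client.vue component presumably renders.]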
-------------------------------------------------------------------------------- /content/t2i/events/phenaki.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 9, 5 | "day": 29 6 | }, 7 | "text": { 8 | "headline": "Phenaki", 9 | "text": "一个用于从文本生成视频的模型,提示可以随时间变化,视频可以长达数分钟。" 10 | }, 11 | "media": { 12 | "url": "https://pub-bede3007802c4858abc6f742f405d4ef.r2.dev/stories/teddy_bear_2.gif", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 , 论文", 15 | "thumbnail": "https://pub-bede3007802c4858abc6f742f405d4ef.r2.dev/stories/teddy_bear_2.gif" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#7DC3A4" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/stylegan2.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2019, 4 | "month": 12, 5 | "day": 3 6 | }, 7 | "text": { 8 | "headline": "StyleGAN2", 9 | "text": "NVIDIA更新版StyleGAN。可以在任何数据集上训练,但最知名的是https://thispersondoesnotexist.com/。" 10 | }, 11 | "media": { 12 | "url": "https://pythonawesome.com/content/images/2019/12/StyleGAN2.jpg", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://pythonawesome.com/content/images/2019/12/StyleGAN2.jpg" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#76b52c" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/general-adversarial-network-gan.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2014, 4 | "month": 6, 5 | "day": 10 6 | }, 7 | "text": { 8 | "headline": "Generative Adversarial Network (GAN)", 9 | "text": "一个通过让两个神经网络相互对抗来生成图像的机器学习框架。" 10 | }, 11 | "media": { 12 | "url": "https://miro.medium.com/max/1316/1*AyzNHv-ftBzHQ2uwmwG08A.png", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://miro.medium.com/max/1316/1*AyzNHv-ftBzHQ2uwmwG08A.png" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#ceb720" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/imagen-video.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 10, 5 | "day": 5 6 | }, 7 | "text": { 8 | "headline": "Imagen Video", 9 | "text": "谷歌的文生视频模型,是他们的T2I模型Imagen的继任者。" 10 | }, 11 | "media": { 12 | "url": "https://imagen.research.google/video/videos/fairytale-2.mp4", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 , 论文", 15 | "thumbnail": "https://imagen.research.google/video/videos/fairytale-2.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/cogview.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 5, 5 | "day": 26 6 | }, 7 | "text": { 8 | "headline": "CogView", 9 | "text": "类似DALL·E的简体中文文生图模型。" 10 | }, 11 | "media": { 12 | "url": "https://p6-tt.byteimg.com/origin/pgc-image/375fc86a1a9945bab6b7d1f843e5cc74?from=pc", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 , DEMO ", 15 | "thumbnail": "https://p6-tt.byteimg.com/origin/pgc-image/375fc86a1a9945bab6b7d1f843e5cc74?from=pc" 16 | }, 17 | "group": 
"VQ-VAE", 18 | "background": { 19 | "color": "#12112a" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/dalle.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 1, 5 | "day": 5 6 | }, 7 | "text": { 8 | "headline": "DALL·E", 9 | "text": "OpenAI的首个文生图模型,是其CLIP模型的首次实现。由于代码未发布,这为各种试图模仿它的开源模型铺平了道路。" 10 | }, 11 | "media": { 12 | "url": "https://daleonai.com/images/screen-shot-2021-01-06-at-1.37.37-pm.png", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 , BLOG POST ", 15 | "thumbnail": "https://daleonai.com/images/screen-shot-2021-01-06-at-1.37.37-pm.png" 16 | }, 17 | "group": "VQ-VAE", 18 | "background": { 19 | "color": "#4c46f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/wan2.2.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2025, 4 | "month": 7, 5 | "day": 28 6 | }, 7 | "text": { 8 | "headline": "Wan2.2", 9 | "text": "Wan2.2,阿里巴巴发布的开源视频生成模型,是 Wan 基础视频模型的重大升级。" 10 | }, 11 | "media": { 12 | "url": "https://cdn.wanxai.com/static/demo-wan22/top.mp4", 13 | "credit": "媒体来源", 14 | "caption": "GitHub, HuggingFace, 网站", 15 | "thumbnail": "https://cdn.wanxai.com/static/demo-wan22/top.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#5e4bf6" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/gen-4.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2025, 4 | "month": 3, 5 | "day": 31 6 | }, 7 | "text": { 8 | "headline": "Gen-4", 9 | "text": "Runway视频模型的第四代。相比前代产品,具有更强的提示词遵循能力和运动灵活性。与Runway的图像生成器Frames结合使用,现在可以使用参考图像来组合和生成新视频。" 10 | }, 11 | "media": { 12 | "url": "https://d3phaj0sisr2ct.cloudfront.net/site/content/videos/gen4/Footer.webm", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://d3phaj0sisr2ct.cloudfront.net/site/content/videos/gen4/Footer.webm" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#4640f5" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/gen-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 3, 5 | "day": 20 6 | }, 7 | "text": { 8 | "headline": "Gen-2", 9 | "text": "RunwayML的文生视频工具。基于与一个月前发布的video2video工具Gen-1相同的论文。" 10 | }, 11 | "media": { 12 | "url": "https://d3phaj0sisr2ct.cloudfront.net/research/gen1/videos/eugene/eugene.webm", 13 | "credit": "媒体来源", 14 | "caption": "论文, 网站", 15 | "thumbnail": "https://d3phaj0sisr2ct.cloudfront.net/research/gen1/videos/eugene/eugene.webm" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#4640f5" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/imagen.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 5, 5 | "day": 23 6 | }, 7 | "text": { 8 | "headline": "Imagen", 9 | "text": "谷歌的DALL·E竞争对手,虽然尚未向公众开放。" 10 | }, 11 | "media": { 12 | "url": "https://imagen.research.google/main_gallery_images/sprouts-in-the-shape-of-text-imagen.jpg", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 , 网站 ", 15 
| "thumbnail": "https://imagen.research.google/main_gallery_images/sprouts-in-the-shape-of-text-imagen.jpg" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/laion-400m.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 8, 5 | "day": 8 6 | }, 7 | "text": { 8 | "headline": "LAION-400M", 9 | "text": "一个开放的数据集,包含2014年至2021年间随机网页的文本图像对,通过OpenAI的CLIP过滤。" 10 | }, 11 | "media": { 12 | "url": "https://assets-global.website-files.com/5d7b77b063a9066d83e1209c/61e9cecf1ccbd5180cb79e69_7CFUx6h.png", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 ", 15 | "thumbnail": "https://assets-global.website-files.com/5d7b77b063a9066d83e1209c/61e9cecf1ccbd5180cb79e69_7CFUx6h.png" 16 | }, 17 | "group": "Dataset", 18 | "background": { 19 | "color": "#ca0927" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/stackgan.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2016, 4 | "month": 12, 5 | "day": 10 6 | }, 7 | "text": { 8 | "headline": "StackGAN", 9 | "text": "最早基于生成对抗网络(GAN)的文生图模型之一,通过将工作负载分成两个独立的阶段生成256x256图像。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/hanzhanggit/StackGAN/raw/master/examples/bird1.jpg", 13 | "credit": "媒体来源", 14 | "caption": " 论文, GITHUB", 15 | "thumbnail": "https://github.com/hanzhanggit/StackGAN/raw/master/examples/bird1.jpg" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#f59b2d" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/sora.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 2, 5 | "day": 15 6 | }, 7 | "text": { 8 | "headline": "Sora", 9 | "text": "OpenAI开发的生成视频扩散模型,能够生成一分钟的生成视频,在现实主义和一致性方面超越了所有前代模型。目前仅对少数人开放。" 10 | }, 11 | "media": { 12 | "url": "https://cdn.openai.com/tmp/s/title_0.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站, RESEARCH", 15 | "thumbnail": "https://cdn.openai.com/tmp/s/title_0.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#4c46f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/midjourney-v6.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 12, 5 | "day": 21 6 | }, 7 | "text": { 8 | "headline": "Midjourney v6", 9 | "text": "Midjourney发布的第六版。这个版本更善于处理详细的提示。" 10 | }, 11 | "media": { 12 | "url": "https://cdn.mos.cms.futurecdn.net/NzUyRsyPDPFPwchrdUwbv8-1200-80.png.webp", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://cdn.mos.cms.futurecdn.net/NzUyRsyPDPFPwchrdUwbv8-1200-80.png.webp" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#091331" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/midjourney.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 3, 5 | "day": 13 6 | }, 7 | "text": { 8 | "headline": "Midjourney", 9 | "text": "先前为封闭测试版,Midjourney现为开放测试版文生图模型,通过其Discord服务器的订阅模式工作。" 10 | }, 11 | "media": 
{ 12 | "url": "https://user-images.githubusercontent.com/105028755/167756032-0059cb74-d437-4747-8778-902c03403be6.gif", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 ", 15 | "thumbnail": "https://user-images.githubusercontent.com/105028755/167756032-0059cb74-d437-4747-8778-902c03403be6.gif" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#091331" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/photon.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 11, 5 | "day": 25 6 | }, 7 | "text": { 8 | "headline": "Photon", 9 | "text": "Luma开发的图像生成模型。具有非常精确的提示词遵循能力,可以使用图像作为提示并支持角色参考。" 10 | }, 11 | "media": { 12 | "url": "https://framerusercontent.com/images/fZ0hgovuW1uzmx6Kt9KCRnvzmw.jpg?scale-down-to=1024", 13 | "credit": "媒体来源", 14 | "caption": "网站, 演示", 15 | "thumbnail": "https://framerusercontent.com/images/fZ0hgovuW1uzmx6Kt9KCRnvzmw.jpg?scale-down-to=1024" 16 | }, 17 | "group": “Diffusion Model", 18 | "background": { 19 | "color": "#0f2cf5" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/gen-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 2, 5 | "day": 6 6 | }, 7 | "text": { 8 | "headline": "Gen-1", 9 | "text": "RunwayML的video2video工具,通过文本或图像提示用生成视觉效果编辑视频。其公开发布在2023年3月27日。3月20日宣布了Gen-2,一个基于同一论文的文生视频工具。" 10 | }, 11 | "media": { 12 | "url": "https://research.runwayml.com/videos/gen1/carrousel/astronaut.webm", 13 | "credit": " 媒体来源", 14 | "caption": "论文, 网站", 15 | "thumbnail": "https://research.runwayml.com/videos/gen1/carrousel/astronaut.webm" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#4640f5" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/magicanimate.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 11, 5 | "day": 27 6 | }, 7 | "text": { 8 | "headline": "MagicAnimate", 9 | "text": "一个视频生成模型,将图像的主体转移到视频的人物主体的动作上。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/magic-research/magic-animate/raw/main/assets/teaser/t4.gif", 13 | "credit": "媒体来源", 14 | "caption": "论文, GITHUB", 15 | "thumbnail": "https://github.com/magic-research/magic-animate/raw/main/assets/teaser/t4.gif" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#3a064e" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/deepdream.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2015, 4 | "month": 7, 5 | "day": 1 6 | }, 7 | "text": { 8 | "headline": "DeepDream", 9 | "text": "谷歌的计算机程序以其迷幻的视觉效果为特征。是可视化神经网络如何识别和生成图像模式的首个用例之一。" 10 | }, 11 | "media": { 12 | "url": "https://1.bp.blogspot.com/-XZ0i0zXOhQk/VYIXdyIL9kI/AAAAAAAAAmQ/UbA6j41w28o/s1600/building-dreams.png", 13 | "credit": " 媒体来源", 14 | "caption": " GITHUB ", 15 | "thumbnail": "https://1.bp.blogspot.com/-XZ0i0zXOhQk/VYIXdyIL9kI/AAAAAAAAAmQ/UbA6j41w28o/s1600/building-dreams.png" 16 | }, 17 | "group": "CNN Model", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- 
/content/t2i/events/show-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 9, 5 | "day": 27 6 | }, 7 | "text": { 8 | "headline": "Show-1", 9 | "text": "Showlab 在新加坡国立大学开发的文生视频模型,具有更高效的GPU使用率。" 10 | }, 11 | "media": { 12 | "url": "https://showlab.github.io/Show-1/assets/videos/comparison/show1/05.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站, 论文, GITHUB", 15 | "thumbnail": "https://showlab.github.io/Show-1/assets/videos/comparison/show1/05.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#7ea75c" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/imagenet.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2009, 4 | "month": 6, 5 | "day": 20 6 | }, 7 | "text": { 8 | "headline": "ImageNet", 9 | "text": "包含超过1400万张图像的数据集,所有图像都有人工标注的内容描述。当时最大的图像数据集,推动了计算机视觉研究的发展。" 10 | }, 11 | "media": { 12 | "url": "https://devopedia.org/images/article/172/7316.1561043304.png", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://devopedia.org/images/article/172/7316.1561043304.png" 16 | }, 17 | "group": "Dataset", 18 | "background": { 19 | "color": "#8eb1d5" 20 | }, 21 | "end_date": { 22 | "year": 2009, 23 | "month": 6, 24 | "day": 25 25 | } 26 | } -------------------------------------------------------------------------------- /content/t2i/events/dream-machine.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 6, 5 | "day": 13 6 | }, 7 | "text": { 8 | "headline": "Dream Machine", 9 | "text": "Luma Labs开发的文生视频模型,通过文本或图像提示生成视频。通过其网站向公众开放。" 10 | }, 11 | "media": { 12 | "url": "https://cdn-luma.com/public/lumalabs.ai/june-12-launch-dream-machine-high-res/assets/dream-machine/featured/videos/32.webm", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://cdn-luma.com/public/lumalabs.ai/june-12-launch-dream-machine-high-res/assets/dream-machine/featured/videos/32.webm" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#0f2cf5" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/hailuo-ai.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 9, 5 | "day": 1 6 | }, 7 | "text": { 8 | "headline": "Hailuo AI", 9 | "text": "由创业公司MiniMax开发的文本到视频模型,可在其网站上使用。相比之前的视频生成模型有明显升级,在灵活性和提示词遵循方面表现出色。" 10 | }, 11 | "media": { 12 | "url": "https://cdn.mos.cms.futurecdn.net/vY7UvKfqeJA9h4zDA5q8Ni-1200-80.gif", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://cdn.mos.cms.futurecdn.net/vY7UvKfqeJA9h4zDA5q8Ni-1200-80.gif" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#051418" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/aphantasia.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 2, 5 | "day": 18 6 | }, 7 | "text": { 8 | "headline": "Aphantasia", 9 | "text": "Vadim Epstein(@eps969)的Colab笔记本,将CLIP连接到Lucent库。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/eps696/aphantasia/raw/master/_out/Aphantasia4.jpg", 13 | "credit": " 媒体来源", 14 | 
"caption": " COLAB , GITHUB ", 15 | "thumbnail": "https://github.com/eps696/aphantasia/raw/master/_out/Aphantasia4.jpg" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#e7c749" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/glide.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 12, 5 | "day": 22 6 | }, 7 | "text": { 8 | "headline": "GLIDE", 9 | "text": "由OpenAI开发的扩散模型。它将成为DALLE 2架构的基础之一。" 10 | }, 11 | "media": { 12 | "url": "https://miro.medium.com/max/1400/0*Z4ZlPUiXIKZh5PxH.png", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 , GITHUB ", 15 | "thumbnail": "https://miro.medium.com/max/1400/0*Z4ZlPUiXIKZh5PxH.png" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#4c46f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/parti.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 6, 5 | "day": 22 6 | }, 7 | "text": { 8 | "headline": "Parti", 9 | "text": "谷歌的文生图模型,是OpenAI的DALL·E的竞争对手。" 10 | }, 11 | "media": { 12 | "url": "https://sites.research.google/parti/paper_images_green_watermark_outputs/figures/scaling_comparison/kangaroo_3.jpg", 13 | "credit": " 媒体来源", 14 | "caption": " GITHUB , 网站 ", 15 | "thumbnail": "https://sites.research.google/parti/paper_images_green_watermark_outputs/figures/scaling_comparison/kangaroo_3.jpg" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/kling.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 6, 5 | "day": 6 6 | }, 7 | "text": { 8 | "headline": "KLING", 9 | "text": "快手开发的文生视频模型,第一个严肃的Sora竞争对手,能够生成长达2分钟的视频。此外可以通过OpenPose骨架输入提示(主要用于舞蹈)。在其应用内加入候补名单的用户可用。" 10 | }, 11 | "media": { 12 | "url": "https://the-decoder.com/wp-content/uploads/2024/06/chinese_boy_eating_burger_prompt.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://the-decoder.com/wp-content/uploads/2024/06/chinese_boy_eating_burger_prompt.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#fc4b08" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/make-a-scene.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 7, 5 | "day": 14 6 | }, 7 | "text": { 8 | "headline": "Make-A-Scene", 9 | "text": "更强大的GauGAN版本。Meta的带有标签图的文生图模型。" 10 | }, 11 | "media": { 12 | "url": "https://media.cybernews.com/images/featured-big/2022/07/290601794_1176479646540355_5132798628346215961_n.jpeg", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://media.cybernews.com/images/featured-big/2022/07/290601794_1176479646540355_5132798628346215961_n.jpeg" 16 | }, 17 | "group": "VQ-VAE", 18 | "background": { 19 | "color": "#285abd" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/aurora.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 12, 5 | "day": 9 6 | }, 7 
| "text": { 8 | "headline": "Aurora", 9 | "text": "xAI开发的图像生成模型,在Grok 2上可用,拥有X(原Twitter)账号的所有人都可使用。在生成名人准确图像方面表现出色,且无限制。" 10 | }, 11 | "media": { 12 | "url": "https://x.ai/_next/image?url=%2Fimages%2Flegacy%2Faurora%2Fcomparisons%2Fcybertruck%2Faurora.webp&w=1920&q=75", 13 | "credit": "媒体来源", 14 | "caption": "博客文章", 15 | "thumbnail": "https://x.ai/_next/image?url=%2Fimages%2Flegacy%2Faurora%2Fcomparisons%2Fcybertruck%2Faurora.webp&w=1920&q=75" 16 | }, 17 | "group": “Diffusion Model", 18 | "background": { 19 | "color": "#000000" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/dalle-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 9, 5 | "day": 20 6 | }, 7 | "text": { 8 | "headline": "DALL·E 3", 9 | "text": "OpenAI开发的第三代DALLE。由于改进了数据集图像的描述,这个模型对文本有更细致的理解,并且能够更好地遵循提示中的描述。" 10 | }, 11 | "media": { 12 | "url": "https://images.openai.com/blob/bc8a8a6b-36f2-4774-bc95-c563cb32dcdd/banana.png?trim=0,0,0,0&width=1400", 13 | "credit": "媒体来源", 14 | "caption": "论文, 网站", 15 | "thumbnail": "https://images.openai.com/blob/bc8a8a6b-36f2-4774-bc95-c563cb32dcdd/banana.png?trim=0,0,0,0&width=1400" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#4c46f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/hunyuan.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 12, 5 | "day": 3 6 | }, 7 | "text": { 8 | "headline": "Hunyuan", 9 | "text": "腾讯发布的开源视频生成模型,是该公司首个此类模型。生成过程较长,但迄今为止拥有最佳的开源视频生成效果。" 10 | }, 11 | "media": { 12 | "url": "https://aivideo.hunyuan.tencent.com/movie/part-5/part-5-4.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站, HuggingFace, GitHub", 15 | "thumbnail": "https://aivideo.hunyuan.tencent.com/movie/part-5/part-5-4.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#7f59e6" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/seedance-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2025, 4 | "month": 6, 5 | "day": 12 6 | }, 7 | "text": { 8 | "headline": "Seedance 1.0", 9 | "text": "字节跳动开发的视频生成模型,据说具有与Veo 3相同的功能,但生成成本更低。一个显著特点是可以轻松创建多镜头生成。" 10 | }, 11 | "media": { 12 | "url": "https://d2g64w682n9w0w.cloudfront.net/media/videos/1750084389819163030_cPqmieb6.mp4", 13 | "credit": "媒体来源", 14 | "caption": "论文, 网站", 15 | "thumbnail": "https://d2g64w682n9w0w.cloudfront.net/media/videos/1750084389819163030_cPqmieb6.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#3158B2" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/gaugan-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 11, 5 | "day": 22 6 | }, 7 | "text": { 8 | "headline": "GauGAN 2", 9 | "text": "GauGAN的续集,现称为NVIDIA Canvas。可以从更细粒度的标签图生成景观。" 10 | }, 11 | "media": { 12 | "url": "https://blogs.nvidia.com/wp-content/uploads/2021/06/Canvas-UI-1536x834.png.webp", 13 | "credit": " 媒体来源", 14 | "caption": " BLOG POST , APP ", 15 | "thumbnail": "https://blogs.nvidia.com/wp-content/uploads/2021/06/Canvas-UI-1536x834.png.webp" 16 | }, 17 | 
"group": "GAN", 18 | "background": { 19 | "color": "#76b52c" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/gen-3-alpha.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 6, 5 | "day": 17 6 | }, 7 | "text": { 8 | "headline": "Gen-3 Alpha", 9 | "text": "Runway开发的生成视频模型,继Gen-1和Gen-2之后。其两个前辈的改进版本,Gen-3 Alpha承诺可以自定义模型以进行风格控制。仅对其网站上的付费用户开放。" 10 | }, 11 | "media": { 12 | "url": "https://d3phaj0sisr2ct.cloudfront.net/site/videos/gen-3-alpha/carousel-01/gen-3-alpha-output-001.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://d3phaj0sisr2ct.cloudfront.net/site/videos/gen-3-alpha/carousel-01/gen-3-alpha-output-001.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#4640f5" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/boximator.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 2, 5 | "day": 13 6 | }, 7 | "text": { 8 | "headline": "Boximator", 9 | "text": "ByteDance开发的用于视频扩散模型的运动控制插件。通过框定不同元素的运动的边界框,可以详细控制生成视频的运动。" 10 | }, 11 | "media": { 12 | "url": "https://boximator.github.io/assets/C2V/9-A%20girl%20in%20red%20is%20covering%20her%20face%20with%20a%20skull..mp4", 13 | "credit": "媒体来源", 14 | "caption": "论文, GITHUB", 15 | "thumbnail": "https://boximator.github.io/assets/C2V/9-A%20girl%20in%20red%20is%20covering%20her%20face%20with%20a%20skull..mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#da3d27" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/gaugan.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2019, 4 | "month": 3, 5 | "day": 18 6 | }, 7 | "text": { 8 | "headline": "GauGAN", 9 | "text": "NVIDIA通过标签图生成真实景观的GAN。2021年发布的GauGAN2现由NVIDIA Canvas提供支持。" 10 | }, 11 | "media": { 12 | "url": "https://blogs.nvidia.com/wp-content/uploads/2019/03/guagan-demo-2x-speedup_1_1.mp4", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 , APP ", 15 | "thumbnail": "https://blogs.nvidia.com/wp-content/uploads/2019/03/guagan-demo-2x-speedup_1_1.mp4" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#76b52c" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/instructpix2pix.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 11, 5 | "day": 17 6 | }, 7 | "text": { 8 | "headline": "InstructPix2Pix", 9 | "text": "一个通过Stable Diffusion和GPT-3生成的数据训练的模型,可以根据人类指令编辑图像。" 10 | }, 11 | "media": { 12 | "url": "https://instruct-pix2pix.timothybrooks.com/teaser.jpg", 13 | "credit": " 媒体来源", 14 | "caption": "论文 , 网站 , DEMO ", 15 | "thumbnail": "https://instruct-pix2pix.timothybrooks.com/teaser.jpg" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#cc7120" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/midjourney-v5.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 3, 5 | "day": 30 6 | }, 7 | "text": { 8 | "headline": "Midjourney v5", 9 | 
"text": "Midjourney发布的第五版。" 10 | }, 11 | "media": { 12 | "url": "https://cdn.document360.io/3040c2b6-fead-4744-a3a9-d56d621c6c7e/Images/Documentation/MJ_V51_VibrantPoppies.jpg", 13 | "credit": "媒体来源", 14 | "caption": "网站, DOCUMENTATION", 15 | "thumbnail": "https://cdn.document360.io/3040c2b6-fead-4744-a3a9-d56d621c6c7e/Images/Documentation/MJ_V51_VibrantPoppies.jpg" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#091331" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/lumina-image-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2025, 4 | "month": 1, 5 | "day": 25 6 | }, 7 | "text": { 8 | "headline": "Lumina Image 2.0", 9 | "text": "精通英语和简体中文的多功能图像生成模型,能够在图像中生成连贯文本,并可与ControlNet模型配合使用。" 10 | }, 11 | "media": { 12 | "url": "https://stable-diffusion-art.com/wp-content/uploads/2025/02/image-39.png", 13 | "credit": "媒体来源", 14 | "caption": "GitHub, HuggingFace", 15 | "thumbnail": "https://stable-diffusion-art.com/wp-content/uploads/2025/02/image-39.png" 16 | }, 17 | "group": “Diffusion Model", 18 | "background": { 19 | "color": "#ff6208" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/biggan.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2018, 4 | "month": 9, 5 | "day": 28 6 | }, 7 | "text": { 8 | "headline": "BigGAN", 9 | "text": "一个能够生成多个类别图像的大规模GAN。" 10 | }, 11 | "media": { 12 | "url": "https://machinelearningmastery.com/wp-content/uploads/2019/06/Examples-of-Large-High-Quality-512x512-Class-Conditional-Images-Generated-by-BigGAN.png", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://machinelearningmastery.com/wp-content/uploads/2019/06/Examples-of-Large-High-Quality-512x512-Class-Conditional-Images-Generated-by-BigGAN.png" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#4fb342" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/ltx-video.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 11, 5 | "day": 22 6 | }, 7 | "text": { 8 | "headline": "LTX-Video", 9 | "text": "开源视频生成模型,可在768x512分辨率下生成24 FPS视频,他们声称生成速度比观看视频的时间还要快。" 10 | }, 11 | "media": { 12 | "url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/media/trailer.gif", 13 | "credit": "媒体来源", 14 | "caption": "GitHub, HuggingFace, 网站", 15 | "thumbnail": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/media/trailer.gif" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#9426e1" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/marey.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2025, 4 | "month": 7, 5 | "day": 8 6 | }, 7 | "text": { 8 | "headline": "Marey", 9 | "text": "由Moonvalley和Asteria Film开发的仅使用授权数据训练的生成视频模型。首批不使用未经同意数据收集进行训练的视频模型之一。作为封闭模型,它具有多种控制功能,如姿态控制、风格转换、图像参考和起始-结束帧。仅通过其付费计划提供。该模型之前在3月曾预告,7月正式发布。" 10 | }, 11 | "media": { 12 | "url": "https://framerusercontent.com/assets/F5sGTAVmqMYWMGJtkSBudaZyg.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站, 博客文章", 15 | "thumbnail": 
"https://framerusercontent.com/assets/boCxZxT4qkyvmIr0BJdam9HfOYQ.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#000000" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/big-sleep.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 1, 5 | "day": 18 6 | }, 7 | "text": { 8 | "headline": "Big Sleep", 9 | "text": "Ryan Murdock(@advadnoun)的Colab笔记本,将CLIP连接到BigGAN。第一个使用CLIP从文本生成图像的流行笔记本。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/lucidrains/big-sleep/raw/main/samples/artificial_intelligence.png", 13 | "credit": " 媒体来源", 14 | "caption": " COLAB , GITHUB ", 15 | "thumbnail": "https://github.com/lucidrains/big-sleep/raw/main/samples/artificial_intelligence.png" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#4fb342" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/cogview2.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 4, 5 | "day": 28 6 | }, 7 | "text": { 8 | "headline": "CogView2", 9 | "text": "CogView的继任者,这个文生图模型支持中文和英语。" 10 | }, 11 | "media": { 12 | "url": "https://replicate.com/api/models/thudm/cogview2/files/3281b937-45ae-4ceb-a66f-a2219f71bbea/output_4.png", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 , GITHUB , DEMO ", 15 | "thumbnail": "https://replicate.com/api/models/thudm/cogview2/files/3281b937-45ae-4ceb-a66f-a2219f71bbea/output_4.png" 16 | }, 17 | "group": "VQ-VAE", 18 | "background": { 19 | "color": "#12112a" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/artbreeder-ganbreeder.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2019, 4 | "month": 11, 5 | "day": 19 6 | }, 7 | "text": { 8 | "headline": "Artbreeder (GANBreeder)", 9 | "text": "一个工具,能够在现实肖像或动漫面孔等类别中通过图像和特征参数进行杂交。由StyleGAN和BigGAN提供支持。" 10 | }, 11 | "media": { 12 | "url": "https://upload.wikimedia.org/wikipedia/commons/0/0c/Artbreeder_example_portraits.jpg", 13 | "credit": " 媒体来源", 14 | "caption": " FIRST TWEET , 网站 ", 15 | "thumbnail": "https://upload.wikimedia.org/wikipedia/commons/0/0c/Artbreeder_example_portraits.jpg" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#fa9dfd" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/pyramid-flow.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 10, 5 | "day": 10 6 | }, 7 | "text": { 8 | "headline": "Pyramid Flow", 9 | "text": "基于Flow Matching的开源自回归视频生成方法。仅在开源数据集上训练。" 10 | }, 11 | "media": { 12 | "url": "https://pyramid-flow.github.io/static/videos/t2v_10s/tokyo.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站, 论文, GitHub, 演示", 15 | "thumbnail": "https://pyramid-flow.github.io/static/videos/t2v_10s/tokyo.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#c66d36" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/aleph2image.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 2, 5 | "day": 27 6 | 
}, 7 | "text": { 8 | "headline": "Aleph2Image", 9 | "text": "Ryan Murdock(@advadnoun)的Colab笔记本,使用CLIP和DALLE的解码器生成图像。" 10 | }, 11 | "media": { 12 | "url": "https://live.staticflickr.com/65535/51180527560_9f582c9168_o.png", 13 | "credit": "媒体来源", 14 | "caption": "FIRST TWEET, COLAB", 15 | "thumbnail": "https://live.staticflickr.com/65535/51180527560_9f582c9168_o.png" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#5a2865" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/imagen-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 5, 5 | "day": 14 6 | }, 7 | "text": { 8 | "headline": "Imagen 3", 9 | "text": "谷歌的第三代文生图模型Imagen,可在其ImageFX网站上使用。" 10 | }, 11 | "media": { 12 | "url": "https://lh3.googleusercontent.com/UV358yng5Em7XB5nUOsIHFwK93YkdaXjxGf5c0T1H6zBAikVG2z6AubTjpyp0oLr41W6ge7bBsJlEfx7tGwhi21QNo1vlaKnWLdEPdIohJ5TNllDUg=h1200-rw", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": "https://lh3.googleusercontent.com/UV358yng5Em7XB5nUOsIHFwK93YkdaXjxGf5c0T1H6zBAikVG2z6AubTjpyp0oLr41W6ge7bBsJlEfx7tGwhi21QNo1vlaKnWLdEPdIohJ5TNllDUg=h1200-rw" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/oasis.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 10, 5 | "day": 31 6 | }, 7 | "text": { 8 | "headline": "Oasis", 9 | "text": "基于实时用户输入生成下一帧的交互式视频生成模型。演示版本在Minecraft游戏视频和键盘输入上训练。首个此类开源交互式实时视频生成模型。" 10 | }, 11 | "media": { 12 | "url": "https://oasis-model.github.io/wide1.mp4", 13 | "credit": "媒体来源", 14 | "caption": "博客文章, GitHub, HuggingFace, 演示", 15 | "thumbnail": "https://oasis-model.github.io/wide1.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#1436f5" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/jax-guided-diffusion.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 11, 5 | "day": 9 6 | }, 7 | "text": { 8 | "headline": "JAX Guided Diffusion", 9 | "text": "由@RiversHaveWings和@jd_pressman创建的扩散模型。" 10 | }, 11 | "media": { 12 | "url": "https://weirdwonderfulai.art/wp-content/uploads/2022/05/20220429102028_3_0_250.jpg", 13 | "credit": " 媒体来源", 14 | "caption": " GITHUB , FIRST TWEET ", 15 | "thumbnail": "https://weirdwonderfulai.art/wp-content/uploads/2022/05/20220429102028_3_0_250.jpg" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#fefff2" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/pd12m.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 10, 5 | "day": 30 6 | }, 7 | "text": { 8 | "headline": "PD12M", 9 | "text": "包含1240万张公共领域和CC0许可图像及合成标题的数据集。迄今为止最大的公共领域图像-文本数据集,规模足以训练基础模型,同时最大化减少版权问题。由Spawning(\"Have I Been Trained\"背后的公司)创建。" 10 | }, 11 | "media": { 12 | "url": "https://a-us.storyblok.com/f/1012441/1600x800/d905fee389/public-diffusion-collage.jpg", 13 | "credit": "媒体来源", 14 | "caption": "网站, 论文, HuggingFace", 15 | "thumbnail": 
"https://a-us.storyblok.com/f/1012441/1600x800/d905fee389/public-diffusion-collage.jpg" 16 | }, 17 | "group": “Dataset", 18 | "background": { 19 | "color": "#DF7A31" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/sdxl.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 7, 5 | "day": 26 6 | }, 7 | "text": { 8 | "headline": "SDXL", 9 | "text": "Stability AI开发的更大规模的Stable Diffusion文生图模型,这次训练了1024像素的图像而不是512像素。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/Stability-AI/generative-models/raw/main/assets/000.jpg", 13 | "credit": "媒体来源", 14 | "caption": "GITHUB, 论文, MODEL CARD", 15 | "thumbnail": "https://github.com/Stability-AI/generative-models/raw/main/assets/000.jpg" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#ca0927" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/snap-video.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 2, 5 | "day": 22 6 | }, 7 | "text": { 8 | "headline": "Snap Video", 9 | "text": "Snapchat开发的文生视频模型。公司在图像/视频生成领域的首次尝试。" 10 | }, 11 | "media": { 12 | "url": "https://storage.googleapis.com/snap-snapvideo-website-cdn/video_samples/our_samples_teaser/b822d1a7-d931-4d7f-99ce-8a8282a7a7e7.mp4", 13 | "credit": "媒体来源", 14 | "caption": "论文, 网站", 15 | "thumbnail": "https://storage.googleapis.com/snap-snapvideo-website-cdn/video_samples/our_samples_teaser/b822d1a7-d931-4d7f-99ce-8a8282a7a7e7.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#dfdb00" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/controlnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 2, 5 | "day": 10 6 | }, 7 | "text": { 8 | "headline": "ControlNet", 9 | "text": "一种用于通过不同技术控制扩散模型的神经网络结构。它允许通过img2img对图像结构进行更多控制。不同技术包括边缘检测、深度图、分割图和人体姿态。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/lllyasviel/ControlNet/blob/main/github_page/p17.png?raw=true", 13 | "credit": "媒体来源", 14 | "caption": "论文, GITHUB, 演示", 15 | "thumbnail": "https://github.com/lllyasviel/ControlNet/blob/main/github_page/p17.png?raw=true" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#8281f6" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/niji-journey.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 11, 5 | "day": 7 6 | }, 7 | "text": { 8 | "headline": "Niji Journey", 9 | "text": "Midjourney和Spellbrush合作的漫画/动漫图像模型。使用修改后的Midjourney模型。" 10 | }, 11 | "media": { 12 | "url": "https://cdn.discordapp.com/attachments/978239059979812905/1049707449084555284/Fg7v07iUAAAM9sE.png", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 , FIRST TWEET", 15 | "thumbnail": "https://cdn.discordapp.com/attachments/978239059979812905/1049707449084555284/Fg7v07iUAAAM9sE.png" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#a3b380" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/dalle-mini-craiyon.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 4, 5 | "day": 21 6 | }, 7 | "text": { 8 | "headline": "DALL·E Mini (Craiyon)", 9 | "text": "由Boris Dayma开发的文生图模型,试图成为DALL·E 2的开源版本。因模因在AI社区之外获得人气,后因与OpenAI的法律纠纷而更名为Craiyon。" 10 | }, 11 | "media": { 12 | "url": "https://i.kym-cdn.com/photos/images/newsfeed/002/383/640/bb9", 13 | "credit": " 媒体来源", 14 | "caption": " GITHUB , 网站 , KNOWYOURMEME ", 15 | "thumbnail": "https://i.kym-cdn.com/photos/images/newsfeed/002/383/640/bb9" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#f7732a" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/deepdaze.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 1, 5 | "day": 10 6 | }, 7 | "text": { 8 | "headline": "DeepDaze", 9 | "text": "首个使用CLIP的开源模型,与SIREN(正弦表示网络)配对。由Ryan Murdock(@advadnoun)创建。" 10 | }, 11 | "media": { 12 | "url": "https://preview.redd.it/41bvvncplrb61.png?width=512&format=png&auto=webp&s=da3e316ce923fefb08771c956e23e9f7cd7c7c8b", 13 | "credit": " 媒体来源", 14 | "caption": " FIRST TWEET , GITHUB ", 15 | "thumbnail": "https://cdn.openai.com/research-covers/clip/2x-no-mark.jpg" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#5a2865" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/veo-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 12, 5 | "day": 16 6 | }, 7 | "text": { 8 | "headline": "Veo 2", 9 | "text": "Google DeepMind开发的视频生成模型,在质量、提示词遵循和因果关系方面超越其他视频模型。目前仅通过VideoFX的封闭等待列表或在Fal等不同网站付费使用。" 10 | }, 11 | "media": { 12 | "url": "https://deepmind.google/api/blob/website/media/WM_141324283_12_video_0.webm", 13 | "credit": "媒体来源", 14 | "caption": "网站, 博客文章", 15 | "thumbnail": "https://deepmind.google/api/blob/website/media/WM_141324283_12_video_0.webm" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/vqganclip.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 4, 5 | "day": 11 6 | }, 7 | "text": { 8 | "headline": "VQGAN+CLIP", 9 | "text": "Katherine Crowson(@RiversHaveWings)的Colab笔记本,使文生图模型普及。受Big Sleep启发,这个笔记本是普通用户可以尝试这些工具的最早实例之一。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/nerdyrodent/VQGAN-CLIP/blob/main/samples/Bedroom.png?raw=true", 13 | "credit": " 媒体来源", 14 | "caption": " COLAB , FIRST TWEET ", 15 | "thumbnail": "https://github.com/nerdyrodent/VQGAN-CLIP/blob/main/samples/Bedroom.png?raw=true" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#f7aa29" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/wan.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2025, 4 | "month": 2, 5 | "day": 22 6 | }, 7 | "text": { 8 | "headline": "Wan", 9 | "text": "Wan2.1,原名WanX,是阿里巴巴发布的开源视频生成模型,可通过LoRA微调实现高度个性化的视频生成。" 10 | }, 11 | "media": { 12 | "url": "https://video-intl.alicdn.com/2025/Blog/Wan-2.1Video-in-Original-Quality.mp4", 13 | "credit": "媒体来源", 14 |
"caption": "GitHub, HuggingFace, 网站", 15 | "thumbnail": "https://video-intl.alicdn.com/2025/Blog/Wan-2.1Video-in-Original-Quality.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#5e4bf6" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/cogvideox.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 8, 5 | "day": 6 6 | }, 7 | "text": { 8 | "headline": "CogVideoX", 9 | "text": "CogVideoX 是由智谱开源的与其清影模型同源的文生视频系列模型,目前只开源了 2B 模型,能生成 6 秒长,8帧/秒的视频" 10 | }, 11 | "media": { 12 | "url": "https://github.com/THUDM/CogVideo/raw/main/resources/videos/1.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站\r\nGITHUB\r\nHUGGINGFACE\r\n演示\r\n论文", 15 | "thumbnail": "" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#0746ff" 20 | } 21 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ai-timeline", 3 | "private": true, 4 | "type": "module", 5 | "scripts": { 6 | "build": "nuxt build", 7 | "dev": "nuxt dev", 8 | "generate": "nuxt generate", 9 | "preview": "nuxt preview", 10 | "postinstall": "nuxt prepare" 11 | }, 12 | "dependencies": { 13 | "@knight-lab/timelinejs": "^3.9.3", 14 | "@mrrhq/shadcn-pro": "npm:@mrrhq/shadcn-pro-edge@latest", 15 | "@vueuse/core": "^10.11.0", 16 | "nuxt": "^3.12.4", 17 | "vue": "latest" 18 | }, 19 | "devDependencies": { 20 | "@iconify-json/ph": "^1.1.14", 21 | "@iconify-json/simple-icons": "^1.1.112", 22 | "@nuxt/content": "^2.13.2", 23 | "less": "^4.2.0", 24 | "unocss": "^0.61.9" 25 | }, 26 | "packageManager": "pnpm@9.6.0+sha512.38dc6fba8dba35b39340b9700112c2fe1e12f10b17134715a4aa98ccf7bb035e76fd981cf0bb384dfa98f8d6af5481c2bef2f4266a24bfa20c34eb7147ce0b5e" 27 | } 28 | -------------------------------------------------------------------------------- /content/t2i/events/lenet-5.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 1998, 4 | "month": 0, 5 | "day": 0 6 | }, 7 | "text": { 8 | "headline": "LeNet-5", 9 | "text": "最早的卷积神经网络之一,设计用于手写和机器打印字符识别。" 10 | }, 11 | "media": { 12 | "url": "https://www.researchgate.net/profile/Vina-Ayumi/publication/308788359/figure/fig1/AS:412924624556032@1475460310318/Architecture-of-CNN-by-LeCun-et-al-LeNet5.png", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://www.researchgate.net/profile/Vina-Ayumi/publication/308788359/figure/fig1/AS:412924624556032@1475460310318/Architecture-of-CNN-by-LeCun-et-al-LeNet5.png" 16 | }, 17 | "group": "CNN Model", 18 | "background": { 19 | "color": "#303030" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/pika-labs.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 6, 5 | "day": 28 6 | }, 7 | "text": { 8 | "headline": "Pika Labs", 9 | "text": "通过Discord服务器运行的文生视频模型。Pika 1.0于2023年11月28日宣布拥有自己的网站。" 10 | }, 11 | "media": { 12 | "url": "https://static.wixstatic.com/media/af70ef_656e8758c8b3475b9ba2b873e0e6d728f000.jpg/v1/fill/w_632,h_293,al_c,q_80,usm_0.33_1.00_0.00,enc_auto/af70ef_656e8758c8b3475b9ba2b873e0e6d728f000.jpg", 13 | "credit": "媒体来源", 14 | "caption": "网站", 15 | "thumbnail": 
"https://static.wixstatic.com/media/af70ef_656e8758c8b3475b9ba2b873e0e6d728f000.jpg/v1/fill/w_632,h_293,al_c,q_80,usm_0.33_1.00_0.00,enc_auto/af70ef_656e8758c8b3475b9ba2b873e0e6d728f000.jpg" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#f2b64e" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/zeroscope.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 6, 5 | "day": 3 6 | }, 7 | "text": { 8 | "headline": "Zeroscope", 9 | "text": "一个基于Modelscope的开源文生视频模型。不同版本可用,质量和大小不断增加。由Spencer Sterling开发。" 10 | }, 11 | "media": { 12 | "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/darthvader_cerpense.gif", 13 | "credit": "媒体来源", 14 | "caption": "GITHUB, HUGGINGFACE", 15 | "thumbnail": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/darthvader_cerpense.gif" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#d40c3c" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/cogvideo.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 5, 5 | "day": 29 6 | }, 7 | "text": { 8 | "headline": "CogVideo", 9 | "text": "由CogView的创建者开发,CogVideo是一个可以生成短GIF的中文文生视频模型。" 10 | }, 11 | "media": { 12 | "url": "https://wudao.aminer.cn/cogvideo_images/%E7%B3%BB%E5%88%974/41_%E5%A5%B3%E4%BA%BA%E5%9C%A8%E5%86%AC%E5%A4%A9%E6%95%A3%E6%AD%A5/3.gif", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 , GITHUB , 网站 ", 15 | "thumbnail": "https://wudao.aminer.cn/cogvideo_images/%E7%B3%BB%E5%88%974/41_%E5%A5%B3%E4%BA%BA%E5%9C%A8%E5%86%AC%E5%A4%A9%E6%95%A3%E6%AD%A5/3.gif" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#12112a" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/stable-diffusion-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 11, 5 | "day": 24 6 | }, 7 | "text": { 8 | "headline": "Stable Diffusion 2", 9 | "text": "Stable Diffusion的更新版本,与v1相比一切都是开源的。v1使用OpenAI的CLIP,v2使用由LAION开发并由Stability AI支持的OpenCLIP。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/Stability-AI/stablediffusion/raw/main/assets/stable-samples/txt2img/768/merged-0005.png", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 , GITHUB ", 15 | "thumbnail": "https://github.com/Stability-AI/stablediffusion/raw/main/assets/stable-samples/txt2img/768/merged-0005.png" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#ca0927" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/recraft-v3.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 10, 5 | "day": 30 6 | }, 7 | "text": { 8 | "headline": "Recraft v3", 9 | "text": "发布时最好的图像生成器之一,由Recraft创建,该公司自称为专业设计师AI公司。该模型能够生成非常长的连贯文本,最重要的是能生成SVG图像。仅通过其网站和API提供。" 10 | }, 11 | "media": { 12 | "url": "https://cdn.prod.website-files.com/655741af3f04e006606d26ad/6722a4e02cb3fd04911494e0_jpg-16-p-1080.jpg", 13 | "credit": "媒体来源", 14 | "caption": "博客文章", 15 | "thumbnail": 
"https://cdn.prod.website-files.com/655741af3f04e006606d26ad/6722a4e02cb3fd04911494e0_jpg-16-p-1080.jpg" 16 | }, 17 | "group": “Diffusion Model", 18 | "background": { 19 | "color": "#ff5b00" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/sora-release.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 12, 5 | "day": 9 6 | }, 7 | "text": { 8 | "headline": "Sora (发布)", 9 | "text": "OpenAI早在2月就宣布的视频生成扩散模型。迄今为止最受期待的视觉生成模型,通过20美元或200美元订阅提供。发布版本是之前模型的turbo版本。虽然与大多数其他视频模型类似,但其故事板界面是首创,允许对一个动作接一个动作进行关键帧设置,并无缝融合两个视频。" 10 | }, 11 | "media": { 12 | "url": "https://bgr.com/wp-content/uploads/2024/12/OpenAI-Sora.jpg?quality=82", 13 | "credit": "媒体来源", 14 | "caption": "网站, 博客文章, 系统卡", 15 | "thumbnail": "https://bgr.com/wp-content/uploads/2024/12/OpenAI-Sora.jpg?quality=82" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#4c46f0" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/stable-diffusion-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 2, 5 | "day": 22 6 | }, 7 | "text": { 8 | "headline": "Stable Diffusion 3", 9 | "text": "Stability AI开发的最受欢迎的开源图像生成模型Stable Diffusion的第三代。虽然模型尚未发布,但已开放早期预览候补名单。" 10 | }, 11 | "media": { 12 | "url": "https://images.squarespace-cdn.com/content/v1/6213c340453c3f502425776e/c24904d4-f0f0-4a26-9470-fec227dde15c/image-90.png?format=1500w", 13 | "credit": "媒体来源", 14 | "caption": "论文, ARTICLE", 15 | "thumbnail": "https://images.squarespace-cdn.com/content/v1/6213c340453c3f502425776e/c24904d4-f0f0-4a26-9470-fec227dde15c/image-90.png?format=1500w" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#ca0927" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/potat1.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 6, 5 | "day": 5 6 | }, 7 | "text": { 8 | "headline": "Potat1", 9 | "text": "一个文生视频模型,第一个开源生成1024x576视频的模型。由Camenduru开发,以Modelscope为基础模型。" 10 | }, 11 | "media": { 12 | "url": "https://user-images.githubusercontent.com/54370274/244223175-604a8817-5ad3-421a-9240-e5d3b195074d.mp4", 13 | "credit": "媒体来源", 14 | "caption": "GITHUB, TWEET, 演示", 15 | "thumbnail": "https://user-images.githubusercontent.com/54370274/244223175-604a8817-5ad3-421a-9240-e5d3b195074d.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#d40c0c" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/wurstchen.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 6, 5 | "day": 1 6 | }, 7 | "text": { 8 | "headline": "Würstchen", 9 | "text": "一个生成成本更低的文生图模型,因为其高度压缩的潜在空间(名字很搞笑)。" 10 | }, 11 | "media": { 12 | "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/wuertschen/Anthropomorphic_cat_dressed_as_a_fire_fighter.jpg", 13 | "credit": "媒体来源", 14 | "caption": "论文, BLOG POST, GITHUB", 15 | "thumbnail": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/wuertschen/Anthropomorphic_cat_dressed_as_a_fire_fighter.jpg" 16 | }, 17 | 
"group": "Diffusion Model", 18 | "background": { 19 | "color": "#f72672" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/clip-guided-diffusion.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 6, 5 | "day": 26 6 | }, 7 | "text": { 8 | "headline": "CLIP Guided Diffusion", 9 | "text": "虽然是指与CLIP一起工作的扩散模型的通用术语,但这是第一个CLIP引导的扩散模型。由Katherine Crowson(@RiversHaveWings)创建。" 10 | }, 11 | "media": { 12 | "url": "https://live.staticflickr.com/65535/51350255352_6c1e592c26_o.png", 13 | "credit": " 媒体来源", 14 | "caption": " GITHUB , FIRST TWEET , COLAB ", 15 | "thumbnail": "https://live.staticflickr.com/65535/51350255352_6c1e592c26_o.png" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#12443b" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/imagen-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 12, 5 | "day": 13 6 | }, 7 | "text": { 8 | "headline": "Imagen 2", 9 | "text": "谷歌的文生图模型。这个是首个Imagen的继任者,用于各种谷歌生成服务,如Gemini。" 10 | }, 11 | "media": { 12 | "url": "https://lh3.googleusercontent.com/Tlsj9qg0NNcYZxuatYzuKhSBXOXaDVa3-e3gWCFLKjXFRnxjnEz3ozhTqUPGEzHYnkF5tTpNFWCO0WgG4KsC39xjzhpNazKjqnit1jniYwaKc-Xmeg=h600-rw", 13 | "credit": "媒体来源", 14 | "caption": "网站, BLOG", 15 | "thumbnail": "https://lh3.googleusercontent.com/Tlsj9qg0NNcYZxuatYzuKhSBXOXaDVa3-e3gWCFLKjXFRnxjnEz3ozhTqUPGEzHYnkF5tTpNFWCO0WgG4KsC39xjzhpNazKjqnit1jniYwaKc-Xmeg=h600-rw" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/meta-movie-gen.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 10, 5 | "day": 4 6 | }, 7 | "text": { 8 | "headline": "Meta Movie Gen", 9 | "text": "Meta开发的视频生成模型,可从文本创建视频、编辑现有视频,并通过人脸输入将人物放入生成的场景中。目前尚未发布。" 10 | }, 11 | "media": { 12 | "url": "https://media.wired.com/clips/66ff6ba6967e31bbf220c2f7/master/pass/Comp%201%20copy%202.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站, 论文, 博客文章", 15 | "thumbnail": "https://media.wired.com/clips/66ff6ba6967e31bbf220c2f7/master/pass/Comp%201%20copy%202.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#285abd" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/latent-diffusion.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 12, 5 | "day": 20 6 | }, 7 | "text": { 8 | "headline": "Latent Diffusion", 9 | "text": "由CompVis开发的文生图模型。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/CompVis/latent-diffusion/raw/main/assets/txt2img-preview.png", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 , GITHUB , COLAB ", 15 | "thumbnail": "https://github.com/CompVis/latent-diffusion/raw/main/assets/txt2img-preview.png" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#e1a240" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/veo-3.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"start_date": { 3 | "year": 2025, 4 | "month": 5, 5 | "day": 20 6 | }, 7 | "text": { 8 | "headline": "Veo 3", 9 | "text": "Google DeepMind开发的视频生成模型,还可以原生生成声音和语音。首个具备这些一体化功能的模型,Veo 3允许使用图像参考生成视频,可在Google的服务如Flow和Gemini聊天界面中使用。" 10 | }, 11 | "media": { 12 | "url": "https://deepmind.google/api/blob/website/media/us_wm_veo_3_a-medium-shot-frames-an-old-sailor-his-knitted-blue-sailor-hat_LLpQrIw.mp4", 13 | "credit": "媒体来源", 14 | "caption": "网站, 模型卡", 15 | "thumbnail": "https://deepmind.google/api/blob/website/media/us_wm_veo_3_a-medium-shot-frames-an-old-sailor-his-knitted-blue-sailor-hat_LLpQrIw.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#4388f0" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/laion-5b.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 5, 5 | "day": 27 6 | }, 7 | "text": { 8 | "headline": "LAION-5B", 9 | "text": "LAION最大的开放数据集,包含58.5亿个CLIP过滤的图像-文本对,比其前身LAION-400M大14倍。" 10 | }, 11 | "media": { 12 | "url": "https://lh5.googleusercontent.com/u4ax53sZ0oABJ2tCt4FH6fs4V6uUQ_DRirV24fX0EPpGLMZrA8OlknEohbC0L1Nctvo7hLi01R4I0a3HCfyUMnUcCm76u86ML5CyJ-5boVk_8E5BPG5Z2eeJtPDQ00IhVE-camk4", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 ", 15 | "thumbnail": "https://lh5.googleusercontent.com/u4ax53sZ0oABJ2tCt4FH6fs4V6uUQ_DRirV24fX0EPpGLMZrA8OlknEohbC0L1Nctvo7hLi01R4I0a3HCfyUMnUcCm76u86ML5CyJ-5boVk_8E5BPG5Z2eeJtPDQ00IhVE-camk4" 16 | }, 17 | "group": "Dataset", 18 | "background": { 19 | "color": "#ca0927" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/tooncrafter.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 5, 5 | "day": 28 6 | }, 7 | "text": { 8 | "headline": "ToonCrafter", 9 | "text": "生成动画插帧的生成模型,能够生成两帧或多帧图像之间的插帧。与其他插帧模型不同,这是由生成视频模型驱动的,能够预测更准确的运动。它还可以为草图上色。" 10 | }, 11 | "media": { 12 | "url": "https://doubiiu.github.io/projects/ToonCrafter/02comparison/Japan_v2_1_070321_s3_frame1/Japan_v2_1_070321_s3_frame1_Ours.mp4", 13 | "credit": "媒体来源", 14 | "caption": "论文, 网站, 演示", 15 | "thumbnail": "https://doubiiu.github.io/projects/ToonCrafter/02comparison/Japan_v2_1_070321_s3_frame1/Japan_v2_1_070321_s3_frame1_Ours.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#273118" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/stable-diffusion.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 8, 5 | "day": 22 6 | }, 7 | "text": { 8 | "headline": "Stable Diffusion", 9 | "text": "Stability AI和CompVis开发的开源文生图模型。" 10 | }, 11 | "media": { 12 | "url": "https://github.com/CompVis/stable-diffusion/raw/main/assets/stable-samples/img2img/mountains-3.png", 13 | "credit": " 媒体来源", 14 | "caption": " 网站 , RELEASE POST , HUGGINGFACE, GITHUB", 15 | "thumbnail": "https://github.com/CompVis/stable-diffusion/raw/main/assets/stable-samples/img2img/mountains-3.png" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#ca0927" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/alexnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": 
{ 3 | "year": 2012, 4 | "month": 9, 5 | "day": 12 6 | }, 7 | "text": { 8 | "headline": "AlexNet", 9 | "text": "在2012年ImageNet竞赛(ILSVRC12)中,这个卷积神经网络革新了图像分类的方式。" 10 | }, 11 | "media": { 12 | "url": "https://lh6.googleusercontent.com/UHM_RdQEmlv6Y28gvKxBLrUTgvgQuS8aH8oLIm0VZJZ2n7AJuF9sX3LfmzFDFXZMfoiJCLTT9HZpC6C0x0jeUAXog1pQdE3G1YOt1HpT8exDsddJsnEiWkczEkdekOrROZnB2j2r", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://lh6.googleusercontent.com/UHM_RdQEmlv6Y28gvKxBLrUTgvgQuS8aH8oLIm0VZJZ2n7AJuF9sX3LfmzFDFXZMfoiJCLTT9HZpC6C0x0jeUAXog1pQdE3G1YOt1HpT8exDsddJsnEiWkczEkdekOrROZnB2j2r" 16 | }, 17 | "group": "CNN Model", 18 | "background": { 19 | "color": "#fdaaab" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/videojam.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2025, 4 | "month": 2, 5 | "day": 4 6 | }, 7 | "text": { 8 | "headline": "VideoJAM", 9 | "text": "Meta开发的框架,为任何视频生成模型注入强烈的运动先验,通过增强模型运动的真实感来提升效果。" 10 | }, 11 | "media": { 12 | "url": "https://hila-chefer.github.io/videojam-paper.github.io/assets/our_results_labeled/A_ballet_dancer_twirls_on_the_surface_of_a_still_lake,_the_golden_sunset_casting_warm_hues..mp4", 13 | "credit": "媒体来源", 14 | "caption": "论文, GitHub", 15 | "thumbnail": "https://hila-chefer.github.io/videojam-paper.github.io/assets/our_results_labeled/A_ballet_dancer_twirls_on_the_surface_of_a_still_lake,_the_golden_sunset_casting_warm_hues..mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#285abd" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/latent-consistency-model.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 10, 5 | "day": 6 6 | }, 7 | "text": { 8 | "headline": "Latent Consistency Model", 9 | "text": "一个替代潜在扩散模型的文生图模型,能够在几个推理步骤中生成高质量的图像。一个流行的应用是LCM LoRAs,发布于2023年11月9日,可以加速Stable Diffusion模型中的生成过程。" 10 | }, 11 | "media": { 12 | "url": "https://latent-consistency-models.github.io/static/images/4Step_Image/realistic_115_sample_0.png", 13 | "credit": "媒体来源", 14 | "caption": "论文, GITHUB, 网站, LCM LoRAs 论文", 15 | "thumbnail": "https://latent-consistency-models.github.io/static/images/4Step_Image/realistic_115_sample_0.png" 16 | }, 17 | "group": "Other", 18 | "background": { 19 | "color": "#426023" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/stylegan.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2018, 4 | "month": 12, 5 | "day": 12 6 | }, 7 | "text": { 8 | "headline": "StyleGAN", 9 | "text": "NVIDIA受风格迁移技术启发的GAN。首次生成的面孔看起来异常真实,通过https://thispersondoesnotexist.com/网站(现由StyleGAN2提供支持)变得流行。" 10 | }, 11 | "media": { 12 | "url": "https://www.researchgate.net/publication/340774978/figure/fig2/AS:960479711203329@1606007616532/Examples-of-face-images-generated-by-StyleGAN-model-Karras-etal-2019-The-images-in.png", 13 | "credit": " 媒体来源", 14 | "caption": " 论文 ", 15 | "thumbnail": "https://www.researchgate.net/publication/340774978/figure/fig2/AS:960479711203329@1606007616532/Examples-of-face-images-generated-by-StyleGAN-model-Karras-etal-2019-The-images-in.png" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#76b52c" 20 | } 21 | } 
-------------------------------------------------------------------------------- /content/t2i/events/styletransfer.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2016, 4 | "month": 6 5 | }, 6 | "text": { 7 | "headline": "StyleTransfer", 8 | "text": "一个能够分离图像的内容与风格,并将不同的内容和风格重新组合的深度神经网络。" 9 | }, 10 | "media": { 11 | "url": "https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2F3.bp.blogspot.com%2F-jYGbp0Ow1Cc%2FWA6oWw63F7I%2FAAAAAAAABWc%2F8_E5A1dbPP4xeo1GuIGTsYvG6TuXIfmoQCLcB%2Fs1600%2Fimage06.png&f=1&nofb=1", 12 | "credit": " 媒体来源", 13 | "caption": " 论文 ", 14 | "thumbnail": "https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2F3.bp.blogspot.com%2F-jYGbp0Ow1Cc%2FWA6oWw63F7I%2FAAAAAAAABWc%2F8_E5A1dbPP4xeo1GuIGTsYvG6TuXIfmoQCLcB%2Fs1600%2Fimage06.png&f=1&nofb=1" 15 | }, 16 | "group": "CNN Model", 17 | "background": { 18 | "color": "#dd723c" 19 | } 20 | } -------------------------------------------------------------------------------- /content/t2i/events/centipede-diffusion.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 4, 5 | "day": 20 6 | }, 7 | "text": { 8 | "headline": "Centipede Diffusion", 9 | "text": "正如名字所暗示的,这个笔记本结合了两个扩散模型的优势。潜在扩散在连贯性方面表现良好,而Disco Diffusion在艺术性方面更好,结合后它们创造了一个中间地带。" 10 | }, 11 | "media": { 12 | "url": "https://preview.redd.it/gzrl97a9unu81.png?width=768&format=png&auto=webp&s=14d28ef1b59fadf844348a687fa69734feb88ec8", 13 | "credit": " 媒体来源", 14 | "caption": " FIRST POST , COLAB ", 15 | "thumbnail": "https://preview.redd.it/gzrl97a9unu81.png?width=768&format=png&auto=webp&s=14d28ef1b59fadf844348a687fa69734feb88ec8" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#e4a734" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/4o-image-generation.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2025, 4 | "month": 3, 5 | "day": 25 6 | }, 7 | "text": { 8 | "headline": "4o Image Generation", 9 | "text": "原生嵌入ChatGPT的自回归模型。OpenAI的图像模型DALLE 3的继任者,在提示词遵循、长文本准确性和图像到图像功能方面表现出色。通过ChatGPT界面或Sora网站提供。" 10 | }, 11 | "media": { 12 | "url": "https://images.ctfassets.net/kftzwdyauwt9/5msykBd6Wu5mBcTgoqeJkj/4481c11698ff69f3d44d4c6220fade12/hero_image_1-whiteboard1.png?w=828&q=90&fm=webp", 13 | "credit": "媒体来源", 14 | "caption": "博客文章, 系统卡", 15 | "thumbnail": "https://images.ctfassets.net/kftzwdyauwt9/5msykBd6Wu5mBcTgoqeJkj/4481c11698ff69f3d44d4c6220fade12/hero_image_1-whiteboard1.png?w=828&q=90&fm=webp" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#4c46f0" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/adobe-firefly.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 3, 5 | "day": 21 6 | }, 7 | "text": { 8 | "headline": "Adobe Firefly", 9 | "text": "Firefly是Adobe开发的生成式文生图工具系列。" 10 | }, 11 | "media": { 12 | "url": "https://i.guim.co.uk/img/media/66f3ced92079ae2635e031bc1139cfe11279f411/19_0_762_457/master/762.jpg?width=620&quality=85&dpr=1&s=none", 13 | "credit": "媒体来源", 14 | "caption": "网站, LAUNCH POST", 15 | "thumbnail": 
"https://i.guim.co.uk/img/media/66f3ced92079ae2635e031bc1139cfe11279f411/19_0_762_457/master/762.jpg?width=620&quality=85&dpr=1&s=none" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#fa1000" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/flux1.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 8, 5 | "day": 1 6 | }, 7 | "text": { 8 | "headline": "FLUX.1", 9 | "text": "FLUX.1由前 SAI 开发者组成的Black Forest Labs团队推出首个文本到图像生成模型。现有三个不同版本:Pro,效果最好只支持API调用;Dev,开放权重模型,可用于非商业应用;Schnell,速度最快,基于 Apache 2.0" 10 | }, 11 | "media": { 12 | "url": "https://blog.fal.ai/content/images/size/w2000/2024/08/HcS0Kgj7eNkeoC5PCMbkS.jpeg", 13 | "credit": "媒体来源", 14 | "caption": "网站, HUGGINGFACE, GITHUB, 演示, 博客文章", 15 | "thumbnail": "https://blog.fal.ai/content/images/size/w2000/2024/08/HcS0Kgj7eNkeoC5PCMbkS.jpeg" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#102522" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/modelscope-text2video-synthesis.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2023, 4 | "month": 3, 5 | "day": 19 6 | }, 7 | "text": { 8 | "headline": "ModelScope Text2Video Synthesis", 9 | "text": "一个通过英语提示生成2秒视频的文生视频模型。由同名的中国模型即服务库发布,由阿里巴巴拥有。" 10 | }, 11 | "media": { 12 | "url": "https://www.modelscope.cn/api/v1/models/damo/cv_diffusion_text-to-video-synthesis/repo?Revision=master&FilePath=./samples/A_litter_of_puppies_running_through_the_yard.gif&View=true", 13 | "credit": "媒体来源", 14 | "caption": "网站, GITHUB, 演示", 15 | "thumbnail": "https://www.modelscope.cn/api/v1/models/damo/cv_diffusion_text-to-video-synthesis/repo?Revision=master&FilePath=./samples/A_litter_of_puppies_running_through_the_yard.gif&View=true" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#5e4bf6" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/firefly-video-model.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2024, 4 | "month": 9, 5 | "day": 11 6 | }, 7 | "text": { 8 | "headline": "Firefly Video Model", 9 | "text": "Adobe开发的文本到视频模型。目前尚未发布,只能通过加入等待列表。这是一个商业安全的模型,不同之处在于它还能生成与图像在风格上相似的视频,将来还会集成到Adobe Premiere中。" 10 | }, 11 | "media": { 12 | "url": "https://techcrunch.com/wp-content/uploads/2024/09/cinematic-closeup-and-detailed-portrait-of-a-reindeer-in-a-snowy-forest-at-sunset.-the-lighting-is-cinematic-and-gorgeous-and-soft-and-sun-kissed-with-golden-backlight-and-dreamy-bokeh-and-lens-flares.mp4", 13 | "credit": "媒体来源", 14 | "caption": "博客", 15 | "thumbnail": "https://techcrunch.com/wp-content/uploads/2024/09/cinematic-closeup-and-detailed-portrait-of-a-reindeer-in-a-snowy-forest-at-sunset.-the-lighting-is-cinematic-and-gorgeous-and-soft-and-sun-kissed-with-golden-backlight-and-dreamy-bokeh-and-lens-flares.mp4" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#fa1000" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /content/t2i/events/ernie-vilg-20.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2022, 4 | "month": 10, 5 | "day": 27 6 | }, 7 | "text": { 8 
| "headline": "ERNIE ViLG 2.0", 9 | "text": "百度的文生图模型。参数少于DALLE或Stable Diffusion,但在空间理解和颜色匹配方面表现出色。" 10 | }, 11 | "media": { 12 | "url": "https://camo.githubusercontent.com/2cc4a89a3169b5e9f86c02e65313c44d0cfc45c0d3e9be689d2f649331bab4a0/68747470733a2f2f6263652e62647374617469632e636f6d2f646f632f414944502f77656e78696e2f335f373264393334332e706e67", 13 | "credit": " 媒体来源", 14 | "caption": " DEMO , 论文 , GITHUB", 15 | "thumbnail": "https://camo.githubusercontent.com/2cc4a89a3169b5e9f86c02e65313c44d0cfc45c0d3e9be689d2f649331bab4a0/68747470733a2f2f6263652e62647374617469632e636f6d2f646f632f414944502f77656e78696e2f335f373264393334332e706e67" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#ffa500" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/pytti-5.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 11, 5 | "day": 20 6 | }, 7 | "text": { 8 | "headline": "Pytti 5", 9 | "text": "一个由sportsracer48创建的基于VQGAN的笔记本,在Patreon上作为封闭测试版提供。基于Katherine Crowson的笔记本,Pytti 5以创建迷幻动画而闻名。" 10 | }, 11 | "media": { 12 | "url": "https://raw.githubusercontent.com/dmarx/pytti-settings-test/main/images_out/permutations_limited_palette_2D_palettes-10_palette_size-3_gamma-0_hdr_weight-0_smoothing_weight-0_palette_normalization_weight-0/permutations_limited_palette_2D_palettes-10_palette_size-3_gamma-0_hdr_weight-0_smoothing_weight-0_palette_normalization_weight-0_21.png", 13 | "credit": " 媒体来源", 14 | "caption": " COLAB ", 15 | "thumbnail": "https://raw.githubusercontent.com/dmarx/pytti-settings-test/main/images_out/permutations_limited_palette_2D_palettes-10_palette_size-3_gamma-0_hdr_weight-0_smoothing_weight-0_palette_normalization_weight-0/permutations_limited_palette_2D_palettes-10_palette_size-3_gamma-0_hdr_weight-0_smoothing_weight-0_palette_normalization_weight-0_21.png" 16 | }, 17 | "group": "GAN", 18 | "background": { 19 | "color": "#191054" 20 | } 21 | } -------------------------------------------------------------------------------- /content/t2i/events/disco-diffusion.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2021, 4 | "month": 10, 5 | "day": 29 6 | }, 7 | "text": { 8 | "headline": "Disco Diffusion", 9 | "text": "从Crowson的CLIP引导扩散模型演变而来,Disco Diffusion是一个流行的文生图模型,可以创建画风图像。" 10 | }, 11 | "media": { 12 | "url": "https://sweet-hall-e72.notion.site/image/https%3A%2F%2Fs3-us-west-2.amazonaws.com%2Fsecure.notion-static.com%2F7c0f3149-8bf5-4026-8db0-7b203a1867c3%2Fimage37.png?table=block&id=fe1f6aa6-af84-4fb2-b083-1915eaa63b4c&spaceId=384da9c9-4a1a-4574-9728-bda0273a7d8b&width=1340&userId=&cache=v2", 13 | "credit": " 媒体来源", 14 | "caption": " COLAB , TWEET ", 15 | "thumbnail": "https://sweet-hall-e72.notion.site/image/https%3A%2F%2Fs3-us-west-2.amazonaws.com%2Fsecure.notion-static.com%2F7c0f3149-8bf5-4026-8db0-7b203a1867c3%2Fimage37.png?table=block&id=fe1f6aa6-af84-4fb2-b083-1915eaa63b4c&spaceId=384da9c9-4a1a-4574-9728-bda0273a7d8b&width=1340&userId=&cache=v2" 16 | }, 17 | "group": "Diffusion Model", 18 | "background": { 19 | "color": "#c32713" 20 | } 21 | } -------------------------------------------------------------------------------- /public/locale/zh-cn.json: -------------------------------------------------------------------------------- 1 | { 2 | "lang": "zh-cn", 3 | "date": { 4 | "month_abbr": [ 5 | "1月", 6 | "2月", 7 | "3月", 8 | "4月", 9 | "5月", 
10 | "6月", 11 | "7月", 12 | "8月", 13 | "9月", 14 | "10月", 15 | "11月", 16 | "12月" 17 | ], 18 | "day_abbr": [ 19 | "周日", 20 | "周一", 21 | "周二", 22 | "周三", 23 | "周四", 24 | "周五", 25 | "周六" 26 | ], 27 | "day": [ 28 | "星期日", 29 | "星期一", 30 | "星期二", 31 | "星期三", 32 | "星期四", 33 | "星期五", 34 | "星期六" 35 | ], 36 | "month": [ 37 | "1月", 38 | "2月", 39 | "3月", 40 | "4月", 41 | "5月", 42 | "6月", 43 | "7月", 44 | "8月", 45 | "9月", 46 | "10月", 47 | "11月", 48 | "12月" 49 | ] 50 | }, 51 | "api": { 52 | "wikipedia": "zh" 53 | }, 54 | "messages": { 55 | "loading": "加载中", 56 | "return_to_title": "回到开头", 57 | "wikipedia": "来自维基百科,自由的百科全书", 58 | "loading_content": "正在加载内容", 59 | "loading_timeline": "加载时间线... ", 60 | "swipe_to_navigate": "左右拨来浏览
OK" 61 | }, 62 | "dateformats": { 63 | "full_long": "dddd',' yyyy年 mmm d日'um' HH:MM", 64 | "full_short": "mmm d日", 65 | "full": "yyyy年mmmm d日", 66 | "month_short": "mmm", 67 | "time_no_seconds_small_date": "HH:MM'
'yyyy年mmmm d日''", 68 | "month": "yyyy年 mmmm", 69 | "time_no_seconds_short": "HH:MM", 70 | "time_short": "HH:MM:ss", 71 | "year": "yyyy年", 72 | "full_long_small_date": "HH:MM'
'dddd',' yyyy年 mmm d日''" 73 | } 74 | } -------------------------------------------------------------------------------- /content/t2i/events/skyreels-v1.json: -------------------------------------------------------------------------------- 1 | { 2 | "start_date": { 3 | "year": 2025, 4 | "month": 2, 5 | "day": 18 6 | }, 7 | "text": { 8 | "headline": "SkyReels V1", 9 | "text": "通过在高质量影视片段大数据集上微调Hunyuan视频模型创建的视频生成模型。" 10 | }, 11 | "media": { 12 | "url": "https://private-user-images.githubusercontent.com/82783347/412786663-2dbd116a-033d-4f7e-bd90-78a3da47cd9c.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NDM0MzQ1OTAsIm5iZiI6MTc0MzQzNDI5MCwicGF0aCI6Ii84Mjc4MzM0Ny80MTI3ODY2NjMtMmRiZDExNmEtMDMzZC00ZjdlLWJkOTAtNzhhM2RhNDdjZDljLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTAzMzElMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwMzMxVDE1MTgxMFomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTEzYTYwZGFkMDViMjM1ODdhN2E0N2NiZjVhNTdjYzMzNGI2YjVmNTk0OWRkN2QxY2FmNTI5MmEwOTJmZTY4MWYmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.UzCD7ayMn0djIGFLJ6yWtR2SsQhGPN9_psov2MKC9zA", 13 | "credit": "媒体来源", 14 | "caption": "GitHub, HuggingFace, 网站", 15 | "thumbnail": "https://private-user-images.githubusercontent.com/82783347/412786663-2dbd116a-033d-4f7e-bd90-78a3da47cd9c.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NDM0MzQ1OTAsIm5iZiI6MTc0MzQzNDI5MCwicGF0aCI6Ii84Mjc4MzM0Ny80MTI3ODY2NjMtMmRiZDExNmEtMDMzZC00ZjdlLWJkOTAtNzhhM2RhNDdjZDljLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTAzMzElMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwMzMxVDE1MTgxMFomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTEzYTYwZGFkMDViMjM1ODdhN2E0N2NiZjVhNTdjYzMzNGI2YjVmNTk0OWRkN2QxY2FmNTI5MmEwOTJmZTY4MWYmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.UzCD7ayMn0djIGFLJ6yWtR2SsQhGPN9_psov2MKC9zA" 16 | }, 17 | "group": "Text-To-Video", 18 | "background": { 19 | "color": "#e5c314" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /pages/index.vue: -------------------------------------------------------------------------------- 1 | 68 | -------------------------------------------------------------------------------- /.vscode/timelineSlideSchema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "title": "TimelineJS JSON Schema", 4 | "type": "object", 5 | "$ref": "#/definitions/slide", 6 | "definitions": { 7 | "slide": { 8 | "type": "object", 9 | "properties": { 10 | "start_date": { 11 | "$ref": "#/definitions/date" 12 | }, 13 | "end_date": { 14 | "$ref": "#/definitions/date" 15 | }, 16 | "text": { 17 | "$ref": "#/definitions/text" 18 | }, 19 | "media": { 20 | "$ref": "#/definitions/media" 21 | }, 22 | "group": { 23 | "type": "string" 24 | }, 25 | "display_date": { 26 | "type": "string" 27 | }, 28 | "background": { 29 | "type": "object", 30 | "properties": { 31 | "url": { 32 | "type": "string" 33 | }, 34 | "alt": { 35 | "type": "string" 36 | }, 37 | "color": { 38 | "type": "string" 39 | } 40 | } 41 | }, 42 | "autolink": { 43 | "type": "boolean" 44 | }, 45 | "unique_id": { 46 | "type": "string" 47 | } 48 | } 49 | }, 50 | "date": { 51 | "type": "object", 52 | "properties": { 
53 | "year": { 54 | "type": "integer" 55 | }, 56 | "month": { 57 | "type": "integer", 58 | "minimum": 1, 59 | "maximum": 12 60 | }, 61 | "day": { 62 | "type": "integer" 63 | }, 64 | "hour": { 65 | "type": "integer", 66 | "minimum": 0, 67 | "maximum": 23 68 | }, 69 | "minute": { 70 | "type": "integer", 71 | "minimum": 0, 72 | "maximum": 59 73 | }, 74 | "second": { 75 | "type": "integer", 76 | "minimum": 0, 77 | "maximum": 59 78 | }, 79 | "millisecond": { 80 | "type": "integer" 81 | }, 82 | "display_date": { 83 | "type": "string" 84 | }, 85 | "format": { 86 | "type": "string" 87 | } 88 | }, 89 | "required": [ 90 | "year" 91 | ] 92 | }, 93 | "text": { 94 | "type": "object", 95 | "properties": { 96 | "headline": { 97 | "type": "string" 98 | }, 99 | "text": { 100 | "type": "string" 101 | } 102 | } 103 | }, 104 | "media": { 105 | "type": "object", 106 | "properties": { 107 | "url": { 108 | "type": "string" 109 | }, 110 | "caption": { 111 | "type": "string" 112 | }, 113 | "credit": { 114 | "type": "string" 115 | }, 116 | "thumbnail": { 117 | "type": "string" 118 | }, 119 | "alt": { 120 | "type": "string" 121 | }, 122 | "title": { 123 | "type": "string" 124 | }, 125 | "link": { 126 | "type": "string" 127 | }, 128 | "link_target": { 129 | "type": "string" 130 | } 131 | }, 132 | "required": [ 133 | "url" 134 | ] 135 | } 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /components/Timeline3.client.vue: -------------------------------------------------------------------------------- 1 | 28 | 29 | 32 | 33 | 140 | -------------------------------------------------------------------------------- /.vscode/timelineSchema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "title": "TimelineJS JSON Schema", 4 | "type": "object", 5 | "properties": { 6 | "events": { 7 | "type": "array", 8 | "items": { 9 | "$ref": "#/definitions/slide" 10 | } 11 | }, 12 | "title": { 13 | "$ref": "#/definitions/slide" 14 | }, 15 | "eras": { 16 | "type": "array", 17 | "items": { 18 | "$ref": "#/definitions/era" 19 | } 20 | }, 21 | "scale": { 22 | "type": "string", 23 | "enum": ["human", "cosmological"] 24 | } 25 | }, 26 | "required": ["events"], 27 | "definitions": { 28 | "slide": { 29 | "type": "object", 30 | "properties": { 31 | "start_date": {"$ref": "#/definitions/date"}, 32 | "end_date": {"$ref": "#/definitions/date"}, 33 | "text": {"$ref": "#/definitions/text"}, 34 | "media": {"$ref": "#/definitions/media"}, 35 | "group": { 36 | "type": "string" 37 | }, 38 | "display_date": { 39 | "type": "string" 40 | }, 41 | "background": { 42 | "type": "object", 43 | "properties": { 44 | "url": { 45 | "type": "string" 46 | }, 47 | "alt": { 48 | "type": "string" 49 | }, 50 | "color": { 51 | "type": "string" 52 | } 53 | }, 54 | "required": ["url"] 55 | }, 56 | "autolink": { 57 | "type": "boolean" 58 | }, 59 | "unique_id": { 60 | "type": "string" 61 | } 62 | }, 63 | "required": ["start_date"] 64 | }, 65 | "era": { 66 | "allOf": [{"$ref": "#/definitions/slide"}], 67 | "properties": { 68 | "start_date": {"$ref": "#/definitions/date"}, 69 | "end_date": {"$ref": "#/definitions/date"} 70 | }, 71 | "required": ["start_date", "end_date"] 72 | }, 73 | "date": { 74 | "type": "object", 75 | "properties": { 76 | "year": { 77 | "type": "integer" 78 | }, 79 | "month": { 80 | "type": "integer", 81 | "minimum": 1, 82 | "maximum": 12 83 | }, 84 | "day": { 85 | "type": "integer" 86 | }, 87 | "hour": { 
88 | "type": "integer", 89 | "minimum": 0, 90 | "maximum": 23 91 | }, 92 | "minute": { 93 | "type": "integer", 94 | "minimum": 0, 95 | "maximum": 59 96 | }, 97 | "second": { 98 | "type": "integer", 99 | "minimum": 0, 100 | "maximum": 59 101 | }, 102 | "millisecond": { 103 | "type": "integer" 104 | }, 105 | "display_date": { 106 | "type": "string" 107 | }, 108 | "format": { 109 | "type": "string" 110 | } 111 | }, 112 | "required": ["year"] 113 | }, 114 | "text": { 115 | "type": "object", 116 | "properties": { 117 | "headline": { 118 | "type": "string" 119 | }, 120 | "text": { 121 | "type": "string" 122 | } 123 | } 124 | }, 125 | "media": { 126 | "type": "object", 127 | "properties": { 128 | "url": { 129 | "type": "string" 130 | }, 131 | "caption": { 132 | "type": "string" 133 | }, 134 | "credit": { 135 | "type": "string" 136 | }, 137 | "thumbnail": { 138 | "type": "string" 139 | }, 140 | "alt": { 141 | "type": "string" 142 | }, 143 | "title": { 144 | "type": "string" 145 | }, 146 | "link": { 147 | "type": "string" 148 | }, 149 | "link_target": { 150 | "type": "string" 151 | } 152 | }, 153 | "required": ["url"] 154 | } 155 | } 156 | } 157 | --------------------------------------------------------------------------------