├── CNAME
├── generative_monster
├── __init__.py
├── interface
│ ├── discord.py
│ └── twitter.py
├── cli.py
├── utils
│ └── image.py
├── generator
│ ├── openjourney.py
│ └── leap.py
├── settings.py
├── prompts.py
└── core.py
├── run.sh
├── requirements.txt
├── setup.py
├── memory.json
├── docs
└── ideas.md
├── index.html
├── README.md
├── .gitignore
└── nbs
└── 000_explore_twitter.ipynb
/CNAME:
--------------------------------------------------------------------------------
1 | generative.monster
--------------------------------------------------------------------------------
/generative_monster/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/generative_monster/interface/discord.py:
--------------------------------------------------------------------------------
1 | # TODO Implement an interface to Discord
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Abort on the first failure: previously a failed `cd` (SC2164) or a
# missing virtualenv was silently ignored and the bot would run from the
# wrong directory / with the wrong interpreter.
set -e

cd /home/vilson/src/automata/generative.monster
source venv/bin/activate
python generative_monster/cli.py create
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | tweepy==4.14.0
2 | Pillow==9.5.0
3 | imageio==2.28.1
4 | langchain==0.0.162
5 | openai==0.27.6
6 | python-dotenv==1.0.0
7 | typer==0.9.0
8 | matplotlib==3.7.1
--------------------------------------------------------------------------------
/generative_monster/cli.py:
--------------------------------------------------------------------------------
import typer

from generative_monster.core import Monster

app = typer.Typer()


@app.command()
def create():
    # Full pipeline: LLM inspiration -> prompt -> image -> tweet.
    # (Plain comment, not a docstring: typer would surface a docstring
    # as --help text and change the CLI output.)
    Monster().create()


@app.command()
def create_from_prompt(prompt: str, style: str):
    # Debug path: skip inspiration and go straight from the given
    # prompt/style to image generation (no tweeting).
    Monster().create_from_prompt(prompt, style)


if __name__ == "__main__":
    app()
--------------------------------------------------------------------------------
/generative_monster/utils/image.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import matplotlib.pyplot as plt
3 |
4 |
def open_image(file_path):
    """Open the image at *file_path* and return it as a PIL Image."""
    return Image.open(file_path)


def save_image(image, file_path):
    """Write *image* to *file_path* (format inferred from the extension)."""
    image.save(file_path)


def resize_image(image, scale_factor=2):
    """Return *image* scaled by *scale_factor* in both dimensions.

    Dimensions are truncated to int so fractional scale factors
    (e.g. 1.5) also work -- PIL's resize() requires integer sizes,
    and the previous float arithmetic broke for non-integer factors.
    """
    new_size = (int(image.size[0] * scale_factor),
                int(image.size[1] * scale_factor))
    return image.resize(new_size)


def plot_image(image):
    """Draw *image* on the current matplotlib axes (does not call show())."""
    plt.imshow(image)
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
import setuptools

# Explicit encoding: the platform default (e.g. cp1252 on Windows) would
# fail on non-ASCII characters in the README.
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setuptools.setup(
    name="generative_monster",
    version="0.1.0",
    author="Vilson Vieira",
    author_email="vilson@void.cc",
    description="A fully autonomous AI artist.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/automata/generative_monster",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.10',
)
--------------------------------------------------------------------------------
/memory.json:
--------------------------------------------------------------------------------
1 | [{"type": "human", "data": {"content": "Describe a painting in a short phrase, maximum of 10 words, about a topic of your choice. Limit the your answer to 100 characters. Do not quote.", "additional_kwargs": {}, "example": false}}, {"type": "ai", "data": {"content": "\"Chaos and Order\" - An abstract painting contrasted with black and white lines.", "additional_kwargs": {}, "example": false}}, {"type": "human", "data": {"content": "Describe a painting in a short phrase, maximum of 10 words, about a topic of your choice. Limit the your answer to 100 characters. Do not quote.", "additional_kwargs": {}, "example": false}}, {"type": "ai", "data": {"content": "\"The Last Dance\" - A melancholic portrait of a ballerina.", "additional_kwargs": {}, "example": false}}]
--------------------------------------------------------------------------------
/docs/ideas.md:
--------------------------------------------------------------------------------
1 | # Ideas
2 |
3 | ## Generative
4 |
5 | - [ ] search twitter for new prompts (@HBCoop_, @nickfloats, etc)
6 | - [ ] image-to-image using @archillect tweets
7 | - [ ] text-to-animation using stable animation SDK
8 | - [ ] more vectorial
9 | - [x] select prompt randomly
10 | - [ ] select prompt based on tweet likes
11 | - [x] append tags about generative AI (#generativeai, etc)
12 |
13 | ## LLM
14 |
15 | - [ ] use a vector db (pinecone ai) to improve memory and text content generation
16 | - [ ] use twitter trends to get inspiration (explore more twitter)
17 |
18 | ## Interfaces
19 |
20 | - [ ] reddit
21 | - [ ] discord
22 | - [ ] instagram
23 | - [ ] generative.monster gallery
24 |
25 | ## Engagement
26 |
27 | - [x] tweet every hour (like @archillect)
28 |
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
18 |
19 |
20 |
21 |
36 |
37 |
--------------------------------------------------------------------------------
/generative_monster/generator/openjourney.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import io
3 | import uuid
4 |
5 | from PIL import Image
6 |
7 | from generative_monster.settings import HUGGINGFACE_API_TOKEN
8 |
9 |
class OpenJourneyGenerator:
    """Text-to-image generator backed by the Hugging Face Inference API
    (prompthero/openjourney-v4 model).
    """

    def __init__(self):
        self._api_url = "https://api-inference.huggingface.co/models/prompthero/openjourney-v4"
        self._headers = {
            "Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}"
        }

    def _query(self, payload):
        """POST *payload* to the inference endpoint and return raw bytes.

        Raises:
            requests.HTTPError: on a non-2xx response. Previously error
            responses (JSON error bodies, model-loading 503s) were passed
            through and crashed later in Image.open() with a confusing
            "cannot identify image file" error.
        """
        response = requests.post(
            self._api_url,
            headers=self._headers,
            json=payload)
        response.raise_for_status()
        return response.content

    def generate(self, prompt):
        """Generate an image for *prompt*; return the saved file path."""
        image_bytes = self._query({
            "inputs": prompt,
        })
        image = Image.open(io.BytesIO(image_bytes))
        # Random name so successive generations in /tmp never collide.
        # (renamed from `id`, which shadowed the builtin)
        image_id = uuid.uuid4().hex
        image_path = f"/tmp/generated_{image_id}.jpg"
        image.save(image_path)
        return image_path
--------------------------------------------------------------------------------
/generative_monster/settings.py:
--------------------------------------------------------------------------------
import os

# Twitter credentials. Read eagerly so a missing variable fails at
# import time with a clear KeyError instead of halfway through a run.
CONSUMER_KEY = os.environ["TWITTER_CONSUMER_KEY"]
CONSUMER_SECRET = os.environ["TWITTER_CONSUMER_SECRET"]
BEARER_TOKEN = os.environ["TWITTER_BEARER_TOKEN"]
ACCESS_TOKEN = os.environ["TWITTER_ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = os.environ["TWITTER_ACCESS_TOKEN_SECRET"]

# Hugging Face
HUGGINGFACE_API_TOKEN = os.environ["HUGGINGFACE_API_TOKEN"]

# Leap AI
LEAP_API_TOKEN = os.environ["LEAP_API_TOKEN"]

# OpenAI
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]

# Initial description (system message) of the LangChain agent.
# NOTE: adjacent string literals concatenate with no separator, so every
# line must end in a space -- the second line was missing one, which sent
# "...human nature.You like..." to the model.
AGENT_DESCRIPTION = (
    "Pretend you are a digital artist that is also a digital influencer. "
    "You are funny, creative and like to explore topics from AI, politics, modern life and the human nature. "
    "You like to engage and interact with your followers. You generate at least one unique digital art "
    "every day and tweet about it."
)

# Temperature of the LangChain OpenAI Chat GPT agent
TEMPERATURE = 0.9

# Hashtags to append to tweet
HASHTAGS = "#ai #aiart #aiartcommunity #generativeai #midjourney #openjourney #chatgpt #gpt"

# Twitter accounts to cite on tweets
GENERATORS_TWITTER_ACCOUNTS = {
    "leap": "@leap_api",
    "openjourney": "@huggingface"
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Generative Monster
2 |
3 | 
4 |
5 | Fully autonomous generative/AI artist.
6 |
7 | It uses ChatGPT (through LangChain) to choose a topic of its interest, then
8 | describes it as an OpenJourney prompt, generating a new image every day and
9 | sharing it at [@generativemnstr](https://twitter.com/generativemnstr).
10 |
11 | More models to be added in the future.
12 |
13 | # Installing
14 |
15 | ## Locally
16 |
17 | First of all, create a `.env` file with the following env vars set:
18 |
19 | ```
20 | # Twitter
21 | TWITTER_CONSUMER_KEY = ...
22 | TWITTER_CONSUMER_SECRET = ...
23 | TWITTER_BEARER_TOKEN = ...
24 | TWITTER_ACCESS_TOKEN = ...
25 | TWITTER_ACCESS_TOKEN_SECRET = ...
26 |
27 | # Hugging Face
28 | HUGGINGFACE_API_TOKEN = ...
29 |
30 | # OpenAI
31 | OPENAI_API_KEY = ...
32 | ```
33 |
34 | Create a Python virtual env and install the dependencies:
35 |
36 | ```
37 | python3 -m venv venv
38 | source venv/bin/activate
39 | pip install --upgrade pip
40 | pip install -r requirements.txt
41 | pip install .
42 | ```
43 |
44 | # Running
45 |
46 | Run the full creation chain: inspiration, prompt engineering, image generation
47 | and communication:
48 |
49 | ```
50 | python generative_monster/cli.py create
51 | ```
52 |
53 | Useful to debug, run only prompt engineering and image generation:
54 |
55 | ```
56 | python generative_monster/cli.py create-from-prompt "some prompt" "some style"
57 | ```
58 |
59 | Note that the styles are the ones supported in `generative_monster/prompts.py`.
--------------------------------------------------------------------------------
/generative_monster/interface/twitter.py:
--------------------------------------------------------------------------------
1 | import tweepy
2 |
3 | from generative_monster.settings import (
4 | CONSUMER_KEY,
5 | CONSUMER_SECRET,
6 | BEARER_TOKEN,
7 | ACCESS_TOKEN,
8 | ACCESS_TOKEN_SECRET
9 | )
10 |
class TwitterInterface:
    """Thin tweepy wrapper for posting tweets with attached images."""

    def __init__(self):
        # V2 client: used for creating tweets.
        self._client = tweepy.Client(
            bearer_token=BEARER_TOKEN,
            consumer_key=CONSUMER_KEY,
            consumer_secret=CONSUMER_SECRET,
            access_token=ACCESS_TOKEN,
            access_token_secret=ACCESS_TOKEN_SECRET)

        # V1.1 API: still required for media upload, since V2 doesn't
        # support it yet:
        # https://developer.twitter.com/en/docs/twitter-api/tweets/manage-tweets/integrate#media
        self._auth = tweepy.OAuth1UserHandler(
            CONSUMER_KEY,
            CONSUMER_SECRET,
            ACCESS_TOKEN,
            ACCESS_TOKEN_SECRET)
        self._api = tweepy.API(self._auth)

    def upload_media(self, image_path, alt_text):
        """Upload one image, attach *alt_text* metadata, return the media id."""
        uploaded = self._api.media_upload(image_path)
        self._api.create_media_metadata(uploaded.media_id, alt_text)
        return uploaded.media_id

    def search_tweets(self, query="from:aut0mata -is:retweet"):
        """Return recent tweets matching *query* (up to 100)."""
        return self._client.search_recent_tweets(
            query=query,
            tweet_fields=['context_annotations', 'created_at'],
            max_results=100)

    def get_user(self, username="aut0mata"):
        """Look up a Twitter user by handle."""
        return self._client.get_user(username=username)

    def tweet(self, text, media_ids=None):
        """Post *text*; attach previously uploaded media when ids are given."""
        if media_ids:
            return self._client.create_tweet(text=text, media_ids=media_ids)
        return self._client.create_tweet(text=text)

    def tweet_with_images(self, text, prompt, image_paths):
        """Upload each image (alt text = *prompt*), then tweet them all."""
        media_ids = [self.upload_media(path, prompt) for path in image_paths]
        return self.tweet(text, media_ids)
64 |
--------------------------------------------------------------------------------
/generative_monster/prompts.py:
--------------------------------------------------------------------------------
# Style presets for prompt engineering.
#
# Each entry maps a style name to:
#   "source" -- where the suffix was collected from (URL or site name;
#               empty string for home-grown styles)
#   "suffix" -- text appended verbatim to the LLM-generated description
#               to steer the image generator toward that style.
# A style is picked at random per run, and the CLI's create-from-prompt
# command accepts these keys as its `style` argument.
PROMPT_SUFFIXES = {
    "high_quality": {
        "source": "prompthero",
        "suffix": "| intricate detailed| to scale| hyperrealistic| cinematic lighting| digital art| concept art"
    },
    "acrylic": {
        "source": "https://prompthero.com/prompt/e771e31e3d4",
        "suffix": ", Acrylic art by Anna dittmann, John William Waterhouse, Charlie bowater, Karol bak, Jacek yerka, victo ngai. trending on Artstation, intricate, highly detailed, crispy quality, dynamic lighting, hyperdetailed and realistic. (DreamlikeArt:1. 1), 8k, UHD, HDR, (Masterpiece:1. 5), (best quality:1. 5), Model: Dark Sushi Mix -Mix"
    },
    "dark": {
        "source": "https://prompthero.com/prompt/2e47d07d283",
        "suffix": ", cinematic lighting, photorealistic, realistic, detailed, volumetric light and shadow intricate, highly detailed, digital painting, artstation, concept art, smooth, hard focus, illustration, art By: artgerm, greg rutkowski and Alphonse Mucha, very detailed --v 4"
    },
    "bright_pastel": {
        "source": "https://prompthero.com/prompt/b316b2db89e",
        "suffix": ", inspired by OffWhite, tumblr, inspired by Yanjun Cheng style, digital art, lofi girl internet meme, trending on dezeen, catalog photo, 3 d render beeple, rhads and lois van baarle, cartoon style illustration, bright pastel colors, a beautiful artwork illustration, retro anime girl "
    },
    "max_rive": {
        "source": "https://www.midjourney.com/app/jobs/cf579c17-3918-4287-9aac-d6e81771a151/",
        "suffix": ", Detailed epic shot, by Max Rive, by Dan Mumford, incredible crimson color palette, dramatic cinematic atmosphere, epic composition, ultra contrast, hyper detailed, volumetric, vibrant colored stars, astrology, amazing liquid space matter, anti matter, 16k, 32k"
    },
    "android": {
        "source": "https://www.midjourney.com/app/jobs/6fe2bae2-7d60-4b38-ac65-89e99866649b/",
        "suffix": ", Hyper realistic, action pose, cybernetics, robotic character intricately sculpted, beautiful dynamic composition, photo - realistic techniques, scoutcore, dramatic spotlight horror cinematic lighting, symmetry"
    },
    "wes_anderson": {
        "source": "",
        "suffix": ", in the style of Wes Anderson, hyper realistic, symmetry, 16k"
    },
    "black_and_white": {
        "source": "",
        "suffix": ", archillect style, black and white, noir, architect, brutalist"
    },
    "graphic_novel": {
        "source": "",
        "suffix": ", graphic novel, high detailed"
    },
    "gopro_selfie": {
        "source": "",
        "suffix": ", gopro, selfie, high definition, hyper realistic"
    }
}
43 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 | memory.json
162 |
--------------------------------------------------------------------------------
/generative_monster/generator/leap.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import shutil
3 | import time
4 | import uuid
5 |
6 | from generative_monster.settings import LEAP_API_TOKEN
7 |
8 |
class LeapGenerator:
    """Text-to-image generator backed by the Leap AI inference API.

    Submits a generation job, polls until it finishes, then downloads
    the resulting image to /tmp and returns its path.
    """

    def __init__(self):
        # TODO Add more models
        self._model_id = "8b1b897c-d66d-45a6-b8d7-8e32421d02cf"
        self._api_url = f"https://api.tryleap.ai/api/v1/images/models/{self._model_id}/inferences"
        self._headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {LEAP_API_TOKEN}"
        }
        # Common quality-oriented negative prompt for diffusion models.
        self._negative_prompt = (
            "out of frame, lowres, text, error, cropped, worst quality, low quality, "
            "jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra "
            "fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, "
            "deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, "
            "cloned face, disfigured, gross proportions, malformed limbs, missing arms, "
            "missing legs, extra arms, extra legs, fused fingers, too many fingers, "
            "long neck, username, watermark, signature"
        )
        # Polling: wait 5s between at most ~10 status checks.
        self._request_delay = 5
        self._max_requests = 10

    def _query(self, url, method="POST", **kwargs):
        """Send an authenticated request to *url* and return decoded JSON.

        Raises:
            Exception: if *method* is neither POST nor GET.
        """
        if method == "POST":
            response = requests.post(url, headers=self._headers, **kwargs)
        elif method == "GET":
            response = requests.get(url, headers=self._headers, **kwargs)
        else:
            raise Exception(f"Method {method} not supported")
        return response.json()

    def _query_generation(self, prompt):
        """Submit a generation job for *prompt*; return the inference id."""
        payload = {
            "prompt": prompt,
            "negativePrompt": self._negative_prompt,
            "steps": 50,
            "width": 512,
            "height": 512,
            "numberOfImages": 1,
            "promptStrength": 7,
            # "seed": 31337,
            "enhancePrompt": False,
            "upscaleBy": "x1",
            "sampler": "ddim"
        }
        response = self._query(self._api_url, json=payload)
        return response["id"]

    def _query_inference_job(self, inference_id):
        """Poll the job until it finishes or the retry budget runs out.

        Returns the first image's URL, or None when the job produced no
        image in time. Fixes two defects in the original loop: image_url
        could be returned while still unbound (NameError) when the job
        finished without reporting images, and the loop slept one extra
        delay period after the job had already finished.
        """
        inference_url = f"{self._api_url}/{inference_id}"
        image_url = None  # stays None if the job never reports an image
        for _ in range(self._max_requests + 1):
            response = self._query(inference_url, method="GET")
            images = response.get("images") or []
            if images:
                image_url = images[0]["uri"]
            if response.get("state") == "finished":
                return image_url
            print("Still processing...")
            time.sleep(self._request_delay)
        print("Giving up...")
        return None

    def _download_image(self, image_url):
        """Stream *image_url* into a unique /tmp file; return its path.

        Raises:
            Exception: if the download does not return HTTP 200.
        """
        response = requests.get(image_url, stream=True)
        if response.status_code != 200:
            raise Exception("Image couldn't be downloaded")
        response.raw.decode_content = True
        # Random name so successive generations never collide
        # (renamed from `id`, which shadowed the builtin).
        file_id = uuid.uuid4().hex
        image_path = f"/tmp/generated_{file_id}.jpg"
        with open(image_path, 'wb') as f:
            shutil.copyfileobj(response.raw, f)
        return image_path

    def generate(self, prompt):
        """Full pipeline: submit, poll, download. Returns path or None."""
        inference_id = self._query_generation(prompt)
        image_url = self._query_inference_job(inference_id)
        if not image_url:
            return None
        return self._download_image(image_url)
--------------------------------------------------------------------------------
/generative_monster/core.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import random
4 | import numpy as np
5 |
6 | from dotenv import load_dotenv
7 | load_dotenv()
8 |
9 | from langchain.prompts import (
10 | ChatPromptTemplate,
11 | MessagesPlaceholder,
12 | SystemMessagePromptTemplate,
13 | HumanMessagePromptTemplate
14 | )
15 | from langchain.chains import ConversationChain
16 | from langchain.chat_models import ChatOpenAI
17 | from langchain.memory import ConversationBufferMemory
18 | from langchain.schema import messages_from_dict, messages_to_dict, HumanMessage
19 |
20 | from generative_monster.interface.twitter import TwitterInterface
21 | from generative_monster.generator.openjourney import OpenJourneyGenerator
22 | from generative_monster.generator.leap import LeapGenerator
23 | from generative_monster.prompts import PROMPT_SUFFIXES
24 | from generative_monster.utils.image import open_image, resize_image, save_image
25 | from generative_monster.settings import (
26 | AGENT_DESCRIPTION,
27 | HASHTAGS,
28 | TEMPERATURE,
29 | GENERATORS_TWITTER_ACCOUNTS
30 | )
31 |
class Monster:
    """Autonomous AI artist.

    Asks an LLM for a painting description (with persistent conversation
    memory), turns it into an image-generation prompt, renders it with a
    randomly chosen backend, and tweets the result.
    """

    def __init__(self):
        pass

    def create(self, publish=True):
        """Run the full creation chain; return the image path or None.

        Steps: LLM inspiration -> random style -> prompt -> random
        generator -> validation -> 2x upscale -> (optionally) tweet.
        Returns None when generation or validation fails.
        """
        # Inspiration
        print("-- Memory and inspiration")
        text = self.find_inspiration()
        print("Generated description:", text)
        if len(text) > 200:
            # Keep the description (used as tweet alt text) short.
            text = text[:190] + "..."
            print("Warning: It was too long! Shortening:", text)

        # Deciding on style
        print("--- Style")
        available_styles = list(PROMPT_SUFFIXES.keys())
        selected_style = random.choice(available_styles)
        print("Selected style:", selected_style)

        # Prompt creation
        print("--- Prompt creation")
        prompt = self.create_prompt(text, style=selected_style)
        print("Final prompt:", prompt)

        # Image generation
        print("-- Image generation")
        available_generators = ["openjourney", "leap"]
        selected_generator = random.choice(available_generators)
        print("Selected generator:", selected_generator)
        image_path = self.generate(prompt, generator=selected_generator)
        if not image_path:
            print("Failed to generate image. Please try again later... aborting.")
            return
        print("Generated image:", image_path)

        # Validate image
        print("-- Validating image")
        if not self.is_valid(image_path):
            print("Not a valid image. Please try again later... aborting.")
            return
        print("Valid image...")

        # Scale up
        print("-- Scaling image up")
        scale_factor = 2
        image_path = self.scale_image(image_path, scale_factor)
        print(f"Scaled image by x{scale_factor}")

        # Communication
        if publish:
            # Tweet body is intentionally empty for now; the prompt is
            # attached as the image alt text instead (see publish()).
            tweet_content = ""
            print("-- Communication")
            response = self.publish(tweet_content, prompt, [image_path])
            print("Tweet:", response)

        return image_path

    def create_from_prompt(self, initial_prompt, style, generator="openjourney"):
        """Debug path: build a prompt from *initial_prompt* and generate.

        Skips inspiration, validation, upscaling and tweeting; returns
        the generated image path (or None on generator failure).
        """
        prompt = self.create_prompt(initial_prompt, style)
        print("\tPrompt:", prompt)
        image_path = self.generate(prompt, generator)
        print("\tImage:", image_path)
        return image_path

    def find_inspiration(self):
        """Ask the LLM for a short painting description; return it stripped.

        Conversation history is persisted to memory.json so successive
        runs remember previous answers and avoid repetition.
        """
        # TODO Search twitter for daily headlines? Movies? TVSeries?

        # Recover memory
        if os.path.exists("memory.json"):
            # Use existing memory
            with open("memory.json", "r") as f:
                memory_dict = json.load(f)
            messages = messages_from_dict(memory_dict)
            memory = ConversationBufferMemory(return_messages=True)
            # Constraint: replay only the most recent exchanges to keep
            # the LLM context window bounded.
            max_messages = 50
            for message in messages[-max_messages:]:
                if isinstance(message, HumanMessage):
                    memory.chat_memory.add_user_message(message.content)
                else:
                    memory.chat_memory.add_ai_message(message.content)
        else:
            # Or create new one
            memory = ConversationBufferMemory(return_messages=True)
        # NOTE(review): return value is discarded -- this looks like a
        # leftover no-op from debugging; confirm before removing.
        memory.load_memory_variables({})

        # Create a prompt
        prompt = ChatPromptTemplate.from_messages([
            SystemMessagePromptTemplate.from_template(AGENT_DESCRIPTION),
            MessagesPlaceholder(variable_name="history"),
            HumanMessagePromptTemplate.from_template("{input}")
        ])

        llm = ChatOpenAI(temperature=TEMPERATURE)
        conversation = ConversationChain(
            memory=memory,
            prompt=prompt,
            llm=llm,
            verbose=False
        )

        gen_prompt = conversation.predict(
            input="Describe a painting in a short phrase, maximum of 10 words, about a topic of your choice. Limit the your answer to 100 characters. Do not quote.")

        # Save to memory
        with open("memory.json", "w") as f:
            memory_dict = messages_to_dict(memory.chat_memory.messages)
            json.dump(memory_dict, f)

        return gen_prompt.strip()

    def create_prompt(self, text, style="acrylic"):
        """Append the suffix for *style* (a PROMPT_SUFFIXES key) to *text*.

        Raises:
            KeyError: if *style* is not a known style name.
        """
        suffix = PROMPT_SUFFIXES[style]["suffix"]
        prompt = text + " " + suffix
        return prompt

    def generate(self, prompt, generator="openjourney"):
        """Generate an image with the named backend; return its file path.

        Raises:
            ValueError: for unknown generator names. (Previously an
            unknown name crashed with an unbound-variable NameError.)
        """
        if generator == "openjourney":
            gen = OpenJourneyGenerator()
        elif generator == "leap":
            gen = LeapGenerator()
        else:
            raise ValueError(f"Unknown generator: {generator}")
        image_path = gen.generate(prompt)
        return image_path

    def publish(self, text, prompt, image_paths):
        """Tweet *text* with the images; *prompt* becomes the alt text."""
        ti = TwitterInterface()
        res = ti.tweet_with_images(text, prompt, image_paths)
        return res

    def scale_image(self, image_path, scale_factor=2):
        """Upscale the image by *scale_factor* in place; return the path."""
        original_image = open_image(image_path)
        resized_image = resize_image(original_image, scale_factor)
        # Overwrite original path for now
        save_image(resized_image, image_path)
        return image_path

    def is_valid(self, image_path):
        """Reject pitch-black images (mean pixel value of exactly zero)."""
        image = open_image(image_path)
        image_array = np.array(image)
        image_mean = image_array.mean()
        return image_mean > 0.0
--------------------------------------------------------------------------------
/nbs/000_explore_twitter.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 8,
6 | "id": "92bf092c-2348-457f-934b-1094f33f8268",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "# !pip install tweepy"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 14,
16 | "id": "e44ab63b-c9e6-4cee-821e-375191c67733",
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "# API Key\n"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 15,
26 | "id": "7b9e5c59-97c8-465c-97b0-f5cc4f2b49e7",
27 | "metadata": {},
28 | "outputs": [
29 | {
30 | "name": "stdout",
31 | "output_type": "stream",
32 | "text": [
33 | "Authentication OK\n"
34 | ]
35 | }
36 | ],
37 | "source": [
38 | "import tweepy\n",
39 | "\n",
40 | "# Authenticate to Twitter\n",
41 | "auth = tweepy.OAuthHandler(twitter_keys[\"api_key\"], \n",
42 | " twitter_keys[\"api_key_secret\"])\n",
43 | "auth.set_access_token(twitter_keys[\"access_token\"], \n",
44 | " twitter_keys[\"access_token_secret\"])\n",
45 | "\n",
46 | "api = tweepy.API(auth)\n",
47 | "\n",
48 | "try:\n",
49 | " api.verify_credentials()\n",
50 | " print(\"Authentication OK\")\n",
51 | "except:\n",
52 | " print(\"Error during authentication\")"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": 18,
58 | "id": "b828f958-8e1e-4765-a5bf-f18b5b3f7778",
59 | "metadata": {},
60 | "outputs": [],
61 | "source": [
62 | "def update_profile(description):\n",
63 | " api.update_profile(description=description)\n",
64 | " \n"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": 20,
70 | "id": "ff043393-0a48-4ca7-8d37-a5d3f9ad2b9d",
71 | "metadata": {},
72 | "outputs": [
73 | {
74 | "data": {
75 | "text/plain": [
76 | "['__class__',\n",
77 | " '__delattr__',\n",
78 | " '__dict__',\n",
79 | " '__dir__',\n",
80 | " '__doc__',\n",
81 | " '__eq__',\n",
82 | " '__format__',\n",
83 | " '__ge__',\n",
84 | " '__getattribute__',\n",
85 | " '__gt__',\n",
86 | " '__hash__',\n",
87 | " '__init__',\n",
88 | " '__init_subclass__',\n",
89 | " '__le__',\n",
90 | " '__lt__',\n",
91 | " '__module__',\n",
92 | " '__ne__',\n",
93 | " '__new__',\n",
94 | " '__reduce__',\n",
95 | " '__reduce_ex__',\n",
96 | " '__repr__',\n",
97 | " '__setattr__',\n",
98 | " '__sizeof__',\n",
99 | " '__str__',\n",
100 | " '__subclasshook__',\n",
101 | " '__weakref__',\n",
102 | " 'add_list_member',\n",
103 | " 'add_list_members',\n",
104 | " 'auth',\n",
105 | " 'available_trends',\n",
106 | " 'cache',\n",
107 | " 'cached_result',\n",
108 | " 'chunked_upload',\n",
109 | " 'chunked_upload_append',\n",
110 | " 'chunked_upload_finalize',\n",
111 | " 'chunked_upload_init',\n",
112 | " 'closest_trends',\n",
113 | " 'create_block',\n",
114 | " 'create_favorite',\n",
115 | " 'create_friendship',\n",
116 | " 'create_list',\n",
117 | " 'create_media_metadata',\n",
118 | " 'create_mute',\n",
119 | " 'create_saved_search',\n",
120 | " 'delete_direct_message',\n",
121 | " 'destroy_block',\n",
122 | " 'destroy_favorite',\n",
123 | " 'destroy_friendship',\n",
124 | " 'destroy_list',\n",
125 | " 'destroy_mute',\n",
126 | " 'destroy_saved_search',\n",
127 | " 'destroy_status',\n",
128 | " 'geo_id',\n",
129 | " 'get_blocked_ids',\n",
130 | " 'get_blocks',\n",
131 | " 'get_direct_message',\n",
132 | " 'get_direct_messages',\n",
133 | " 'get_favorites',\n",
134 | " 'get_follower_ids',\n",
135 | " 'get_followers',\n",
136 | " 'get_friend_ids',\n",
137 | " 'get_friends',\n",
138 | " 'get_friendship',\n",
139 | " 'get_list',\n",
140 | " 'get_list_member',\n",
141 | " 'get_list_members',\n",
142 | " 'get_list_memberships',\n",
143 | " 'get_list_ownerships',\n",
144 | " 'get_list_subscriber',\n",
145 | " 'get_list_subscribers',\n",
146 | " 'get_list_subscriptions',\n",
147 | " 'get_lists',\n",
148 | " 'get_media_upload_status',\n",
149 | " 'get_muted_ids',\n",
150 | " 'get_mutes',\n",
151 | " 'get_oembed',\n",
152 | " 'get_place_trends',\n",
153 | " 'get_profile_banner',\n",
154 | " 'get_retweeter_ids',\n",
155 | " 'get_retweets',\n",
156 | " 'get_retweets_of_me',\n",
157 | " 'get_saved_search',\n",
158 | " 'get_saved_searches',\n",
159 | " 'get_settings',\n",
160 | " 'get_status',\n",
161 | " 'get_user',\n",
162 | " 'home_timeline',\n",
163 | " 'host',\n",
164 | " 'incoming_friendships',\n",
165 | " 'last_response',\n",
166 | " 'list_timeline',\n",
167 | " 'lookup_friendships',\n",
168 | " 'lookup_statuses',\n",
169 | " 'lookup_users',\n",
170 | " 'media_upload',\n",
171 | " 'mentions_timeline',\n",
172 | " 'no_retweets_friendships',\n",
173 | " 'outgoing_friendships',\n",
174 | " 'parser',\n",
175 | " 'proxy',\n",
176 | " 'rate_limit_status',\n",
177 | " 'remove_list_member',\n",
178 | " 'remove_list_members',\n",
179 | " 'remove_profile_banner',\n",
180 | " 'report_spam',\n",
181 | " 'request',\n",
182 | " 'retry_count',\n",
183 | " 'retry_delay',\n",
184 | " 'retry_errors',\n",
185 | " 'retweet',\n",
186 | " 'reverse_geocode',\n",
187 | " 'search_30_day',\n",
188 | " 'search_full_archive',\n",
189 | " 'search_geo',\n",
190 | " 'search_tweets',\n",
191 | " 'search_users',\n",
192 | " 'send_direct_message',\n",
193 | " 'session',\n",
194 | " 'set_settings',\n",
195 | " 'simple_upload',\n",
196 | " 'subscribe_list',\n",
197 | " 'supported_languages',\n",
198 | " 'timeout',\n",
199 | " 'unretweet',\n",
200 | " 'unsubscribe_list',\n",
201 | " 'update_friendship',\n",
202 | " 'update_list',\n",
203 | " 'update_profile',\n",
204 | " 'update_profile_banner',\n",
205 | " 'update_profile_image',\n",
206 | " 'update_status',\n",
207 | " 'update_status_with_media',\n",
208 | " 'upload_host',\n",
209 | " 'user_agent',\n",
210 | " 'user_timeline',\n",
211 | " 'verify_credentials',\n",
212 | " 'wait_on_rate_limit']"
213 | ]
214 | },
215 | "execution_count": 20,
216 | "metadata": {},
217 | "output_type": "execute_result"
218 | }
219 | ],
220 | "source": [
221 | "dir(api)"
222 | ]
223 | },
224 | {
225 | "cell_type": "code",
226 | "execution_count": 22,
227 | "id": "fbd8ffdd-c0e0-4e6f-84e7-237a14ed26f2",
228 | "metadata": {},
229 | "outputs": [
230 | {
231 | "name": "stderr",
232 | "output_type": "stream",
233 | "text": [
234 | "Unexpected parameter: rpp\n"
235 | ]
236 | },
237 | {
238 | "name": "stdout",
239 | "output_type": "stream",
240 | "text": [
241 | "Jakashi Bot:RT @Sheraj99: Importing Data Using #Python Cheat Sheet #DataScientist #DataScience #data #MachineLearning #BigData #Analytics #AI #Tech #II…\n",
242 | "---\n",
243 | "\n",
244 | "Pythonista Bot:RT @XoceQp: CoDailychalleng: RT @aarushinair_: #AppSec optimized for the needs of #developers\n",
245 | "👉https://t.co/4R4AmCv9v9\n",
246 | "#100DaysOfCode #Wome…\n",
247 | "---\n",
248 | "\n",
249 | "Xeron Bot:RT @XoceQp: CoDailychalleng: RT @aarushinair_: #AppSec optimized for the needs of #developers\n",
250 | "👉https://t.co/4R4AmCv9v9\n",
251 | "#100DaysOfCode #Wome…\n",
252 | "---\n",
253 | "\n",
254 | "Pythonista Bot:RT @JobPreference: Need a Job?\n",
255 | "Sign up now https://t.co/o7lVlsl75X\n",
256 | "FREE. NO MIDDLEMEN\n",
257 | "#VR #Meta #Metaverse #GameFi #NFTs #NFTGame #NFTGivea…\n",
258 | "---\n",
259 | "\n",
260 | "Xeron Bot:RT @JobPreference: Need a Job?\n",
261 | "Sign up now https://t.co/o7lVlsl75X\n",
262 | "FREE. NO MIDDLEMEN\n",
263 | "#VR #Meta #Metaverse #GameFi #NFTs #NFTGame #NFTGivea…\n",
264 | "---\n",
265 | "\n",
266 | "Pythonista Bot:RT @Spot__Onwriters: We take the burden of having to struggle with your assignments off your shoulders. DM us.\n",
267 | "#Assignmentdue \n",
268 | "#Assignement…\n",
269 | "---\n",
270 | "\n",
271 | "Xeron Bot:RT @Spot__Onwriters: We take the burden of having to struggle with your assignments off your shoulders. DM us.\n",
272 | "#Assignmentdue \n",
273 | "#Assignement…\n",
274 | "---\n",
275 | "\n",
276 | "Pythonista Bot:RT @Metanomic_: Metanomic is scaling up. Looking for 3-4 developers/ engineers with the following.\n",
277 | "\n",
278 | "#Python\n",
279 | "API Development\n",
280 | "Serverless\n",
281 | "DevO…\n",
282 | "---\n",
283 | "\n",
284 | "Xeron Bot:RT @Metanomic_: Metanomic is scaling up. Looking for 3-4 developers/ engineers with the following.\n",
285 | "\n",
286 | "#Python\n",
287 | "API Development\n",
288 | "Serverless\n",
289 | "DevO…\n",
290 | "---\n",
291 | "\n",
292 | "tCrossmd, MM, BSIT, ITIL:RT @vegxcodes: In case you have missed my latest threads, here is the chance to have a look at it. There are threads about #DataScience #Py…\n",
293 | "---\n",
294 | "\n",
295 | "Subhajeet Mukherjee:I think if you're developing something, it's always to better to prototype it using different languages.\n",
296 | "\n",
297 | "For examp… https://t.co/8DRbVlPYve\n",
298 | "---\n",
299 | "\n",
300 | "Fat Kid Deals:Start Honing Programming Skills with 46+ Hours of Python With The Complete 2022 Python Programmer Bundle for $27!… https://t.co/qWnT5X8vnV\n",
301 | "---\n",
302 | "\n",
303 | "Xeron Bot:RT @GalaxyKiiara: Just Consider @KIIARA\n",
304 | " The Love i Give Your Music\n",
305 | "Dedication in Selling it\n",
306 | " An Endless Passion\n",
307 | "How I’d Reward You\n",
308 | "#Python…\n",
309 | "---\n",
310 | "\n",
311 | "Valentine:RT @XoceQp: CoDailychalleng: RT @aarushinair_: #AppSec optimized for the needs of #developers\n",
312 | "👉https://t.co/4R4AmCv9v9\n",
313 | "#100DaysOfCode #Wome…\n",
314 | "---\n",
315 | "\n",
316 | "Job Preference:Need a Job?\n",
317 | "Sign up now https://t.co/o7lVlsl75X\n",
318 | "FREE. NO MIDDLEMEN\n",
319 | "#VR #Meta #Metaverse #GameFi #NFTs #NFTGame… https://t.co/RhYr72UeN3\n",
320 | "---\n",
321 | "\n"
322 | ]
323 | }
324 | ],
325 | "source": [
326 |     "for tweet in api.search_tweets(q=\"Python\", lang=\"en\", count=10):\n",
327 | " print(f\"{tweet.user.name}:{tweet.text}\\n---\\n\")"
328 | ]
329 | },
330 | {
331 | "cell_type": "code",
332 | "execution_count": 25,
333 | "id": "969f90f0-ec7b-4f02-adb1-2c5047bd45f3",
334 | "metadata": {},
335 | "outputs": [
336 | {
337 | "name": "stdout",
338 | "output_type": "stream",
339 | "text": [
340 | "Maguire\n",
341 | "#MCIMUN\n",
342 | "開催中祝\n",
343 | "#ManchesterDerby\n",
344 | "#MUFC\n",
345 | "Roy Keane\n",
346 | "Mahrez\n",
347 | "Virgínia\n",
348 | "Tatum\n",
349 | "Sancho\n",
350 | "Man U\n",
351 | "Pogba\n",
352 | "Andreas\n",
353 | "#FBvsTS\n",
354 | "#JuveSpezia\n",
355 | "De Gea\n",
356 | "Elche\n",
357 | "Rashford\n",
358 | "Arão\n",
359 | "Ronaldo\n",
360 | "De Bruyne\n",
361 | "Arsenal\n",
362 | "Foden\n",
363 | "Man City\n",
364 | "Fenerbahçe\n",
365 | "McTominay\n",
366 | "Lindelof\n",
367 | "Micah Richards\n",
368 | "João Gomes\n",
369 | "Nenê\n",
370 | "Lingard\n",
371 | "Grealish\n",
372 | "Hernández Hernández\n",
373 | "Wan Bissaka\n",
374 | "Querétaro\n",
375 | "Ferran\n",
376 | "اليونايتد\n",
377 | "Rangnick\n",
378 | "Ter Stegen\n",
379 | "Telles\n",
380 | "David Luiz\n",
381 | "Ralf\n",
382 | "Bruno Fernandes\n",
383 | "Filipe Luis\n",
384 | "السيتي\n",
385 | "Thiago Fragoso\n",
386 | "Etihad\n",
387 | "Leo Matos\n",
388 | "Zé Ricardo\n",
389 | "Mesut\n"
390 | ]
391 | }
392 | ],
393 | "source": [
394 | "trends_result = api.get_place_trends(1)\n",
395 | "for trend in trends_result[0][\"trends\"]:\n",
396 | " print(trend[\"name\"])"
397 | ]
398 | },
399 | {
400 | "cell_type": "code",
401 | "execution_count": null,
402 | "id": "9fbe403f-f9de-4c86-aa57-e877a34ece53",
403 | "metadata": {},
404 | "outputs": [],
405 | "source": [
406 |     "api"
407 | ]
408 | }
409 | ],
410 | "metadata": {
411 | "kernelspec": {
412 | "display_name": "Python 3 (ipykernel)",
413 | "language": "python",
414 | "name": "python3"
415 | },
416 | "language_info": {
417 | "codemirror_mode": {
418 | "name": "ipython",
419 | "version": 3
420 | },
421 | "file_extension": ".py",
422 | "mimetype": "text/x-python",
423 | "name": "python",
424 | "nbconvert_exporter": "python",
425 | "pygments_lexer": "ipython3",
426 | "version": "3.8.10"
427 | }
428 | },
429 | "nbformat": 4,
430 | "nbformat_minor": 5
431 | }
432 |
--------------------------------------------------------------------------------