├── LICENSE
├── README.md
├── articles
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-311.pyc
│   │   ├── __init__.cpython-39.pyc
│   │   ├── review.cpython-39.pyc
│   │   ├── seo.cpython-39.pyc
│   │   ├── skeleton.cpython-311.pyc
│   │   ├── skeleton.cpython-39.pyc
│   │   ├── writing.cpython-311.pyc
│   │   └── writing.cpython-39.pyc
│   ├── prompts.txt
│   ├── review.py
│   ├── seo.py
│   ├── skeleton.py
│   └── writing.py
├── blogs
│   └── blogs.toml
├── config.py
├── enhancer
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-311.pyc
│   │   ├── __init__.cpython-39.pyc
│   │   ├── linker.cpython-39.pyc
│   │   ├── midjourney_ai.cpython-311.pyc
│   │   └── translate.cpython-39.pyc
│   ├── compressed_output.jpg
│   ├── down.py
│   ├── linker.py
│   ├── midjourney_ai.py
│   ├── post_enhancer.py
│   └── translate.py
├── example.env
├── initiation
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-311.pyc
│   │   ├── __init__.cpython-39.pyc
│   │   ├── kickoff.cpython-311.pyc
│   │   └── kickoff.cpython-39.pyc
│   └── kickoff.py
├── interlinker
│   ├── __init__.py
│   ├── linker.py
│   └── output.txt
├── main.py
├── orchestrar
│   ├── .DS_Store
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-311.pyc
│   │   ├── __init__.cpython-39.pyc
│   │   ├── blogs.cpython-311.pyc
│   │   ├── blogs.cpython-39.pyc
│   │   ├── gutenberg.cpython-311.pyc
│   │   ├── gutenberg.cpython-39.pyc
│   │   ├── wp.cpython-311.pyc
│   │   └── wp.cpython-39.pyc
│   ├── blogs.py
│   ├── gutenberg.py
│   └── wp.py
├── programmer
│   ├── __init__.py
│   └── code.py
├── requirements.txt
├── researcher
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-311.pyc
│   │   ├── __init__.cpython-39.pyc
│   │   ├── search.cpython-311.pyc
│   │   └── search.cpython-39.pyc
│   ├── pdf.py
│   └── search.py
└── temp
    └── .DS_Store
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 seedgularity
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AIBlogPilotGPT
2 |
3 | An auto-blogging/article-writing tool that includes image generation via Midjourney and publishing to WordPress (Gutenberg ready).
4 |
5 | Internet research is included to gather the most relevant sources for each article.
6 |
7 | Articles run from 4,000 to 8,000 words (this can be adjusted via the article structure).
8 |
9 | ## What You Need for AIBlogPilot to Run
10 |
11 | - OpenAI API key
12 | - SerpAPI key
13 | - Browserless API key
14 | - TNL API key (https://www.thenextleg.io/) for Midjourney image generation
15 |
16 | ## Steps
17 |
18 | Install requirements via `pip install -r requirements.txt`
19 |
20 | Add all API keys to example.env and rename it to .env.
21 |
22 | Add your WordPress details to .env. If you want to include more blogs, you can add them as WP_ADMIN_USERNAME_2, WP_ADMIN_USERNAME_3, etc. The numbering applies to all the other details as well.
23 |
24 | Go to blogs/blogs.toml and include all details about your blog. In terms of keywords, I suggest you change them with every run.
25 |
26 | For a single article, go to `main.py`, uncomment line 319 (`article()`), and comment out line 320 (`parse_blog_articles()`).
27 |
28 | For multiple articles, go to `main.py`, comment out line 319 (`article()`), and uncomment line 320 (`parse_blog_articles()`).
29 |
30 | By default, 15 articles are generated. To change this, go to orchestrar/blogs.py and edit the prompt in the init_blog function ("I want you to propose next 15 article titles").
31 |
32 | All articles and images are saved in the temp directory.
33 |
34 | ## Demo Blog
35 |
36 | https://nomadlyworking.com/
37 |
--------------------------------------------------------------------------------
/articles/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/articles/__init__.py
--------------------------------------------------------------------------------
/articles/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/articles/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/articles/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/articles/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/articles/__pycache__/review.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/articles/__pycache__/review.cpython-39.pyc
--------------------------------------------------------------------------------
/articles/__pycache__/seo.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/articles/__pycache__/seo.cpython-39.pyc
--------------------------------------------------------------------------------
/articles/__pycache__/skeleton.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/articles/__pycache__/skeleton.cpython-311.pyc
--------------------------------------------------------------------------------
/articles/__pycache__/skeleton.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/articles/__pycache__/skeleton.cpython-39.pyc
--------------------------------------------------------------------------------
/articles/__pycache__/writing.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/articles/__pycache__/writing.cpython-311.pyc
--------------------------------------------------------------------------------
/articles/__pycache__/writing.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/articles/__pycache__/writing.cpython-39.pyc
--------------------------------------------------------------------------------
/articles/prompts.txt:
--------------------------------------------------------------------------------
1 | Write an engaging and compelling article that is clear, concise,
2 | and tailored to the target audience. Ensure the content is SEO
3 | optimized with relevant keywords and includes a strong call to action.
4 | The article should be free of grammatical errors and spelling mistakes,
5 | demonstrating originality and creativity. Incorporate well-researched
6 | facts and data to support your points, maintaining a consistent tone and style throughout.
7 | The article should be properly structured and formatted for easy readability.
8 | Article Title:
--------------------------------------------------------------------------------
/articles/review.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | import requests
5 | import openai
6 | from serpapi import GoogleSearch
7 | from dotenv import load_dotenv
8 | from termcolor import colored
9 | from tqdm import tqdm
10 | import colorama
11 | colorama.init(autoreset=True)
12 | from config import Config
13 | import json
14 | import re
15 |
16 | load_dotenv()
17 |
18 | cfg = Config()
19 |
20 | # Configure OpenAI API key
21 | try:
22 | openai.api_key = cfg.openai_api_key
23 | browserless_api_key = cfg.browserless_api_key
24 | llm_model = cfg.llm_model
25 | except KeyError:
26 | sys.stderr.write("OpenAI key configuration failed.")
27 | exit(1)
28 |
29 |
30 | def convert_to_gutenberg_blocks(text):
31 | # Split the text into lines
32 | lines = text.split('\n')
33 |
34 | # Initialize an empty list to hold the blocks
35 | blocks = []
36 |
37 | # Initialize an empty string to hold the current paragraph
38 | paragraph = ""
39 |
40 | # Iterate over each line
41 | for line in lines:
42 | # Check if the line is a headline
43 | if re.match(r"^# ", line):
44 | # If there's a current paragraph, add it as a block
45 | if paragraph:
46 | blocks.append(create_paragraph_block(paragraph))
47 | paragraph = ""
48 |
49 | # Add the headline as a block
50 | blocks.append(create_heading_block(line))
51 |
52 | # Check if the line is a list item
53 | elif re.match(r"^- ", line):
54 | # If there's a current paragraph, add it as a block
55 | if paragraph:
56 | blocks.append(create_paragraph_block(paragraph))
57 | paragraph = ""
58 |
59 | # Add the list as a block
60 | blocks.append(create_list_block(line))
61 |
62 | # Check if the line is an image
63 | elif re.match(r"^!\[.*\]\(.*\)", line):
64 | # If there's a current paragraph, add it as a block
65 | if paragraph:
66 | blocks.append(create_paragraph_block(paragraph))
67 | paragraph = ""
68 |
69 | # Add the image as a block
70 | blocks.append(create_image_block(line))
71 |
72 | # Check if the line is a quote
73 | elif re.match(r"^> ", line):
74 | # If there's a current paragraph, add it as a block
75 | if paragraph:
76 | blocks.append(create_paragraph_block(paragraph))
77 | paragraph = ""
78 |
79 | # Add the quote as a block
80 | blocks.append(create_quote_block(line))
81 |
82 | # Otherwise, add the line to the current paragraph
83 | else:
84 | paragraph += line
85 |
86 | # If there's a current paragraph, add it as a block
87 | if paragraph:
88 | blocks.append(create_paragraph_block(paragraph))
89 |
90 | # Return the blocks as a JSON string
91 | return json.dumps({"blocks": blocks}, ensure_ascii=False)
92 |
93 | def create_paragraph_block(text):
94 | # Replace **text** with text
95 | text = re.sub(r"\*\*(.*?)\*\*", r"\1", text)
96 | return {
97 | "blockName": "core/paragraph",
98 | "attrs": {},
99 | "innerBlocks": [],
100 | "innerHTML": text
101 | }
102 |
103 | def create_heading_block(text):
104 | return {
105 | "blockName": "core/heading",
106 | "attrs": {"level": text.count('#')},
107 | "innerBlocks": [],
108 | "innerHTML": text.replace('#', '').strip()
109 | }
110 |
111 | def create_list_block(text):
112 | return {
113 | "blockName": "core/list",
114 | "attrs": {},
115 | "innerBlocks": [],
116 |         "innerHTML": f"<ul><li>{text.replace('-', '').strip()}</li></ul>"
117 |     }
118 |
119 | def create_image_block(text):
120 | alt_text, url = re.match(r"^!\[(.*)\]\((.*)\)", text).groups()
121 | return {
122 | "blockName": "core/image",
123 | "attrs": {"url": url, "alt": alt_text},
124 | "innerBlocks": [],
125 | "innerHTML": ""
126 | }
127 |
128 | def create_quote_block(text):
129 | return {
130 | "blockName": "core/quote",
131 | "attrs": {},
132 | "innerBlocks": [],
133 | "innerHTML": text.replace('> ', '').strip()
134 | }
135 |
136 |
137 | def init(article, research):
138 |     prompt = f"""The following article has been written by AI with user input. Review whether the user input is incorporated correctly.
139 | If it is not, rewrite the article. If it is, return NO CHANGE only.
140 | User input: {research}\n\n
141 | Article: {article}"""
142 |
143 | chunked_output = ""
144 | for chunk in openai.ChatCompletion.create(
145 | model=cfg.llm_model,
146 | temperature=1,
147 | stream=True,
148 | messages=[
149 | {'role': 'system', 'content': 'You are a helpful assistant.'},
150 | {'role': 'system', 'content': "You're an expert in blogging, research and SEO."},
151 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
152 | {'role': 'system', 'content': 'Content you write is well SEO Optimised.'},
153 | {'role': 'system', 'content': 'You use engaging tone of voice.'},
154 | {"role": "user", "content": prompt}
155 | ]
156 | ):
157 | content = chunk["choices"][0].get("delta", {}).get("content")
158 | if content is not None:
159 | #print(content, end='')
160 | chunked_output += content
161 | return chunked_output
162 |
163 | if __name__ == "__main__":
164 | # Open the file in read mode ('r')
165 | with open('your_file.txt', 'r') as file:
166 | # Read the file content and store it in the 'chunk' variable
167 | chunk = file.read()
168 |
169 | # Now you can use the 'chunk' variable in the convert_to_gutenberg_blocks function
170 | gutenberg_blocks = convert_to_gutenberg_blocks(chunk)
--------------------------------------------------------------------------------
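
A minimal usage sketch for convert_to_gutenberg_blocks (illustrative only, not part of the repository; importing articles.review assumes the project's .env and Config load successfully):

```python
from articles.review import convert_to_gutenberg_blocks

# Hypothetical input mixing the patterns the converter recognises:
# headings ("# "), list items ("- "), images ("![](...)") and quotes ("> ").
sample = "# My Heading\nFirst paragraph of text.\n- a list item\n> a memorable quote"

blocks_json = convert_to_gutenberg_blocks(sample)
print(blocks_json)  # JSON string of the form {"blocks": [{"blockName": "core/heading", ...}, ...]}
```
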
/articles/seo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | import requests
5 | import openai
6 | from serpapi import GoogleSearch
7 | from dotenv import load_dotenv
8 | from termcolor import colored
9 | from tqdm import tqdm
10 | import colorama
11 | colorama.init(autoreset=True)
12 | from config import Config
13 |
14 | load_dotenv()
15 |
16 | cfg = Config()
17 |
18 | # Configure OpenAI API key
19 | try:
20 | openai.api_key = cfg.openai_api_key
21 | browserless_api_key = cfg.browserless_api_key
22 | llm_model = cfg.llm_model
23 | fast_llm_model = cfg.fast_llm_model
24 | except KeyError:
25 | sys.stderr.write("OpenAI key configuration failed.")
26 | exit(1)
27 |
28 | def optimise_article(article):
29 |     # Split the article into chunks of roughly 1000 characters each (string slicing, not word-based)
30 | article_chunks = [article[i:i+1000] for i in range(0, len(article), 1000)]
31 |
32 | optimised_article = ""
33 |
34 | for chunk in article_chunks:
35 | prompt = f"""You are an expert in SEO and blogging. A blogger wrote the following article.
36 | Your task is to review the article and optimise it for SEO. You don't return tips, you update the article by yourself.
37 | You can add, remove or change the text. Focus on long tail keywords and readability.
38 | Article:{chunk}"""
39 |
40 | chunked_output = ""
41 | for response in openai.ChatCompletion.create(
42 | model=cfg.llm_model,
43 | temperature=0.9,
44 | stream=True,
45 | messages=[
46 | {'role': 'system', 'content': 'You are a helpful assistant.'},
47 | {'role': 'system', 'content': "You're an expert in blogging, research and SEO."},
48 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
49 | {'role': 'system', 'content': 'Content you write is well SEO Optimised.'},
50 | {'role': 'system', 'content': 'You use engaging tone of voice.'},
51 | {"role": "user", "content": prompt}
52 | ]
53 | ):
54 | content = response["choices"][0].get("delta", {}).get("content")
55 | if content is not None:
56 | chunked_output += content
57 |
58 | # Add the optimised chunk to the final article
59 | optimised_article += chunked_output
60 |
61 | return optimised_article
--------------------------------------------------------------------------------
/articles/skeleton.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import openai
3 | from dotenv import load_dotenv
4 | import colorama
5 | colorama.init(autoreset=True)
6 | from config import Config
7 |
8 | load_dotenv()
9 |
10 | cfg = Config()
11 |
12 | # Configure OpenAI API key
13 | try:
14 | openai.api_key = cfg.openai_api_key
15 | browserless_api_key = cfg.browserless_api_key
16 | llm_model = cfg.llm_model
17 | except KeyError:
18 | sys.stderr.write("OpenAI key configuration failed.")
19 | exit(1)
20 |
21 | def write_skeleton_product_review(title):
22 | prompt = """Write a product review for """+title+"""". Use the following information:
23 |
24 | Have you been looking for the best [keyword] recipe? I've got you covered!
25 | >write an [tone] introduction about [keyword] including [experience], [expertise], authority and trustworthiness talking to [audience] who are [pain point].
26 | Optional add ons to the intro prompt to keep it on track :
27 | Describe the flavors of the dish as [flavors].
28 | The ingredients in this dish are [x].
29 | The dish is made with [type of cooking method]
30 | [highlight an affiliate related to this post]
31 | What is [keyword]?
32 | >write a high-level overview of [keyword] to [audience] explaining [feature(s)]
33 | [Keyword] Ingredients
34 | >write a paragraph of the ingredients needed to make [keyword]. These ingredients are [list ingredients]. Include [note and specific features/health benefits or allergen notices]
35 | How to Make [keyword]
36 | >write a list of # steps explaining how to [keyword] to [audience]
37 | You will take this list and put them into h3s. You can skip this if you have particular steps already.
38 | Step 1: [step here]
39 | >write a paragraph explaining [step] in [how to do X] in simple terms. Explain why this is necessary and how it helps
40 | Step 2: [step here]
41 | >write a paragraph explaining [step] in [how to do X] in simple terms. Explain why this is necessary and how it helps
42 | [highlight an affiliate related to this post. Duplicate this or move this to areas where you can promote a certain cooking tool or an ingredient from Amazon]
43 | Substitutes/Alternatives/Variations for [Keyword]
44 | >write a paragraph about alternative/substitute/variations for [keyword] replacing [ingredient/cooking method] with [replacement]
45 | You can rerun this command if there are multiple substitutes or variations.
46 | [highlight an affiliate related to this post]
47 | Any Other Headings Go Here
48 | >write a paragraph about [header] for [audience] including [x]
49 | This is a great place to answer questions about the best brands for ingredients, where to buy things, if they can replace x with y, and varied cooking methods.
50 | [highlight an affiliate related to this post]
51 | [keyword] Recipe
52 | [insert the recipe block from either WP recipe maker or Create by Mediavine. I recommend filling this in manually. Include an image in the recipe template]
53 | FAQs
54 | >write a list of FAQs that [audience] would ask about [keyword] and answer them
55 | Alternatively, find questions then ask Jasper to:
56 | >write a concise answer to this question: "question" including [x]
57 | [highlight an affiliate related to this post]
58 | Conclusion/Final Thoughts: How to [keyword]
59 | >write an engaging conclusion for a blog post about [keyword] talking to [audience]. Include a call to action for readers to [action. i.e. read next post titled “x”, leave a comment, buy a course titled “x” that does “y”]
60 | Read More:
61 | [insert relevant read more block here]
62 |
63 | Strictly follow the example structure of JSON:
64 |
65 | {
66 | "Title": "The Role of Energy Efficiency in Home Design",
67 | "Description": "This article focuses on the importance and incorporation of energy efficiency in home design and its resulting benefits",
68 | "Sections": [
69 | {
70 | "Heading_H2": "Incorporating Energy Efficiency in Home Design",
71 | "Description": "A detailed guide on how energy efficiency can be embedded into home design, with sub-sections including",
72 | "SubSections": [
73 | {
74 | "Heading_H3": "Passive Design Strategies",
75 | "Description": "Explanation of passive design strategies to optimize energy efficiency."
76 | },
77 | {
78 | "Heading_H3": "Selecting Efficient Appliances",
79 | "Description": "Guide on choosing energy-efficient appliances for the home."
80 | },
81 | {
82 | "Heading_H3": "Material Selection for Energy Efficiency",
83 | "Description": "Discussion on how material selection impacts energy efficiency and the best materials to choose.",
84 | "SubSections": [
85 | {
86 | "Heading_H4": "Insulation Materials",
87 | "Description": "Analysis on the role of insulation materials in enhancing energy efficiency."
88 | },
89 | {
90 | "Heading_H4": "Window Materials",
91 | "Description": "Guide to choosing energy-efficient window materials."
92 | }
93 | ]
94 | }
95 | ]
96 | }
97 | ]
98 | }
99 | """
100 |
101 | chunked_output = ""
102 | for chunk in openai.ChatCompletion.create(
103 | model=llm_model,
104 | stream=True,
105 | messages=[
106 | {'role': 'system', 'content': 'You are a helpful assistant.'},
107 | {'role': 'system', 'content': "You're an expert in blogging and SEO."},
108 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
109 | {'role': 'system', 'content': 'Return the output as JSON.'},
110 | {"role": "user", "content": prompt}
111 | ]
112 | ):
113 | content = chunk["choices"][0].get("delta", {}).get("content")
114 | if content is not None:
115 | #print(content, end='')
116 | chunked_output += content
117 |
118 | return chunked_output
119 |
120 | def write_skeleton(title):
121 | prompt = """Propose a structure for an article with title """+title+"""".
122 | Make sure you include headings - H2, H3, H4.
123 | Please ensure to include actual names in the structure.
124 | Include description for each section.
125 | Add FAQ section if relevant. Strictly follow the example structure of JSON below. Make sure the JSON is valid.
126 |
127 | {
128 | "Title": "The Role of Energy Efficiency in Home Design",
129 | "Description": "This article focuses on the importance and incorporation of energy efficiency in home design and its resulting benefits",
130 | "Sections": [
131 | {
132 | "Heading_H2": "Incorporating Energy Efficiency in Home Design",
133 | "Description": "A detailed guide on how energy efficiency can be embedded into home design, with sub-sections including",
134 | "SubSections": [
135 | {
136 | "Heading_H3": "Passive Design Strategies",
137 | "Description": "Explanation of passive design strategies to optimize energy efficiency."
138 | },
139 | {
140 | "Heading_H3": "Selecting Efficient Appliances",
141 | "Description": "Guide on choosing energy-efficient appliances for the home."
142 | },
143 | {
144 | "Heading_H3": "Material Selection for Energy Efficiency",
145 | "Description": "Discussion on how material selection impacts energy efficiency and the best materials to choose.",
146 | "SubSections": [
147 | {
148 | "Heading_H4": "Insulation Materials",
149 | "Description": "Analysis on the role of insulation materials in enhancing energy efficiency."
150 | },
151 | {
152 | "Heading_H4": "Window Materials",
153 | "Description": "Guide to choosing energy-efficient window materials."
154 | }
155 | ]
156 | }
157 | ]
158 | }
159 | ]
160 | }"""
161 |
162 | chunked_output = ""
163 | for chunk in openai.ChatCompletion.create(
164 | model=llm_model,
165 | stream=True,
166 | messages=[
167 | {'role': 'system', 'content': 'You are a helpful assistant.'},
168 | {'role': 'system', 'content': "You're an expert in blogging and SEO."},
169 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
170 | {'role': 'system', 'content': 'Return the output as JSON.'},
171 | {"role": "user", "content": prompt}
172 | ]
173 | ):
174 | content = chunk["choices"][0].get("delta", {}).get("content")
175 | if content is not None:
176 | #print(content, end='')
177 | chunked_output += content
178 |
179 | return chunked_output
180 |
181 | if __name__ == "__main__":
182 | write_skeleton_product_review("Review of Acure Ultra Hydrating Shampoo")
--------------------------------------------------------------------------------
/articles/writing.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | import requests
5 | import openai
6 | from serpapi import GoogleSearch
7 | from dotenv import load_dotenv
8 | from termcolor import colored
9 | from tqdm import tqdm
10 | import colorama
11 | colorama.init(autoreset=True)
12 | from config import Config
13 |
14 | load_dotenv()
15 |
16 | cfg = Config()
17 |
18 | # Configure OpenAI API key
19 | try:
20 | openai.api_key = cfg.openai_api_key
21 | browserless_api_key = cfg.browserless_api_key
22 | llm_model = cfg.llm_model
23 | except KeyError:
24 | sys.stderr.write("OpenAI key configuration failed.")
25 | exit(1)
26 |
27 |
28 | def find_conclusion(text, function_to_call):
29 | if "In conclusion," in text:
30 | print("Found In conclusion text. Removing...")
31 | return function_to_call(text)
32 | return text
33 | def in_conclusion_killer(text):
34 | ###THIS BLOODY THING###
35 | prompt = f"""Remove in conclusion text. Don't add anything.: {text}"""
36 | chunked_output = ""
37 | for chunk in openai.ChatCompletion.create(
38 | model=cfg.llm_model,
39 | temperature=1,
40 | stream=True,
41 | messages=[
42 | {'role': 'system', 'content': 'You are a helpful assistant.'},
43 | {'role': 'system', 'content': "You're an expert in blogging, research and SEO."},
44 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
45 | {'role': 'system', 'content': 'Content you write is well SEO Optimised.'},
46 | {'role': 'system', 'content': 'You use engaging tone of voice.'},
47 | {'role': 'system', 'content': 'You strictly follow user request.'},
48 | {'role': 'system', 'content': 'You strictly return content user asked for only.'},
49 | {'role': 'system', 'content': "You don't say or add anything, just return the content."},
50 | {"role": "user", "content": prompt}
51 | ]
52 | ):
53 | content = chunk["choices"][0].get("delta", {}).get("content")
54 | if content is not None:
55 | # print(content, end='')
56 | chunked_output += content
57 |
58 | return chunked_output
59 | def write_intro(title, toneofvoice):
60 |
61 | prompt = f"""
62 | As an AI blog post writer, your task is to craft an engaging and professional introduction paragraph for an article. Here are the details you need to consider:
63 |
64 | The article's title is {title}. However, do not include the title in the introduction.
65 | Write in the style of a professional blogger crafting a long-form article.
66 | The article will have following tone of voice: {toneofvoice}.
67 | Do not include any form of concluding statements like 'in conclusion'.
68 | Remember, your goal is to create an introduction that hooks the reader and sets the stage for the rest of the article."""
69 |
70 | chunked_output = ""
71 | for chunk in openai.ChatCompletion.create(
72 | model=cfg.llm_model,
73 | temperature=1,
74 | stream=True,
75 | messages=[
76 | {'role': 'system', 'content': 'You are a helpful assistant.'},
77 | {'role': 'system', 'content': "You're an expert in blogging, research and SEO."},
78 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
79 | {'role': 'system', 'content': 'Content you write is well SEO Optimised.'},
80 | {'role': 'system', 'content': 'You use engaging tone of voice.'},
81 | {"role": "user", "content": prompt}
82 | ]
83 | ):
84 | content = chunk["choices"][0].get("delta", {}).get("content")
85 | if content is not None:
86 | #print(content, end='')
87 | chunked_output += content
88 | chunked_output = find_conclusion(chunked_output, in_conclusion_killer)
89 | return chunked_output
90 |
91 |
92 | def write_section(title, article_description, heading, heading_description, toneofvoice):
93 | if article_description == "":
94 | article_description = "Not available, use the title"
95 | if heading_description == "":
96 | heading_description = "Not available, use the heading"
97 |
98 | prompt = f"""
99 | As an AI blog post section writer, your task is to generate unique, compelling, and SEO-optimized content for various blog post sections. Here are the details you need to consider:
100 | You will not include any concluding summaries.
101 | You will not include section headings.
102 | The article will have following tone of voice: {toneofvoice}.
103 | You will be provided with an article title {title}, an article description {article_description}, a section heading {heading}, and a section description {heading_description}.
104 | Using these inputs, generate captivating, grammatically correct, and easy-to-read content that is suitable for the respective section.
105 | Make important parts of the text bold - but focus on keywords the article is targeting. You can also use quotes and citations.
106 | The content should engage readers and facilitate their understanding of the blog post's content. Maintain an engaging tone of voice throughout.
107 | The content should be ready to be copied and pasted directly into Wordpress with Gutenberg formatting, without the need for any additional formatting.
108 | Remember, your goal is to create a section body that aligns with the provided inputs and is optimized for search engines."""
109 |
110 | chunked_output = ""
111 | for chunk in openai.ChatCompletion.create(
112 | model=cfg.llm_model,
113 | temperature=1,
114 | stream=True,
115 | messages=[
116 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
117 | {'role': 'system', 'content': "You're an expert in blogging, research and SEO."},
118 | {'role': 'system', 'content': 'Content produced is well SEO Optimised.'},
119 | {'role': 'system', 'content': 'You strictly return content user asked for only.'},
120 | {"role": "user", "content": prompt}
121 | ]
122 | ):
123 | content = chunk["choices"][0].get("delta", {}).get("content")
124 | if content is not None:
125 | #print(content, end='')
126 | chunked_output += content
127 |
128 | chunked_output = in_conclusion_killer(chunked_output)
129 | return chunked_output
130 |
131 | def write_subsection(title, article_description, heading, heading_description, subheading, subheading_description, toneofvoice):
132 | if article_description == "":
133 | article_description = "Not available, use the title"
134 | if heading_description == "":
135 | heading_description = "Not available, use the heading"
136 | if subheading_description == "":
137 | subheading_description = "Not available, use the subheading"
138 |
139 | prompt = f"""As an AI blog post section writer, your task is to generate unique, compelling, and SEO-optimized content for various blog post sections and subsections. Here are the details you need to consider:
140 |
141 | You will not include any concluding summaries.
142 | You will not include section or subsections headings.
143 | The article will have following tone of voice: {toneofvoice}.
144 | You will be provided with an article title {title}, an article description {article_description}, a section heading {heading}, a section description {heading_description}, a subsection heading {subheading}, and a subsection description {subheading_description}.
145 | Using these inputs, generate captivating, grammatically correct, and easy-to-read content that is suitable for the respective section and subsection.
146 | Make important parts of the text bold. You can also use quotes and citations. Add bulleted lists.
147 | When relevant, add pros and cons comparisons.
148 | The content should engage readers and facilitate their understanding of the blog post's content. Maintain an engaging tone of voice throughout.
149 | The content should be ready to be copied and pasted directly into Wordpress with Gutenberg formatting, without the need for any additional formatting.
150 | Remember, your goal is to create a section body and subsection body that align with the provided inputs and are optimized for search engines."""
151 |
152 | chunked_output = ""
153 | for chunk in openai.ChatCompletion.create(
154 | model=cfg.llm_model,
155 | temperature=1,
156 | stream=True,
157 | messages=[
158 | {'role': 'system', 'content': 'You are a helpful assistant.'},
159 | {'role': 'system', 'content': "You're an expert in blogging, research and SEO."},
160 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
161 | {'role': 'system', 'content': 'Content produced is well SEO Optimised.'},
162 | {'role': 'system', 'content': 'You use engaging tone of voice.'},
163 | {'role': 'system', 'content': 'You strictly return content user asked for only.'},
164 | {"role": "user", "content": prompt}
165 | ]
166 | ):
167 | content = chunk["choices"][0].get("delta", {}).get("content")
168 | if content is not None:
169 | #print(content, end='')
170 | chunked_output += content
171 |
172 | chunked_output = find_conclusion(chunked_output, in_conclusion_killer)
173 | return chunked_output
174 |
175 | if __name__ == "__main__":
176 |     # Example arguments for a quick manual run; write_section requires all five parameters.
177 |     write_section("Sample Article Title", "", "Sample Section Heading", "", "engaging")
--------------------------------------------------------------------------------
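
The skeleton functions stream back the JSON outline shown in their prompts as a plain string; a minimal sketch (illustrative, not part of the repository) of parsing it and drafting each H2 section with the helpers above:

```python
import json
from articles import skeleton, writing

# Hypothetical flow: generate an outline, parse it, then draft each top-level section.
raw = skeleton.write_skeleton("The Role of Energy Efficiency in Home Design")
outline = json.loads(raw)  # assumes the model returned valid JSON, as the prompt requests

for section in outline.get("Sections", []):
    body = writing.write_section(
        outline["Title"],
        outline.get("Description", ""),
        section["Heading_H2"],
        section.get("Description", ""),
        "straightforward, inspiring",  # tone of voice, hypothetical value
    )
    print(section["Heading_H2"], len(body), "characters")
```
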
/blogs/blogs.toml:
--------------------------------------------------------------------------------
1 | [[blog]]
2 | id = 1
3 | name = "Nomadly Working"
4 | description = "Nomadly Working is a comprehensive blog dedicated to the journey of independent entrepreneurship and the indie hacker lifestyle within the context of remote work. This blog serves as your ultimate resource for mastering the art of building profitable online businesses while enjoying the freedom of location independence. It is meticulously designed to assist individuals who are eager to break free from traditional work environments and embrace the empowering lifestyle of an indie hacker. Nomadly Working covers a wide range of topics, neatly categorized into sections such as Building Online Businesses, Product Development, Marketing & Growth and more. However, it doesn't cover tools. These categories encompass everything from getting started as an indie hacker, maintaining mental health while building businesses, strategies to boost productivity, marketing tips for indie hackers, to reviews of co-working spaces and services relevant to indie hackers and remote workers. The aim is to provide comprehensive information on various aspects of indie hacking, making Nomadly Working a one-stop resource for anyone interested in this lifestyle. Nomadly Working is more than just a blog; it's a reliable platform designed for those who wish to make a positive impact on their lives through the freedom and flexibility of indie hacking. Whether you're a seasoned indie hacker or just starting your journey, Nomadly Working is here to guide you every step of the way."
5 | url = "https://nomadlyworking.com/"
6 | tone = "Straightforward, community-focused, inspiring. Use words that a 36-years old nomad and indie hacker would use."
7 | keywords = "Eco-friendly travel for digital nomads, Building a personal brand as a digital nomad, Remote work policies and regulations, Adventure sports for digital nomads, Remote work and homeschooling children, Cultural etiquette for digital nomads, Remote work and pet ownership, Volunteering opportunities for digital nomads, Remote work and maintaining relationships, Digital nomad cooking and meal prep, Remote work and physical fitness routines, Art and creativity in the digital nomad lifestyle, Remote work and cybersecurity measures, Digital nomad book recommendations, Remote work and dealing with time differences, Digital nomad photography tips, Remote work and managing a team across time zones, Digital nomad travel gear recommendations, Remote work and dealing with language barriers, Digital nomad experiences and personal stories"
--------------------------------------------------------------------------------
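
A minimal sketch of reading this file with the toml package that main.py imports (illustrative only; the field names match the [[blog]] table above):

```python
import toml

# Load every [[blog]] entry from blogs/blogs.toml
blogs = toml.load("blogs/blogs.toml")["blog"]

for blog in blogs:
    print(blog["id"], blog["name"], blog["url"])
    # keywords are stored as a single comma-separated string
    keywords = [k.strip() for k in blog["keywords"].split(",")]
```
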
/config.py:
--------------------------------------------------------------------------------
1 | # Importing necessary libraries
2 | import os
3 | import openai
4 | from dotenv import load_dotenv
5 |
6 | # Loading environment variables from .env file
7 | load_dotenv()
8 |
9 | # Singleton class to ensure only one instance of a class exists
10 | class Singleton(type):
11 | # Dictionary to store instances of classes
12 | _instances = {}
13 |
14 | # Overriding the __call__ method to control class instantiation
15 | def __call__(cls, *args, **kwargs):
16 | # If an instance of the class does not exist, create one
17 | if cls not in cls._instances:
18 | cls._instances[cls] = super(
19 | Singleton, cls).__call__(
20 | *args, **kwargs)
21 | # Return the instance of the class
22 | return cls._instances[cls]
23 |
24 | # Config class to manage configuration settings
25 | class Config(metaclass=Singleton):
26 | # Initialization method
27 | def __init__(self):
28 | # Fetching environment variables and setting them as class attributes
29 | self.llm_model = os.getenv("LLM_MODEL")
30 | self.fast_llm_model = os.getenv("FAST_LLM_MODEL")
31 | self.openai_api_key = os.getenv("OPENAI_API_KEY")
32 | self.openai_api_key_fast = os.getenv("OPENAI_API_KEY_FAST")
33 | openai.api_key = self.openai_api_key
34 | self.serpapi_api_key = os.getenv("SERPAPI_API_KEY")
35 | self.browserless_api_key = os.getenv("BROWSERLESS_API_KEY")
36 | self.brave_search_api_key = os.getenv("BRAVE_SEARCH_API_KEY")
37 | self.wolfram_alpha_appid = os.getenv("WOLFRAM_ALPHA_APPID")
38 | self.site_url = os.getenv("SITE_URL")
39 | self.wp_admin_username = os.getenv("WP_ADMIN_USERNAME")
40 | self.wp_admin_password = os.getenv("WP_ADMIN_PASSWORD")
41 |
42 | # Method to set the llm_model attribute
43 | def set_llm_model(self, value: str):
44 | self.llm_model = value
45 |
46 |     # Method to set the fast_llm_model attribute
47 | def set_fast_llm_model(self, value: str):
48 | self.fast_llm_model = value
49 |
50 | # Method to set the openai_api_key attribute
51 | def set_openai_api_key(self, value: str):
52 | self.openai_api_key = value
53 |
54 | def set_openai_api_key_fast(self, value: str):
55 | self.openai_api_key_fast = value
56 |
57 |     # Method to set the serpapi_api_key attribute
58 | def set_serpapi_api_key(self, value: str):
59 | self.serpapi_api_key = value
60 |
61 | # Method to set the browserless_api_key attribute
62 | def set_browserless_api_key(self, value: str):
63 | self.browserless_api_key = value
64 |
65 | # Method to set the brave_search_api_key attribute
66 | def set_brave_search_api_key(self, value: str):
67 | self.brave_search_api_key = value
68 |
69 | def set_site_url(self, value: str):
70 | self.site_url = value
71 |
72 | def set_wp_admin_username(self, value: str):
73 | self.wp_admin_username = value
74 |
75 | def set_wp_admin_password(self, value: str):
76 | self.wp_admin_password = value
--------------------------------------------------------------------------------
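
A short sketch (not part of the repository) showing the effect of the Singleton metaclass: every Config() call returns the same instance, populated from .env.

```python
from config import Config

cfg_a = Config()
cfg_b = Config()

print(cfg_a is cfg_b)   # True: the metaclass caches and reuses the first instance
print(cfg_a.llm_model)  # value of LLM_MODEL from .env, e.g. "gpt-4-0613"

cfg_a.set_llm_model("gpt-3.5-turbo-0613")
print(cfg_b.llm_model)  # "gpt-3.5-turbo-0613", since both names refer to one object
```
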
/enhancer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/enhancer/__init__.py
--------------------------------------------------------------------------------
/enhancer/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/enhancer/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/enhancer/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/enhancer/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/enhancer/__pycache__/linker.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/enhancer/__pycache__/linker.cpython-39.pyc
--------------------------------------------------------------------------------
/enhancer/__pycache__/midjourney_ai.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/enhancer/__pycache__/midjourney_ai.cpython-311.pyc
--------------------------------------------------------------------------------
/enhancer/__pycache__/translate.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/enhancer/__pycache__/translate.cpython-39.pyc
--------------------------------------------------------------------------------
/enhancer/compressed_output.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/enhancer/compressed_output.jpg
--------------------------------------------------------------------------------
/enhancer/down.py:
--------------------------------------------------------------------------------
1 | import os
2 | import requests
3 | import json
4 | from PIL import Image
5 | from io import BytesIO
6 | from dotenv import load_dotenv
7 |
8 | # Load the TNL API key from .env instead of hardcoding the credential in source
9 | load_dotenv()
10 | TNL_API_KEY = os.getenv("TNL_API_KEY")
11 |
12 | url = "https://api.thenextleg.io/getImage"
13 |
14 | payload = json.dumps({
15 |   "imgUrl": "https://cdn.midjourney.com/0109b6ae-a6b0-4bbd-a89c-fd6071cc1763/0_2.png"
16 | })
17 | headers = {
18 |     'Content-Type': 'application/json',
19 |     'Authorization': f'Bearer {TNL_API_KEY}'
20 | }
21 |
22 | response = requests.request("POST", url, headers=headers, data=payload)
23 |
24 | # Assuming the response is in bytes
25 | image = Image.open(BytesIO(response.content))
26 | image.save('output.png')
--------------------------------------------------------------------------------
/enhancer/linker.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | import requests
5 | import openai
6 | from serpapi import GoogleSearch
7 | from dotenv import load_dotenv
8 | from termcolor import colored
9 | from tqdm import tqdm
10 | import colorama
11 | colorama.init(autoreset=True)
12 | from config import Config
13 |
14 | load_dotenv()
15 |
16 | cfg = Config()
17 |
18 | # Configure OpenAI API key
19 | try:
20 | openai.api_key = cfg.openai_api_key
21 | browserless_api_key = cfg.browserless_api_key
22 | llm_model = cfg.llm_model
23 | fast_llm_model = cfg.fast_llm_model
24 | except KeyError:
25 | sys.stderr.write("OpenAI key configuration failed.")
26 | exit(1)
27 |
28 | def add_links(article):
29 | # Split the article into chunks of approximately 100 words each
30 | word_list = article.split()
31 | chunks = [' '.join(word_list[i:i + 100]) for i in range(0, len(word_list), 100)]
32 |
33 | chunked_output = ""
34 | for chunk in chunks:
35 | prompt = f"""You are an expert in SEO and blogging. A blogger wrote the following article.
36 | Your task is to review the article and add links to relevant sources. Add 5 links maximum.
37 | Article:{chunk}"""
38 |
39 | for output in openai.ChatCompletion.create(
40 | model=llm_model,
41 | stream=True,
42 | messages=[
43 | {'role': 'system', 'content': 'You are a helpful assistant.'},
44 | {'role': 'system', 'content': "You're an expert in blogging, research and SEO."},
45 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
46 | {'role': 'system', 'content': 'Content you write is well SEO Optimised.'},
47 | {'role': 'system', 'content': 'You use engaging tone of voice.'},
48 | {"role": "user", "content": prompt}
49 | ]
50 | ):
51 | content = output["choices"][0].get("delta", {}).get("content")
52 | if content is not None:
53 | #print(content, end='')
54 | chunked_output += content
55 |
56 | return chunked_output
--------------------------------------------------------------------------------
/enhancer/midjourney_ai.py:
--------------------------------------------------------------------------------
1 | # Import necessary libraries
2 | from midjourney_api import TNL
3 | import requests
4 | import json
5 | import time
6 | import os
7 | from tqdm import tqdm
8 | from PIL import Image
9 | from io import BytesIO
10 | from dotenv import load_dotenv
11 |
12 | # Load the .env file
13 | load_dotenv()
14 |
15 | # Define API key and initialize TNL
16 | TNL_API_KEY = os.getenv("TNL_API_KEY")
17 | tnl = TNL(TNL_API_KEY)
18 |
19 |
20 | def generate_image(title):
21 | """
22 | Function to generate image using TNL API
23 | """
24 | # Define the prompt for the image
25 | prompt = f"""Header image for an article '{title}',extra sharp, 8k, photorealistic, shot on Kodak gold --ar 16:9"""
26 |
27 | # Get the response from the API
28 | response = tnl.imagine(prompt)
29 |
30 | # Keep checking until message_id is not None
31 | while "messageId" not in response:
32 | print("Waiting for message id...")
33 | time.sleep(5) # wait for 5 seconds before checking again
34 |
35 | print("Message id: ", response["messageId"])
36 |
37 | # Call check_progress function to wait for the image generation to complete
38 | check_progress(response["messageId"])
39 |
40 | return response["messageId"]
41 |
42 |
43 | def check_progress(id):
44 | """
45 | Function to check the progress of the image generation and download the image
46 | """
47 | # Define the URL and headers for the request
48 | url = f"https://api.thenextleg.io/v2/message/{id}?expireMins=12"
49 | headers = {'Authorization': f'Bearer {TNL_API_KEY}'}
50 |
51 | # Initialize progress bar
52 | progress_bar = tqdm(total=100)
53 |
54 | # Keep checking the progress until it reaches 100
55 | while True:
56 | response = requests.get(url, headers=headers)
57 | response_json = response.json()
58 |
59 | # Convert response_json['progress'] to an integer before subtracting progress_bar.n
60 | progress_bar.update(int(response_json['progress']) - progress_bar.n) # Update progress bar
61 |
62 | if int(response_json['progress']) == 100:
63 | progress_bar.close() # Close progress bar
64 | break
65 | else:
66 | time.sleep(5)
67 |
68 | # Get the URL of the third image
69 | third_image_url = response_json['response']['imageUrls'][2]
70 |
71 | print(third_image_url)
72 |
73 | # Download the image
74 | download_image(third_image_url, id)
75 |
76 |
77 | def download_image(image_url, id):
78 | """
79 | Function to download the image and save it in different formats
80 | """
81 | url = "https://api.thenextleg.io/getImage"
82 | payload = json.dumps({"imgUrl": image_url})
83 | headers = {
84 | 'Content-Type': 'application/json',
85 | 'Authorization': f'Bearer {TNL_API_KEY}'
86 | }
87 |
88 | response = requests.post(url, headers=headers, data=payload)
89 |
90 | # Assuming the response is in bytes
91 | image = Image.open(BytesIO(response.content))
92 |
93 | # Define the directory to save the images
94 | directory = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'temp', 'imgs')
95 |
96 | # Create the directory if it doesn't exist
97 | os.makedirs(directory, exist_ok=True)
98 |
99 | # Save the images in the defined directory
100 | image.save(os.path.join(directory, f'{id}.png'))
101 |
102 | original_image = Image.open(os.path.join(directory, f'{id}.png'))
103 | rgb_image = original_image.convert('RGB')
104 | small_image_name = f'{id}_small.jpg'
105 | rgb_image.save(os.path.join(directory, small_image_name), "JPEG", quality=70)
106 |
107 | return small_image_name
108 |
109 |
110 | def generate_and_check(title):
111 | """
112 | Main function to generate the image and check its progress
113 | """
114 | message_id = generate_image(title)
115 | check_progress(message_id)
116 |
117 |
118 | if __name__ == "__main__":
119 | generate_and_check("Top 5 Renewable Energy Sources for 2023")
--------------------------------------------------------------------------------
/enhancer/post_enhancer.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import json
3 |
4 | def get_post_by_title(site_url, title):
5 | response = requests.get(f'{site_url}/wp-json/wp/v2/posts?search={title}')
6 | posts = json.loads(response.text)
7 |
8 | if len(posts) == 0:
9 | print("No posts found with that title.")
10 | else:
11 | for post in posts:
12 | print(post['title']['rendered'])
13 | print(post['content']['rendered'])
14 |
15 | # Example usage:
16 | site_url = 'https://iminsweden.com/'
17 | title = 'A Comprehensive Overview of the Swedish Education System'
18 | get_post_by_title(site_url, title)
19 |
--------------------------------------------------------------------------------
/enhancer/translate.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | import requests
5 | import openai
6 | from serpapi import GoogleSearch
7 | from dotenv import load_dotenv
8 | from termcolor import colored
9 | from tqdm import tqdm
10 | import colorama
11 | colorama.init(autoreset=True)
12 | from config import Config
13 |
14 | load_dotenv()
15 |
16 | cfg = Config()
17 |
18 | # Configure OpenAI API key
19 | try:
20 | openai.api_key = cfg.openai_api_key
21 | browserless_api_key = cfg.browserless_api_key
22 | llm_model = cfg.llm_model
23 | fast_llm_model = cfg.fast_llm_model
24 | except KeyError:
25 | sys.stderr.write("OpenAI key configuration failed.")
26 | exit(1)
27 |
28 |
29 |
30 | def translate_content(article, language):
31 | prompt = f"""Please translate the following English article into the specified language.
32 | Make sure it's SEO optimised and grammatically correct.
33 | Language: {language}
34 | English Article: {article}"""
35 |
36 | chunked_output = ""
37 | for chunk in openai.ChatCompletion.create(
38 | model=cfg.llm_model,
39 | temperature=1,
40 | stream=True,
41 | messages=[
42 | {'role': 'system', 'content': 'You are a helpful assistant.'},
43 | {'role': 'system', 'content': "You're an expert translating from English to any language."},
44 | {'role': 'system', 'content': 'You use engaging tone of voice.'},
45 | {"role": "user", "content": prompt}
46 | ]
47 | ):
48 | content = chunk["choices"][0].get("delta", {}).get("content")
49 | if content is not None:
50 | # print(content, end='')
51 | chunked_output += content
52 |
53 | return chunked_output
--------------------------------------------------------------------------------
/example.env:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=""
2 | OPENAI_API_KEY_FAST=""
3 | SERPAPI_API_KEY=""
4 | BROWSERLESS_API_KEY=""
5 | LLM_MODEL="gpt-4-0613"
6 | FAST_LLM_MODEL="gpt-3.5-turbo-0613"
7 | TNL_API_KEY=""
8 |
9 | ##SITE DETAILS FOR A SINGLE ARTICLE - GO TO MAIN.PY LINE 319
10 | ## URL example: https://yoururl.com/xmlrpc.php
11 | SITE_URL=""
12 | WP_ADMIN_USERNAME=""
13 | WP_ADMIN_PASSWORD=""
14 |
15 | # BLOG LISTS FOR MULTIPLE ARTICLES - GO TO MAIN.PY LINE 320
16 | ## URL example: https://yoururl.com/xmlrpc.php
17 | WP_ADMIN_USERNAME_1=""
18 | WP_ADMIN_PASSWORD_1=""
19 | WP_URL_1=""
--------------------------------------------------------------------------------
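
One possible way to collect the numbered WP_*_1, WP_*_2, ... credentials described in the README (a hypothetical helper for illustration; the actual loop lives in main.py, which is only partially shown here):

```python
import os
from dotenv import load_dotenv

load_dotenv()

def load_blog_credentials():
    """Gather WP_URL_N / WP_ADMIN_USERNAME_N / WP_ADMIN_PASSWORD_N until a number is missing."""
    blogs, n = [], 1
    while os.getenv(f"WP_URL_{n}"):
        blogs.append({
            "url": os.getenv(f"WP_URL_{n}"),
            "username": os.getenv(f"WP_ADMIN_USERNAME_{n}"),
            "password": os.getenv(f"WP_ADMIN_PASSWORD_{n}"),
        })
        n += 1
    return blogs
```
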
/initiation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/initiation/__init__.py
--------------------------------------------------------------------------------
/initiation/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/initiation/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/initiation/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/initiation/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/initiation/__pycache__/kickoff.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/initiation/__pycache__/kickoff.cpython-311.pyc
--------------------------------------------------------------------------------
/initiation/__pycache__/kickoff.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/initiation/__pycache__/kickoff.cpython-39.pyc
--------------------------------------------------------------------------------
/initiation/kickoff.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | import requests
5 | import openai
6 | from serpapi import GoogleSearch
7 | from dotenv import load_dotenv
8 | from termcolor import colored
9 | from tqdm import tqdm
10 | import colorama
11 | colorama.init(autoreset=True)
12 | from config import Config
13 |
14 | load_dotenv()
15 |
16 | cfg = Config()
17 |
18 | # Configure OpenAI API key
19 | try:
20 | openai.api_key = cfg.openai_api_key
21 | browserless_api_key = cfg.browserless_api_key
22 | llm_model = cfg.llm_model
23 | except KeyError:
24 | sys.stderr.write("OpenAI key configuration failed.")
25 | exit(1)
26 |
27 | def propose_niche(user_search):
28 | input_text = f"""Your task is to propose a niche that will grow fastest in organic
29 | results and once the user agrees to the niche, your goal is to propose a name,
30 | domain name and then topics and write articles. User has also found the following on Google:
31 | '{user_search}'.
32 | Start with proposing the niche."""
33 |
34 | chunked_output = ""
35 | for chunk in openai.ChatCompletion.create(
36 | model=cfg.llm_model,
37 | stream=True,
38 | messages=[
39 | {'role': 'system', 'content': 'You are a helpful assistant.'},
40 | {'role': 'system', 'content': "You're an expert in blogging and SEO."},
41 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
42 | {"role": "user", "content": input_text}
43 | ]
44 | ):
45 | content = chunk["choices"][0].get("delta", {}).get("content")
46 | if content is not None:
47 | print(content, end='')
48 | chunked_output += content
49 |
50 | return chunked_output
51 |
--------------------------------------------------------------------------------
/interlinker/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/interlinker/__init__.py
--------------------------------------------------------------------------------
/interlinker/linker.py:
--------------------------------------------------------------------------------
1 | ## DOESN'T WORK YET
2 |
3 | from orchestrar import wp
4 | from langchain.llms import OpenAI
5 | from langchain.document_loaders import TextLoader
6 | from langchain.embeddings.openai import OpenAIEmbeddings
7 | from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
8 | from langchain.vectorstores import FAISS
9 | from langchain.agents import Tool
10 | from langchain.agents import AgentType
11 | from langchain.memory import ConversationBufferMemory
12 | from langchain import OpenAI
13 | from langchain.utilities import SerpAPIWrapper
14 | from langchain.agents import initialize_agent
15 | from langchain import PromptTemplate
16 | from langchain.embeddings import OpenAIEmbeddings
17 | from langchain.vectorstores import Chroma
18 | from langchain.document_loaders import WebBaseLoader
19 | from langchain.document_loaders import TextLoader
20 | from langchain.retrievers import SVMRetriever
21 | from langchain.chat_models import ChatOpenAI
22 | from langchain.retrievers.multi_query import MultiQueryRetriever
23 | from langchain.chains import RetrievalQA
24 | from langchain.chat_models import ChatOpenAI
25 | import logging
26 | from dotenv import load_dotenv
27 | from config import Config
28 | import openai
29 | import sys
30 |
31 | # Load the .env file
32 | load_dotenv()
33 |
34 | # Configure OpenAI API key
35 | cfg = Config()
36 | try:
37 | openai.api_key = cfg.openai_api_key
38 | except KeyError:
39 | sys.stderr.write("OpenAI key configuration failed.")
40 | exit(1)
41 |
42 | text = wp.get_all_articles('https://iminsweden.com/')
43 |
44 | def analyse_text():
45 | # Your analysis logic goes here
46 | pass
47 |
48 | def save_file(text):
49 | with open('output.txt', 'w', encoding='utf-8') as f:
50 | f.write(text)
51 |
52 | def load_file(filename='output.txt'):
53 | with open(filename, 'r', encoding='utf-8') as f:
54 | return f.read()
55 |
56 | def load_AI():
57 | loader = TextLoader("./output.txt")
58 | loader.load()
59 |
60 | if __name__ == "__main__":
61 | #text = wp.get_all_articles('https://iminsweden.com/')
62 | #save_file(text)
63 |
64 | #loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
65 | loader = TextLoader("output.txt")
66 | data = loader.load()
67 |
68 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=2500, chunk_overlap=0)
69 | all_splits = text_splitter.split_documents(data)
70 | #print(all_splits)
71 |
72 | vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())
73 | question = ("What is the Article Title of article with Article ID: 252?")
74 | docs = vectorstore.similarity_search(question)
75 |
76 | logging.basicConfig()
77 | logging.getLogger('langchain.retrievers.multi_query').setLevel(logging.INFO)
78 |
79 | retriever_from_llm = MultiQueryRetriever.from_llm(retriever=vectorstore.as_retriever(),
80 | llm=ChatOpenAI(temperature=0))
81 | unique_docs = retriever_from_llm.get_relevant_documents(query=question)
82 | len(unique_docs)
83 |
84 | #llm = ChatOpenAI(model_name="gpt-4-0613", temperature=0)
85 | #qa_chain = RetrievalQA.from_chain_type(llm, retriever=vectorstore.as_retriever())
86 | #result = qa_chain({"query": question})
87 | #print(result["result"])
88 |
89 | template = """Use the following pieces of context to answer the question at the end.
90 | If you don't know the answer, just say that you don't know, don't try to make up an answer.
91 | Use three sentences maximum and keep the answer as concise as possible.
92 | Always say "thanks for asking!" at the end of the answer.
93 | {context}
94 | Question: {question}
95 | Helpful Answer:"""
96 | QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
97 |
98 | llm = ChatOpenAI(model_name="gpt-4-0613", temperature=0)
99 | qa_chain = RetrievalQA.from_chain_type(
100 | llm,
101 | retriever=vectorstore.as_retriever(),
102 | chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
103 | )
104 | result = qa_chain({"query": question})
105 | print(result["result"])
106 |
107 | # Save text to file
108 | # save_file(text)
109 |
110 | # Load text from file as a string
111 | #loaded_text = load_file()
112 |
113 | # You can now use 'loaded_text' as a string in your script
114 | #print(loaded_text)
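linker.py is still flagged as not working; below is a condensed sketch of the retrieval flow it appears to be building, using the same langchain calls as above with the dead code removed. It assumes output.txt already exists and that the OpenAI key is configured as in the file:

    # Minimal end-to-end sketch: load exported articles, split, embed into Chroma,
    # then answer questions with a RetrievalQA chain.
    from langchain.document_loaders import TextLoader
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Chroma
    from langchain.chat_models import ChatOpenAI
    from langchain.chains import RetrievalQA

    docs = TextLoader("output.txt").load()
    splits = RecursiveCharacterTextSplitter(chunk_size=2500, chunk_overlap=0).split_documents(docs)
    store = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())

    qa_chain = RetrievalQA.from_chain_type(ChatOpenAI(temperature=0), retriever=store.as_retriever())
    print(qa_chain({"query": "What is the Article Title of article with Article ID: 252?"})["result"])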
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import json
4 | from dotenv import load_dotenv
5 | import openai
6 | import toml
7 | from halo import Halo
8 | import colorama
9 | from termcolor import colored
10 | import time
11 | from colorama import Fore, Style
12 | colorama.init(autoreset=True)
13 | from pyfiglet import Figlet
14 |
15 | from config import Config
16 |
17 | from initiation import kickoff
18 | import articles.writing
19 | from articles import skeleton
20 | from articles import writing
21 | from researcher import search
22 | from orchestrar import gutenberg
23 | from orchestrar import wp
24 | from orchestrar import blogs
25 | from enhancer import midjourney_ai
26 |
27 | import ssl
28 | ssl._create_default_https_context = ssl._create_unverified_context
29 |
30 | # Initialize colorama
31 | colorama.init(autoreset=True)
32 |
33 | # Load the .env file
34 | load_dotenv()
35 |
36 | # Configure OpenAI API key
37 | cfg = Config()
38 | try:
39 | openai.api_key = cfg.openai_api_key
40 | except KeyError:
41 | sys.stderr.write("OpenAI key configuration failed.")
42 | exit(1)
43 |
44 | # Global variables for article content
45 | article_content = ""
46 | article_title = ""
47 | article_description = ""
48 | findings = ""
49 | wp_admin = ""
50 | wp_pass = ""
51 | wp_url = ""
52 | published_articles = ""
53 | tone = ""
54 | keywords = ""
55 |
56 | def process_section(section, level=2, max_depth=5):
57 | """
58 | Process a section or subsection of the article.
59 | """
60 | section_type = 'section' if level == 2 else 'subsection'
61 | section_content = ""
62 | spinner = Halo(text=f'Processing {section_type}: {section[f"Heading_H{level}"]}', spinner='dots')
63 | spinner.start()
64 |
65 | # Write section or subsection
66 | section_content = articles.writing.write_section(article_title,
67 | article_description,
68 | section[f'Heading_H{level}'],
69 | section['Description'],
70 | tone)
71 |
72 | spinner.succeed(f"Finished processing {section_type}: {section[f'Heading_H{level}']}")
73 |
74 | # Process subsections if they exist and the maximum depth has not been reached
75 | if 'SubSections' in section and level < max_depth:
76 | for sub_section in section['SubSections']:
77 | section_content += process_section(sub_section, level + 1)
78 |
79 |     return "\n\n" + f"<h{level}>" + section[f'Heading_H{level}'] + f"</h{level}>" + "\n\n" + section_content
80 |
81 |
82 | def process_json(json_string):
83 | """
84 | Process the JSON string to generate the article content.
85 | """
86 | global article_content
87 |     global article_title, article_description  # Declared global so process_section() sees the parsed description
88 | spinner = Halo(text='Parsing JSON', spinner='dots')
89 | spinner.start()
90 |
91 | data = json.loads(json_string)
92 | article_title = data['Title'] # This now refers to the global variable
93 | if findings.strip():
94 | article_description = data['Description'] + " " +f"""{findings}"""
95 | #print("\n\n\n\nArticle_description: ", article_description)
96 | #print("\n\n\n\n")
97 | else:
98 | article_description = data['Description']
99 |
100 | spinner.succeed('Finished parsing JSON')
101 |
102 | # Add the intro to the article content
103 | article_content += writing.write_intro(article_title, tone) + "\n\n"
104 |
105 | for section in data['Sections']:
106 | article_content += process_section(section)
107 |
108 | return article_content
109 |
110 | def is_json(json_string):
111 | """
112 | Check if a string is valid JSON.
113 | """
114 | try:
115 | json.loads(json_string)
116 | except ValueError:
117 | return False
118 | return True
119 |
120 | def wait_for_image(image_name):
121 | """
122 | Function to wait for the image file to appear in the specified directory
123 | """
124 | # Define the base directory
125 | base_dir = "./temp/imgs/"
126 |
127 | # Use os.path.join to create the full path
128 | image_path = os.path.join(base_dir, image_name)
129 | print(f"Looking for image at: {image_path}")
130 |
131 | # Keep checking for the image file until it appears
132 | while True:
133 | try:
134 | if os.path.isfile(image_path):
135 | print("Image file found.")
136 | break
137 | else:
138 | print("Image file not found. Waiting...")
139 | time.sleep(5) # wait for 5 seconds before checking again
140 | except Exception as e:
141 | print(f"Error while checking for image: {e}")
142 | break
143 |
144 |
145 | def article(title=None, category=None):
146 | """
147 | Main function to generate the article.
148 | """
149 | global findings
150 | global article_content
151 | global article_title
152 |
153 | # Reset article_content
154 | article_content = ""
155 |
156 | if title is None:
157 | title = input("Please enter the article title: ")
158 | else:
159 | print(f"Article Title: {title}")
160 | if category is None:
161 | category = input("Please enter the article category: ")
162 | else:
163 | print(f"Article Category: {category}")
164 |
165 | # RESEARCH
166 | if title is not None and category is not None:
167 | research = 'y'
168 | else:
169 | research = input("Do you want me to research the internet? (y/n): ")
170 | if research == 'y':
171 | search_results = search.go(title)
172 | findings = f"""This is additional info from user you need to incorporate: {search_results}"""
173 | print(colored("\n" + "################### RESEARCH FINISHED ###################\n", "green", attrs=["bold"]))
174 |
175 | # ARTICLE TYPE
176 | if title is not None and category is not None:
177 | article_type = 'a'
178 | else:
179 | article_type = input("Do you want Article or Product Review? (a/p): ")
180 | spinner = Halo(text='Preparing Structure of Article', spinner='dots')
181 | spinner.start()
182 |
183 | article_skeleton = ""
184 | while not is_json(article_skeleton):
185 | try:
186 | if article_type == 'a':
187 | article_skeleton = skeleton.write_skeleton(title)
188 | elif article_type == 'p':
189 | article_skeleton = skeleton.write_skeleton_product_review(title)
190 | except Exception as e:
191 | spinner.fail(str(e))
192 | else:
193 | spinner.succeed("Finished writing the article skeleton")
194 |
195 | # PROCESS SECTIONS
196 | try:
197 | article_content += process_json(article_skeleton)
198 | except Exception as e:
199 | spinner.fail(str(e))
200 | else:
201 | spinner.succeed("Finished processing JSON")
202 |
203 | print(colored("\n" + "################### ARTICLE GENERATED ###################", "green",
204 | attrs=["bold"]))
205 |
206 | # SAVE TO TXT FILE
207 | if title is not None and category is not None:
208 | save_to_txt = 'y'
209 | else:
210 | save_to_txt = input("Do you want to save this article to a txt file? (y/n): ")
211 | if save_to_txt == 'y':
212 | with open(f"./temp/articles/{title}.txt", "w") as file:
213 | file.write(article_content)
214 | print(colored("\nArticle saved to txt file.", "green", attrs=["bold"]))
215 |
216 | # GENERATE IMAGES
217 | featured_image_name = midjourney_ai.generate_image(title)
218 |
219 | # Wait for the image file to appear
220 | wait_for_image(featured_image_name + "_small.jpg")
221 |
222 | # Define the base directory
223 | base_dir = "/temp/imgs"
224 |
225 | # Use os.path.join to create the full path
226 | featured_image_path = os.path.join(base_dir, featured_image_name + "_small.jpg")
227 | featured_image_path = "." + featured_image_path
228 |
229 | # WORDPRESS IMPORT
230 | if title is not None and category is not None:
231 | wp_import = 'y'
232 | else:
233 | wp_import = input("Do you want to import this article to WordPress? (y/n): ")
234 | if wp_import == 'y':
235 | print(colored("\n" + "################### WORDPRESS IMPORT ###################", "green",
236 | attrs=["bold"]))
237 |
238 | spinner = Halo(text='Preparing article for WordPress import', spinner='dots')
239 | spinner.start()
240 | try:
241 | to_wordpress = gutenberg.convert_to_gutenberg_blocks(article_content)
242 | tags = [category]
243 | wp.post_to_wordpress(article_title, to_wordpress, category, tags, featured_image_path, wp_admin, wp_pass, wp_url)
244 | except Exception as e:
245 | spinner.fail(str(e))
246 | else:
247 | spinner.succeed("Article imported to WordPress\n\n")
248 |
249 | def parse_blog_articles():
250 | data = json.loads(get_blog_details())
251 |
252 | for item in data:
253 | #print(f'Category: {item["Category"]}, Title: {item["Title"]}')
254 | article(item["Title"], item["Category"])
255 |
256 | def get_blog_details():
257 | global wp_admin
258 | global wp_pass
259 | global wp_url
260 | global tone
261 | global keywords
262 |
263 | data = toml.load("blogs/blogs.toml")
264 |
265 | # print all the blogs and ask user to choose one
266 | print("List of blogs:")
267 | for i, blog in enumerate(data["blog"], start=1):
268 | print(f"{i}. {blog['name']}")
269 |
270 | # ask user for blog number
271 | blog_number = int(input("\nEnter the number of the blog: "))
272 | # get the chosen blog
273 | chosen_blog = data["blog"][blog_number - 1]
274 |
275 | # get the WP admin username and password from the .env file
276 | wp_admin = os.getenv(f"WP_ADMIN_USERNAME_{chosen_blog['id']}")
277 | wp_pass = os.getenv(f"WP_ADMIN_PASSWORD_{chosen_blog['id']}")
278 | wp_url = os.getenv(f"WP_URL_{chosen_blog['id']}")
279 |
280 | print(f"\nBlog Name: {chosen_blog['name']}")
281 | print(f"Description: {chosen_blog['description']}")
282 | print(f"URL: {chosen_blog['url']}")
283 | print(f"Tone: {chosen_blog['tone']}")
284 | print(f"Keywords: {chosen_blog['keywords']}")
285 | #print(f"WordPress Admin: {wp_admin}")
286 | #print(f"WordPress Password: {wp_pass}")
287 |
288 | print("\n")
289 |
290 | tone = chosen_blog['tone']
291 | keywords = chosen_blog['keywords']
292 |
293 | # get the published articles from the blog
294 | spinner = Halo(text='Loading existing articles...', spinner='dots')
295 | spinner.start()
296 | try:
297 | published_articles = wp.get_all_posts_titles(chosen_blog['url'])
298 | except Exception as e:
299 | spinner.fail(str(e))
300 | else:
301 | spinner.succeed("Articles loaded")
302 |
303 | # generate the JSON for the articles
304 | spinner = Halo(text='Generating articles JSON', spinner='dots')
305 | spinner.start()
306 | try:
307 | json_articles = blogs.init_blog(chosen_blog['description'],published_articles, tone, keywords)
308 | except Exception as e:
309 | spinner.fail(str(e))
310 | else:
311 | spinner.succeed("JSON Generated")
312 |
313 | print(json_articles)
314 | return json_articles
315 |
316 | if __name__ == "__main__":
317 | f = Figlet(font='big', width=300)
318 | print(Fore.CYAN + Style.BRIGHT + f.renderText('AI BLOG PILOT'))
319 | #article()
320 | parse_blog_articles()
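For reference, process_json() above expects the skeleton returned by skeleton.write_skeleton() to follow roughly this shape; the key names are taken from the lookups in process_json()/process_section(), while the values are invented placeholders:

    # Illustrative skeleton accepted by process_json(); values are placeholders.
    import json

    example_skeleton = {
        "Title": "Example Article Title",
        "Description": "One-paragraph brief used when writing each section.",
        "Sections": [
            {
                "Heading_H2": "First Section",
                "Description": "What this section should cover.",
                "SubSections": [
                    {"Heading_H3": "A Subsection", "Description": "Details for this subsection."}
                ]
            }
        ]
    }

    # article() only accepts skeletons that pass is_json(), e.g.:
    # process_json(json.dumps(example_skeleton))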
--------------------------------------------------------------------------------
/orchestrar/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/orchestrar/.DS_Store
--------------------------------------------------------------------------------
/orchestrar/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/orchestrar/__init__.py
--------------------------------------------------------------------------------
/orchestrar/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/orchestrar/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/orchestrar/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/orchestrar/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/orchestrar/__pycache__/blogs.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/orchestrar/__pycache__/blogs.cpython-311.pyc
--------------------------------------------------------------------------------
/orchestrar/__pycache__/blogs.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/orchestrar/__pycache__/blogs.cpython-39.pyc
--------------------------------------------------------------------------------
/orchestrar/__pycache__/gutenberg.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/orchestrar/__pycache__/gutenberg.cpython-311.pyc
--------------------------------------------------------------------------------
/orchestrar/__pycache__/gutenberg.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/orchestrar/__pycache__/gutenberg.cpython-39.pyc
--------------------------------------------------------------------------------
/orchestrar/__pycache__/wp.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/orchestrar/__pycache__/wp.cpython-311.pyc
--------------------------------------------------------------------------------
/orchestrar/__pycache__/wp.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xroyce/AIBlogPilotGPT/a2e446d8d89467e0a6e7789ff4480c234df2e6e0/orchestrar/__pycache__/wp.cpython-39.pyc
--------------------------------------------------------------------------------
/orchestrar/blogs.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import json
4 | from dotenv import load_dotenv
5 | import openai
6 | import toml
7 | from halo import Halo
8 | import colorama
9 | from termcolor import colored
10 |
11 | from config import Config
12 |
13 | # Load the .env file
14 | load_dotenv()
15 |
16 | # Configure OpenAI API key
17 | cfg = Config()
18 | try:
19 | openai.api_key = cfg.openai_api_key
20 | except KeyError:
21 | sys.stderr.write("OpenAI key configuration failed.")
22 | exit(1)
23 |
24 |
25 | def init_blog(description, published_articles, toneofvoice, keywords):
26 | if keywords is None:
27 | prompt = """I have the following blog with the following description: """ + description + """.
28 | The blog has published articles with titles: """ + str(published_articles) + """.
29 | The blog has following tone of voice: """ + str(toneofvoice) + """.
30 | I want you to propose next 15 article titles with categories but be creative. Rotate categories randomly.
31 | Focus on articles with long-tail keywords, have high potential to rank in top positions on Google
32 | and titles will have high click through rate. Read the categories from description in random order.
33 | The titles will have maximum of 9 words.
34 | If you want to use year, it should be 2023 but make sure it makes sense to use year!
35 | Output all in JSON format. Strictly follow the following JSON format:
36 |
37 | {
38 | "Category": "Category name",
39 | "Title": "Article title"
40 | }
41 | """
42 | else:
43 | prompt = """I have the following blog with the following description: """ + description + """.
44 | The blog has following articles with titles: """ + str(published_articles) + """.
45 | The blog has following tone of voice: """ + str(toneofvoice) + """.
46 | I want you to propose next 3 article titles with categories but be creative. Rotate categories randomly.
47 | Focus on articles with following keywords: """ +str(keywords)+""". You can create and use long-tail version of them.
48 | Titles will have high click through rate. Read the categories from description in random order.
49 | The titles will have maximum of 9 words.
50 | If you want to use year, it should be 2023 but make sure it makes sense to use year!
51 | Output all in JSON format. Strictly follow the following JSON format:
52 |
53 | {
54 | "Category": "Category name",
55 | "Title": "Article title"
56 | }
57 | """
58 |
59 | chunked_output = ""
60 | for chunk in openai.ChatCompletion.create(
61 | model=cfg.llm_model,
62 | temperature=0.7,
63 | stream=True,
64 | messages=[
65 | {'role': 'system', 'content': 'Your name is BloggingGPT.'},
66 | {'role': 'system', 'content': "You're an expert in blogging, research and SEO."},
67 | {'role': 'system', 'content': 'You strictly return content user asked for only.'},
68 | {"role": "user", "content": prompt}
69 | ]
70 | ):
71 | content = chunk["choices"][0].get("delta", {}).get("content")
72 | if content is not None:
73 | #print(content, end='')
74 | chunked_output += content
75 |
76 | return chunked_output
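Note that parse_blog_articles() in main.py calls json.loads() on this output and iterates over the result, so in practice the model needs to return a JSON array of such objects. An invented example of a shape that parses cleanly:

    # Illustrative model output that main.parse_blog_articles() can consume.
    import json

    example_output = """[
      {"Category": "Getting Started", "Title": "How to Plan Your First Garden in 2023"},
      {"Category": "Tools", "Title": "9 Budget Tools Every New Gardener Needs"}
    ]"""

    for item in json.loads(example_output):
        print(item["Category"], "-", item["Title"])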
--------------------------------------------------------------------------------
/orchestrar/gutenberg.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import openai
3 | from dotenv import load_dotenv
4 | import colorama
5 | colorama.init(autoreset=True)
6 | from config import Config
7 | import re
8 |
9 | from wordpress_xmlrpc import Client, WordPressPost
10 | from wordpress_xmlrpc.methods.posts import NewPost
11 |
12 | import ssl
13 | ssl._create_default_https_context = ssl._create_unverified_context
14 |
15 | load_dotenv()
16 |
17 | cfg = Config()
18 |
19 | # Configure OpenAI API key
20 | try:
21 | openai.api_key = cfg.openai_api_key
22 | browserless_api_key = cfg.browserless_api_key
23 | llm_model = cfg.llm_model
24 | except KeyError:
25 | sys.stderr.write("OpenAI key configuration failed.")
26 | exit(1)
27 |
28 |
29 | def convert_to_gutenberg_blocks(text):
30 | # Split the text into paragraphs
31 | paragraphs = text.split('\n\n')
32 |
33 | # Initialize an empty list to hold the blocks
34 | blocks = []
35 |
36 | # Iterate over each paragraph
37 | for paragraph in paragraphs:
38 | # Split the paragraph into lines
39 | lines = paragraph.split('\n')
40 |
41 | # Initialize an empty string to hold the current paragraph
42 | current_paragraph = ""
43 |
44 | # Iterate over each line
45 | for line in lines:
46 | # Check if the line is a headline
47 |             if re.match(r"^<h[1-6]>", line):
48 | # If there's a current paragraph, add it as a block
49 | if current_paragraph:
50 | blocks.append(create_paragraph_block(current_paragraph))
51 | current_paragraph = ""
52 |
53 | # Add the headline as a block
54 | blocks.append(create_heading_block(line))
55 |
56 | # Check if the line is a link
57 | elif re.search(r"\[(.*?)\]\((.*?)\)", line):
58 | # If there's a current paragraph, add it as a block
59 | if current_paragraph:
60 | blocks.append(create_paragraph_block(current_paragraph))
61 | current_paragraph = ""
62 |
63 | # Add the link as a block
64 | blocks.append(create_link_block(line))
65 |
66 | # Otherwise, add the line to the current paragraph
67 | else:
68 | current_paragraph += line
69 |
70 | # If there's a current paragraph, add it as a block
71 | if current_paragraph:
72 | blocks.append(create_paragraph_block(current_paragraph))
73 |
74 |     # Return the blocks as an HTML string
75 | return '\n'.join(blocks)
76 |
77 | def create_paragraph_block(text):
78 |     # Replace **text** with <strong>text</strong>
79 |     text = re.sub(r"\*\*(.*?)\*\*", r"<strong>\1</strong>", text)
80 |     return '<!-- wp:paragraph --><p>{}</p><!-- /wp:paragraph -->'.format(text)
81 |
82 | def create_link_block(text):
83 |     # Replace [text](url) with <a href="url">text</a>
84 |     text = re.sub(r"\[(.*?)\]\((.*?)\)", r'<a href="\2">\1</a>', text)
85 | return '