├── .gitignore
├── LICENSE
├── README.md
├── next.config.mjs
├── package-lock.json
├── package.json
├── public
│   ├── next.svg
│   └── vercel.svg
├── screenshot.png
├── src
│   └── app
│       ├── api
│       │   ├── auth
│       │   │   └── [...nextauth]
│       │   │       └── route.ts
│       │   └── twitter
│       │       └── route.js
│       ├── favicon.ico
│       ├── globals.css
│       ├── layout.tsx
│       ├── page.module.css
│       ├── page.tsx
│       └── providers.js
└── tsconfig.json
/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 | .yarn/install-state.gz
8 |
9 | # testing
10 | /coverage
11 |
12 | # next.js
13 | /.next/
14 | /out/
15 |
16 | # production
17 | /build
18 |
19 | # misc
20 | .DS_Store
21 | *.pem
22 |
23 | # debug
24 | npm-debug.log*
25 | yarn-debug.log*
26 | yarn-error.log*
27 |
28 | # local env files
29 | .env*.local
30 |
31 | # vercel
32 | .vercel
33 |
34 | # typescript
35 | *.tsbuildinfo
36 | next-env.d.ts
37 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License
2 |
3 | Copyright (c) Laurie Voss
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Newsletter Generator
2 |
3 | ## We make a lot of content
4 |
5 | At LlamaIndex we [tweet a lot](https://x.com/llama_index). We put quite a lot of effort into these tweets: we carefully create content, or curate it from the community, and we link to it with carefully-crafted summaries highlighting what's interesting about it, what you'll learn, and why it's important to our audience of AI Engineers. Each tweet takes between 30 and 60 minutes to write, mostly the time it takes to read the underlying content and ferret out the important parts.
6 |
7 | ## We put it in our newsletter
8 |
9 | But tweets are fleeting, and it's easy to miss them. So every week we publish a newsletter, and the newsletter is basically a summary of what we tweeted, roughly categorized by topic. The newsletter is very popular -- something like 30,000 people subscribe to it, of whom about 40% open it every week, which is really high for a newsletter that goes out so often.
10 |
11 | ## Writing the newsletter is a lot of work
12 |
13 | But unlike the tweets, crafting the newsletter is pretty mechanical: you just scroll back through the tweets, rephrase them, and put them in the right section. It's a bit of a slog, but it's not hard. It's just time-consuming -- it can take a couple of hours.
14 |
15 | ## Can AI help?
16 |
17 | So I thought: this seems like something AI can help with. Reading information, summarizing it, transforming it? It's a process that requires no creativity. This is the kind of thing AI is good at!
18 |
19 | ## The result: Newsletter Generator
20 |
21 | And this is the result: a web app (open source, of course!) that helps us craft our newsletter. You can see it in action in this 90-second video:
22 |
23 | [![Screenshot of the Newsletter Generator](screenshot.png)](https://www.youtube.com/watch?v=3_RciT8yi9I)
24 |
25 | ## What it does:
26 |
27 | * Logs in with our Twitter account (and only ours)
28 | * Loads the last week's worth of tweets
29 | * Finds the most popular ones (as measured by likes)
30 | * These become the highlights of the week
31 | * Groups the rest by topic
32 | * Stuff about our paid products, LlamaCloud and LlamaParse
33 | * Changes to the LlamaIndex framework itself
34 | * Interesting stuff from the community
35 | * It pulls content and links out of the tweets and summarizes each one
36 | * It generates a markdown draft of the newsletter
37 | * It lets us edit the draft, because it doesn't always get it right
38 |
39 | ## How it works
40 |
41 | **Caution: nitty-gritty technical details, probably only web developers will care.**
42 |
43 | This is a full-stack Next.js application using [LlamaIndex.TS](https://ts.llamaindex.ai/), the TypeScript version of LlamaIndex. It's deployed on Vercel's serverless platform. There are several tricky parts that I'm noting here to help others attempting to build similar apps:
44 |
45 | * **next.config.mjs**: There are two vitally important lines necessary to make LlamaIndex work in Vercel's serverless environment:
46 | * `serverComponentsExternalPackages: ['sharp', 'onnxruntime-node']`
47 | * I wish I could tell you why Vercel needs this, but I don't. The error I was getting was `- error ./node_modules/onnxruntime-node/bin/napi-v3/darwin/arm64/onnxruntime_binding.node Module parse failed: Unexpected character '�' (1:0)` and [this helpful issue](https://github.com/xenova/transformers.js/issues/210) pointed me to what I needed to do.
48 | * `outputFileTracingIncludes: { "/api/*": ["./node_modules/**/*.wasm"], }`
49 | * This one I understand a bit better: Vercel attempts to optimize bundle sizes by excluding code that doesn't run, and it gets it wrong sometimes. This adds an override that forces it to include a missing `tiktoken_bg.wasm` file that LlamaIndex needs. Thanks to [this issue](https://github.com/orgs/vercel/discussions/1278) for the pointer.
50 | * **Twitter auth**: I'm using [next-auth](https://next-auth.js.org/) to help with the vagaries of Twitter's OAuth 2.0 login flow, but it's still quite the pain to get right. I used [Claude](https://claude.ai/) a lot to help me put this together, because the docs are spartan. The key file is [src/app/api/auth/\[...nextauth\]/route.ts](src/app/api/auth/\[...nextauth\]/route.ts) which does all the tedious JWT token wrangling necessary to get Twitter to give you the bearer token you need to make API calls. This is also where we configure the app to only let you log in as our Twitter account, because otherwise you could use up a lot of our OpenAI API credits playing with it.
51 | * **Frontend**: mostly a [vanilla React application](src/app/page.tsx), though to handle streaming responses from the API I'm using an `EventSource`, which is worth looking at if you've not used it before (there's a minimal sketch of the pattern after this list). Markdown rendering is handled by the excellent [markdown-it](https://markdown-it.github.io/) package.
52 | * **API**: the real [meat of this app](src/app/api/twitter/route.js).
53 | * First, an extensive one-shot example of a previous, human-written newsletter. This is where the AI gets the pattern of what the newsletter should look like. It's really good at copying our house style!
54 | * Then we fetch all the tweets from the last 10 days (more than a week, because the newsletter is written on Tuesdays and we don't want to miss anything). There are some subtleties here, mostly around Twitter quirks like needing to specify `fields` to get more than just the tweet text, in particular the undocumented `note_tweet` field, which contains the full text of tweets that go over 280 characters, as nearly all of ours do.
55 | * We sort the tweets by their public likes and pick the top 3 as highlights.
56 | * We have an honestly pretty short prompt about what to do. Nearly all of the instructions about how to format this newsletter are implicit in the example newsletter we gave it, so we're just emphasizing a few points.
57 | * Then we stream the response. This was a real pain to get right, getting the headers correct and making sure the LLM and the `complete` method are both set to stream, but it works flawlessly once you do.
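
To make the `EventSource` pattern mentioned in the Frontend bullet concrete, here is a minimal client-side sketch. It assumes the `/api/twitter` route emits `data: {"chunk": ...}` events (as the API route in this repo does); the component and state names are illustrative, not the actual code in [src/app/page.tsx](src/app/page.tsx).

```tsx
"use client";
import { useState } from "react";
import markdownit from "markdown-it";

const md = markdownit();

export function NewsletterDraft() {
  const [markdown, setMarkdown] = useState("");

  const generate = () => {
    // EventSource opens a long-lived GET request and fires a message
    // event for every `data:` line the server sends.
    const source = new EventSource("/api/twitter");
    source.onmessage = (event) => {
      const { chunk } = JSON.parse(event.data);
      setMarkdown((prev) => prev + chunk);
    };
    // When the server closes the stream (or anything goes wrong),
    // stop EventSource from automatically reconnecting.
    source.onerror = () => source.close();
  };

  return (
    <div>
      <button onClick={generate}>Generate newsletter</button>
      {/* markdown-it turns the accumulated markdown into HTML for preview */}
      <div dangerouslySetInnerHTML={{ __html: md.render(markdown) }} />
    </div>
  );
}
```

The real page also handles login state and lets you edit the draft, but the core streaming loop looks roughly like this.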
58 |
59 | ## What's next
60 |
61 | There are some improvements I'm considering, like following the links themselves and reading the underlying content so the summaries can describe it better, and searching the web for people's names so we can link to them directly, but I'm really happy with this as-is. It's not enough to totally automate our newsletter, but it will take the time required down from hours to minutes.
62 |
63 | Plus the whole thing is open source! Feel free to copy anything out of this you find helpful.
64 |
--------------------------------------------------------------------------------
/next.config.mjs:
--------------------------------------------------------------------------------
1 | import CopyWebpackPlugin from 'copy-webpack-plugin';
2 |
3 | const nextConfig = {
4 | experimental: {
5 | serverComponentsExternalPackages: ['sharp', 'onnxruntime-node'], // keep these packages out of the bundle; without this, onnxruntime-node fails to parse on Vercel (see README)
6 | outputFileTracingIncludes: { "/api/*": ["./node_modules/**/*.wasm"], } // force Vercel's file tracing to include tiktoken_bg.wasm, which LlamaIndex needs (see README)
7 | },
8 | };
9 |
10 | export default nextConfig;
11 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "newsletter-generator",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "dev": "next dev",
7 | "build": "next build",
8 | "start": "next start",
9 | "lint": "next lint",
10 | "type-check": "tsc --noEmit"
11 | },
12 | "dependencies": {
13 | "copy-webpack-plugin": "^12.0.2",
14 | "llamaindex": "^0.5.19",
15 | "markdown-it": "^14.1.0",
16 | "next": "14.2.6",
17 | "next-auth": "^4.24.7",
18 | "react": "^18",
19 | "react-dom": "^18"
20 | },
21 | "devDependencies": {
22 | "@types/markdown-it": "^14.1.2",
23 | "@types/node": "^20",
24 | "@types/react": "^18",
25 | "@types/react-dom": "^18",
26 | "typescript": "^5.5.4"
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/public/next.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/public/vercel.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/newsletter-generator/7b7be3eb8e715f12ada8a90e41245b999b0828bb/screenshot.png
--------------------------------------------------------------------------------
/src/app/api/auth/[...nextauth]/route.ts:
--------------------------------------------------------------------------------
1 | import NextAuth from "next-auth"
2 | import TwitterProvider from "next-auth/providers/twitter"
3 | import { JWT } from "next-auth/jwt"
4 |
5 | async function getTwitterUsername(accessToken: string): Promise<string> {
6 | const response = await fetch('https://api.twitter.com/2/users/me', {
7 | headers: {
8 | Authorization: `Bearer ${accessToken}`,
9 | },
10 | });
11 | const data = await response.json();
12 | return data.data.username;
13 | }
14 |
15 | if (!process.env.TWITTER_CLIENT_ID || !process.env.TWITTER_CLIENT_SECRET) {
16 | throw new Error('Missing Twitter OAuth credentials');
17 | }
18 |
19 | export const authOptions = {
20 | providers: [
21 | TwitterProvider({
22 | clientId: process.env.TWITTER_CLIENT_ID,
23 | clientSecret: process.env.TWITTER_CLIENT_SECRET,
24 | version: "2.0", // opt-in to Twitter OAuth 2.0
25 | })
26 | ],
27 | callbacks: {
28 | async signIn({ user, account }: { user: any; account: any }) {
29 | const username = await getTwitterUsername(account.access_token);
30 | // Check if the user's Twitter username matches the allowed username
31 | if (username === process.env.TWITTER_USER) {
32 | return true; // Allow sign in
33 | } else {
34 | return false; // Deny sign in
35 | }
36 | },
37 | async jwt({ token, account }: { token: JWT; account: any }) {
38 | // Persist the OAuth access_token to the token right after signin
39 | if (account) {
40 | token.accessToken = account.access_token
41 | }
42 | return token
43 | },
44 | async session({ session, token}: { session: any; token: JWT }) {
45 | // Send properties to the client, like an access_token from a provider.
46 | session.accessToken = token.accessToken
47 | return session
48 | }
49 | }
50 | }
51 |
52 | const handler = NextAuth(authOptions)
53 | export { handler as GET, handler as POST }
54 |
--------------------------------------------------------------------------------
/src/app/api/twitter/route.js:
--------------------------------------------------------------------------------
1 | import { getServerSession } from "next-auth/next"
2 | import { authOptions } from "../auth/[...nextauth]/route"
3 | import { NextRequest, NextResponse } from 'next/server';
4 | import { OpenAI } from "llamaindex";
5 | import util from 'node:util';
6 |
7 | const exampleNewsletter = `
8 | Hi there, Llama Lovers! 🦙
9 |
10 | Welcome to this week's edition of the LlamaIndex newsletter! We're excited to bring you updates including the Multimodal Report Generation Guide, a revamped Multi-Agent Concierge Workflow, robust Box integration for efficient data management, and innovative Event-Driven RAG Templates. Check out these developments along with our comprehensive guides and tutorials to maximize your use of these new features.
11 |
12 | If you haven't explored LlamaCloud yet, make sure to [sign up](https://cloud.llamaindex.ai/) and [get in touch with us](https://www.llamaindex.ai/contact) to discuss your specific enterprise use case.
13 |
14 | 🤩 **The highlights:**
15 |
16 | - **Multimodal Report Generation Guide:** Build a multi-agent system with LlamaParse and LlamaIndex to generate detailed reports combining text and images from complex data. [Notebook](https://github.com/run-llama/llama_parse/blob/main/examples/multimodal/multimodal_report_generation_agent.ipynb), [Tweet](https://x.com/llama_index/status/1824483475338170541).
17 | - **Multi-Agent Concierge Workflow:** Redesign of our financial concierge system with LlamaIndex's enhanced Workflows for improved looping, branching, debugging, and visualization. [Video](https://www.youtube.com/watch?v=DqiIDMxuoKA&feature=youtu.be), [Tweet](https://x.com/llama_index/status/1823425199704039863).
18 | - **Event-Driven RAG Templates:** Use our event-driven workflows to implement techniques from key RAG papers---LongRAG, CorrectiveRAG, Self-Discover RAG---with added visualization and debugging, available as templates or for custom development. [Tweet](https://x.com/llama_index/status/1824833283928264952).
19 | - **Box Integration in LlamaIndex:** New Box Readers integrated into LlamaIndex workflows facilitate efficient data extraction and authentication for enhanced AI applications. [Blogpost](https://medium.com/box-developer-blog/introducing-box-llama-index-reader-13903442a9e6), [Tweet](https://x.com/llama_index/status/1823464513301307787).
20 |
21 | **🗺️ LlamaCloud And LlamaParse:**
22 |
23 | - Guide to Building a Multimodal Report Generation Agent using LlamaParse and LlamaIndex workflows to develop a multi-agent system that generates detailed reports with text and images from complex data sources. [Notebook](https://github.com/run-llama/llama_parse/blob/main/examples/multimodal/multimodal_report_generation_agent.ipynb), [Tweet](https://x.com/llama_index/status/1824483475338170541).
24 |
25 | **✨ Framework:**
26 |
27 | 1. Event-Driven Templates for Advanced RAG + Agent Techniques of three key RAG and agent papers - LongRAG, CorrectiveRAG, Self-Discover RAG, using our event-driven workflows, complete with visualization and debug features, available as templates or for custom development. [Tweet](https://x.com/llama_index/status/1824833283928264952).
28 | 2. We have integrated Box documents into LlamaIndex workflows with new Box Readers, enabling efficient data extraction, authentication, and retrieval to enhance your LLM applications with robust, data-driven AI solutions. [Blogpost](https://medium.com/box-developer-blog/introducing-box-llama-index-reader-13903442a9e6), [Tweet](https://x.com/llama_index/status/1823464513301307787).
29 | 3. Multi-Agent Concierge as a Workflow, re-implementation of our financial concierge system using LlamaIndex's new Workflows abstraction, which supports looping, branching, debugging, and automatic visualization. [Video](https://www.youtube.com/watch?v=DqiIDMxuoKA&feature=youtu.be), [Tweet](https://x.com/llama_index/status/1823425199704039863).
30 |
31 | **✍️ Community:**
32 |
33 | - [Dave Bechberger's](https://x.com/bechbd) [tutorial](https://medium.com/@bechbd/knowledge-graphs-and-generative-ai-graphrag-with-amazon-neptune-and-llamaindex-part-1-39cd7255bac4) on Building a Natural Language Querying System for Graph Databases using LlamaIndex with Amazon Neptune to translate natural language into openCypher queries, execute them, and optimize with Amazon Bedrock's LLMs.
34 | - [Ravi Theja's](https://x.com/ravithejads) video [tutorial](https://www.youtube.com/watch?v=Skm70sGaME4) on rebuilding JSONalyze Query Engine using workflows.
35 | - [BeyondLLM](https://github.com/aiplanethub/beyondllm) by AI Planet Hub simplifies the development of advanced RAG pipelines to 5-7 lines of code, with features like auto-retrieval, reranking, and embedding fine-tuning. It integrates with Arize AI Phoenix for comprehensive evaluation and observability.
36 | - [Richmond Alake's](https://x.com/richmondalake) [video tutorial](https://www.youtube.com/watch?v=UfBQxl_Pe1w) on implementing Agentic RAG Using Claude 3.5 Sonnet, LlamaIndex, and MongoDB.
37 | - Rajib Deb's [video tutorial](https://www.youtube.com/watch?v=UFCpF6W2j3w) on Workflows, highlighting decorators for control flow, event-driven chaining, and custom orchestration steps.
38 | - [Tomaz Bratanic's](https://x.com/tb_tomaz) Neo4j [tutorial](https://medium.com/neo4j/entity-linking-and-relationship-extraction-with-relik-in-llamaindex-ca18892c169f) demonstrates using the Relik framework for information extraction, integrating spaCy, Coreferee, LlamaIndex, and Neo4j for entity linking, relationship extraction, and graph-based question answering.
39 | - [Andrei](https://x.com/_nerdai_) [video tutorial](https://www.youtube.com/watch?v=3yG--HKxmi8) on discussing llama-agents, our framework for building multi-agent systems with a focus on production use cases.
40 | - [Ravi Theja's](https://x.com/ravithejads) video [tutorial](https://www.youtube.com/watch?v=P4xHWojIB-M) on re-building our Citation Query Engine using workflows.
41 | - [Farzad Sunavala's](https://hashnode.com/@Farzzy528) [guide](https://farzzy.hashnode.dev/exploring-llamaindex-workflows-a-step-by-step-guide-to-building-a-rag-system-with-azure-ai-search-and-azure-openai) to Building a RAG System with Azure AI Search and Azure OpenAI using LlamaIndex workflows.
42 | - [Benito Martin's](https://medium.com/@benitomartin) [tutorial](https://medium.com/@benitomartin/cooking-with-ai-building-a-smart-multimodal-recipe-recommender-using-qdrant-llamaindex-and-2d6d1fa6566c) on Building a Smart Multimodal Recipe Recommender using Qdrant, LlamaIndex, and Google Gemini.
43 | `
44 |
45 | function getDateDaysAgo(days) {
46 | // Create a new Date object for the current date
47 | const date = new Date();
48 |
49 | // Subtract days
50 | date.setDate(date.getDate() - days);
51 |
52 | // Set the time to 00:00:00
53 | date.setHours(0, 0, 0, 0);
54 |
55 | // Convert to ISO string and remove milliseconds
56 | let isoString = date.toISOString().split('.')[0] + 'Z';
57 |
58 | return isoString;
59 | }
60 |
61 | function listTweets(tweets) {
62 | return tweets.map( (tweet) => {
63 | if (tweet.note_tweet) {
64 | return tweet.note_tweet.text
65 | } else {
66 | return tweet.text
67 | }
68 | }).join('\n')
69 | }
70 |
71 | export async function GET(request) {
72 | const session = await getServerSession(authOptions)
73 | const encoder = new TextEncoder();
74 |
75 | if (!session || !session.accessToken) {
76 | return new Response(JSON.stringify({ error: "Not authenticated" }), {
77 | status: 401,
78 | headers: { 'Content-Type': 'application/json' },
79 | })
80 | }
81 |
82 | let userId = false
83 | try {
84 | const response = await fetch(`https://api.twitter.com/2/users/me`, {
85 | headers: {
86 | Authorization: `Bearer ${session.accessToken}`
87 | }
88 | })
89 | const data = await response.json()
90 | userId = data.data.id
91 | } catch (error) {
92 | console.log(error)
93 | return new Response(JSON.stringify({ error: "Failed to fetch Twitter User ID" }), {
94 | status: 500,
95 | headers: { 'Content-Type': 'application/json' },
96 | })
97 | }
98 |
99 | const ten_days_ago = getDateDaysAgo(10)
100 | const fields = [
101 | "id",
102 | "text",
103 | "attachments",
104 | "in_reply_to_user_id",
105 | "public_metrics",
106 | "note_tweet"
107 | ].join(',')
108 | const expansions = [
109 | "attachments.media_keys"
110 | ].join(',')
111 | const media_fields = [
112 | "media_key",
113 | "type",
114 | "url"
115 | ].join(',')
116 |
117 | let tweets = false
118 | try {
119 | const url = `https://api.twitter.com/2/users/${userId}/tweets?max_results=100&start_time=${ten_days_ago}&tweet.fields=${fields}&expansions=${expansions}&media.fields=${media_fields}`
120 | console.log(url)
121 | const response = await fetch(url, {
122 | headers: {
123 | Authorization: `Bearer ${session.accessToken}`
124 | }
125 | })
126 |
127 | if (response.status !== 200) {
128 | console.log(response)
129 | return new Response(JSON.stringify({ error: "Twitter API call failed" }), {
130 | status: 500,
131 | headers: { 'Content-Type': 'application/json' },
132 | })
133 | }
134 |
135 | tweets = await response.json()
136 | } catch (error) {
137 | return new Response(JSON.stringify({ error: "Error fetching recent tweets" }), {
138 | status: 500,
139 | headers: { 'Content-Type': 'application/json' },
140 | })
141 | }
142 |
143 | console.log(util.inspect(tweets,{depth: 4}))
144 |
145 | // list all the tweets
146 | let tweetsList = listTweets(tweets.data)
147 |
148 | // get the top 3 most liked tweets for highlights
149 | let mostLiked = tweets.data.sort((a, b) => b.public_metrics.like_count - a.public_metrics.like_count).slice(0, 3)
150 | let mostLikedList = listTweets(mostLiked)
151 |
152 | const llm = new OpenAI({
153 | model: "gpt-4o-mini",
154 | temperature: 0.2,
155 | streaming: true,
156 | apiKey: process.env.OPENAI_API_KEY, // LlamaIndex.TS expects apiKey; openai_api_key is the Python parameter name
157 | });
158 | // const llm = new Anthropic({
159 | // model: "claude-3-5-sonnet",
160 | // temperature: 0.2,
161 | // streaming: true,
162 | // apiKey: process.env.ANTHROPIC_API_KEY
163 | // })
164 |
165 | const stream = new ReadableStream({
166 | async start(controller) {
167 | const response = await llm.complete({
168 | prompt: `
169 | Your task is to generate a newsletter based on the last 7 days of tweets
170 | from our Twitter account.
171 |
172 | Here are all of the tweets:
173 | ${tweetsList}
174 |
175 | Here is a sample newsletter we have generated previously:
176 | ---------
177 | ${exampleNewsletter}
178 | ---------
179 |
180 | Important features to note:
181 | * The headings should be:
182 | * The Highlights (most liked tweets, see below)
183 | * LlamaCloud & LlamaParse (tweets that mention llamacloud or llamaparse)
184 | * Framework (tweets that are about changes to the llamaindex framework itself)
185 | * Community (everything else)
186 | * Each section should have a bullet point list of items
187 | * Each item should link to the relevant blog post, tutorial, etc. from the tweet.
188 |
189 | The highlights section should focus on the most-liked tweets; these are:
190 | -----------
191 | ${mostLikedList}
192 | -----------
193 | `,
194 | stream: true
195 | })
196 | for await (const chunk of response) {
197 | const message = `data: ${JSON.stringify({ chunk: chunk.text })}\n\n`;
198 | controller.enqueue(encoder.encode(message));
199 | }
200 |
201 | controller.close();
202 | },
203 | });
204 |
205 | return new NextResponse(stream, {
206 | headers: {
207 | 'Content-Type': 'text/event-stream',
208 | 'Cache-Control': 'no-cache',
209 | 'Connection': 'keep-alive',
210 | },
211 | });
212 |
213 | }
214 |
--------------------------------------------------------------------------------
/src/app/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/newsletter-generator/7b7be3eb8e715f12ada8a90e41245b999b0828bb/src/app/favicon.ico
--------------------------------------------------------------------------------
/src/app/globals.css:
--------------------------------------------------------------------------------
1 | :root {
2 | --primary-color: #1DA1F2; /* Twitter blue */
3 | --background-color: #f0f2f5;
4 | --text-color: #333;
5 | --button-text-color: #fff;
6 | }
7 |
8 | body {
9 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
10 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
11 | sans-serif;
12 | background-color: var(--background-color);
13 | color: var(--text-color);
14 | line-height: 1.6;
15 | }
16 |
17 | .main {
18 | max-width: 1200px;
19 | margin: 0 auto;
20 | padding: 2rem;
21 | }
22 |
23 | button {
24 | background-color: var(--primary-color);
25 | color: var(--button-text-color);
26 | border: none;
27 | padding: 10px 20px;
28 | border-radius: 20px;
29 | cursor: pointer;
30 | font-size: 1rem;
31 | transition: background-color 0.3s ease;
32 | }
33 |
34 | button:hover {
35 | background-color: #0f84c7;
36 | }
37 |
38 | button:disabled {
39 | background-color: #ccc;
40 | cursor: not-allowed;
41 | }
42 |
43 | h1, h2, h3, h4, h5, h6 {
44 | color: var(--primary-color);
45 | }
46 |
47 | a {
48 | color: var(--primary-color);
49 | text-decoration: none;
50 | }
51 |
52 | a:hover {
53 | text-decoration: underline;
54 | }
55 |
56 | .login-status {
57 | margin-bottom: 20px;
58 | font-weight: bold;
59 | }
60 |
61 | .login-status button {
62 | margin-left: 10px;
63 | }
64 |
65 | .markdown-container {
66 | display: flex;
67 | gap: 20px;
68 | margin-top: 20px;
69 | }
70 |
71 | .markdown-edit, .markdown-preview {
72 | flex: 1;
73 | background-color: #fff;
74 | border-radius: 8px;
75 | padding: 20px;
76 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
77 | }
78 |
79 | .markdown-edit textarea {
80 | width: calc(100% - 20px);
81 | min-height: 1000px;
82 | max-height: 1000px;
83 | border: 1px solid #ccc;
84 | border-radius: 4px;
85 | padding: 10px;
86 | font-family: monospace;
87 | font-size: 14px;
88 | resize: vertical;
89 | }
90 |
91 | .markdown-preview {
92 | overflow-y: auto;
93 | max-height: 1000px;
94 | }
95 |
--------------------------------------------------------------------------------
/src/app/layout.tsx:
--------------------------------------------------------------------------------
1 | import type { Metadata } from "next";
2 | import { Inter } from "next/font/google";
3 | import { Providers } from "./providers"
4 | import "./globals.css";
5 |
6 | const inter = Inter({ subsets: ["latin"] });
7 |
8 | export const metadata: Metadata = {
9 | title: "Newsletter Generator",
10 | description: "Tries to generate a newsletter from recent tweets",
11 | };
12 |
13 | export default function RootLayout({
14 | children,
15 | }: Readonly<{
16 | children: React.ReactNode;
17 | }>) {
18 | return (
19 | <html lang="en">
20 | <body className={inter.className}>
21 | <Providers>{children}</Providers>
22 | </body>
23 | </html>
24 | );
25 | }
26 |
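The `Providers` component imported above comes from `src/app/providers.js`. For `useSession` (used on the page) to work, next-auth requires the component tree to be wrapped in a `SessionProvider`, so a wrapper like the following is the usual pattern; treat this as a generic sketch of that pattern, not necessarily the file's exact contents:

```jsx
"use client";
// Wraps the app in next-auth's SessionProvider so useSession() works in client components.
import { SessionProvider } from "next-auth/react";

export function Providers({ children }) {
  return <SessionProvider>{children}</SessionProvider>;
}
```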
--------------------------------------------------------------------------------
/src/app/page.module.css:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/newsletter-generator/7b7be3eb8e715f12ada8a90e41245b999b0828bb/src/app/page.module.css
--------------------------------------------------------------------------------
/src/app/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import React, { useState, useEffect, FormEvent } from 'react';
3 | import { useSession, signIn, signOut } from "next-auth/react";
4 | import markdownit from 'markdown-it';
5 |
6 | const md = markdownit();
7 |
8 | function LoginStatus() {
9 | const { data: session } = useSession();
10 | if (session && session.user) {
11 | return (
12 |