├── .env ├── src ├── utils │ ├── color.js │ ├── api.js │ ├── url-parser.js │ └── claude-api.js ├── processors │ ├── canvas-processor.js │ └── token-processor.js ├── index.js └── generators │ └── pseudo-generator.js ├── package.json ├── .gitignore └── README.md /.env: -------------------------------------------------------------------------------- 1 | FIGMA_ACCESS_TOKEN=your_access_token_here 2 | OPENAI_API_KEY=your_openai_api_key_here -------------------------------------------------------------------------------- /src/utils/color.js: -------------------------------------------------------------------------------- 1 | export function rgbToHex(r, g, b) { 2 | const toHex = (n) => { 3 | const hex = n.toString(16); 4 | return hex.length === 1 ? '0' + hex : hex; 5 | }; 6 | return `#${toHex(r)}${toHex(g)}${toHex(b)}`; 7 | } -------------------------------------------------------------------------------- /src/utils/api.js: -------------------------------------------------------------------------------- 1 | import fetch from 'node-fetch'; 2 | 3 | export async function getFigmaFileData(fileId) { 4 | const response = await fetch(`https://api.figma.com/v1/files/${fileId}`, { 5 | headers: { 6 | 'X-Figma-Token': process.env.FIGMA_ACCESS_TOKEN 7 | } 8 | }); 9 | 10 | if (!response.ok) { 11 | throw new Error(`Figma API error: ${response.statusText}`); 12 | } 13 | 14 | return response.json(); 15 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "fig4ai", 3 | "version": "0.1.5", 4 | "description": "A CLI tool to parse Figma URLs and generate AI-powered design rules for your AI editor.", 5 | "main": "src/index.js", 6 | "bin": { 7 | "fig4ai": "./src/index.js" 8 | }, 9 | "type": "module", 10 | "scripts": { 11 | "test": "echo \"Error: no test specified\" && exit 1", 12 | "fig4ai": "node src/index.js" 13 | }, 14 | "keywords": [ 15 | "figma", 16 | "cli", 17 | "url-parser", 18 | "ai", 19 | "design-rules" 20 | ], 21 | "author": "", 22 | "license": "MIT", 23 | "dependencies": { 24 | "@anthropic-ai/sdk": "^0.33.1", 25 | "chalk": "^5.3.0", 26 | "dotenv": "^16.3.1", 27 | "node-fetch": "^3.3.2", 28 | "openai": "^4.24.1", 29 | "ora": "^7.0.1" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/utils/url-parser.js: -------------------------------------------------------------------------------- 1 | export function parseFigmaUrl(url) { 2 | try { 3 | // Handle URLs without protocol 4 | const urlWithProtocol = url.startsWith('http') ? url : `https://${url}`; 5 | const urlObj = new URL(urlWithProtocol); 6 | 7 | if (!urlObj.hostname.includes('figma.com')) { 8 | throw new Error('Not a valid Figma URL'); 9 | } 10 | 11 | const pathParts = urlObj.pathname.split('/').filter(Boolean); 12 | const fileId = pathParts[1]; 13 | const nodeId = urlObj.searchParams.get('node-id'); 14 | 15 | // Extract additional parameters 16 | const page = urlObj.searchParams.get('p'); 17 | const type = urlObj.searchParams.get('t'); 18 | const title = pathParts[2] ? 
decodeURIComponent(pathParts[2]) : null; 19 | 20 | return { 21 | type: pathParts[0], // 'file' or 'design' 22 | fileId, 23 | nodeId, 24 | page, 25 | viewType: type, 26 | title, 27 | fullPath: urlObj.pathname, 28 | originalUrl: url, 29 | params: Object.fromEntries(urlObj.searchParams) 30 | }; 31 | } catch (error) { 32 | throw new Error('Invalid URL format'); 33 | } 34 | } -------------------------------------------------------------------------------- /src/utils/claude-api.js: -------------------------------------------------------------------------------- 1 | import Anthropic from '@anthropic-ai/sdk'; 2 | 3 | export class ClaudeClient { 4 | constructor(apiKey) { 5 | this.client = new Anthropic({ 6 | apiKey: apiKey 7 | }); 8 | } 9 | 10 | async chat(messages, functions, functionCall) { 11 | try { 12 | const systemPrompt = functions ? 13 | `You are a function calling AI. Available functions: ${JSON.stringify(functions)}. 14 | When responding, you must call one of these functions using the exact format: 15 | {"name": "function_name", "arguments": {arg1: value1, arg2: value2}}` : undefined; 16 | 17 | const response = await this.client.messages.create({ 18 | model: 'claude-3-sonnet-20240229', 19 | max_tokens: 4096, 20 | temperature: 0.7, 21 | system: systemPrompt, 22 | messages: messages.map(msg => ({ 23 | role: msg.role === 'user' ? 'user' : 'assistant', 24 | content: msg.content 25 | })) 26 | }); 27 | 28 | if (functions) { 29 | // Parse function call from the response content 30 | try { 31 | const text = response.content[0].text; 32 | // Find the first JSON object in the response 33 | const match = text.match(/\{(?:[^{}]|{[^{}]*})*\}/); 34 | if (match) { 35 | const parsedCall = JSON.parse(match[0]); 36 | if (parsedCall.name && parsedCall.arguments) { 37 | return { 38 | choices: [{ 39 | message: { 40 | function_call: { 41 | name: parsedCall.name, 42 | arguments: JSON.stringify(parsedCall.arguments) 43 | } 44 | } 45 | }] 46 | }; 47 | } 48 | } 49 | // If no valid function call found, throw an error 50 | throw new Error('No valid function call found in response'); 51 | } catch (error) { 52 | console.error('Error parsing function call from Claude response:', error); 53 | throw new Error('Failed to parse function call from response'); 54 | } 55 | } 56 | 57 | return { 58 | choices: [{ 59 | message: { 60 | content: response.content[0].text 61 | } 62 | }] 63 | }; 64 | } catch (error) { 65 | if (error.message === 'Failed to parse function call from response') { 66 | throw error; 67 | } 68 | throw new Error(`Claude API error: ${error.message}`); 69 | } 70 | } 71 | } -------------------------------------------------------------------------------- /src/processors/canvas-processor.js: -------------------------------------------------------------------------------- 1 | export function processCanvases(document) { 2 | if (!document || !document.children) return []; 3 | 4 | return document.children.map(canvas => { 5 | const frames = canvas.children 6 | ?.filter(child => child.type === 'FRAME') 7 | ?.map(frame => ({ 8 | id: frame.id, 9 | name: frame.name, 10 | type: frame.type, 11 | size: { 12 | width: frame.absoluteBoundingBox?.width || null, 13 | height: frame.absoluteBoundingBox?.height || null 14 | }, 15 | position: { 16 | x: frame.x || 0, 17 | y: frame.y || 0 18 | }, 19 | background: frame.backgroundColor, 20 | layoutMode: frame.layoutMode, 21 | itemSpacing: frame.itemSpacing, 22 | padding: { 23 | top: frame.paddingTop, 24 | right: frame.paddingRight, 25 | bottom: frame.paddingBottom, 26 | left: 
frame.paddingLeft 27 | }, 28 | constraints: frame.constraints, 29 | clipsContent: frame.clipsContent, 30 | elements: frame.children?.length || 0 31 | })) || []; 32 | 33 | return { 34 | id: canvas.id, 35 | name: canvas.name, 36 | type: canvas.type, 37 | backgroundColor: canvas.backgroundColor, 38 | children: canvas.children ? canvas.children.length : 0, 39 | size: { 40 | width: canvas.absoluteBoundingBox?.width || null, 41 | height: canvas.absoluteBoundingBox?.height || null 42 | }, 43 | constraints: canvas.constraints || null, 44 | exportSettings: canvas.exportSettings || [], 45 | flowStartingPoints: canvas.flowStartingPoints || [], 46 | prototypeStartNode: canvas.prototypeStartNode || null, 47 | frames 48 | }; 49 | }); 50 | } 51 | 52 | export function processComponentInstances(node, instances = [], parentName = '') { 53 | if (!node) return instances; 54 | 55 | const fullName = parentName ? `${parentName}/${node.name}` : node.name; 56 | 57 | if (node.type === 'INSTANCE') { 58 | instances.push({ 59 | id: node.id, 60 | name: fullName, 61 | componentId: node.componentId, 62 | mainComponent: node.mainComponent, 63 | styles: node.styles || null, 64 | position: { 65 | x: node.x || 0, 66 | y: node.y || 0 67 | }, 68 | size: { 69 | width: node.absoluteBoundingBox?.width || null, 70 | height: node.absoluteBoundingBox?.height || null 71 | } 72 | }); 73 | } 74 | 75 | if (node.children) { 76 | node.children.forEach(child => { 77 | processComponentInstances(child, instances, fullName); 78 | }); 79 | } 80 | 81 | return instances; 82 | } 83 | 84 | export function generateComponentYAML(components, instances) { 85 | // Create a map of component IDs to their instances 86 | const componentMap = new Map(); 87 | components.forEach(comp => { 88 | componentMap.set(comp.id, { 89 | name: comp.name, 90 | type: comp.type, 91 | description: comp.description, 92 | instances: [] 93 | }); 94 | }); 95 | 96 | // Map instances to their components 97 | instances.forEach(instance => { 98 | if (componentMap.has(instance.componentId)) { 99 | componentMap.get(instance.componentId).instances.push({ 100 | id: instance.id, 101 | name: instance.name 102 | }); 103 | } 104 | }); 105 | 106 | // Generate YAML-like string 107 | let yaml = 'components:\n'; 108 | componentMap.forEach((value, key) => { 109 | yaml += ` ${key}:\n`; 110 | yaml += ` name: "${value.name}"\n`; 111 | yaml += ` type: ${value.type}\n`; 112 | if (value.description) { 113 | yaml += ` description: "${value.description}"\n`; 114 | } 115 | if (value.instances.length > 0) { 116 | yaml += ' instances:\n'; 117 | value.instances.forEach(instance => { 118 | yaml += ` - id: ${instance.id}\n`; 119 | yaml += ` name: "${instance.name}"\n`; 120 | }); 121 | } 122 | yaml += '\n'; 123 | }); 124 | 125 | return yaml; 126 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | .pnpm-debug.log* 9 | 10 | # Diagnostic reports (https://nodejs.org/api/report.html) 11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 12 | 13 | # Runtime data 14 | pids 15 | *.pid 16 | *.seed 17 | *.pid.lock 18 | 19 | # Directory for instrumented libs generated by jscoverage/JSCover 20 | lib-cov 21 | 22 | # Coverage directory used by tools like istanbul 23 | coverage 24 | *.lcov 25 | 26 | # nyc test coverage 27 | .nyc_output 28 | 29 | # Grunt intermediate storage 
(https://gruntjs.com/creating-plugins#storing-task-files) 30 | .grunt 31 | 32 | # Bower dependency directory (https://bower.io/) 33 | bower_components 34 | 35 | # node-waf configuration 36 | .lock-wscript 37 | 38 | # Compiled binary addons (https://nodejs.org/api/addons.html) 39 | build/Release 40 | 41 | # Dependency directories 42 | node_modules/ 43 | jspm_packages/ 44 | 45 | # Snowpack dependency directory (https://snowpack.dev/) 46 | web_modules/ 47 | 48 | # TypeScript cache 49 | *.tsbuildinfo 50 | 51 | # Optional npm cache directory 52 | .npm 53 | 54 | # Optional eslint cache 55 | .eslintcache 56 | 57 | # Optional stylelint cache 58 | .stylelintcache 59 | 60 | # Microbundle cache 61 | .rpt2_cache/ 62 | .rts2_cache_cjs/ 63 | .rts2_cache_es/ 64 | .rts2_cache_umd/ 65 | 66 | # Optional REPL history 67 | .node_repl_history 68 | 69 | # Output of 'npm pack' 70 | *.tgz 71 | 72 | # Yarn Integrity file 73 | .yarn-integrity 74 | 75 | # dotenv environment variable files 76 | .env 77 | .env.development.local 78 | .env.test.local 79 | .env.production.local 80 | .env.local 81 | 82 | # parcel-bundler cache (https://parceljs.org/) 83 | .cache 84 | .parcel-cache 85 | 86 | # Next.js build output 87 | .next 88 | out 89 | 90 | # Nuxt.js build / generate output 91 | .nuxt 92 | dist 93 | 94 | # Gatsby files 95 | .cache/ 96 | # Comment in the public line in if your project uses Gatsby and not Next.js 97 | # https://nextjs.org/blog/next-9-1#public-directory-support 98 | # public 99 | 100 | # vuepress build output 101 | .vuepress/dist 102 | 103 | # vuepress v2.x temp and cache directory 104 | .temp 105 | .cache 106 | 107 | # vitepress build output 108 | **/.vitepress/dist 109 | 110 | # vitepress cache directory 111 | **/.vitepress/cache 112 | 113 | # Docusaurus cache and generated files 114 | .docusaurus 115 | 116 | # Serverless directories 117 | .serverless/ 118 | 119 | # FuseBox cache 120 | .fusebox/ 121 | 122 | # DynamoDB Local files 123 | .dynamodb/ 124 | 125 | # TernJS port file 126 | .tern-port 127 | 128 | # Stores VSCode versions used for testing VSCode extensions 129 | .vscode-test 130 | 131 | # yarn v2 132 | .yarn/cache 133 | .yarn/unplugged 134 | .yarn/build-state.yml 135 | .yarn/install-state.gz 136 | .pnp.* 137 | 138 | example/ 139 | .designrules# Logs 140 | logs 141 | *.log 142 | npm-debug.log* 143 | yarn-debug.log* 144 | yarn-error.log* 145 | lerna-debug.log* 146 | .pnpm-debug.log* 147 | 148 | # Diagnostic reports (https://nodejs.org/api/report.html) 149 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 150 | 151 | # Runtime data 152 | pids 153 | *.pid 154 | *.seed 155 | *.pid.lock 156 | 157 | # Directory for instrumented libs generated by jscoverage/JSCover 158 | lib-cov 159 | 160 | # Coverage directory used by tools like istanbul 161 | coverage 162 | *.lcov 163 | 164 | # nyc test coverage 165 | .nyc_output 166 | 167 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 168 | .grunt 169 | 170 | # Bower dependency directory (https://bower.io/) 171 | bower_components 172 | 173 | # node-waf configuration 174 | .lock-wscript 175 | 176 | # Compiled binary addons (https://nodejs.org/api/addons.html) 177 | build/Release 178 | 179 | # Dependency directories 180 | node_modules/ 181 | jspm_packages/ 182 | 183 | # Snowpack dependency directory (https://snowpack.dev/) 184 | web_modules/ 185 | 186 | # TypeScript cache 187 | *.tsbuildinfo 188 | 189 | # Optional npm cache directory 190 | .npm 191 | 192 | # Optional eslint cache 193 | .eslintcache 194 | 195 | # Optional stylelint cache 
196 | .stylelintcache 197 | 198 | # Microbundle cache 199 | .rpt2_cache/ 200 | .rts2_cache_cjs/ 201 | .rts2_cache_es/ 202 | .rts2_cache_umd/ 203 | 204 | # Optional REPL history 205 | .node_repl_history 206 | 207 | # Output of 'npm pack' 208 | *.tgz 209 | 210 | # Yarn Integrity file 211 | .yarn-integrity 212 | 213 | # dotenv environment variable files 214 | .env 215 | .env.development.local 216 | .env.test.local 217 | .env.production.local 218 | .env.local 219 | 220 | # parcel-bundler cache (https://parceljs.org/) 221 | .cache 222 | .parcel-cache 223 | 224 | # Next.js build output 225 | .next 226 | out 227 | 228 | # Nuxt.js build / generate output 229 | .nuxt 230 | dist 231 | 232 | # Gatsby files 233 | .cache/ 234 | # Comment in the public line in if your project uses Gatsby and not Next.js 235 | # https://nextjs.org/blog/next-9-1#public-directory-support 236 | # public 237 | 238 | # vuepress build output 239 | .vuepress/dist 240 | 241 | # vuepress v2.x temp and cache directory 242 | .temp 243 | .cache 244 | 245 | # vitepress build output 246 | **/.vitepress/dist 247 | 248 | # vitepress cache directory 249 | **/.vitepress/cache 250 | 251 | # Docusaurus cache and generated files 252 | .docusaurus 253 | 254 | # Serverless directories 255 | .serverless/ 256 | 257 | # FuseBox cache 258 | .fusebox/ 259 | 260 | # DynamoDB Local files 261 | .dynamodb/ 262 | 263 | # TernJS port file 264 | .tern-port 265 | 266 | # Stores VSCode versions used for testing VSCode extensions 267 | .vscode-test 268 | 269 | # yarn v2 270 | .yarn/cache 271 | .yarn/unplugged 272 | .yarn/build-state.yml 273 | .yarn/install-state.gz 274 | .pnp.* 275 | 276 | .designrules 277 | .env -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # fig4ai 2 | 3 | ![License](https://img.shields.io/badge/license-MIT-blue.svg) 4 | 5 | A CLI tool that uses AI to generate design rules and documentation from your Figma files. It analyzes your Figma designs and automatically extracts design tokens, components, and layout information into a structured format. 6 | 7 | ## Overview 8 | 9 | 10 | 11 | https://github.com/user-attachments/assets/c80b7eee-7027-4872-ae30-5279289ff6f7 12 | 13 | 14 | 15 | ## Features 16 | 17 | - 🎨 Extract design tokens (colors, typography, spacing, effects) 18 | - 🧩 Generate component documentation 19 | - 📐 Analyze layout structures 20 | - 🤖 AI-powered pseudo-code generation 21 | - 🔄 Real-time progress indicators 22 | - 📝 Markdown output format 23 | 24 | ## Run 25 | Run directly with npx: 26 | 27 | ```bash 28 | npx fig4ai [--model=claude|gpt4] [--no-ai] 29 | ``` 30 | 31 | ## IDE Integration 32 | 33 | After generating your `.designrules` file, you can use it with AI-powered IDEs to automatically generate code and configurations: 34 | 35 | ### Cursor, Windsurf, VS Code 36 | 37 | Simply mention the `.designrules` file in your prompts: 38 | 39 | ``` 40 | > Generate a Tailwind config based on @.designrules file 41 | ``` 42 | ``` 43 | > Create a Vue login page using the design tokens from @.designrules 44 | ``` 45 | ``` 46 | > Build a React component library following @.designrules specifications 47 | ``` 48 | 49 | 50 | The AI will analyze your `.designrules` file and generate code that matches your design system's: 51 | - Color palette 52 | - Typography scales 53 | - Spacing system 54 | - Component structures 55 | - Layout patterns 56 | - Shadow effects 57 | - Border styles 58 | - And more... 
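
For example, the Tailwind prompt above might produce a config along these lines. This is purely illustrative — the token names and values shown here are hypothetical, and the real ones are read from your generated `.designrules` file:

```js
// tailwind.config.js — illustrative sketch only; actual values come from your .designrules tokens.
/** @type {import('tailwindcss').Config} */
export default {
  content: ['./src/**/*.{html,js,jsx,tsx,vue}'],
  theme: {
    extend: {
      colors: {
        // e.g. mapped from PRIMARY / BACKGROUND color token entries
        primary: '#3B82F6',
        background: '#F9FAFB',
      },
      fontFamily: {
        // e.g. mapped from the H1–H6 typography tokens
        heading: ['Inter', 'sans-serif'],
      },
      spacing: {
        // e.g. mapped from auto-layout itemSpacing / padding tokens
        gutter: '24px',
      },
      boxShadow: {
        // e.g. mapped from DROP_SHADOW effect tokens
        card: '0 4px 12px rgba(0, 0, 0, 0.08)',
      },
    },
  },
};
```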
59 | 60 | ## How it Works 61 | 62 | fig4ai follows a sophisticated process to transform your Figma designs into AI-ready context: 63 | 64 | 1. **Data Extraction** 65 | - Connects to Figma API and retrieves comprehensive file data 66 | - Processes complex nested JSON structure containing all design information 67 | 68 | 2. **Design Token Parsing** 69 | - Parses the JSON structure hierarchically: Canvas > Frame > Component / Instance 70 | - Extracts design tokens (colors, typography, spacing, effects) 71 | - Organizes components and their instances with style references 72 | - Maintains relationship between components and their variants 73 | 74 | 3. **AI-Powered Transformation** 75 | - For each Canvas, sends structured data to GPT-4o 76 | - Generates semantic pseudo-code with complete styling context 77 | - Preserves all design decisions, constraints, and relationships 78 | - Includes accessibility considerations and responsive behaviors 79 | 80 | 4. **Structured Documentation** 81 | - Stores all design tokens and pseudo-code representations in `.designrules` 82 | - Uses Markdown format for maximum compatibility 83 | - Maintains hierarchical structure of the design system 84 | - Preserves all style references and component relationships 85 | 86 | 5. **AI Context Integration** 87 | - `.designrules` file serves as a comprehensive design context 88 | - When mentioned in AI-powered IDEs (Cursor/Windsurf), the file is parsed 89 | - AI understands the complete design system and can generate accurate code 90 | - Enables context-aware code generation based on your design system 91 | 92 | In essence, fig4ai transforms your Figma file into a structured AI context, making your design system programmatically accessible to AI tools. 93 | 94 | ## Usage 95 | 96 | ### Command Line 97 | 98 | ```bash 99 | npx fig4ai [--model=claude|gpt4] [--no-ai] 100 | ``` 101 | 102 | Or if you've set `FIGMA_DESIGN_URL` in your `.env` file: 103 | 104 | ```bash 105 | npx fig4ai [--model=claude|gpt4] [--no-ai] 106 | ``` 107 | 108 | ### AI Options 109 | 110 | The tool supports two AI models for enhanced design analysis: 111 | 112 | 1. **Claude (Default)** 113 | - Uses Anthropic's Claude 3 Sonnet model 114 | - Set `CLAUDE_API_KEY` in your environment variables 115 | - Generally better at understanding design context 116 | - More detailed component analysis 117 | 118 | 2. **GPT-4o** 119 | - Uses OpenAI's GPT-4 model 120 | - Set `OPENAI_API_KEY` in your environment variables 121 | - Alternative option if you prefer OpenAI 122 | 123 | You can also run without AI enhancement: 124 | ```bash 125 | npx fig4ai --no-ai 126 | ``` 127 | This will output raw design data in a structured format without AI processing. 128 | 129 | ### Environment Setup 130 | 131 | ```env 132 | # Required 133 | FIGMA_ACCESS_TOKEN=your_figma_token 134 | 135 | # Optional - At least one needed for AI features 136 | CLAUDE_API_KEY=your_claude_api_key 137 | OPENAI_API_KEY=your_openai_api_key 138 | 139 | # Optional 140 | FIGMA_DESIGN_URL=your_default_figma_url 141 | ``` 142 | 143 | ### Output 144 | 145 | The tool generates a `.designrules` file containing: 146 | 147 | - Design token documentation 148 | - Component specifications 149 | - Layout structures 150 | - AI-generated pseudo-code 151 | - Style references 152 | - Accessibility considerations 153 | 154 | ## Contributing 155 | 156 | Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change. 157 | 158 | 1. 
Fork the repository 159 | 2. Create your feature branch (`git checkout -b feature/AmazingFeature`) 160 | 3. Commit your changes (`git commit -m 'Add some AmazingFeature'`) 161 | 4. Push to the branch (`git push origin feature/AmazingFeature`) 162 | 5. Open a Pull Request 163 | 164 | ## License 165 | 166 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 167 | 168 | ## Support 169 | 170 | If you encounter any issues or have questions, please: 171 | 1. Check the [issues page](https://github.com/f/fig4ai/issues) 172 | 2. Create a new issue if your problem isn't already listed 173 | -------------------------------------------------------------------------------- /src/processors/token-processor.js: -------------------------------------------------------------------------------- 1 | import { rgbToHex } from '../utils/color.js'; 2 | 3 | export function processDesignTokens(node, tokens = { 4 | typography: { 5 | headings: { 6 | h1: [], h2: [], h3: [], h4: [], h5: [], h6: [] 7 | }, 8 | body: [], 9 | other: [] 10 | }, 11 | colors: { 12 | primary: [], 13 | secondary: [], 14 | text: [], 15 | background: [], 16 | other: [] 17 | }, 18 | spacing: [], 19 | effects: { 20 | shadows: [], 21 | blurs: [], 22 | other: [] 23 | }, 24 | components: [], 25 | styles: [] 26 | }, parentName = '') { 27 | if (!node) return tokens; 28 | 29 | const fullName = parentName ? `${parentName}/${node.name}` : node.name; 30 | const nameLower = node.name.toLowerCase(); 31 | 32 | // Process node based on type 33 | switch (node.type) { 34 | case 'COMPONENT': 35 | case 'COMPONENT_SET': 36 | tokens.components.push({ 37 | id: node.id, 38 | name: fullName, 39 | type: node.type, 40 | description: node.description || null, 41 | styles: node.styles || null 42 | }); 43 | break; 44 | 45 | case 'TEXT': 46 | const textStyle = { 47 | id: node.id, 48 | name: fullName, 49 | content: node.characters, 50 | style: { 51 | fontFamily: node.style?.fontFamily, 52 | fontWeight: node.style?.fontWeight, 53 | fontSize: node.style?.fontSize, 54 | lineHeight: node.style?.lineHeightPx || node.style?.lineHeight, 55 | letterSpacing: node.style?.letterSpacing, 56 | textCase: node.style?.textCase, 57 | textDecoration: node.style?.textDecoration, 58 | textAlignHorizontal: node.style?.textAlignHorizontal, 59 | paragraphSpacing: node.style?.paragraphSpacing, 60 | fills: node.fills 61 | } 62 | }; 63 | 64 | // Categorize typography 65 | if (nameLower.includes('heading') || nameLower.match(/h[1-6]/)) { 66 | const headingLevel = nameLower.match(/h([1-6])/)?.[1]; 67 | if (headingLevel) { 68 | tokens.typography.headings[`h${headingLevel}`].push(textStyle); 69 | } 70 | } else if (nameLower.includes('body') || nameLower.includes('text') || nameLower.includes('paragraph')) { 71 | tokens.typography.body.push(textStyle); 72 | } else { 73 | tokens.typography.other.push(textStyle); 74 | } 75 | break; 76 | 77 | case 'RECTANGLE': 78 | case 'VECTOR': 79 | case 'ELLIPSE': 80 | if (node.fills && node.fills.length > 0) { 81 | node.fills.forEach(fill => { 82 | if (fill.type === 'SOLID') { 83 | const colorToken = { 84 | id: node.id, 85 | name: fullName, 86 | color: { 87 | r: Math.round(fill.color.r * 255), 88 | g: Math.round(fill.color.g * 255), 89 | b: Math.round(fill.color.b * 255), 90 | a: fill.color.a, 91 | }, 92 | hex: rgbToHex( 93 | Math.round(fill.color.r * 255), 94 | Math.round(fill.color.g * 255), 95 | Math.round(fill.color.b * 255) 96 | ), 97 | opacity: fill.color.a 98 | }; 99 | 100 | // Categorize colors 101 | if 
(nameLower.includes('primary')) { 102 | tokens.colors.primary.push(colorToken); 103 | } else if (nameLower.includes('secondary')) { 104 | tokens.colors.secondary.push(colorToken); 105 | } else if (nameLower.includes('text') || nameLower.includes('typography')) { 106 | tokens.colors.text.push(colorToken); 107 | } else if (nameLower.includes('background') || nameLower.includes('bg')) { 108 | tokens.colors.background.push(colorToken); 109 | } else { 110 | tokens.colors.other.push(colorToken); 111 | } 112 | } 113 | }); 114 | } 115 | 116 | // Process effects 117 | if (node.effects && node.effects.length > 0) { 118 | node.effects.forEach(effect => { 119 | const effectToken = { 120 | id: node.id, 121 | name: fullName, 122 | type: effect.type, 123 | value: effect 124 | }; 125 | 126 | if (effect.type === 'DROP_SHADOW' || effect.type === 'INNER_SHADOW') { 127 | tokens.effects.shadows.push(effectToken); 128 | } else if (effect.type === 'LAYER_BLUR' || effect.type === 'BACKGROUND_BLUR') { 129 | tokens.effects.blurs.push(effectToken); 130 | } else { 131 | tokens.effects.other.push(effectToken); 132 | } 133 | }); 134 | } 135 | break; 136 | 137 | case 'FRAME': 138 | // Process spacing from auto-layout frames 139 | if (node.layoutMode === 'VERTICAL' || node.layoutMode === 'HORIZONTAL') { 140 | tokens.spacing.push({ 141 | id: node.id, 142 | name: fullName, 143 | type: node.layoutMode, 144 | itemSpacing: node.itemSpacing, 145 | padding: { 146 | top: node.paddingTop, 147 | right: node.paddingRight, 148 | bottom: node.paddingBottom, 149 | left: node.paddingLeft 150 | } 151 | }); 152 | } 153 | break; 154 | } 155 | 156 | // Process styles if present 157 | if (node.styles) { 158 | tokens.styles.push({ 159 | id: node.id, 160 | name: fullName, 161 | styles: node.styles 162 | }); 163 | } 164 | 165 | // Recursively process children 166 | if (node.children) { 167 | node.children.forEach(child => { 168 | processDesignTokens(child, tokens, fullName); 169 | }); 170 | } 171 | 172 | return tokens; 173 | } 174 | 175 | export function formatTokenCount(tokens) { 176 | let counts = { 177 | typography: Object.values(tokens.typography.headings).flat().length + 178 | tokens.typography.body.length + 179 | tokens.typography.other.length, 180 | colors: Object.values(tokens.colors).flat().length, 181 | effects: Object.values(tokens.effects).flat().length, 182 | spacing: tokens.spacing.length, 183 | components: tokens.components.length, 184 | styles: tokens.styles.length 185 | }; 186 | return Object.entries(counts) 187 | .map(([key, value]) => `${key}: ${value}`) 188 | .join(', '); 189 | } -------------------------------------------------------------------------------- /src/index.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | // Suppress punycode deprecation warning 4 | process.noDeprecation = true; 5 | 6 | import chalk from 'chalk'; 7 | import dotenv from 'dotenv'; 8 | import { fileURLToPath } from 'url'; 9 | import { dirname, join } from 'path'; 10 | import fs from 'fs'; 11 | import ora from 'ora'; 12 | 13 | import { parseFigmaUrl } from './utils/url-parser.js'; 14 | import { getFigmaFileData } from './utils/api.js'; 15 | import { processDesignTokens, formatTokenCount } from './processors/token-processor.js'; 16 | import { processCanvases, processComponentInstances, generateComponentYAML } from './processors/canvas-processor.js'; 17 | import { generateAllPseudoCode, initializeAI } from './generators/pseudo-generator.js'; 18 | 19 | // Load environment variables 20 
| const __filename = fileURLToPath(import.meta.url); 21 | const __dirname = dirname(__filename); 22 | dotenv.config({ path: join(dirname(__dirname), '.env') }); 23 | 24 | // Validate required environment variables 25 | const requiredEnvVars = { 26 | 'FIGMA_ACCESS_TOKEN': process.env.FIGMA_ACCESS_TOKEN 27 | }; 28 | 29 | // Optional environment variables 30 | const optionalEnvVars = { 31 | 'OPENAI_API_KEY': process.env.OPENAI_API_KEY, 32 | 'CLAUDE_API_KEY': process.env.CLAUDE_API_KEY 33 | }; 34 | 35 | const missingEnvVars = Object.entries(requiredEnvVars) 36 | .filter(([_, value]) => !value) 37 | .map(([key]) => key); 38 | 39 | if (missingEnvVars.length > 0) { 40 | console.error(chalk.red('\nMissing required environment variables:')); 41 | missingEnvVars.forEach(envVar => { 42 | console.error(chalk.yellow(` • ${envVar}`)); 43 | }); 44 | console.error(chalk.blue('\nPlease set these variables in your .env file:')); 45 | console.error(chalk.gray('\n# .env')); 46 | missingEnvVars.forEach(envVar => { 47 | console.error(chalk.gray(`${envVar}=your_${envVar.toLowerCase()}_here`)); 48 | }); 49 | process.exit(1); 50 | } 51 | 52 | // Parse command line arguments 53 | const args = process.argv.slice(2); 54 | const figmaUrl = args[0] || process.env.FIGMA_DESIGN_URL; 55 | const modelArg = args.find(arg => arg.startsWith('--model=')); 56 | const noAI = args.includes('--no-ai'); 57 | const model = modelArg ? modelArg.split('=')[1].toLowerCase() : 'claude'; 58 | 59 | if (!figmaUrl) { 60 | console.error(chalk.red('Please provide a Figma URL')); 61 | console.log(chalk.blue('\nUsage:')); 62 | console.log(' npx fig4ai [--model=claude|gpt4] [--no-ai]'); 63 | console.log(chalk.blue('\nOptions:')); 64 | console.log(' --model=claude|gpt4 Choose AI model (default: claude)'); 65 | console.log(' --no-ai Skip AI enhancements and output raw data'); 66 | console.log(chalk.blue('\nOr set it in your .env file:')); 67 | console.log(chalk.gray('FIGMA_DESIGN_URL=your_figma_url_here')); 68 | process.exit(1); 69 | } 70 | 71 | // Check if AI enhancement is possible and desired 72 | const hasAICapability = !noAI && ((model === 'claude' && process.env.CLAUDE_API_KEY) || 73 | (model === 'gpt4' && process.env.OPENAI_API_KEY)); 74 | 75 | if (noAI) { 76 | console.info(chalk.blue('\nAI enhancement disabled via --no-ai flag.')); 77 | } else if (!hasAICapability) { 78 | console.warn(chalk.yellow('\nNo AI API keys found. 
Running without AI enhancement.')); 79 | console.warn(chalk.gray('To enable AI features, set CLAUDE_API_KEY or OPENAI_API_KEY in your .env file.')); 80 | } 81 | 82 | async function main() { 83 | const spinner = ora(); 84 | try { 85 | // Initialize AI with selected model 86 | initializeAI(model); 87 | 88 | const result = parseFigmaUrl(figmaUrl); 89 | let output = ''; 90 | 91 | // Capture URL details 92 | output += '# Figma Design Rules\n\n'; 93 | output += '## File Information\n'; 94 | output += `Type: ${result.type}\n`; 95 | output += `File ID: ${result.fileId}\n`; 96 | output += `Title: ${result.title || 'Not specified'}\n`; 97 | output += `Node ID: ${result.nodeId || 'Not specified'}\n\n`; 98 | 99 | spinner.start('Processing Figma URL details...'); 100 | spinner.succeed('Figma URL details processed'); 101 | 102 | spinner.start('Fetching file data from Figma API...'); 103 | const figmaData = await getFigmaFileData(result.fileId); 104 | spinner.succeed('Figma file data fetched'); 105 | 106 | output += `File Name: ${figmaData.name}\n`; 107 | output += `Last Modified: ${new Date(figmaData.lastModified).toLocaleString()}\n\n`; 108 | 109 | spinner.start('Processing design tokens...'); 110 | const tokens = processDesignTokens(figmaData.document); 111 | spinner.succeed('Design tokens processed'); 112 | 113 | // Add token summary 114 | output += '## Design Tokens Summary\n'; 115 | output += formatTokenCount(tokens) + '\n\n'; 116 | 117 | spinner.info(`Total tokens found: ${formatTokenCount(tokens)}`); 118 | 119 | // Process and capture detailed token information 120 | spinner.start('Processing typography tokens...'); 121 | output += '## Typography\n\n'; 122 | Object.entries(tokens.typography.headings).forEach(([level, styles]) => { 123 | if (styles.length > 0) { 124 | output += `### ${level.toUpperCase()}\n`; 125 | styles.forEach(style => { 126 | output += `- ${style.name}\n`; 127 | output += ` - Font: ${style.style.fontFamily} (${style.style.fontWeight})\n`; 128 | output += ` - Size: ${style.style.fontSize}px\n`; 129 | output += ` - Line Height: ${style.style.lineHeight}\n`; 130 | if (style.style.letterSpacing) { 131 | output += ` - Letter Spacing: ${style.style.letterSpacing}\n`; 132 | } 133 | output += '\n'; 134 | }); 135 | } 136 | }); 137 | spinner.succeed('Typography tokens processed'); 138 | 139 | spinner.start('Processing color tokens...'); 140 | output += '## Colors\n\n'; 141 | Object.entries(tokens.colors).forEach(([category, colors]) => { 142 | if (colors.length > 0) { 143 | output += `### ${category.toUpperCase()}\n`; 144 | colors.forEach(color => { 145 | output += `- ${color.name}\n`; 146 | output += ` - HEX: ${color.hex}\n`; 147 | output += ` - RGB: ${color.color.r}, ${color.color.g}, ${color.color.b}\n`; 148 | if (color.opacity !== 1) { 149 | output += ` - Opacity: ${color.opacity}\n`; 150 | } 151 | output += '\n'; 152 | }); 153 | } 154 | }); 155 | spinner.succeed('Color tokens processed'); 156 | 157 | // Process canvas information 158 | spinner.start('Processing canvas information...'); 159 | const canvases = processCanvases(figmaData.document); 160 | output += '## Canvases and Frames\n\n'; 161 | canvases.forEach(canvas => { 162 | output += `### ${canvas.name}\n`; 163 | output += `- ID: ${canvas.id}\n`; 164 | output += `- Type: ${canvas.type}\n`; 165 | output += `- Total Elements: ${canvas.children}\n`; 166 | if (canvas.frames && canvas.frames.length > 0) { 167 | output += `\n#### Frames (${canvas.frames.length})\n`; 168 | canvas.frames.forEach(frame => { 169 | output += `\n##### 
${frame.name}\n`; 170 | output += `- ID: ${frame.id}\n`; 171 | if (frame.size.width && frame.size.height) { 172 | output += `- Size: ${frame.size.width}x${frame.size.height}\n`; 173 | } 174 | if (frame.layoutMode) { 175 | output += `- Layout: ${frame.layoutMode}\n`; 176 | output += `- Item Spacing: ${frame.itemSpacing}\n`; 177 | } 178 | }); 179 | } 180 | output += '\n'; 181 | }); 182 | spinner.succeed('Canvas information processed'); 183 | 184 | // Process component instances 185 | spinner.start('Processing component instances...'); 186 | const instances = processComponentInstances(figmaData.document); 187 | output += '## Component Instances\n\n'; 188 | instances.forEach(instance => { 189 | output += `### ${instance.name}\n`; 190 | output += `- ID: ${instance.id}\n`; 191 | output += `- Component ID: ${instance.componentId}\n`; 192 | if (instance.size.width && instance.size.height) { 193 | output += `- Size: ${instance.size.width}x${instance.size.height}\n`; 194 | } 195 | output += '\n'; 196 | }); 197 | spinner.succeed('Component instances processed'); 198 | 199 | // Generate component structure 200 | spinner.start('Generating component structure...'); 201 | output += '## Component Structure\n\n```yaml\n'; 202 | const componentYAML = generateComponentYAML(tokens.components, instances); 203 | output += componentYAML; 204 | output += '```\n\n'; 205 | spinner.succeed('Component structure generated'); 206 | 207 | // Generate pseudo components and frames 208 | spinner.start('Generating pseudo components and frames...'); 209 | const frames = canvases.flatMap(canvas => canvas.frames); 210 | const pseudoCode = await generateAllPseudoCode(tokens.components, instances, frames, tokens, figmaData); 211 | spinner.succeed('Pseudo components and frames generated'); 212 | 213 | // Add pseudo code 214 | output += '## Pseudo Components\n\n```xml\n'; 215 | pseudoCode.components.forEach((component, id) => { 216 | output += component.pseudoCode + '\n\n'; 217 | }); 218 | output += '```\n\n'; 219 | 220 | output += '## Frame Layouts\n\n```xml\n'; 221 | pseudoCode.frames.forEach((frame, id) => { 222 | output += frame.pseudoCode + '\n\n'; 223 | }); 224 | output += '```\n'; 225 | 226 | // Save to .designrules file 227 | spinner.start('Saving design rules...'); 228 | await fs.promises.writeFile('.designrules', output); 229 | spinner.succeed('Design rules saved successfully'); 230 | 231 | } catch (error) { 232 | spinner.fail(chalk.red('Error: ' + error.message)); 233 | process.exit(1); 234 | } 235 | } 236 | 237 | main(); -------------------------------------------------------------------------------- /src/generators/pseudo-generator.js: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | import ora from 'ora'; 3 | import chalk from 'chalk'; 4 | import { rgbToHex } from '../utils/color.js'; 5 | import { ClaudeClient } from '../utils/claude-api.js'; 6 | 7 | let client; 8 | let hasAICapability = false; 9 | 10 | export function initializeAI(model = 'claude') { 11 | // Check if --no-ai flag is present 12 | if (process.argv.includes('--no-ai')) { 13 | hasAICapability = false; 14 | return; 15 | } 16 | 17 | try { 18 | if (model === 'gpt4' && process.env.OPENAI_API_KEY) { 19 | client = new OpenAI({ 20 | apiKey: process.env.OPENAI_API_KEY 21 | }); 22 | hasAICapability = true; 23 | } else if (model === 'claude' && process.env.CLAUDE_API_KEY) { 24 | client = new ClaudeClient(process.env.CLAUDE_API_KEY); 25 | hasAICapability = true; 26 | } 27 | } catch (error) { 28 | 
console.warn(chalk.yellow('Failed to initialize AI client:', error.message)); 29 | hasAICapability = false; 30 | } 31 | } 32 | 33 | async function generatePseudoComponent(component, instance, tokens, figmaData) { 34 | if (!hasAICapability || !client) { 35 | return { 36 | componentName: component.name, 37 | pseudoCode: `# ${component.name}\n\`\`\`\n${JSON.stringify(instance, null, 2)}\n\`\`\`` 38 | }; 39 | } 40 | 41 | // Create a more detailed design system summary with exact values 42 | const designSystem = { 43 | typography: { 44 | headings: Object.fromEntries( 45 | Object.entries(tokens.typography.headings) 46 | .map(([key, styles]) => [key, styles[0]?.style || null]) 47 | .filter(([_, style]) => style !== null) 48 | ), 49 | body: tokens.typography.body[0]?.style || null 50 | }, 51 | colors: { 52 | primary: tokens.colors.primary.map(c => ({ 53 | name: c.name, 54 | hex: c.hex, 55 | rgb: `${c.color.r},${c.color.g},${c.color.b}`, 56 | opacity: c.opacity 57 | })), 58 | secondary: tokens.colors.secondary.map(c => ({ 59 | name: c.name, 60 | hex: c.hex, 61 | rgb: `${c.color.r},${c.color.g},${c.color.b}`, 62 | opacity: c.opacity 63 | })), 64 | text: tokens.colors.text.map(c => ({ 65 | name: c.name, 66 | hex: c.hex, 67 | rgb: `${c.color.r},${c.color.g},${c.color.b}`, 68 | opacity: c.opacity 69 | })), 70 | background: tokens.colors.background.map(c => ({ 71 | name: c.name, 72 | hex: c.hex, 73 | rgb: `${c.color.r},${c.color.g},${c.color.b}`, 74 | opacity: c.opacity 75 | })), 76 | other: tokens.colors.other.map(c => ({ 77 | name: c.name, 78 | hex: c.hex, 79 | rgb: `${c.color.r},${c.color.g},${c.color.b}`, 80 | opacity: c.opacity 81 | })) 82 | }, 83 | spacing: tokens.spacing.map(s => ({ 84 | name: s.name, 85 | value: s.itemSpacing, 86 | padding: s.padding 87 | })), 88 | effects: { 89 | shadows: tokens.effects.shadows.map(s => ({ 90 | name: s.name, 91 | type: s.type, 92 | ...s.value, 93 | color: s.value.color ? 
{ 94 | hex: rgbToHex( 95 | Math.round(s.value.color.r * 255), 96 | Math.round(s.value.color.g * 255), 97 | Math.round(s.value.color.b * 255) 98 | ), 99 | rgb: `${Math.round(s.value.color.r * 255)},${Math.round(s.value.color.g * 255)},${Math.round(s.value.color.b * 255)}`, 100 | opacity: s.value.color.a 101 | } : null 102 | })), 103 | blurs: tokens.effects.blurs.map(b => ({ 104 | name: b.name, 105 | type: b.type, 106 | ...b.value 107 | })) 108 | } 109 | }; 110 | 111 | // Extract component-specific styles and references 112 | const componentStyles = { 113 | styles: {}, // Will be populated with expanded styles 114 | fills: instance.fills?.map(fill => { 115 | if (fill.type === 'SOLID') { 116 | // Check if this fill comes from a style 117 | const styleId = instance.styles?.fills || instance.styles?.fill; 118 | if (styleId) { 119 | // Find the style in tokens 120 | const style = tokens.styles.find(s => s.id === styleId); 121 | // Find the actual style definition in the Figma data 122 | const styleDefinition = figmaData.styles?.[styleId]; 123 | return { 124 | type: fill.type, 125 | styleId, 126 | styleName: style?.name || 'Unknown Style', 127 | styleType: 'fill', 128 | description: styleDefinition?.description || null, 129 | color: { 130 | hex: rgbToHex( 131 | Math.round(fill.color.r * 255), 132 | Math.round(fill.color.g * 255), 133 | Math.round(fill.color.b * 255) 134 | ), 135 | rgb: `${Math.round(fill.color.r * 255)},${Math.round(fill.color.g * 255)},${Math.round(fill.color.b * 255)}`, 136 | opacity: fill.color.a 137 | } 138 | }; 139 | } 140 | return { 141 | type: fill.type, 142 | color: { 143 | hex: rgbToHex( 144 | Math.round(fill.color.r * 255), 145 | Math.round(fill.color.g * 255), 146 | Math.round(fill.color.b * 255) 147 | ), 148 | rgb: `${Math.round(fill.color.r * 255)},${Math.round(fill.color.g * 255)},${Math.round(fill.color.b * 255)}`, 149 | opacity: fill.color.a 150 | } 151 | }; 152 | } 153 | return fill; 154 | }), 155 | effects: instance.effects?.map(effect => { 156 | const styleId = instance.styles?.effects || instance.styles?.effect; 157 | if (styleId) { 158 | const style = tokens.styles.find(s => s.id === styleId); 159 | const styleDefinition = figmaData.styles?.[styleId]; 160 | return { 161 | type: effect.type, 162 | styleId, 163 | styleName: style?.name || 'Unknown Style', 164 | styleType: 'effect', 165 | description: styleDefinition?.description || null, 166 | value: { 167 | ...effect, 168 | color: effect.color ? 
{ 169 | hex: rgbToHex( 170 | Math.round(effect.color.r * 255), 171 | Math.round(effect.color.g * 255), 172 | Math.round(effect.color.b * 255) 173 | ), 174 | rgb: `${Math.round(effect.color.r * 255)},${Math.round(effect.color.g * 255)},${Math.round(effect.color.b * 255)}`, 175 | opacity: effect.color.a 176 | } : null 177 | } 178 | }; 179 | } 180 | return effect; 181 | }) 182 | }; 183 | 184 | // Expand all style references 185 | if (instance.styles) { 186 | Object.entries(instance.styles).forEach(([key, styleId]) => { 187 | const style = tokens.styles.find(s => s.id === styleId); 188 | const styleDefinition = figmaData.styles?.[styleId]; 189 | 190 | componentStyles.styles[key] = { 191 | id: styleId, 192 | name: style?.name || 'Unknown Style', 193 | type: key, 194 | description: styleDefinition?.description || null, 195 | value: styleDefinition || null 196 | }; 197 | }); 198 | } 199 | 200 | const functions = [ 201 | { 202 | name: "create_pseudo_component", 203 | description: "Generate a pseudo-XML component based on Figma component details", 204 | parameters: { 205 | type: "object", 206 | properties: { 207 | componentName: { 208 | type: "string", 209 | description: "The name of the component" 210 | }, 211 | pseudoCode: { 212 | type: "string", 213 | description: "The pseudo-XML code for the component with detailed styling" 214 | } 215 | }, 216 | required: ["componentName", "pseudoCode"] 217 | } 218 | } 219 | ]; 220 | 221 | const prompt = `Design System Details: 222 | 223 | \`\`\` 224 | ${JSON.stringify(designSystem, null, 2)} 225 | \`\`\` 226 | 227 | Component to Generate: 228 | Name: ${component.name} 229 | Type: ${component.type} 230 | Description: ${component.description || 'No description provided'} 231 | Size: ${instance.size.width}x${instance.size.height} 232 | 233 | Component Specific Styles and References: 234 | \`\`\` 235 | ${JSON.stringify(componentStyles, null, 2)} 236 | \`\`\` 237 | 238 | Requirements: 239 | 1. Generate pseudo-XML code that represents this component 240 | 2. Use style references (styleId) when available instead of direct values 241 | 3. Include ALL styling details (colors, shadows, effects) 242 | 4. Use exact color values (HEX and RGB) when no style reference exists 243 | 5. Include shadow and effect details with style references 244 | 6. Specify padding and spacing 245 | 7. Include background colors and gradients 246 | 8. Make it accessible 247 | 9. 
Keep it readable 248 | 249 | Example format: 250 | 260 | 261 | Generate ONLY the pseudo-XML code with detailed styling attributes, preferring style references over direct values.`; 262 | 263 | try { 264 | const completion = await client.chat( 265 | [{ role: "user", content: prompt }], 266 | functions, 267 | { name: "create_pseudo_component" } 268 | ); 269 | 270 | const response = JSON.parse(completion.choices[0].message.function_call.arguments); 271 | return response; 272 | } catch (error) { 273 | console.warn(chalk.yellow(`Skipping pseudo generation for component ${component.name} - ${error.message}`)); 274 | return { 275 | componentName: component.name, 276 | pseudoCode: `# ${component.name}\n${JSON.stringify(instance, null, 2)}` 277 | }; 278 | } 279 | } 280 | 281 | async function generatePseudoFrame(frame, components, tokens, canvas) { 282 | if (!hasAICapability || !client) { 283 | return { 284 | frameName: frame.name, 285 | pseudoCode: `# ${frame.name} (Canvas: ${canvas.name})\n${JSON.stringify(frame, null, 2)}` 286 | }; 287 | } 288 | 289 | const functions = [ 290 | { 291 | name: "create_pseudo_frame", 292 | description: "Generate a pseudo-XML frame layout based on Figma frame details", 293 | parameters: { 294 | type: "object", 295 | properties: { 296 | frameName: { 297 | type: "string", 298 | description: "The name of the frame" 299 | }, 300 | pseudoCode: { 301 | type: "string", 302 | description: "The pseudo-XML code for the frame layout" 303 | } 304 | }, 305 | required: ["frameName", "pseudoCode"] 306 | } 307 | } 308 | ]; 309 | 310 | // Extract frame dimensions and properties for the summary 311 | const frameSize = frame.absoluteBoundingBox ? { 312 | width: frame.absoluteBoundingBox.width, 313 | height: frame.absoluteBoundingBox.height 314 | } : { width: 0, height: 0 }; 315 | 316 | const framePadding = { 317 | top: frame.paddingTop || 0, 318 | right: frame.paddingRight || 0, 319 | bottom: frame.paddingBottom || 0, 320 | left: frame.paddingLeft || 0 321 | }; 322 | 323 | const canvasSize = canvas.absoluteBoundingBox ? { 324 | width: canvas.absoluteBoundingBox.width, 325 | height: canvas.absoluteBoundingBox.height 326 | } : { width: 0, height: 0 }; 327 | 328 | const prompt = `Frame Summary: 329 | Name: ${frame.name} 330 | Size: ${frameSize.width}x${frameSize.height} 331 | Layout: ${frame.layoutMode || 'FREE'} 332 | Spacing: ${frame.itemSpacing || 0} 333 | Padding: ${JSON.stringify(framePadding)} 334 | Elements: ${frame.children?.length || 0} 335 | Position: x=${frame.absoluteBoundingBox?.x || 0}, y=${frame.absoluteBoundingBox?.y || 0} 336 | 337 | Canvas Summary: 338 | Name: ${canvas.name} 339 | Type: ${canvas.type} 340 | Size: ${canvasSize.width}x${canvasSize.height} 341 | 342 | Available Components: 343 | ${components.map(c => `- ${c.name}`).join('\n')} 344 | 345 | Complete Frame Data: 346 | \`\`\` 347 | ${JSON.stringify(frame, null, 2)} 348 | \`\`\` 349 | 350 | Complete Canvas Data: 351 | \`\`\` 352 | ${JSON.stringify(canvas, null, 2)} 353 | \`\`\` 354 | 355 | Requirements: 356 | 1. Generate pseudo-XML layout code for this frame 357 | 2. Use semantic container elements 358 | 3. Include layout attributes (flex, grid, etc.) 359 | 4. Use appropriate spacing and padding 360 | 5. Place components in a logical layout 361 | 6. Consider canvas context for positioning and constraints 362 | 7. Include all text content exactly as specified in the frame data 363 | 8. Preserve all styling information from the frame data 364 | 9. Keep the hierarchy of nested elements 365 | 10. 
Keep it readable while being accurate to the source data 366 | 367 | Example format: 368 | 380 | 381 | 382 | 383 | 384 | 385 | Generate ONLY the pseudo-XML code without any additional explanation. Ensure all text content and styling from the frame data is accurately represented.`; 386 | 387 | try { 388 | const completion = await client.chat( 389 | [{ role: "user", content: prompt }], 390 | functions, 391 | { name: "create_pseudo_frame" } 392 | ); 393 | 394 | const response = JSON.parse(completion.choices[0].message.function_call.arguments); 395 | return response; 396 | } catch (error) { 397 | console.warn(chalk.yellow(`Skipping pseudo generation for frame ${frame.name} - ${error.message}`)); 398 | return { 399 | frameName: frame.name, 400 | pseudoCode: `# ${frame.name} (Canvas: ${canvas.name})\n${JSON.stringify(frame, null, 2)}` 401 | }; 402 | } 403 | } 404 | 405 | export async function generateAllPseudoCode(components, instances, frames, tokens, figmaData) { 406 | const pseudoComponents = new Map(); 407 | const spinner = ora(); 408 | 409 | if (!hasAICapability) { 410 | spinner.info('Running without AI enhancement - will output raw data'); 411 | } 412 | 413 | // Generate components first 414 | spinner.start('Processing components...'); 415 | for (const component of components) { 416 | spinner.text = `Processing component: ${component.name}`; 417 | const componentInstances = instances.filter(i => i.componentId === component.id); 418 | if (componentInstances.length > 0) { 419 | const mainInstance = componentInstances[0]; 420 | const pseudoComponent = await generatePseudoComponent(component, mainInstance, tokens, figmaData); 421 | if (pseudoComponent) { 422 | pseudoComponents.set(component.id, pseudoComponent); 423 | spinner.stop(); 424 | console.log(chalk.green(`✓ Processed component: ${component.name}`)); 425 | spinner.start(); 426 | } 427 | } 428 | } 429 | spinner.succeed('All components processed'); 430 | 431 | spinner.start('Processing frame layouts...'); 432 | const pseudoFrames = new Map(); 433 | 434 | // Generate frames using the components 435 | for (const canvas of figmaData.document.children) { 436 | spinner.stop(); 437 | console.log(chalk.blue(`\nProcessing canvas: ${canvas.name}`)); 438 | spinner.start(); 439 | for (const frame of canvas.children?.filter(child => child.type === 'FRAME') || []) { 440 | spinner.text = `Processing frame: ${frame.name}`; 441 | const pseudoFrame = await generatePseudoFrame(frame, components, tokens, canvas); 442 | if (pseudoFrame) { 443 | pseudoFrames.set(frame.id, pseudoFrame); 444 | spinner.stop(); 445 | console.log(chalk.green(` ✓ Processed frame: ${frame.name}`)); 446 | spinner.start(); 447 | } 448 | } 449 | } 450 | spinner.succeed('All frames processed'); 451 | 452 | return { components: pseudoComponents, frames: pseudoFrames }; 453 | } --------------------------------------------------------------------------------
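
A usage note on the Claude wrapper consumed above: `ClaudeClient.chat()` deliberately returns an OpenAI-style `choices[0].message.function_call` object, so the pseudo-code generators can read the function-call arguments the same way they would from an OpenAI function-calling response. The following is a minimal standalone sketch, assuming it is run from the package root with a valid `CLAUDE_API_KEY`, and using an abbreviated function schema and a hypothetical prompt:

```js
// Minimal sketch — standalone use of the Claude wrapper shown above.
// Assumes CLAUDE_API_KEY is set and the script runs from the package root.
import { ClaudeClient } from './src/utils/claude-api.js';

const client = new ClaudeClient(process.env.CLAUDE_API_KEY);

const completion = await client.chat(
  [{ role: 'user', content: 'Describe a primary button as pseudo-XML.' }],
  [{
    name: 'create_pseudo_component',
    description: 'Generate a pseudo-XML component',
    parameters: {
      type: 'object',
      properties: {
        componentName: { type: 'string' },
        pseudoCode: { type: 'string' }
      },
      required: ['componentName', 'pseudoCode']
    }
  }],
  { name: 'create_pseudo_component' }
);

// The wrapper mirrors OpenAI's response shape, so the arguments arrive as a JSON string.
const args = JSON.parse(completion.choices[0].message.function_call.arguments);
console.log(args.componentName);
console.log(args.pseudoCode);
```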