├── .gitignore ├── .prettierignore ├── .prettierrc ├── LICENSE ├── README.md ├── biome.json ├── build.config.ts ├── package-lock.json ├── package.json ├── src └── index.ts └── tsconfig.json /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | .pnpm-debug.log* 9 | 10 | # Diagnostic reports (https://nodejs.org/api/report.html) 11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 12 | 13 | # Runtime data 14 | pids 15 | *.pid 16 | *.seed 17 | *.pid.lock 18 | 19 | # Directory for instrumented libs generated by jscoverage/JSCover 20 | lib-cov 21 | 22 | # Coverage directory used by tools like istanbul 23 | coverage 24 | *.lcov 25 | 26 | # nyc test coverage 27 | .nyc_output 28 | 29 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 30 | .grunt 31 | 32 | # Bower dependency directory (https://bower.io/) 33 | bower_components 34 | 35 | # node-waf configuration 36 | .lock-wscript 37 | 38 | # Compiled binary addons (https://nodejs.org/api/addons.html) 39 | build/Release 40 | 41 | # Dependency directories 42 | node_modules/ 43 | jspm_packages/ 44 | 45 | # Snowpack dependency directory (https://snowpack.dev/) 46 | web_modules/ 47 | 48 | # TypeScript cache 49 | *.tsbuildinfo 50 | 51 | # Optional npm cache directory 52 | .npm 53 | 54 | # Optional eslint cache 55 | .eslintcache 56 | 57 | # Optional stylelint cache 58 | .stylelintcache 59 | 60 | # Microbundle cache 61 | .rpt2_cache/ 62 | .rts2_cache_cjs/ 63 | .rts2_cache_es/ 64 | .rts2_cache_umd/ 65 | 66 | # Optional REPL history 67 | .node_repl_history 68 | 69 | # Output of 'npm pack' 70 | *.tgz 71 | 72 | # Yarn Integrity file 73 | .yarn-integrity 74 | 75 | # dotenv environment variable files 76 | .env 77 | .env.development.local 78 | .env.test.local 79 | .env.production.local 80 | .env.local 81 | 82 | # parcel-bundler cache (https://parceljs.org/) 83 
| .cache 84 | .parcel-cache 85 | 86 | # Next.js build output 87 | .next 88 | out 89 | 90 | # Nuxt.js build / generate output 91 | .nuxt 92 | dist 93 | 94 | # Gatsby files 95 | .cache/ 96 | # Comment in the public line in if your project uses Gatsby and not Next.js 97 | # https://nextjs.org/blog/next-9-1#public-directory-support 98 | # public 99 | 100 | # vuepress build output 101 | .vuepress/dist 102 | 103 | # vuepress v2.x temp and cache directory 104 | .temp 105 | .cache 106 | 107 | # Docusaurus cache and generated files 108 | .docusaurus 109 | 110 | # Serverless directories 111 | .serverless/ 112 | 113 | # FuseBox cache 114 | .fusebox/ 115 | 116 | # DynamoDB Local files 117 | .dynamodb/ 118 | 119 | # TernJS port file 120 | .tern-port 121 | 122 | # Stores VSCode versions used for testing VSCode extensions 123 | .vscode-test 124 | 125 | # yarn v2 126 | .yarn/cache 127 | .yarn/unplugged 128 | .yarn/build-state.yml 129 | .yarn/install-state.gz 130 | .pnp.* 131 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | dist 2 | .zeabur 3 | node_modules 4 | public/images 5 | 6 | # The MDX files 7 | *.mdx 8 | *.md 9 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "trailingComma": "all", 3 | "tabWidth": 2, 4 | "semi": true, 5 | "singleQuote": true, 6 | "printWidth": 120, 7 | "bracketSpacing": true, 8 | "astroAllowShorthand": true, 9 | "astroOrganizeImportsMode": "All", 10 | "endOfLine": "lf", 11 | "plugins": ["prettier-plugin-astro", "prettier-plugin-organize-imports", "prettier-plugin-astro-organize-imports"], 12 | "overrides": [ 13 | { 14 | "files": "**/*.astro", 15 | "options": { 16 | "parser": "astro" 17 | } 18 | } 19 | ] 20 | } 21 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2024 Yufan Sheng

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Astro Uploader

An uploader for uploading the Astro generated files through the S3 API.
This uploader is based on the [Apache OpenDAL™](https://github.com/apache/opendal). If you have any issues in uploading, it could be an issue in OpenDAL; remember to upgrade OpenDAL to the latest version.

## Installation

```bash
# Use npm
npm install -D astro-uploader

# Use pnpm
pnpm add -D astro-uploader

# Use yarn
yarn add -D astro-uploader
```

```ts
// astro.config.ts
import { defineConfig } from 'astro/config'
import { uploader, type Options } from 'astro-uploader'

export default defineConfig({
  integrations: [
    uploader({
      paths: ['images', 'og', 'cats'],
      endpoint: process.env.S3_ENDPOINT,
      bucket: process.env.S3_BUCKET as string,
      accessKey: process.env.S3_ACCESS_KEY as string,
      secretAccessKey: process.env.S3_SECRET_ACCESS_KEY as string,
    }),
  ],
})
```

### Vite Dependency Optimization

If you run into Vite dependency optimization errors when using this tool, remember to change your Astro configuration by adding the code shown below.

```ts
export default defineConfig({
  vite: {
    // Add this to avoid the needless import optimization in Vite.
    optimizeDeps: { exclude: ['opendal'] },
  },
});
```

## Options

```ts
type Path = {
  // The directory in the astro static build that you want to upload to S3.
  path: string;
  // Whether to upload the files located in the inner directories.
  recursive?: boolean;
  // Whether to keep the original files after uploading.
  keep?: boolean;
  // Whether to override the existing files on S3.
  // By default, a file is overridden only when the content length doesn't match the file size.
  override?: boolean;
};

type Options = {
  // Enable the uploader.
  enable?: boolean;
  // The directories in the astro static build that you want to upload to S3.
  paths: Array<string | Path>;
  // Whether to upload the files located in the inner directories.
  recursive?: boolean;
  // Whether to keep the original files after uploading.
  keep?: boolean;
  // Whether to override the existing files on S3.
  // By default, a file is overridden only when the content length doesn't match the file size.
  override?: boolean;
  // The S3 region, set it if you use AWS S3 service.
  region?: string;
  // The endpoint, set it if you use 3rd-party S3 service.
  endpoint?: string;
  // The name of the bucket.
  bucket: string;
  // The root directory in S3 service that you want to upload files.
  root?: string;
  // The access key id.
  accessKey: string;
  // The secret access key.
  secretAccessKey: string;
  // All the methods in https://docs.rs/opendal/latest/opendal/services/struct.S3.html#implementations can be treated as an extra option.
  extraOptions?: Record<string, string>;
};
```
--------------------------------------------------------------------------------
/biome.json:
--------------------------------------------------------------------------------
{
  "$schema": "https://biomejs.dev/schemas/1.9.4/schema.json",
  "formatter": {
    "enabled": false
  },
  "organizeImports": {
    "enabled": false
  },
  "linter": {
    "enabled": true,
    "rules": {
      "recommended": true,
      "a11y": {
        "useGenericFontNames": "off"
      }
    }
  },
  "vcs": {
    "enabled": true,
    "clientKind": "git",
    "useIgnoreFile": true
  }
}
--------------------------------------------------------------------------------
/build.config.ts:
--------------------------------------------------------------------------------
import { defineBuildConfig } from 'unbuild';

export default defineBuildConfig({
  entries: ['src/index'],
  clean: true,
  declaration: true,
  rollup: {
    emitCJS: true,
  },
  externals: ['astro', 'astro/zod'],
});
--------------------------------------------------------------------------------
/package.json:
-------------------------------------------------------------------------------- 1 | { 2 | "name": "astro-uploader", 3 | "version": "1.2.3", 4 | "description": "A uploader for uploading the Astro generated files through the S3 API.", 5 | "keywords": [ 6 | "Astro", 7 | "S3", 8 | "withastro", 9 | "opendal" 10 | ], 11 | "bugs": { 12 | "url": "https://github.com/syhily/astro-uploader/issues" 13 | }, 14 | "repository": { 15 | "type": "git", 16 | "url": "https://github.com/syhily/astro-uploader" 17 | }, 18 | "license": "MIT", 19 | "author": "Yufan Sheng ", 20 | "sideEffects": false, 21 | "type": "module", 22 | "exports": { 23 | ".": { 24 | "types": "./dist/index.d.mts", 25 | "default": "./dist/index.mjs" 26 | } 27 | }, 28 | "main": "./dist/index.mjs", 29 | "module": "./dist/index.mjs", 30 | "types": "./dist/index.d.ts", 31 | "files": [ 32 | "dist" 33 | ], 34 | "scripts": { 35 | "build": "unbuild", 36 | "lint": "biome check --write . && prettier . --write", 37 | "stub": "unbuild --stub" 38 | }, 39 | "dependencies": { 40 | "mime": "^4.0.6", 41 | "opendal": "^0.47.7", 42 | "rimraf": "^6.0.1" 43 | }, 44 | "devDependencies": { 45 | "@biomejs/biome": "^1.9.4", 46 | "@types/node": "^22.10.5", 47 | "astro": "^5.1.3", 48 | "prettier": "^3.4.2", 49 | "prettier-plugin-astro": "^0.14.1", 50 | "prettier-plugin-astro-organize-imports": "^0.4.11", 51 | "prettier-plugin-organize-imports": "^4.1.0", 52 | "unbuild": "^3.2.0" 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import type { AstroIntegration, AstroIntegrationLogger } from 'astro'; 2 | import { z } from 'astro/zod'; 3 | import mime from 'mime'; 4 | import fs from 'node:fs'; 5 | import path from 'node:path'; 6 | import { Operator } from 'opendal'; 7 | import { rimrafSync } from 'rimraf'; 8 | 9 | type Path = { 10 | // The directory in the astro static build that you want to upload 
to S3. 11 | path: string; 12 | // Whether to upload the files that locates in the inner directory. 13 | recursive?: boolean; 14 | // Whether to keep the original files after uploading. 15 | keep?: boolean; 16 | // Whether to override the existing files on S3. 17 | // It will be override only when the content-length don't match the file size by default. 18 | override?: boolean; 19 | }; 20 | 21 | type Options = { 22 | // Enable the uploader 23 | enable?: boolean; 24 | // The directory in the astro static build that you want to upload to S3. 25 | paths: Array; 26 | // Whether to upload the files that locates in the inner directory. 27 | recursive?: boolean; 28 | // Whether to keep the original files after uploading. 29 | keep?: boolean; 30 | // Whether to override the existing files on S3. 31 | // It will be override only when the content-length don't match the file size by default. 32 | override?: boolean; 33 | // The S3 region, set it if you use AWS S3 service. 34 | region?: string; 35 | // The endpoint, set it if you use 3rd-party S3 service. 36 | endpoint?: string; 37 | // The name of the bucket. 38 | bucket: string; 39 | // The root directory in S3 service that you want to upload files. 40 | root?: string; 41 | // The access key id. 42 | accessKey: string; 43 | // The secret access key. 44 | secretAccessKey: string; 45 | // All the methods in https://docs.rs/opendal/latest/opendal/services/struct.S3.html#implementations can be treated as an extra option. 
46 | extraOptions?: Record; 47 | }; 48 | 49 | const S3Options = z 50 | .object({ 51 | enable: z.boolean().optional().default(true), 52 | paths: z 53 | .array( 54 | z.union([ 55 | z.string(), 56 | z.object({ 57 | path: z.string(), 58 | keep: z.boolean().optional(), 59 | recursive: z.boolean().optional(), 60 | override: z.boolean().optional(), 61 | }), 62 | ]), 63 | ) 64 | .min(1), 65 | keep: z.boolean().optional().default(false), 66 | recursive: z.boolean().optional().default(true), 67 | override: z.boolean().optional().default(false), 68 | region: z.string().min(1).default('auto'), 69 | endpoint: z.string().url().optional(), 70 | bucket: z.string().min(1), 71 | root: z.string().default(''), 72 | accessKey: z.string().min(1), 73 | secretAccessKey: z.string().min(1), 74 | extraOptions: z.record(z.string(), z.string()).default({}), 75 | }) 76 | .strict() 77 | .superRefine((opts, { addIssue }) => { 78 | if (opts.region === 'auto' && opts.endpoint === undefined) { 79 | addIssue({ 80 | fatal: true, 81 | code: 'custom', 82 | message: 'either the region or the endpoint should be provided', 83 | }); 84 | } 85 | }); 86 | 87 | const parseOptions = (opts: Options, logger: AstroIntegrationLogger) => { 88 | try { 89 | const { 90 | enable, 91 | paths, 92 | recursive, 93 | keep, 94 | override, 95 | region, 96 | endpoint, 97 | bucket, 98 | root, 99 | accessKey, 100 | secretAccessKey, 101 | extraOptions, 102 | } = S3Options.parse(opts); 103 | 104 | // Create opendal operator options. 105 | // The common configurations are listed here https://docs.rs/opendal/latest/opendal/services/struct.S3.html#configuration 106 | const options: Record = { 107 | ...extraOptions, 108 | root: root, 109 | bucket: bucket, 110 | region: region, 111 | access_key_id: accessKey, 112 | secret_access_key: secretAccessKey, 113 | }; 114 | if (endpoint !== undefined) { 115 | options.endpoint = endpoint; 116 | } 117 | 118 | const resolvedPaths = paths.map((path) => 119 | typeof path === 'string' 120 | ? 
{ path, recursive, keep, override } 121 | : { 122 | path: path.path, 123 | recursive: path.recursive === undefined ? recursive : path.recursive, 124 | keep: path.keep === undefined ? keep : path.keep, 125 | override: path.override === undefined ? override : path.override, 126 | }, 127 | ); 128 | 129 | return { options, paths: resolvedPaths, enable }; 130 | } catch (err) { 131 | if (err instanceof z.ZodError) { 132 | logger.error(`Uploader options validation error, there are ${err.issues.length} errors:`); 133 | for (const issue of err.issues) { 134 | logger.error(issue.message); 135 | } 136 | } 137 | 138 | throw err; 139 | } 140 | }; 141 | 142 | class Uploader { 143 | private operator: Operator; 144 | 145 | constructor(operator: Operator) { 146 | this.operator = operator; 147 | } 148 | 149 | async isExist(key: string, size: number, override: boolean): Promise { 150 | try { 151 | const { contentLength } = await this.operator.stat(key); 152 | if ((contentLength !== null && contentLength !== BigInt(size)) || override) { 153 | await this.operator.delete(key); 154 | return false; 155 | } 156 | return true; 157 | } catch (err) { 158 | // Just ignore the error for now. If we find better solution for how to handle the opendal error. 159 | if (err instanceof Error) { 160 | const msg = err.toString(); 161 | if (msg.includes('Error: NotFound')) { 162 | return false; 163 | } 164 | } 165 | throw err; 166 | } 167 | } 168 | 169 | async write(key: string, body: Buffer) { 170 | const contentType = mime.getType(key); 171 | await this.operator.write(key, body, { contentType: contentType === null ? 
undefined : contentType }); 172 | } 173 | } 174 | 175 | export const uploader = (opts: Options): AstroIntegration => ({ 176 | name: 'S3 Uploader', 177 | hooks: { 178 | 'astro:build:done': async ({ dir, logger }: { dir: URL; logger: AstroIntegrationLogger }) => { 179 | const { options, paths, enable } = parseOptions(opts, logger); 180 | if (!enable) { 181 | logger.warn('Skip the astro uploader.'); 182 | return; 183 | } 184 | 185 | logger.info('Try to verify the S3 credentials.'); 186 | const operator = new Operator('s3', options); 187 | await operator.check(); 188 | 189 | logger.info(`Start to upload static files in dir ${paths} to S3 compatible backend.`); 190 | const uploader = new Uploader(operator); 191 | for (const current of paths) { 192 | await uploadFile(uploader, logger, current, dir.pathname); 193 | if (!current.keep && current.recursive) { 194 | const resolvedPath = path.join(dir.pathname, current.path); 195 | logger.info(`Remove the path: ${resolvedPath}`); 196 | // Delete the whole path 197 | rimrafSync(resolvedPath); 198 | } 199 | } 200 | 201 | logger.info('Upload all the files successfully.'); 202 | }, 203 | }, 204 | }); 205 | 206 | // Change the windows path into the unix path. 207 | const normalizePath = (current: string): string => { 208 | return current.includes(path.win32.sep) ? 
current.split(path.win32.sep).join(path.posix.sep) : current; 209 | }; 210 | 211 | const uploadFile = async ( 212 | uploader: Uploader, 213 | logger: AstroIntegrationLogger, 214 | current: { 215 | path: string; 216 | recursive: boolean; 217 | keep: boolean; 218 | override: boolean; 219 | }, 220 | buildPath: string, 221 | ) => { 222 | const filePath = path.join(buildPath, current.path); 223 | const fileStats = fs.statSync(filePath); 224 | const isFile = !fileStats.isDirectory(); 225 | const uploadAction = async (key: string) => { 226 | logger.info(`Start to upload file: ${key}`); 227 | const body = fs.readFileSync(filePath); 228 | await uploader.write(key, body); 229 | }; 230 | 231 | if (isFile) { 232 | const key = normalizePath(current.path); 233 | if (await uploader.isExist(key, fileStats.size, current.override)) { 234 | logger.info(`${key} exists on backend, skip.`); 235 | } else { 236 | await uploadAction(key); 237 | } 238 | 239 | if (!current.keep && !current.recursive) { 240 | rimrafSync(current.path); 241 | } 242 | } else { 243 | // Reclusive upload files or only upload the first hierarchy of the files. 
244 | for (const next of fs.readdirSync(filePath)) { 245 | if (next.startsWith('.')) { 246 | continue; 247 | } 248 | 249 | const nextFilePath = path.join(current.path, next); 250 | if (current.recursive || !fs.statSync(nextFilePath).isDirectory()) { 251 | await uploadFile(uploader, logger, { ...current, path: nextFilePath }, buildPath); 252 | } 253 | } 254 | } 255 | }; 256 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/tsconfig.json", 3 | "extends": "astro/tsconfigs/strict", 4 | "compilerOptions": { 5 | "baseUrl": ".", 6 | "strict": true, 7 | "target": "ESNext", 8 | "module": "ESNext", 9 | "strictNullChecks": true, 10 | "paths": { 11 | "@/options": ["./options.ts"], 12 | "@/*": ["src/*"] 13 | } 14 | } 15 | } 16 | --------------------------------------------------------------------------------