├── .editorconfig ├── .eslintrc ├── .gitignore ├── .prettierrc ├── LICENSE ├── README.md ├── examples ├── links-in-portabletext │ ├── README.md │ ├── joinInternalLink.groq │ ├── portableText.js │ ├── post.js │ └── react-portableText.js ├── menu-tree │ ├── README.md │ ├── groq-query.ts │ ├── menu-document.ts │ ├── menuBranch-object.ts │ └── menuItem-object.ts └── studio-with-custom-asset-source │ ├── README.md │ ├── config │ ├── .checksums │ └── @sanity │ │ ├── data-aspects.json │ │ ├── default-layout.json │ │ ├── default-login.json │ │ └── form-builder.json │ ├── package.json │ ├── plugins │ ├── .gitkeep │ └── asset-source-webcam │ │ ├── MyTool.css │ │ ├── MyTool.js │ │ ├── MyToolIcon.js │ │ ├── index.js │ │ └── sanity.json │ ├── sanity.json │ ├── schemas │ ├── author.js │ ├── blockContent.js │ ├── category.js │ ├── post.js │ └── schema.js │ └── static │ ├── .gitkeep │ └── favicon.ico ├── package.json ├── snippets ├── CustomObjectInputWithLanguageFilter.js ├── blocksToText.js ├── conditionalFieldsCustomInputComponent.js ├── convertQuotationMarks.js ├── customDefaultDeskPane.js ├── deleteDocsByFilter.js ├── deleteUnusedAssets.js ├── deskStructureGroupByYear.js ├── deskStructureWithCustomRoles.js ├── deskStructureWithRoles.js ├── doSomethingWithUploadFile.md ├── initialTemplateValueWithCurrentUser.js ├── migrateDocumentType.js ├── migratePortableTextToPlainText.js ├── normalizeAllBlocks.js ├── removeReferenceFromArray ├── renameField.js ├── stringInputWithValuesFromAnotherDocument.js ├── structureForAssetDocs.js ├── test-images │ ├── image01.jpg │ ├── image02.jpg │ ├── image03.jpg │ ├── image04.jpg │ ├── image05.jpg │ ├── image06.jpg │ ├── image07.jpg │ ├── image08.jpg │ ├── image09.jpg │ ├── image10.jpg │ ├── image11.jpg │ ├── image12.jpg │ ├── image13.jpg │ ├── image14.jpg │ ├── image15.jpg │ ├── image16.jpg │ ├── image17.jpg │ ├── image18.jpg │ ├── image19.jpg │ └── image20.jpg ├── uploadImageFromURLandCreateNewDocument.js └── uploadImagesConcurrently.js └── test ├── .eslintrc ├── __snapshots__ ├── blocksToText.test.js.snap └── convertQuotationMarks.test.js.snap ├── blocksToText.test.js ├── convertQuotationMarks.test.js └── fixtures ├── doubleQuotationMarks.fixture.js ├── manyBlocks.fixture.js └── singleQuotationMarks.fixture.js /.editorconfig: -------------------------------------------------------------------------------- 1 | ; editorconfig.org 2 | root = true 3 | charset= utf8 4 | 5 | [*] 6 | end_of_line = lf 7 | insert_final_newline = true 8 | trim_trailing_whitespace = true 9 | indent_style = space 10 | indent_size = 2 11 | 12 | [*.md] 13 | trim_trailing_whitespace = false 14 | -------------------------------------------------------------------------------- /.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ["sanity", "prettier"], 3 | "env": {"node": true, "browser": true}, 4 | "parserOptions": { 5 | "ecmaVersion": 2017 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | *.pid.lock 13 | 14 | # Directory for instrumented libs generated by jscoverage/JSCover 15 | lib-cov 16 | 17 | # Coverage directory used by tools like istanbul 18 | coverage 19 | 20 | # nyc test coverage 21 | .nyc_output 22 | 23 | # Grunt intermediate storage 
(http://gruntjs.com/creating-plugins#storing-task-files) 24 | .grunt 25 | 26 | # Bower dependency directory (https://bower.io/) 27 | bower_components 28 | 29 | # node-waf configuration 30 | .lock-wscript 31 | 32 | # Compiled binary addons (https://nodejs.org/api/addons.html) 33 | build/Release 34 | 35 | # Dependency directories 36 | node_modules/ 37 | jspm_packages/ 38 | 39 | # TypeScript v1 declaration files 40 | typings/ 41 | 42 | # Optional npm cache directory 43 | .npm 44 | 45 | # Optional eslint cache 46 | .eslintcache 47 | 48 | # Optional REPL history 49 | .node_repl_history 50 | 51 | # Output of 'npm pack' 52 | *.tgz 53 | 54 | # Yarn Integrity file 55 | .yarn-integrity 56 | 57 | # dotenv environment variables file 58 | .env 59 | 60 | # next.js build output 61 | .next 62 | 63 | # Lockfiles 64 | yarn.lock 65 | package-lock.json 66 | 67 | # macos finder cache 68 | .DS_Store 69 | 70 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "semi": false, 3 | "printWidth": 100, 4 | "bracketSpacing": false, 5 | "singleQuote": true 6 | } 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Sanity.io 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # sanity-recipes 2 | 3 | Just a collection of recipies / snippets / frequently asked questions about Sanity. 4 | 5 | Note that these things are just meant as inspiration and is usually not documented. You'll usually have to copy + paste the code and possibly adjust it slightly to fit your use case. Some might be published as NPM modules at some point if it makes sense. 
6 | 7 | ## Table of Contents 8 | 9 | - [sanity-recipes](#sanity-recipes) 10 | - [Table of Contents](#table-of-contents) 11 | - [Portable Text Content](#portable-text-content) 12 | - [Portable Text as plain text](#portable-text-as-plain-text) 13 | - [Convert quotation marks in Portable Text](#convert-quotation-marks-in-portable-text) 14 | - [Content Migrations](#content-migrations) 15 | - [Delete unused assets](#delete-unused-assets) 16 | - [Rename fields](#rename-fields) 17 | - [License](#license) 18 | 19 | --- 20 | 21 | ## Portable Text Content 22 | 23 | ### Portable Text as plain text 24 | 25 | Sometimes you just want to skim over a whole bunch of [Portable Text](https://www.portabletext.org) as text. [This function](snippets/blocksToText.js) does this, with some basic options for handling non-text nodes. 26 | 27 | ### Convert quotation marks in Portable Text 28 | 29 | Someone wanted to convert "whatever" to «whatever» when rendering. [`convertQuotationMarks()`](snippets/convertQuotationMarks.js) to the rescue! 30 | 31 | ## Content Migrations 32 | 33 | ### Delete unused assets 34 | 35 | When uploading images and files, sometimes you are left with assets that are no longer in use by any documents. In these cases, you may want to purge the unused assets to free up some space and clear the clutter. This will probably be a core part of the Sanity toolkit at some point, but until then, [this script](snippets/deleteUnusedAssets.js) should help you! 36 | 37 | ### Uploading assets concurrently 38 | 39 | A script that demonstrates how to upload multiple assets (images in this case) concurrently. Edit the projectId, dataset and token. Also, edit the concurrency constant to change how many uploads run in parallel. To run the script: `node snippets/uploadImagesConcurrently.js` 40 | 41 | ### Rename fields 42 | 43 | Sometimes you want to change the name of a field. [This function](snippets/renameField.js) can be run with `sanity exec renameField.js --with-user-credentials` and will perform the migration even while users are working. 44 | 45 | ## Components 46 | 47 | ### Filter-aware Custom Input Component 48 | 49 | The [Sanity documentation](https://sanity.io/docs) contains several examples of how to create a Custom Input Component, but what if you want one that is also aware of language filter options? [This component](snippets/CustomObjectInputWithLanguageFilter.js) provides a good point of departure for making your own. 50 | 51 | ### Conditional fields Custom Input Component 52 | 53 | This [custom input component](snippets/conditionalFieldsCustomInputComponent.js) shows an example of how to render a field conditionally, based on the value of another field in your document. 54 | 55 | ## License 56 | 57 | All code is MIT-licensed. See LICENSE. 58 | -------------------------------------------------------------------------------- /examples/links-in-portabletext/README.md: -------------------------------------------------------------------------------- 1 | # Minimal example of links in Portable Text 2 | 3 | You will often want to have links in rich text. In this example we differentiate between internal and external links in order to take advantage of `references` in Sanity. This means that internal links will prevent the referenced documents from being deleted.
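To make the reference part concrete, this is roughly what a block containing an internal link looks like once it is stored (field names follow the `internalLink` annotation defined in `portableText.js`; the `_key` and `_ref` values are made up for illustration):

```js
// Sketch of a stored block that contains an internal link.
// The markDef's `reference` field holds a strong reference (`_ref`) to the
// linked document, which is what blocks that document from being deleted.
{
  _type: 'block',
  children: [{_type: 'span', text: 'Read the other post', marks: ['a1b2c3']}],
  markDefs: [
    {
      _key: 'a1b2c3',
      _type: 'internalLink',
      reference: {_type: 'reference', _ref: 'id-of-the-linked-post'}
    }
  ]
}
```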
4 | 5 | * `portableText.js` => Configuration of the rich text editor with buttons for internal and external links in the toolbar 6 | * `post.js` => example schema that use the `portableText` configuration 7 | * `react-portableText.js` => a component that returns rich text with links 8 | * `joinInternalLink.groq` => A query in GROQ that demonstrates how to resolve internal links 9 | 10 | ## Learn more 11 | 12 | * [The Block type](https://www.sanity.io/docs/schema-types/block-type) 13 | * [Select in GROQ](https://www.sanity.io/docs/groq/groq-functions) 14 | * [Concatination in GROQ](https://www.sanity.io/docs/groq/groq-operators#object-object-addition-and-concatenation) 15 | * [Portable Text (Block Content) in React](https://github.com/sanity-io/block-content-to-react) 16 | 17 | Join our [community slack](https://slack.sanity.io) to get help. 18 | -------------------------------------------------------------------------------- /examples/links-in-portabletext/joinInternalLink.groq: -------------------------------------------------------------------------------- 1 | *[_type == "post"]{ 2 | ..., 3 | body[]{ 4 | ..., 5 | markDefs[]{ 6 | ..., 7 | // Join the referenced document and get the slug and type 8 | _type == "internalLink" => { 9 | _key, 10 | "slug": @.reference->slug, 11 | "type": @.reference->_type, 12 | // You can also build an URL using string concatination 13 | "url": "https://yourdomain.com/" + @.reference->_type + "/" + @.reference->slug.current 14 | } 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /examples/links-in-portabletext/portableText.js: -------------------------------------------------------------------------------- 1 | import External from 'react-icons/lib/fa/external-link' 2 | import Link from 'react-icons/lib/go/link' 3 | /** 4 | * This is the schema definition for the rich text fields used for 5 | * for this blog studio. When you import it in schemas.js it can be 6 | * reused in other parts of the studio with: 7 | * { 8 | * name: 'someName', 9 | * title: 'Some title', 10 | * type: 'portableText' 11 | * } 12 | */ 13 | export default { 14 | title: 'Portable Text', 15 | name: 'portableText', 16 | type: 'array', 17 | of: [ 18 | { 19 | title: 'Block', 20 | type: 'block', 21 | /** 22 | * Styles let you set what your user can mark up blocks with. These 23 | * corrensponds with HTML tags, but you can set any title or value 24 | * you want and decide how you want to deal with it where you want to 25 | * use your content. 26 | */ 27 | styles: [ 28 | { title: 'Normal', value: 'normal' }, 29 | { title: 'H1', value: 'h1' }, 30 | { title: 'H2', value: 'h2' }, 31 | { title: 'H3', value: 'h3' }, 32 | { title: 'H4', value: 'h4' }, 33 | { title: 'Quote', value: 'blockquote' } 34 | ], 35 | lists: [{ title: 'Bullet', value: 'bullet' }], 36 | /* Marks let you mark up inline text in the block editor. */ 37 | marks: { 38 | /** 39 | * Decorators usually describe a single property – e.g. a typographic preference or highlighting by editors. 40 | * */ 41 | decorators: [ 42 | { title: 'Strong', value: 'strong' }, 43 | { title: 'Emphasis', value: 'em' } 44 | ], 45 | /* Annotations can be any object structure – e.g. a link or a footnote. 
*/ 46 | annotations: [ 47 | { 48 | title: 'Internal link', 49 | name: 'internalLink', 50 | type: 'object', 51 | blockEditor: { 52 | icon: Link 53 | }, 54 | fields: [ 55 | { 56 | name: 'reference', 57 | type: 'reference', 58 | to: [ 59 | { type: 'post' }, 60 | { type: 'author' } 61 | ] 62 | } 63 | ] 64 | }, 65 | { 66 | title: 'External Link', 67 | name: 'externalLink', 68 | type: 'object', 69 | blockEditor: { 70 | icon: External 71 | }, 72 | fields: [ 73 | { 74 | title: 'URL', 75 | name: 'href', 76 | type: 'url', 77 | validation: Rule => Rule.uri({allowRelative: true, scheme: ['https', 'http', 'mailto', 'tel']}) 78 | }, 79 | { 80 | title: 'Open in new tab', 81 | name: 'blank', 82 | description: 'Read https://css-tricks.com/use-target_blank/', 83 | type: 'boolean' 84 | } 85 | ] 86 | } 87 | ] 88 | } 89 | } 90 | ] 91 | } 92 | -------------------------------------------------------------------------------- /examples/links-in-portabletext/post.js: -------------------------------------------------------------------------------- 1 | export default { 2 | name: 'post', 3 | type: 'document', 4 | title: 'Post', 5 | fields: [ 6 | { 7 | name: 'title', 8 | type: 'string', 9 | title: 'Title', 10 | }, 11 | { 12 | name: 'slug', 13 | type: 'slug', 14 | title: 'Slug', 15 | options: { 16 | source: 'title' 17 | } 18 | }, 19 | { 20 | name: 'body', 21 | type: 'portableText', 22 | title: 'Body' 23 | } 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /examples/links-in-portabletext/react-portableText.js: -------------------------------------------------------------------------------- 1 | import { PortableText } from '@portabletext/react' 2 | 3 | const components = { 4 | marks: { 5 | internalLink: ({value, children}) => { 6 | const {_type, slug = {}, url} = value 7 | const href = url ? url : `/${_type}/${slug.current}` 8 | return {children} 9 | }, 10 | externalLink: ({value, children}) => { 11 | // Read https://css-tricks.com/use-target_blank/ 12 | const { blank, href } = value 13 | return blank ? 14 | {children} 15 | : {children} 16 | } 17 | } 18 | } 19 | 20 | const BodyText = (props) => ( 21 | 25 | ) 26 | 27 | export default BodyText 28 | -------------------------------------------------------------------------------- /examples/menu-tree/README.md: -------------------------------------------------------------------------------- 1 | # Self contained menu / tree document 2 | 3 | This is a document type that can be used to build a tree-like structure. Each leaf is a reference to another, existing document. This example allows for one depth of branches - you can make it recursive by allowing branches to be added to branches. 4 | 5 | The accompanying GROQ query builds an easily parseable structure for your frontend application. The file includes a TypeScript projection of what the structure will look like. 
It also includes fallbacks for handling branches with or without attached documents, in this order: 6 | 7 | - A branch item itself can reference a document 8 | - If no document is referenced, the first child of the branch item is referenced 9 | 10 | [Oliver](https://github.com/genox) -------------------------------------------------------------------------------- /examples/menu-tree/groq-query.ts: -------------------------------------------------------------------------------- 1 | export type MenuItemType = { 2 | _key: string; 3 | _type: string; 4 | as: string; 5 | href: string; 6 | name: string; 7 | slug: string; 8 | children?: Array; 9 | }; 10 | 11 | export type MenuType = { 12 | title: string; 13 | slug: string; 14 | children: Array; 15 | }; 16 | 17 | export const menus = ({ locale }) => /* groq */ ` 18 | *[_type == "menu" && locale == "${locale}"]{ 19 | title, 20 | "slug": slug.current, 21 | "children": children[]{ 22 | _type == "menuItem" => { 23 | _key, 24 | _type, 25 | "name": page -> name, 26 | "slug": slug.current, 27 | "slug": page -> slug.current, 28 | "as": "/" + page -> locale + "/" + page -> slug.current, 29 | "href": "/" + page -> _type + "?locale=" + page -> locale + "&slug=" + page -> slug.current, 30 | }, 31 | _type == "menuBranch" => { 32 | ..., 33 | _key, 34 | _type, 35 | name, 36 | "slug": menuItem -> slug.current, 37 | !defined(menuItem) => { 38 | "slug": children[0].page -> slug.current, 39 | "as": "/" + children[0].page -> locale + "/" + children[0].page -> slug.current, 40 | "href": "/" + children[0].page -> _type + "?locale=" + children[0].page -> locale + "&slug=" + children[0].page -> slug.current, 41 | }, 42 | defined(menuItem) => { 43 | "slug": menuItem -> slug.current, 44 | "as": "/" + menuItem -> locale + "/" + menuItem -> slug.current, 45 | "href": "/" + menuItem -> _type + "?locale=" + menuItem -> locale + "&slug=" + menuItem -> slug.current, 46 | }, 47 | "children": children[] { 48 | _key, 49 | "_type": "menuItem", 50 | "name": page -> name, 51 | "slug": page -> slug.current, 52 | "as": "/" + page -> locale + "/" + page -> slug.current, 53 | "href": "/" + page -> _type + "?locale=" + page -> locale + "&slug=" + page -> slug.current, 54 | } 55 | } 56 | } 57 | } 58 | `; 59 | 60 | -------------------------------------------------------------------------------- /examples/menu-tree/menu-document.ts: -------------------------------------------------------------------------------- 1 | export const Menu = { 2 | title: 'Menu', 3 | name: 'menu', 4 | type: 'document', 5 | fields: [ 6 | { 7 | title: 'Title', 8 | name: 'title', 9 | type: 'string', 10 | validation: Rule => Rule.required(), 11 | }, 12 | { 13 | title: 'Slug', 14 | name: 'slug', 15 | type: 'slug', 16 | options: { 17 | source: doc => `${doc.title && doc.title}-${doc.locale && doc.locale}`, 18 | maxLength: 200, 19 | }, 20 | validation: Rule => Rule.required(), 21 | }, 22 | { 23 | title: 'Menu Items', 24 | name: 'children', 25 | type: 'array', 26 | of: [{ type: 'menuItem' }, { type: 'menuBranch' }], 27 | validation: Rule => Rule.required(), 28 | }, 29 | { 30 | title: 'Language', 31 | name: 'locale', 32 | type: 'string', 33 | options: { 34 | list: [ 'de', 'en' ], 35 | layout: 'dropdown', 36 | }, 37 | validation: Rule => Rule.required(), 38 | }, 39 | ], 40 | initialValue: { 41 | locale: 'en', 42 | }, 43 | preview: { 44 | select: { 45 | title: 'slug.current', 46 | subtitle: 'title', 47 | }, 48 | prepare(selection) { 49 | return { 50 | ...selection, 51 | }; 52 | }, 53 | }, 54 | }; 55 | 
-------------------------------------------------------------------------------- /examples/menu-tree/menuBranch-object.ts: -------------------------------------------------------------------------------- 1 | export const MenuBranch = { 2 | title: 'Menu Branch', 3 | name: 'menuBranch', 4 | type: 'object', 5 | fields: [ 6 | { 7 | title: 'Name', 8 | name: 'name', 9 | type: 'string', 10 | validation: Rule => Rule.required(), 11 | }, 12 | { 13 | title: 'Slug', 14 | name: 'slug', 15 | type: 'slug', 16 | options: { 17 | source: (doc, options) => { 18 | const parent = doc.children.find(item => item._key === options.parentPath[1]._key); 19 | return parent.name; 20 | }, 21 | }, 22 | validation: Rule => Rule.required(), 23 | }, 24 | { 25 | title: 'Page', 26 | description: 27 | 'The content item assigned to this branch item. This page is shown if the user clicks on the branch name. Uses the first child item if no page is assigned, e.g. forwarding the user.', 28 | name: 'menuItem', 29 | type: 'reference', 30 | to: [{ type: 'page' }], 31 | }, 32 | { 33 | title: 'Menu Items', 34 | description: 'Child menu entries of this branch', 35 | name: 'children', 36 | type: 'array', 37 | of: [ 38 | { type: 'menuItem' }, 39 | // { type: 'menuBranch' } // Allow menuBranches for infinite recursiveness. Breaks GraphQL though. 40 | ], 41 | validation: Rule => Rule.required(), 42 | }, 43 | ], 44 | preview: { 45 | select: { 46 | title: 'name', 47 | subtitle: '', 48 | media: '', 49 | }, 50 | prepare(selection) { 51 | return { 52 | ...selection, 53 | subtitle: 'Menu Branch', 54 | }; 55 | }, 56 | }, 57 | }; 58 | -------------------------------------------------------------------------------- /examples/menu-tree/menuItem-object.ts: -------------------------------------------------------------------------------- 1 | export const MenuItem = { 2 | title: 'Menu Item', 3 | name: 'menuItem', 4 | type: 'object', 5 | fields: [ 6 | { 7 | title: 'Content Item', 8 | description: 'Reference existing content to be used in the navigation structure.', 9 | name: 'page', 10 | type: 'reference', 11 | to: [{ type: 'page' }], 12 | }, 13 | ], 14 | preview: { 15 | select: { 16 | title: '', 17 | pageSlug: 'page.slug', 18 | pageTitle: 'page.name', 19 | media: '', 20 | }, 21 | prepare(selection) { 22 | const { pageSlug, pageTitle } = selection; 23 | return { 24 | ...selection, 25 | title: pageTitle, 26 | subtitle: `Content Item | ${pageSlug.current}`, 27 | }; 28 | }, 29 | }, 30 | }; 31 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/README.md: -------------------------------------------------------------------------------- 1 | # Tutorial Studio: How to create a custom asset source plugin for Sanity Studio 2 | 3 | Here's the code for the [video tutorial](https://www.youtube.com/watch?v=qdA9gIdYb5s&feature=youtu.be) on how to make a custom asset source plugin. This repo is as is and will not be maintained. 
4 | 5 | You can run this studio locally by [downloading this folder](https://download-directory.github.io/?url=https://github.com/sanity-io/sanity-recipes/tree/master/examples/studio-with-custom-asset-source) and: 6 | 7 | ```bash 8 | # make sure you have the Sanity CLI 9 | # npm i -g @sanity/cli 10 | 11 | sanity install 12 | sanity init 13 | # create a new project of it, or tie it to an exciting project 14 | 15 | # to start the studoi 16 | sanity start 17 | ``` 18 | 19 | - [Documentation for custom asset sources](https://www.sanity.io/docs/custom-asset-sources) 20 | - [Documentation for react-webcam](https://www.npmjs.com/package/react-webcam) 21 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/config/.checksums: -------------------------------------------------------------------------------- 1 | { 2 | "#": "Used by Sanity to keep track of configuration file checksums, do not delete or modify!", 3 | "@sanity/default-layout": "bb034f391ba508a6ca8cd971967cbedeb131c4d19b17b28a0895f32db5d568ea", 4 | "@sanity/default-login": "6fb6d3800aa71346e1b84d95bbcaa287879456f2922372bb0294e30b968cd37f", 5 | "@sanity/form-builder": "b38478227ba5e22c91981da4b53436df22e48ff25238a55a973ed620be5068aa", 6 | "@sanity/data-aspects": "d199e2c199b3e26cd28b68dc84d7fc01c9186bf5089580f2e2446994d36b3cb6" 7 | } 8 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/config/@sanity/data-aspects.json: -------------------------------------------------------------------------------- 1 | { 2 | "listOptions": {} 3 | } 4 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/config/@sanity/default-layout.json: -------------------------------------------------------------------------------- 1 | { 2 | "toolSwitcher": { 3 | "order": [], 4 | "hidden": [] 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/config/@sanity/default-login.json: -------------------------------------------------------------------------------- 1 | { 2 | "providers": { 3 | "mode": "append", 4 | "redirectOnSingle": false, 5 | "entries": [] 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/config/@sanity/form-builder.json: -------------------------------------------------------------------------------- 1 | { 2 | "images": { 3 | "directUploads": true 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "theweekofdam", 3 | "private": true, 4 | "version": "1.0.0", 5 | "description": "", 6 | "main": "package.json", 7 | "author": "Knut Melvær ", 8 | "license": "UNLICENSED", 9 | "scripts": { 10 | "start": "sanity start", 11 | "test": "sanity check" 12 | }, 13 | "keywords": [ 14 | "sanity" 15 | ], 16 | "dependencies": { 17 | "@sanity/base": "^0.146.3", 18 | "@sanity/components": "^0.146.3", 19 | "@sanity/core": "^0.146.0", 20 | "@sanity/default-layout": "^0.146.3", 21 | "@sanity/default-login": "^0.146.0", 22 | "@sanity/desk-tool": "^0.146.3", 23 | "@sanity/vision": "^0.146.3", 24 | "eslint": "^6.7.1", 25 | "global": "^4.4.0", 26 | "prop-types": "^15.6", 27 | "react": "^16.2", 28 | 
"react-dom": "^16.2", 29 | "react-webcam": "^4.0.0" 30 | }, 31 | "devDependencies": {} 32 | } 33 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/plugins/.gitkeep: -------------------------------------------------------------------------------- 1 | User-specific packages can be placed here 2 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/plugins/asset-source-webcam/MyTool.css: -------------------------------------------------------------------------------- 1 | .container { 2 | padding: 10px; 3 | } 4 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/plugins/asset-source-webcam/MyTool.js: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import Dialog from 'part:@sanity/components/dialogs/fullscreen' 3 | import Button from 'part:@sanity/components/buttons/default' 4 | import Webcam from 'react-webcam' 5 | import styles from './MyTool.css' 6 | 7 | const videoConstraints = { 8 | width: 1280, 9 | height: 720, 10 | facingMode: 'user' 11 | } 12 | 13 | function WebcamSource (props) { 14 | const webcamRef = React.useRef(null) 15 | const [imageData, setImageData] = React.useState('') 16 | 17 | const handleCapture = React.useCallback( 18 | () => { 19 | const imageSrc = webcamRef.current.getScreenshot() 20 | setImageData(imageSrc) 21 | }, 22 | [webcamRef] 23 | ) 24 | 25 | const handleSelect = () => { 26 | props.onSelect([ 27 | { 28 | kind: 'base64', 29 | value: imageData, 30 | options: { 31 | originalFilename: `webcam-${new Date().toISOString()}.jpg`, 32 | source: 'my-webcam' 33 | } 34 | } 35 | ]) 36 | } 37 | 38 | return ( 39 | 40 | 48 | 49 | {imageData && ( 50 | 51 | A webcam photo 52 | 53 | 54 | )} 55 | 56 | ) 57 | } 58 | 59 | export default WebcamSource 60 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/plugins/asset-source-webcam/MyToolIcon.js: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | 3 | /** 4 | * Couple of things to note: 5 | * - width and height is set to 1em 6 | * - fill is `currentColor` - this will ensure that the icon looks uniform and 7 | * that the hover/active state works. 
You can of course render anything you 8 | * would like here, but for plugins that are to be used in more than one 9 | * studio, we suggest these rules are followed 10 | **/ 11 | export default () => '📸' 12 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/plugins/asset-source-webcam/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import MyTool from './MyTool' 3 | import MyToolIcon from './MyToolIcon' 4 | 5 | export default { 6 | title: 'Webcam', 7 | name: 'webcam', 8 | icon: MyToolIcon, 9 | component: MyTool 10 | } 11 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/plugins/asset-source-webcam/sanity.json: -------------------------------------------------------------------------------- 1 | { 2 | "parts": [ 3 | { 4 | "name": "part:sanity-plugin-asset-source-webcam/image-asset-source", 5 | "implements": "part:@sanity/form-builder/input/image/asset-source", 6 | "path": "./index.js" 7 | } 8 | ] 9 | } 10 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/sanity.json: -------------------------------------------------------------------------------- 1 | { 2 | "root": true, 3 | "project": { 4 | "name": "📷" 5 | }, 6 | "api": { 7 | "projectId": "18gse3yb", 8 | "dataset": "production" 9 | }, 10 | "plugins": [ 11 | "@sanity/base", 12 | "@sanity/components", 13 | "@sanity/default-layout", 14 | "@sanity/default-login", 15 | "@sanity/desk-tool", 16 | "asset-source-webcam" 17 | ], 18 | "env": { 19 | "development": { 20 | "plugins": [ 21 | "@sanity/vision" 22 | ] 23 | } 24 | }, 25 | "parts": [ 26 | { 27 | "name": "part:@sanity/base/schema", 28 | "path": "./schemas/schema.js" 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/schemas/author.js: -------------------------------------------------------------------------------- 1 | import Webcam from 'part:sanity-plugin-asset-source-webcam/image-asset-source' 2 | 3 | export default { 4 | name: 'author', 5 | title: 'Author', 6 | type: 'document', 7 | fields: [ 8 | { 9 | name: 'name', 10 | title: 'Name', 11 | type: 'string' 12 | }, 13 | { 14 | name: 'slug', 15 | title: 'Slug', 16 | type: 'slug', 17 | options: { 18 | source: 'name', 19 | maxLength: 96 20 | } 21 | }, 22 | { 23 | name: 'image', 24 | title: 'Image', 25 | type: 'image', 26 | options: { 27 | hotspot: true, 28 | sources: [Webcam] 29 | } 30 | }, 31 | { 32 | name: 'bio', 33 | title: 'Bio', 34 | type: 'array', 35 | of: [ 36 | { 37 | title: 'Block', 38 | type: 'block', 39 | styles: [{ title: 'Normal', value: 'normal' }], 40 | lists: [] 41 | } 42 | ] 43 | } 44 | ], 45 | preview: { 46 | select: { 47 | title: 'name', 48 | media: 'image' 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/schemas/blockContent.js: -------------------------------------------------------------------------------- 1 | /** 2 | * This is the schema definition for the rich text fields used for 3 | * for this blog studio. 
When you import it in schemas.js it can be 4 | * reused in other parts of the studio with: 5 | * { 6 | * name: 'someName', 7 | * title: 'Some title', 8 | * type: 'blockContent' 9 | * } 10 | */ 11 | export default { 12 | title: 'Block Content', 13 | name: 'blockContent', 14 | type: 'array', 15 | of: [ 16 | { 17 | title: 'Block', 18 | type: 'block', 19 | // Styles let you set what your user can mark up blocks with. These 20 | // corrensponds with HTML tags, but you can set any title or value 21 | // you want and decide how you want to deal with it where you want to 22 | // use your content. 23 | styles: [ 24 | {title: 'Normal', value: 'normal'}, 25 | {title: 'H1', value: 'h1'}, 26 | {title: 'H2', value: 'h2'}, 27 | {title: 'H3', value: 'h3'}, 28 | {title: 'H4', value: 'h4'}, 29 | {title: 'Quote', value: 'blockquote'} 30 | ], 31 | lists: [{title: 'Bullet', value: 'bullet'}], 32 | // Marks let you mark up inline text in the block editor. 33 | marks: { 34 | // Decorators usually describe a single property – e.g. a typographic 35 | // preference or highlighting by editors. 36 | decorators: [{title: 'Strong', value: 'strong'}, {title: 'Emphasis', value: 'em'}], 37 | // Annotations can be any object structure – e.g. a link or a footnote. 38 | annotations: [ 39 | { 40 | title: 'URL', 41 | name: 'link', 42 | type: 'object', 43 | fields: [ 44 | { 45 | title: 'URL', 46 | name: 'href', 47 | type: 'url' 48 | } 49 | ] 50 | } 51 | ] 52 | } 53 | }, 54 | // You can add additional types here. Note that you can't use 55 | // primitive types such as 'string' and 'number' in the same array 56 | // as a block type. 57 | { 58 | type: 'image', 59 | options: {hotspot: true} 60 | } 61 | ] 62 | } 63 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/schemas/category.js: -------------------------------------------------------------------------------- 1 | export default { 2 | name: 'category', 3 | title: 'Category', 4 | type: 'document', 5 | fields: [ 6 | { 7 | name: 'title', 8 | title: 'Title', 9 | type: 'string' 10 | }, 11 | { 12 | name: 'description', 13 | title: 'Description', 14 | type: 'text' 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/schemas/post.js: -------------------------------------------------------------------------------- 1 | export default { 2 | name: 'post', 3 | title: 'Post', 4 | type: 'document', 5 | fields: [ 6 | { 7 | name: 'title', 8 | title: 'Title', 9 | type: 'string' 10 | }, 11 | { 12 | name: 'slug', 13 | title: 'Slug', 14 | type: 'slug', 15 | options: { 16 | source: 'title', 17 | maxLength: 96 18 | } 19 | }, 20 | { 21 | name: 'author', 22 | title: 'Author', 23 | type: 'reference', 24 | to: {type: 'author'} 25 | }, 26 | { 27 | name: 'mainImage', 28 | title: 'Main image', 29 | type: 'image', 30 | options: { 31 | hotspot: true 32 | } 33 | }, 34 | { 35 | name: 'categories', 36 | title: 'Categories', 37 | type: 'array', 38 | of: [{type: 'reference', to: {type: 'category'}}] 39 | }, 40 | { 41 | name: 'publishedAt', 42 | title: 'Published at', 43 | type: 'datetime' 44 | }, 45 | { 46 | name: 'body', 47 | title: 'Body', 48 | type: 'blockContent' 49 | } 50 | ], 51 | 52 | preview: { 53 | select: { 54 | title: 'title', 55 | author: 'author.name', 56 | media: 'mainImage' 57 | }, 58 | prepare(selection) { 59 | const {author} = selection 60 | return Object.assign({}, selection, { 61 | subtitle: author && `by ${author}` 62 | }) 63 | 
} 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/schemas/schema.js: -------------------------------------------------------------------------------- 1 | // First, we must import the schema creator 2 | import createSchema from 'part:@sanity/base/schema-creator' 3 | 4 | // Then import schema types from any plugins that might expose them 5 | import schemaTypes from 'all:part:@sanity/base/schema-type' 6 | 7 | // We import object and document schemas 8 | import blockContent from './blockContent' 9 | import category from './category' 10 | import post from './post' 11 | import author from './author' 12 | 13 | // Then we give our schema to the builder and provide the result to Sanity 14 | export default createSchema({ 15 | // We name our schema 16 | name: 'default', 17 | // Then proceed to concatenate our document type 18 | // to the ones provided by any plugins that are installed 19 | types: schemaTypes.concat([ 20 | // The following are document types which will appear 21 | // in the studio. 22 | post, 23 | author, 24 | category, 25 | // When added to this list, object types can be used as 26 | // { type: 'typename' } in other document schemas 27 | blockContent 28 | ]) 29 | }) 30 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/static/.gitkeep: -------------------------------------------------------------------------------- 1 | Files placed here will be served by the Sanity server under the `/static`-prefix 2 | -------------------------------------------------------------------------------- /examples/studio-with-custom-asset-source/static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/examples/studio-with-custom-asset-source/static/favicon.ico -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@sanity/sanity-recipes", 3 | "private": true, 4 | "version": "0.0.1", 5 | "description": "Just a collection of recipies / snippets / frequently asked questions about Sanity", 6 | "main": "index.js", 7 | "pre-commit": [ 8 | "test" 9 | ], 10 | "devDependencies": { 11 | "@sanity/client": "^1.149.7", 12 | "eslint": "^4.19.1", 13 | "eslint-config-prettier": "^2.9.0", 14 | "eslint-config-sanity": "^0.132.5", 15 | "jest": "^23.3.0", 16 | "p-map": "^4.0.0", 17 | "pre-commit": "^1.2.2", 18 | "prettier": "^1.13.7" 19 | }, 20 | "scripts": { 21 | "test": "jest" 22 | }, 23 | "repository": { 24 | "type": "git", 25 | "url": "git+https://github.com/sanity-io/sanity-recipes.git" 26 | }, 27 | "keywords": [ 28 | "sanity", 29 | "recipes" 30 | ], 31 | "author": "Sanity.io ", 32 | "license": "MIT", 33 | "bugs": { 34 | "url": "https://github.com/sanity-io/sanity-recipes/issues" 35 | }, 36 | "homepage": "https://github.com/sanity-io/sanity-recipes#readme", 37 | "dependencies": {} 38 | } 39 | -------------------------------------------------------------------------------- /snippets/CustomObjectInputWithLanguageFilter.js: -------------------------------------------------------------------------------- 1 | import PropTypes from 'prop-types' 2 | import React from 'react' 3 | import Fieldset from 'part:@sanity/components/fieldsets/default' 4 | import {setIfMissing} from 
'part:@sanity/form-builder/patch-event' 5 | import {FormBuilderInput} from 'part:@sanity/form-builder' 6 | import filterFieldFn$ from 'part:@sanity/desk-tool/filter-fields-fn?' 7 | 8 | export default class CustomObjectInput extends React.PureComponent { 9 | static propTypes = { 10 | type: PropTypes.shape({ 11 | title: PropTypes.string, 12 | name: PropTypes.string 13 | }).isRequired, 14 | level: PropTypes.number, 15 | value: PropTypes.shape({ 16 | _type: PropTypes.string 17 | }), 18 | focusPath: PropTypes.array.isRequired, 19 | onFocus: PropTypes.func.isRequired, 20 | onChange: PropTypes.func.isRequired, 21 | onBlur: PropTypes.func.isRequired 22 | } 23 | 24 | state = { 25 | filterField: () => true 26 | } 27 | 28 | firstFieldInput = React.createRef() 29 | 30 | handleFieldChange = (field, fieldPatchEvent) => { 31 | const {onChange, type} = this.props 32 | onChange(fieldPatchEvent.prefixAll(field.name).prepend(setIfMissing({_type: type.name}))) 33 | } 34 | 35 | focus() { 36 | this.firstFieldInput.current.focus() 37 | } 38 | 39 | componentDidMount(props) { 40 | if (filterFieldFn$) { 41 | this.filterFieldFnSubscription = filterFieldFn$.subscribe(filterField => 42 | this.setState({filterField}) 43 | ) 44 | } 45 | } 46 | componentWillUnmount(props) { 47 | if (this.filterFieldFnSubscription) { 48 | this.filterFieldFnSubscription.unsubscribe() 49 | } 50 | } 51 | 52 | render() { 53 | const {type, value, level, focusPath, onFocus, onBlur} = this.props 54 | const {filterField} = this.state 55 | return ( 56 |
<Fieldset level={level} legend={type.title} description={type.description}> 57 | <div> 58 | Custom input, yeah! 59 | </div> 60 | <div>
61 | {type.fields.map((field, i) => ( 62 | <FormBuilderInput 63 | level={level + 1} 64 | ref={i === 0 ? this.firstFieldInput : null} 65 | key={field.name} 66 | type={field.type} 67 | value={value && value[field.name]} 68 | onChange={patchEvent => this.handleFieldChange(field, patchEvent)} 69 | path={[field.name]} 70 | focusPath={focusPath} 71 | onFocus={onFocus} 72 | onBlur={onBlur} 73 | filterField={filterField} 74 | /> 75 | ))} 76 | </div>
77 | </Fieldset>
78 | ) 79 | } 80 | } -------------------------------------------------------------------------------- /snippets/blocksToText.js: -------------------------------------------------------------------------------- 1 | const defaults = {nonTextBehavior: 'remove'} 2 | 3 | module.exports = function blocksToText(blocks, opts = {}) { 4 | const options = Object.assign({}, defaults, opts) 5 | return blocks 6 | .map(block => { 7 | if (block._type !== 'block' || !block.children) { 8 | return options.nonTextBehavior === 'remove' ? '' : `[${block._type} block]` 9 | } 10 | 11 | return block.children.map(child => child.text).join('') 12 | }) 13 | .join('\n\n') 14 | } 15 | -------------------------------------------------------------------------------- /snippets/conditionalFieldsCustomInputComponent.js: -------------------------------------------------------------------------------- 1 | // This is an example of _custom_ object type with conditional fields. 2 | // Sanity Studio now supports conditional fields out-of-the-box by passing a callback function to the `hidden` property. Read more here: 3 | // https://www.sanity.io/docs/conditional-fields 4 | import PropTypes from 'prop-types' 5 | import React from 'react' 6 | import Fieldset from 'part:@sanity/components/fieldsets/default' 7 | import {setIfMissing} from 'part:@sanity/form-builder/patch-event' 8 | // FormBuilderInput automatically generates fields from a schema 9 | import {FormBuilderInput} from 'part:@sanity/form-builder' 10 | // a Higher Order Component that passes document values as props 11 | import {withDocument} from 'part:@sanity/form-builder' 12 | 13 | class CustomObjectInput extends React.PureComponent { 14 | static propTypes = { 15 | type: PropTypes.shape({ 16 | title: PropTypes.string, 17 | name: PropTypes.string 18 | }).isRequired, 19 | level: PropTypes.number, 20 | value: PropTypes.shape({ 21 | _type: PropTypes.string 22 | }), 23 | focusPath: PropTypes.array.isRequired, 24 | onFocus: PropTypes.func.isRequired, 25 | onChange: PropTypes.func.isRequired, 26 | onBlur: PropTypes.func.isRequired 27 | } 28 | 29 | firstFieldInput = React.createRef() 30 | 31 | handleFieldChange = (field, fieldPatchEvent) => { 32 | const {onChange, type} = this.props 33 | // Whenever the field input emits a patch event, we need to make sure to each of the included patches 34 | // are prefixed with its field name, e.g. going from: 35 | // {path: [], set: } to {path: [], set: } 36 | // and ensure this input's value exists 37 | onChange(fieldPatchEvent.prefixAll(field.name).prepend(setIfMissing({_type: type.name}))) 38 | } 39 | 40 | focus() { 41 | this.firstFieldInput.current.focus() 42 | } 43 | 44 | render() { 45 | console.log(this.props) 46 | const {document, type, value, level, focusPath, onFocus, onBlur} = this.props 47 | /** 48 | * condition comes from a field in the document schema 49 | * { 50 | * name: 'condition', 51 | * type: 'boolean' 52 | * } 53 | */ 54 | const {condition = false} = document 55 | return ( 56 |
<Fieldset level={level} legend={type.title} description={type.description}> 63 | <div>This is my custom object input with fields</div> 64 | <div>
65 | {type.fields 66 | /** 67 | * You can add any kind of logic here depending on how your 68 | * schemas look and what you want to do: 69 | * 70 | * { 71 | * name: 'aFieldWithConditions', 72 | * type: 'object', 73 | * inputComponent: conditionalFields, 74 | * fields: [ 75 | * { 76 | * name: 'a', 77 | * type: 'string' 78 | * }, 79 | * { 80 | * name: 'b', 81 | * type: 'text' 82 | * } 83 | * ] 84 | * } 85 | * Here the field 'b' will show only if the 'condition' boolean is set to true 86 | */ 87 | .filter(field => (field.name === 'b' ? condition : true)) 88 | .map((field, i) => ( 89 | // Delegate to the generic FormBuilderInput. It will resolve and insert the actual input component 90 | // for the given field type 91 | <FormBuilderInput 92 | level={level + 1} 93 | ref={i === 0 ? this.firstFieldInput : null} 94 | key={field.name} 95 | type={field.type} 96 | value={value && value[field.name]} 97 | onChange={patchEvent => this.handleFieldChange(field, patchEvent)} 98 | path={[field.name]} 99 | focusPath={focusPath} 100 | onFocus={onFocus} 101 | onBlur={onBlur} 102 | /> 103 | ))} 104 | </div>
105 | </Fieldset>
106 | ) 107 | } 108 | } 109 | 110 | export default withDocument(CustomObjectInput) 111 | -------------------------------------------------------------------------------- /snippets/convertQuotationMarks.js: -------------------------------------------------------------------------------- 1 | const defaults = {open: '«', close: '»', find: '"'} 2 | 3 | module.exports = function convertQuotationMarks(blocks, chars = defaults) { 4 | const characters = chars === defaults ? defaults : Object.assign({}, defaults, chars) 5 | const find = characters.find.replace(/([?!${}*:()|=^[\]/\\.+])/g, '\\$1') 6 | const pattern = new RegExp(find, 'g') 7 | 8 | return blocks.map(block => { 9 | if (block._type !== 'block' || !block.children) { 10 | return block 11 | } 12 | 13 | let isOpen = false 14 | const children = block.children.map(child => { 15 | if (child._type !== 'span' || !child.text) { 16 | return child 17 | } 18 | 19 | const text = child.text.replace(pattern, () => { 20 | const char = isOpen ? characters.close : characters.open 21 | isOpen = !isOpen 22 | return char 23 | }) 24 | 25 | return Object.assign({}, child, {text}) 26 | }) 27 | 28 | return Object.assign({}, block, {children}) 29 | }) 30 | } 31 | -------------------------------------------------------------------------------- /snippets/customDefaultDeskPane.js: -------------------------------------------------------------------------------- 1 | // An example of overriding the default Studio Desk pane to add some business logic around menu items. 2 | // You'll need to tell the studio to use this implementation instead of the built-in DefaultPane by adding the following to your sanity.json parts array 3 | /* 4 | { 5 | "implements": "part:@sanity/components/panes/default", 6 | "path": "./customDefaultDeskPane.js" 7 | } 8 | */ 9 | 10 | import React, { 11 | useEffect, useState 12 | } from 'react' 13 | import DefaultPane from '@sanity/components/lib/panes/DefaultPane' // The component we are replacing (wrapping, actually) 14 | import client from 'part:@sanity/base/client' 15 | 16 | const CustomPane = (props) => { 17 | const [groups, setGroups] = useState([]) 18 | 19 | useEffect(() => { 20 | // Get the current users groups. 21 | 22 | // NOTE: The user will need read access to the "system.group" documents in 23 | // order to figure out which groups they are a part of. If they don't have 24 | // access to read those documents, we can never determine membership, so all 25 | // relevant groups should be given the 'read' grant for type 'system.group' 26 | // in the case of custom access control. If you do not have custom access 27 | // control set up, you can instead of querying for these documents: 28 | 29 | // import userStore from 'part:@sanity/base/user' 30 | 31 | // and check the 'role' property of the user returned from the 32 | // userStore.currentUser observable. 33 | client.fetch('* [_type == "system.group" && $identity in members] {_id}') 34 | .then(groups => setGroups(groups.map(g => g._id))) 35 | }, []) 36 | 37 | // Default is to show no menu items, until we can inspect the 38 | // roles (groups) the user is a part of. 39 | let menuItems = [] 40 | if (groups.length) { 41 | // In this example, we check if the user is a member of a 42 | // specific group with _id '_.groups.administrator'. 
43 | if (groups.includes('_.groups.administrator')) { 44 | // Since the user is a part of this group, use the standard menuItems, 45 | // which will include "Create new…" 46 | menuItems = props.menuItems 47 | } else { 48 | // Filter out any actions you don't want to expose to the users who 49 | // do not match the group check above. This can be the standard Create New 50 | // action (item with intent.type === 'create'), but could also be initial 51 | // value template actions etc. Inspect and adjust the code as needed. 52 | menuItems = props.menuItems.filter(item => { 53 | if (item.intent) return item.intent.type !== 'create' 54 | return true 55 | }) 56 | } 57 | } 58 | 59 | // Use the standard DefaultPane and give it our manually changed menuItems 60 | return 61 | } 62 | 63 | CustomPane.propTypes = DefaultPane.propTypes 64 | CustomPane.defaultProps = DefaultPane.defaultProps 65 | 66 | export default CustomPane 67 | -------------------------------------------------------------------------------- /snippets/deleteDocsByFilter.js: -------------------------------------------------------------------------------- 1 | /** 2 | * THIS SCRIPT DELETES DATA! 3 | * 4 | * To use this script: 5 | * 1. Put this script in your studio-folder 6 | * 2. Write a GROQ filter that outputs the documents you want to delete 7 | * 3. Run `sanity dataset export` to backup your dataset before deleting a bunch of documents 8 | * 4. Run `sanity exec deleteDocsByFilter.js --with-user-token` to delete the documents 9 | * 10 | * NOTE: For the time being you should not delete more than ~1000 documents in one transaction. This will change in the future. 11 | * See docs:https://www.sanity.io/docs/http-api/http-mutations#deleting-multiple-documents-by-query 12 | */ 13 | 14 | import client from 'part:@sanity/base/client' 15 | 16 | client 17 | .delete({query: '*[_type == "aDocumentType"]'}) 18 | .then(console.log) 19 | .catch(console.error) 20 | -------------------------------------------------------------------------------- /snippets/deleteUnusedAssets.js: -------------------------------------------------------------------------------- 1 | // This script will find and delete all assets that are not referenced (in use) 2 | // by other documents. Sometimes refered to as "orphaned" assets. 
3 | // 4 | // Place this script somewhere and run it through 5 | // `sanity exec --with-user-token` 6 | 7 | /* eslint-disable no-console */ 8 | import client from 'part:@sanity/base/client' 9 | 10 | const query = ` 11 | *[ _type in ["sanity.imageAsset", "sanity.fileAsset"] ] 12 | {_id, "refs": count(*[ references(^._id) ])} 13 | [ refs == 0 ] 14 | ._id 15 | ` 16 | 17 | client 18 | .fetch(query) 19 | .then(ids => { 20 | if (!ids.length) { 21 | console.log('No assets to delete') 22 | return true 23 | } 24 | 25 | console.log(`Deleting ${ids.length} assets`) 26 | return ids 27 | .reduce((trx, id) => trx.delete(id), client.transaction()) 28 | .commit() 29 | .then(() => console.log('Done!')) 30 | }) 31 | .catch(err => { 32 | if (err.message.includes('Insufficient permissions')) { 33 | console.error(err.message) 34 | console.error('Did you forget to pass `--with-user-token`?') 35 | } else { 36 | console.error(err.stack) 37 | } 38 | }) 39 | -------------------------------------------------------------------------------- /snippets/deskStructureGroupByYear.js: -------------------------------------------------------------------------------- 1 | import S from '@sanity/desk-tool/structure-builder' 2 | import client from 'part:@sanity/base/client' 3 | 4 | export default () => 5 | S.list() 6 | .title('Content') 7 | .items([ 8 | S.listItem() 9 | .title('Posts by year') 10 | .child(() => { 11 | const type = 'post' 12 | return client.fetch('* [_type == $type && defined(publishedAt)] {_id, _type, publishedAt}', { 13 | type 14 | }) 15 | .then(docs => { 16 | // Create a map of years 17 | const years = {} 18 | docs.forEach(d => { 19 | const date = new Date(d.publishedAt) 20 | const year = date.getFullYear() 21 | if (!years[year]) { years[year] = [] } 22 | years[year].push(d._id) 23 | }) 24 | return S.list() 25 | .title('Posts by year') 26 | .id('year') 27 | .items( 28 | Object.keys(years).map(year => { 29 | return S.listItem() 30 | .id(year) 31 | .title(year) 32 | .child( 33 | S.documentList() 34 | .id(type) 35 | .title(`Posts from ${year}`) 36 | .filter(`_id in $ids`) 37 | .params({ ids: years[year] }) 38 | ) 39 | } 40 | ) 41 | ) 42 | }) 43 | }) 44 | ]) 45 | -------------------------------------------------------------------------------- /snippets/deskStructureWithCustomRoles.js: -------------------------------------------------------------------------------- 1 | import S from '@sanity/base/structure-builder' 2 | import client from 'part:@sanity/base/client' 3 | 4 | // A groq query to find all access groups the current user is a member of. This 5 | // includes both built in groups like 'administrator' and any custom groups you 6 | // may have created as part of SSO etc. 7 | const groupQuery = '* [_type == "system.group" && $identity in members] {_id}' 8 | 9 | // A fallback standard Desk structure 10 | 11 | export default () => client.fetch(groupQuery) 12 | 13 | // Convenience: Get the last portion of the group documents '_id' property, 14 | // since we'd like to just work with the string 'editors' instead of 15 | // '_.groups.editors' 16 | .then(docs => docs.map(doc => doc._id.split('.').pop())) 17 | .then(groupNames => { 18 | // groupNames now reflect the groups the current user is a member of 19 | 20 | // Build up an array of items depending on group membership. You may of 21 | // course do this completely different. This is just an example. 
22 | const deskItems = [] 23 | if (groupNames.includes('editors')) { 24 | // Add the items that editors should see 25 | //deskItems.push(...partOfStructureOnlyForEditors) 26 | } 27 | 28 | if (groupNames.includes('wizards')) { 29 | // Add the items that wizards should see 30 | //deskItems.push(...wizardDeskItems) 31 | } 32 | 33 | if (groupNames.includes('translators')) { 34 | // Completely separate desk structure 35 | return S.list().title('Translations').items( 36 | [] // Would contain items only for translators, for instance 37 | ) 38 | } 39 | 40 | return S.list().title('Content').items( 41 | deskItems 42 | ) 43 | }) 44 | .catch(() => { 45 | // In case of any errors fetching the groups, just return some standard 46 | // structure. This will only happen if the query cannot be performed for 47 | // some reason. 48 | return S.list() 49 | .title('Standard structure') 50 | .items([]) 51 | }) 52 | -------------------------------------------------------------------------------- /snippets/deskStructureWithRoles.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Simple example on how to return a structure depending on the logged in user‘s role. 3 | * 4 | * Go to https://www.sanity.io/docs/overview-structure-builder for more information on 5 | * how to configure structure builder. 6 | */ 7 | 8 | import S from '@sanity/base/structure-builder' 9 | import userStore from 'part:@sanity/base/user' 10 | // remember to add rxjs/operators to your dependencies with npm or yarn 11 | import { 12 | map 13 | } from 'rxjs/operators' 14 | 15 | export default () => userStore.me.pipe( 16 | map((user) => { 17 | const {role} = user 18 | 19 | if (role === 'administrator') { 20 | return S.list().title('Admin structure') 21 | // add more structure 22 | } 23 | return S.list().title('Editor structure') 24 | // add more structure 25 | }) 26 | ) 27 | -------------------------------------------------------------------------------- /snippets/doSomethingWithUploadFile.md: -------------------------------------------------------------------------------- 1 | # Do something with a uploaded file and populate a field in the document based on that 2 | 3 | Consider the following schema, where we describe a route with a gpx map file of some sort, and we want to populate a field "bounds" in that document computed from the uploaded file. 
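As a rough, illustrative sketch (not part of the original snippet): if the uploaded asset is a plain GPX file, the bounds could be derived by fetching the file from the asset URL and scanning its `trkpt` elements. The `boundsFromGpx` helper below is hypothetical and assumes standard GPX with `lat`/`lon` attributes plus an environment where `fetch` is available; something like it would slot into the `computeBounds()` function that the input component further down only pretends to implement.

```javascript
// Hypothetical helper: compute a bounding box from a GPX file at `url`.
// Assumes track points of the form <trkpt lat="47.36" lon="8.54">.
async function boundsFromGpx(url) {
  const xml = await fetch(url).then(res => res.text());
  const points = [...xml.matchAll(/<trkpt[^>]*\blat="([^"]+)"[^>]*\blon="([^"]+)"/g)].map(
    ([, lat, lon]) => ({ lat: Number(lat), lon: Number(lon) })
  );
  if (!points.length) return null;
  return {
    north: Math.max(...points.map(p => p.lat)),
    south: Math.min(...points.map(p => p.lat)),
    east: Math.max(...points.map(p => p.lon)),
    west: Math.min(...points.map(p => p.lon))
  };
}
```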
4 | 5 | ## The schema 6 | 7 | ```javascript 8 | import RouteFileInput from "../components/RouteFileInput"; 9 | export default { 10 | name: "route", 11 | title: "Route", 12 | type: "document", 13 | fields: [ 14 | { 15 | name: "title", 16 | title: "Title", 17 | type: "string" 18 | }, 19 | { 20 | title: "Map", 21 | name: "map", 22 | type: "object", 23 | inputComponent: RouteFileInput, 24 | fields: [ 25 | { 26 | title: "GPX file", 27 | name: "mapfile", 28 | type: "file", 29 | }, 30 | { 31 | name: "bounds", 32 | title: "Bounds", 33 | description: "Will be populated by file upload", 34 | type: "string" 35 | } 36 | ] 37 | } 38 | ] 39 | }; 40 | ``` 41 | 42 | ## The input component 43 | 44 | ```javascript 45 | import PropTypes from "prop-types"; 46 | import React from "react"; 47 | import Fieldset from "part:@sanity/components/fieldsets/default"; 48 | import { 49 | setIfMissing, 50 | set, 51 | unset 52 | } from "part:@sanity/form-builder/patch-event"; 53 | import { FormBuilderInput } from "part:@sanity/form-builder"; 54 | import { withDocument } from "part:@sanity/form-builder"; 55 | import sanityClient from "part:@sanity/base/client"; 56 | import { PatchEvent } from "part:@sanity/form-builder"; 57 | 58 | function computeBounds(asset) { 59 | return sanityClient.getDocument(asset._ref).then(asset => { 60 | console.log("Computing bounds for map file", asset.url); 61 | const { url } = asset; 62 | // Fetch file, and compute bounds here then return the result 63 | // (let's pretend it's done here for the sake of the example) 64 | return { north: 0, south: 10, west: 20, east: 40 }; 65 | }); 66 | } 67 | 68 | class CustomObjectInput extends React.PureComponent { 69 | static propTypes = { 70 | type: PropTypes.shape({ 71 | title: PropTypes.string, 72 | name: PropTypes.string 73 | }).isRequired, 74 | level: PropTypes.number, 75 | value: PropTypes.shape({ 76 | _type: PropTypes.string 77 | }), 78 | focusPath: PropTypes.array.isRequired, 79 | onFocus: PropTypes.func.isRequired, 80 | onChange: PropTypes.func.isRequired, 81 | onBlur: PropTypes.func.isRequired 82 | }; 83 | 84 | firstFieldInput = React.createRef(); 85 | 86 | handleFieldChange = (field, fieldPatchEvent) => { 87 | const { onChange, type, document } = this.props; 88 | 89 | // If we see a set patch that sets the asset, use the file to compute the bounds 90 | const setAssetPatch = fieldPatchEvent.patches.find( 91 | patch => 92 | patch.type === "set" && 93 | patch.path.length === 1 && 94 | patch.path[0] === "asset" && 95 | patch.value && 96 | patch.value._ref 97 | ); 98 | if (field.name === "mapfile" && setAssetPatch) { 99 | computeBounds(setAssetPatch.value).then(bounds => { 100 | onChange(PatchEvent.from([set(JSON.stringify(bounds), ["bounds"])])); 101 | }); 102 | } 103 | 104 | // If we see a patch that removes the map asset file, unset the bounds field 105 | if ( 106 | fieldPatchEvent.patches.find( 107 | patch => 108 | patch.type === "unset" && 109 | patch.path.length === 1 && 110 | patch.path[0] === "asset" 111 | ) 112 | ) { 113 | onChange(PatchEvent.from([unset(["bounds"])])); 114 | } 115 | 116 | onChange( 117 | fieldPatchEvent 118 | .prefixAll(field.name) 119 | .prepend(setIfMissing({ _type: type.name })) 120 | ); 121 | }; 122 | 123 | focus() { 124 | this.firstFieldInput.current.focus(); 125 | } 126 | 127 | render() { 128 | const { 129 | document, 130 | type, 131 | value, 132 | level, 133 | focusPath, 134 | onFocus, 135 | onBlur 136 | } = this.props; 137 | return ( 138 |
139 | <Fieldset
140 | level={level}
141 | legend={type.title}
142 | description={type.description}
143 | >
144 | {type.fields.map((field, i) => (
145 | // Delegate to the generic FormBuilderInput. It will resolve and insert the actual input component
146 | // for the given field type
147 | <FormBuilderInput
148 | level={level + 1}
149 | ref={i === 0 ? this.firstFieldInput : null}
150 | key={field.name}
151 | type={field.type}
152 | value={value && value[field.name]}
153 | onChange={patchEvent => this.handleFieldChange(field, patchEvent)}
154 | path={[field.name]}
155 | focusPath={focusPath}
156 | onFocus={onFocus}
157 | onBlur={onBlur}
158 | />
159 | ))}
160 | </Fieldset>
161 |
162 | ); 163 | } 164 | } 165 | 166 | export default withDocument(CustomObjectInput); 167 | 168 | ```
-------------------------------------------------------------------------------- /snippets/initialTemplateValueWithCurrentUser.js: -------------------------------------------------------------------------------- 1 | // initialValueTemplates.js 2 | 3 | /** 4 | * Learn more about initial values on https://www.sanity.io/docs/initial-value-templates-api 5 | */ 6 | 7 | import T from '@sanity/base/initial-value-template-builder' 8 | import userStore from 'part:@sanity/base/user' 9 | 10 | export default [ 11 | ...T.defaults, 12 | T.template({ 13 | id: 'newsStory', 14 | title: 'News story', 15 | schemaType: 'newsStory', 16 | value: async () => { 17 | const {name, id} = await userStore.getUser('me') 18 | return { 19 | author: { 20 | name, 21 | id, 22 | _type: 'author' 23 | }, 24 | } 25 | } 26 | }) 27 | ] 28 | 29 | /* 30 | Shape of the user object from userStore.getUser('me'): 31 | { 32 | "id": "", 33 | "name": "Your Name", 34 | "email": "your@email.com", 35 | "profileImage": "https://url.to.loginprovider/image", 36 | "role": "", 37 | } 38 | */ 39 |
-------------------------------------------------------------------------------- /snippets/migrateDocumentType.js: -------------------------------------------------------------------------------- 1 | import client from "part:@sanity/base/client"; 2 | 3 | // Run this script with: `sanity exec --with-user-token migrations/migrate-document-type.js` 4 | // 5 | // This example shows how you can perform a migration where a document's _type field is changing 6 | // 7 | // This will migrate documents in batches of 10 and continue patching until no more documents are 8 | // returned from the query. 9 | // 10 | // A few things to note: 11 | // - Changing the _type field on a document isn't allowed. The solution to this is to create a duplicate 12 | // with a new _id and _type, then delete the old document and patch all referencing documents with the new _id 13 | // - This script will exit if any of the patches on the referencing documents fail due to a revision mismatch 14 | // (which means the document was edited between fetch => update) 15 | // - The query must eventually return an empty set, or else this script will continue indefinitely 16 | // - There's no guard against losing data on the old document, as it might change between the fetch and the creation of the new one 17 | 18 | const OLD_TYPE = "movie"; 19 | const NEW_TYPE = "film"; 20 | 21 | const fetchDocuments = () => 22 | client.fetch( 23 | `*[_type == $oldType][0...10] {..., "incomingReferences": *[references(^._id)]{...}}`, 24 | { oldType: OLD_TYPE } 25 | ); 26 | 27 | const buildMutations = docs => { 28 | const mutations = []; 29 | 30 | docs.forEach(doc => { 31 | console.log("movie", doc._id); 32 | // Updating a document's _type field isn't allowed, so we have to create a new document and delete the old one 33 | const newDocId = `${doc._id}-migrated`; 34 | const newDocument = { ...doc, ...{ _id: newDocId, _type: NEW_TYPE } }; 35 | delete newDocument.incomingReferences; 36 | delete newDocument._rev; 37 | 38 | mutations.push({ create: newDocument }); 39 | 40 | // Patch each of the incoming references 41 | doc.incomingReferences.forEach(referencingDocument => { 42 | console.log("ref", referencingDocument._id); 43 | // ⚠️ We're assuming the field is named the same as the type!
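// (i.e. each referencing document is assumed to store the reference in a "movie" field, which the patch below unsets and replaces with a "film" field pointing at the new document)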
44 | // There might be another structure involved, perhaps an array, that needs patching 45 | const updatedReference = { 46 | [NEW_TYPE]: { 47 | _ref: newDocId, 48 | _type: "reference" 49 | } 50 | }; 51 | mutations.push({ 52 | id: referencingDocument._id, 53 | patch: { 54 | set: updatedReference, 55 | unset: [OLD_TYPE], 56 | ifRevisionID: referencingDocument._rev 57 | } 58 | }); 59 | }); 60 | 61 | // Apply the delete mutation after references have been changed 62 | mutations.push({ delete: doc._id }); 63 | }); 64 | return mutations.filter(Boolean); 65 | }; 66 | 67 | const createTransaction = mutations => { 68 | return mutations.reduce((tx, mutation) => { 69 | if (mutation.patch) { 70 | return tx.patch(mutation.id, mutation.patch); 71 | } 72 | if (mutation.delete) { 73 | return tx.delete(mutation.delete); 74 | } 75 | if (mutation.create) { 76 | return tx.createIfNotExists(mutation.create); 77 | } 78 | }, client.transaction()); 79 | }; 80 | 81 | const migrateNextBatch = async () => { 82 | const documents = await fetchDocuments(); 83 | if (documents.length === 0) { 84 | console.log("No more documents to migrate!"); 85 | return null; 86 | } 87 | const mutations = buildMutations(documents); 88 | const transaction = createTransaction(mutations); 89 | await transaction.commit(); 90 | return migrateNextBatch(); 91 | }; 92 | 93 | migrateNextBatch().catch(err => { 94 | console.error(JSON.stringify(err, null, 2)); 95 | process.exit(1); 96 | }); 97 |
-------------------------------------------------------------------------------- /snippets/migratePortableTextToPlainText.js: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-console */ 2 | import client from 'part:@sanity/base/client' 3 | 4 | // Run this script with: `sanity exec --with-user-token migrations/migratePortableTextToPlainText.js` 5 | // 6 | // This example shows how you may write a migration script that migrates Portable Text 7 | // into plain text on a specific document type (author). 8 | // This will migrate documents in batches of 100 and continue patching until no more documents are 9 | // returned from the query. 10 | // 11 | // This script can safely be run, even if documents are being concurrently modified by others. 12 | // If a document gets modified in the time between fetch => submit patch, this script will fail, 13 | // but can safely be re-run multiple times until it eventually runs out of documents to migrate. 14 | 15 | // A few things to note: 16 | // - This script will exit if any of the mutations fail due to a revision mismatch (which means the 17 | // document was edited between fetch => update) 18 | // - The query must eventually return an empty set, or else this script will continue indefinitely 19 | 20 | // Fetching documents that match the precondition for the migration.
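// (here: author documents whose bio field still contains an array of Portable Text blocks)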
21 | // NOTE: This query should eventually return an empty set of documents to mark the migration 22 | // as complete 23 | 24 | function blocksToText(blocks) { 25 | return blocks 26 | .filter((blk) => blk._type === 'block') 27 | .map((block) => { 28 | return block.children.map((child) => child.text).join('') 29 | }) 30 | .join('\n\n') 31 | } 32 | 33 | const fetchDocuments = () => 34 | client.fetch(`*[_type == 'author' && defined(bio) && count(bio[_type == 'block']) > 0][0...100] {_id, _rev, bio}`) 35 | 36 | const buildPatches = (docs) => 37 | docs.map((doc) => ({ 38 | id: doc._id, 39 | patch: { 40 | set: {bio: {text: blocksToText(doc.bio), _type: 'text'}}, 41 | // this will cause the migration to fail if any of the documents has been 42 | // modified since it was fetched. 43 | ifRevisionID: doc._rev, 44 | }, 45 | })) 46 | 47 | const createTransaction = (patches) => 48 | patches.reduce((tx, patch) => tx.patch(patch.id, patch.patch), client.transaction()) 49 | 50 | const commitTransaction = (tx) => tx.commit() 51 | 52 | const migrateNextBatch = async () => { 53 | const documents = await fetchDocuments() 54 | const patches = buildPatches(documents) 55 | if (patches.length === 0) { 56 | console.log('No more documents to migrate!') 57 | return null 58 | } 59 | console.log( 60 | `Migrating batch:\n %s`, 61 | patches.map((patch) => `${patch.id} => ${JSON.stringify(patch.patch)}`).join('\n') 62 | ) 63 | const transaction = createTransaction(patches) 64 | await commitTransaction(transaction) 65 | return migrateNextBatch() 66 | } 67 | 68 | migrateNextBatch().catch((err) => { 69 | console.error(err) 70 | process.exit(1) 71 | }) 72 |
-------------------------------------------------------------------------------- /snippets/normalizeAllBlocks.js: -------------------------------------------------------------------------------- 1 | // This will normalize every block in your dataset. For instance if you regret allowing a special decorator. 2 | // Put in the studio root folder and run with SANITY_TOKEN=XXX node ./normalizeAllBlocks.js 3 | // Could be used as an inspiration to migrate some type in your dataset, not just blocks. 4 | 5 | const sanityClient = require("@sanity/client"); 6 | const { normalizeBlock } = require("@sanity/block-tools"); 7 | const { extractWithPath } = require("@sanity/mutator"); 8 | const config = require("./sanity.json"); 9 | 10 | const sanityToken = process.env.SANITY_TOKEN 11 | 12 | if (!sanityToken) { 13 | throw new Error('No Sanity token found. Set with env var SANITY_TOKEN=xxxx') 14 | } 15 | 16 | // Act on all documents 17 | const query = "*[]"; 18 | 19 | // Adjust the decorators to the set you want to allow 20 | const allowedDecorators = [ 21 | "strong", 22 | "em", 23 | "code", 24 | "underline", 25 | "strike-through", 26 | "sub", 27 | "sup" 28 | ]; 29 | 30 | const client = sanityClient({ 31 | projectId: config.api.projectId, 32 | dataset: config.api.dataset, 33 | useCdn: false, 34 | token: sanityToken 35 | }); 36 | 37 | function convertPath(pathArr) { 38 | return pathArr 39 | .map(part => { 40 | if (Number.isInteger(part)) { 41 | return `[${part}]`; 42 | } 43 | return `.${part}`; 44 | }) 45 | .join("") 46 | .substring(1); 47 | } 48 | 49 | client.fetch(query).then(async results => { 50 | const patchedDocuments = []; 51 | for (const result of results) { // process documents sequentially so the summary below is accurate 52 | const matches = extractWithPath('..[_type=="block"]', result); 53 | let patch = client.patch(result._id); 54 | matches.forEach(match => { 55 | const block = match.value; 56 | const path = convertPath(match.path); 57 | const normalizedBlock = normalizeBlock(block, { allowedDecorators }); 58 | const patchData = { [path]: normalizedBlock }; 59 | patch = patch.set(patchData); 60 | }); 61 | const patchLength = patch.operations.set 62 | ? Object.keys(patch.operations.set).length 63 | : 0; 64 | if (patchLength > 0) { 65 | patchedDocuments.push(result._id); 66 | await patch.commit(); 67 | console.log( 68 | `Patched ${patchLength} blocks in document ${result._id}` 69 | ); 70 | } 71 | } 72 | console.log( 73 | `Patched ${patchedDocuments.length} documents with ids: ${JSON.stringify( 74 | patchedDocuments, 75 | null, 76 | 2 77 | )}` 78 | ); 79 | }); 80 |
-------------------------------------------------------------------------------- /snippets/removeReferenceFromArray: -------------------------------------------------------------------------------- 1 | import client from 'part:@sanity/base/client'; 2 | 3 | // Run this script with: `sanity exec --with-user-token removeReferenceFromArray.js` 4 | // 5 | // This example shows how you may write a script that removes references (in this case links) 6 | // whose 'permalink' field contains "org" from an array of references in your document 7 | // This will patch documents in batches of 100 and continue patching until no more documents are 8 | // returned from the query. 9 | // 10 | // This script can safely be run, even if documents are being concurrently modified by others. 11 | // If a document gets modified in the time between fetch => submit patch, this script will fail, 12 | // but can safely be re-run multiple times until it eventually runs out of documents to patch. 13 | 14 | // A few things to note: 15 | // - This script will exit if any of the mutations fail due to a revision mismatch (which means the 16 | // document was edited between fetch => update) 17 | // - The query must eventually return an empty set, or else this script will continue indefinitely. 18 | // - The query on line 25 works to progress through documents because having successfully patched 19 | // the first 100, the patched documents won't show up in a subsequent query. 20 | 21 | // Fetching documents that match the precondition for the migration.
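// (here: page documents whose links array references documents with "org" in their permalink)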
22 | // NOTE: This query should eventually return an empty set of documents to mark the patch 23 | // as complete 24 | const fetchDocuments = () => 25 | client.fetch( 26 | `*[_type == 'page' && defined(links) && references(*[permalink match '*org*']._id)][0...100] {..., "followLinks": links[]->}` 27 | ); 28 | 29 | const buildPatches = docs => 30 | docs.map(doc => { 31 | return { 32 | id: doc._id, 33 | patch: { 34 | unset: doc.followLinks 35 | .filter(elem => { 36 | return elem.permalink.includes('org'); 37 | }) 38 | .map(item => `links[_ref=="${item._id}"]`), 39 | // This will cause the migration to fail if any of the documents has been 40 | // modified since it was fetched. 41 | ifRevisionID: doc._rev 42 | } 43 | }; 44 | }); 45 | 46 | const createTransaction = patches => 47 | patches.reduce( 48 | (tx, patch) => tx.patch(patch.id, patch.patch), 49 | client.transaction() 50 | ); 51 | 52 | const commitTransaction = tx => tx.commit(); 53 | 54 | const migrateNextBatch = async () => { 55 | const documents = await fetchDocuments(); 56 | const patches = buildPatches(documents); 57 | if (patches.length === 0) { 58 | console.log('No more documents to migrate!'); 59 | return null; 60 | } 61 | 62 | console.log( 63 | `Migrating batch:\n %s`, 64 | patches 65 | .map(patch => `${patch.id} => ${JSON.stringify(patch.patch)}`) 66 | .join('\n') 67 | ) 68 | const transaction = createTransaction(patches); 69 | await commitTransaction(transaction); 70 | return migrateNextBatch(); 71 | }; 72 | 73 | migrateNextBatch().catch(error => { 74 | console.error(error); 75 | process.exit(1); 76 | }); 77 |
-------------------------------------------------------------------------------- /snippets/renameField.js: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-console */ 2 | import client from 'part:@sanity/base/client' 3 | 4 | // Run this script with: `sanity exec --with-user-token migrations/renameField.js` 5 | // 6 | // This example shows how you may write a migration script that renames a field (name => fullname) 7 | // on a specific document type (author). 8 | // This will migrate documents in batches of 100 and continue patching until no more documents are 9 | // returned from the query. 10 | // 11 | // This script can safely be run, even if documents are being concurrently modified by others. 12 | // If a document gets modified in the time between fetch => submit patch, this script will fail, 13 | // but can safely be re-run multiple times until it eventually runs out of documents to migrate. 14 | 15 | // A few things to note: 16 | // - This script will exit if any of the mutations fail due to a revision mismatch (which means the 17 | // document was edited between fetch => update) 18 | // - The query must eventually return an empty set, or else this script will continue indefinitely 19 | 20 | // Fetching documents that match the precondition for the migration. 21 | // NOTE: This query should eventually return an empty set of documents to mark the migration 22 | // as complete 23 | const fetchDocuments = () => 24 | client.fetch(`*[_type == 'author' && defined(name)][0...100] {_id, _rev, name}`) 25 | 26 | const buildPatches = docs => 27 | docs.map(doc => ({ 28 | id: doc._id, 29 | patch: { 30 | set: {fullname: doc.name}, 31 | unset: ['name'], 32 | // this will cause the migration to fail if any of the documents has been 33 | // modified since it was fetched.
34 | ifRevisionID: doc._rev 35 | } 36 | })) 37 | 38 | const createTransaction = patches => 39 | patches.reduce((tx, patch) => tx.patch(patch.id, patch.patch), client.transaction()) 40 | 41 | const commitTransaction = tx => tx.commit() 42 | 43 | const migrateNextBatch = async () => { 44 | const documents = await fetchDocuments() 45 | const patches = buildPatches(documents) 46 | if (patches.length === 0) { 47 | console.log('No more documents to migrate!') 48 | return null 49 | } 50 | console.log( 51 | `Migrating batch:\n %s`, 52 | patches.map(patch => `${patch.id} => ${JSON.stringify(patch.patch)}`).join('\n') 53 | ) 54 | const transaction = createTransaction(patches) 55 | await commitTransaction(transaction) 56 | return migrateNextBatch() 57 | } 58 | 59 | migrateNextBatch().catch(err => { 60 | console.error(err) 61 | process.exit(1) 62 | }) 63 |
-------------------------------------------------------------------------------- /snippets/stringInputWithValuesFromAnotherDocument.js: -------------------------------------------------------------------------------- 1 | import PropTypes from 'prop-types' 2 | import React from 'react' 3 | import client from 'part:@sanity/base/client' 4 | import {PatchEvent, set, unset} from 'part:@sanity/form-builder/patch-event' 5 | 6 | const DOCUMENT_ID = 'myConfigDocument' 7 | 8 | class CustomStringListInput extends React.Component { 9 | static propTypes = { 10 | value: PropTypes.string, 11 | type: PropTypes.object, 12 | onChange: PropTypes.func 13 | } 14 | handleChange = event => { 15 | if (!event.target.value) { 16 | this.props.onChange(PatchEvent.from(unset())) 17 | return 18 | } 19 | this.props.onChange(PatchEvent.from(set(event.target.value))) 20 | } 21 | 22 | state = {strings: []} 23 | 24 | componentDidMount() { 25 | client.getDocument(DOCUMENT_ID).then(document => { 26 | const strings = document.someArray.map(item => item.identifier) 27 | this.setState({strings}) 28 | }) 29 | this.subscription = client.listen(`*[_id == '${DOCUMENT_ID}']`).subscribe(changes => { 30 | this.setState({strings: changes.result.someArray.map(item => item.identifier)}) 31 | }) 32 | } 33 | 34 | componentWillUnmount() { 35 | this.subscription.unsubscribe() 36 | } 37 | 38 | render() { 39 | const {value, type} = this.props 40 | return (
41 | <div>
42 | <h2>{type.title}</h2>
43 | <p>{type.description}</p>
44 | <select onChange={this.handleChange} value={value || ''}>
45 | <option value="">Select a value</option>
46 | {this.state.strings.map(string => (
47 | <option key={string} value={string}>
48 | {string}
49 | </option>
50 | ))}
51 | </select>
52 | </div>
55 | ) 56 | } 57 | } 58 | 59 | export default CustomStringListInput 60 | -------------------------------------------------------------------------------- /snippets/structureForAssetDocs.js: -------------------------------------------------------------------------------- 1 | /* 2 | This is an example of how to use Structure Builder to organize image assets. 3 | Usage: 4 | 5 | import assetsStructure from './assetsStructure' 6 | 7 | export default () => S.list() 8 | .title('Content') 9 | .items([ 10 | // other list items, 11 | assetsStructure 12 | ]) 13 | */ 14 | import React from 'react' 15 | import S from '@sanity/desk-tool/structure-builder' 16 | 17 | const AssetPreview = ({ document }) => { 18 | const { displayed } = document 19 | return ( 20 | displayed.url && ( 21 |
22 | <img src={displayed.url} alt={displayed.originalFilename} style={{maxWidth: '100%'}} /> 23 |
24 | ) 25 | ) 26 | } 27 | const AssetDoc = assetId => 28 | S.document() 29 | .documentId(assetId) 30 | .views([ 31 | S.view.component(AssetPreview).title('Image preview'), 32 | S.view.form().title('Meta-information') 33 | ]) 34 | 35 | const assetsStructure = S.listItem() 36 | .title('Assets') 37 | .child( 38 | S.list() 39 | .title('Assets') 40 | .items([ 41 | S.listItem() 42 | .title('All images') 43 | .child(S.documentTypeList('sanity.imageAsset').child(AssetDoc)), 44 | // List images with width over 1000px 45 | S.listItem() 46 | .title('Large images (1000px+)') 47 | .child( 48 | S.documentList() 49 | .title('Large images') 50 | .filter( 51 | '_type == "sanity.imageAsset" && metadata.dimensions.width > 1000' 52 | ) 53 | .child(AssetDoc) 54 | ), 55 | // List images with the file extension of “gif” 56 | S.listItem() 57 | .title('GIFs') 58 | .child( 59 | S.documentList() 60 | .title('GIFs') 61 | .filter('_type == "sanity.imageAsset" && extension == "gif"') 62 | .child(AssetDoc) 63 | ), 64 | // List images that has been uploaded with the unsplash asset selector 65 | S.listItem() 66 | .title('From Unsplash') 67 | .child( 68 | S.documentList() 69 | .title('From Unsplash') 70 | .filter( 71 | '_type == "sanity.imageAsset" && source.name == "unsplash"' 72 | ) 73 | .child(AssetDoc) 74 | ) 75 | ]) 76 | ) 77 | 78 | export default assetsStructure 79 | -------------------------------------------------------------------------------- /snippets/test-images/image01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image01.jpg -------------------------------------------------------------------------------- /snippets/test-images/image02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image02.jpg -------------------------------------------------------------------------------- /snippets/test-images/image03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image03.jpg -------------------------------------------------------------------------------- /snippets/test-images/image04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image04.jpg -------------------------------------------------------------------------------- /snippets/test-images/image05.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image05.jpg -------------------------------------------------------------------------------- /snippets/test-images/image06.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image06.jpg -------------------------------------------------------------------------------- /snippets/test-images/image07.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image07.jpg -------------------------------------------------------------------------------- /snippets/test-images/image08.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image08.jpg -------------------------------------------------------------------------------- /snippets/test-images/image09.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image09.jpg -------------------------------------------------------------------------------- /snippets/test-images/image10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image10.jpg -------------------------------------------------------------------------------- /snippets/test-images/image11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image11.jpg -------------------------------------------------------------------------------- /snippets/test-images/image12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image12.jpg -------------------------------------------------------------------------------- /snippets/test-images/image13.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image13.jpg -------------------------------------------------------------------------------- /snippets/test-images/image14.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image14.jpg -------------------------------------------------------------------------------- /snippets/test-images/image15.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image15.jpg -------------------------------------------------------------------------------- /snippets/test-images/image16.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image16.jpg -------------------------------------------------------------------------------- /snippets/test-images/image17.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image17.jpg -------------------------------------------------------------------------------- /snippets/test-images/image18.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image18.jpg -------------------------------------------------------------------------------- /snippets/test-images/image19.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image19.jpg -------------------------------------------------------------------------------- /snippets/test-images/image20.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sanity-io/sanity-recipes/b41cfae4b73808b88ee3bce64adec44a07ff89ff/snippets/test-images/image20.jpg
-------------------------------------------------------------------------------- /snippets/uploadImageFromURLandCreateNewDocument.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | require('dotenv').config() 4 | const got = require('got') 5 | const SanityClient = require('@sanity/client') 6 | 7 | const client = SanityClient({ 8 | projectId: process.env.SANITY_PROJECT_ID, 9 | dataset: process.env.SANITY_DATASET, 10 | token: process.env.SANITY_WRITE_TOKEN, 11 | useCdn: false 12 | }) 13 | 14 | async function createADocumentWithAsset(imageURL) { 15 | const {_id} = await client.assets.upload('image', got.stream(imageURL)).catch(error => console.error(error)) 16 | const newDoc = await client.create({ 17 | _type: 'someType', 18 | mainImage: { 19 | _type: 'mainImage', 20 | asset: { 21 | _type: 'reference', 22 | _ref: _id 23 | } 24 | } 25 | }).catch(error => console.error(error)) 26 | 27 | return newDoc 28 | } 29 | 30 | const URL_TO_KITTEN_IMAGE = 'https://placekitten.com/g/2000/3000' 31 | 32 | createADocumentWithAsset(URL_TO_KITTEN_IMAGE).then(doc => console.log('This is your new doc:\n', JSON.stringify(doc, null, 2))) 33 |
-------------------------------------------------------------------------------- /snippets/uploadImagesConcurrently.js: -------------------------------------------------------------------------------- 1 | /* eslint-disable id-length, no-console */ 2 | const sanityClient = require('@sanity/client') 3 | const path = require('path') 4 | const pMap = require('p-map') 5 | const { 6 | createReadStream, 7 | promises: {readdir, stat} 8 | } = require('fs') 9 | 10 | const client = sanityClient({ 11 | projectId: 'PROJECT_ID', 12 | dataset: 'DATASET', 13 | useCdn: false, 14 | token: 'TOKEN' 15 | }) 16 | 17 | const IMAGES_FOLDER = `${__dirname}/test-images` 18 | const CONCURRENCY = 10 19 | 20 | function round(num) { 21 | return Number(num.toFixed(2)) 22 | } 23 | 24 | async function uploadImage(filename, i) { 25 | const filepath = path.join(IMAGES_FOLDER, filename) 26 | const size = (await stat(filepath)).size 27 | const start = new Date() 28 | 29 | await client.assets 30 | .upload('image', createReadStream(filepath), { 31 | filename 32 | }) 33 | .catch(err => console.log('unable to upload ', filename, err.message)) 34 | 35 | const elapsed = new Date().getTime() - start.getTime() 36 | 37 | console.log( 38 | `Upload file #${i} ${filename} [SIZE: ${round(size / 1000)} KB --- TIME: ${round( 39 | elapsed / 1000 40 | )} s --- SPEED: ${round(size / elapsed)} KB/s]` 41 | ) 42 | } 43 | 44 | async function run() { 45 | const files = (await readdir(IMAGES_FOLDER)).filter(f =>
path.extname(f).toLowerCase() === '.jpg') 46 | const globalStart = new Date() 47 | 48 | await pMap(files, uploadImage, {concurrency: CONCURRENCY}) 49 | 50 | const elapsed = new Date().getTime() - globalStart.getTime() 51 | console.log(`All done in ${round(elapsed / 1000)} seconds`) 52 | } 53 | 54 | run() 55 | -------------------------------------------------------------------------------- /test/.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "node": true, 4 | "jest": true 5 | }, 6 | "rules": { 7 | "no-sync": 0, 8 | "max-nested-callbacks": ["error", 4] 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/__snapshots__/blocksToText.test.js.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`converts blocks to text while adding placeholders for non-text blocks 1`] = ` 4 | "The idea behind our query language GROQ is to be able to describe exactly what information your application needs, potentially joining together information from several sets of documents, then stitching together a very specific response with only the exact fields you need. 5 | 6 | If you need help setting up a client to perform these queries in your front end, you should check out the documentation for the client for javascript or PHP. 7 | 8 | [image block] 9 | 10 | Introduction 11 | 12 | There are loads of advanced topics to cover, but let us start with the basics. We will take this simple query and pick it apart: 13 | 14 | [code block] 15 | 16 | A query typically starts with *. This asterisk represents every document in your dataset. To do any useful work this is typically followed by a filter in brackets. The filter above has two terms: 17 | 18 | The filter 19 | 20 | First we filter by document type. Every document in Sanity is required to have a type, and the type is always in the _type field. (We prefix any Sanity-specific fields with an underscore in an attempt to avoid clashing with any of your field names.) So _type == 'movie' limits this query to documents of the type ‘movie’. && is the operator “and” of course. 21 | 22 | The second term releaseYear > 1979 assumes that the movies have a field called releaseYear contain numbers. It will match any document where this number is larger than or equal to 1979. 23 | 24 | Projections 25 | 26 | So if we run this query, the result will be an array containing all movies from the year 1979 onwards in the dataset. Nice! However in a typical application movies might be huge documents containing information on actors, staff, posters, tag-lines, show-times, ratings and what not. If our goal is to render a list of movies in an overview, we are wasting bandwidth. Projections to the rescue. 27 | 28 | The typical projection is wrapped in braces and describe the data we want to see for each movie. A nice and simple projection for this query would give us the id, title and release year for each movie. It could look like this: {_id, title, releaseYear}. Putting it all together: 29 | 30 | [code block] 31 | 32 | Basic sorting 33 | 34 | Now there is another problem. Our movies appear in some unspecified order. Let’s say we want to sort our movies by year. For this we use the order-function. Order takes a number of fields and sort directions and orders your documents accordingly. We wanted to sort our movies by releaseYear. 
This is easily accomplished with order(releaseYear), like this: 35 | 36 | [code block] 37 | 38 | (We need the | operator here in front of the order()-function, we'll discuss that more later.) 39 | 40 | We think of GROQ statements as describing a data flow from left to right. First everything (*) flows through the filter [_type == 'movie' && …], then all those movies flow through the order()-function which are then all mapped through the projection {_id, title, ...} which picks out the bits we want returned. 41 | 42 | The order-function accepts a list of fields, and optionally you can specify the sort direction for each field. If you wanted to sort the movies by year, and then within each year we want them alphabetical by title, we could use this ordering: order(releaseYear, title) And if we wanted the newest movies first, we could reverse the direction like this: order(releaseYear desc, title). 43 | 44 | (asc means “ascending” and desc means descending in this context. If you leave out the sort-direction, Sanity will assume you want the ascending order.) 45 | 46 | Slicing the result set 47 | 48 | This brings us to our final problem for this query: There are many movies in the world. Maybe our dataset contains tens of thousands. We need a way to describe which slice of that list we want to show. This is done using a selector. Let’s say we just wanted the first movie, we could add a [0] at the end. This works exactly like an array accessor and would return only the first element. If we want a slice, we can add the range operator like this: [0...100] this would return the first hundred movies from index 0 through 99. Obviously we can just as well ask for [1023...1048] or any other slice we desire. So there we are, our first basic query with filtering, ordering, projections and selector: 49 | 50 | [code block] 51 | 52 | References and joins 53 | 54 | A reference in Sanity is a link from one document to another. Standard references are “hard” meaning when a document references another document, the target document must exist, and is actually prevented from being deleted until the reference is removed. (There is also weak-references that do not \\"hold on to\\" the target. You make them by adding a _weak-key to the reference object like this: {_ref: \\"\\", _weak: true}) 55 | 56 | Let’s say we have “person”-type documents that looks something like this: 57 | 58 | [code block] 59 | 60 | Keeping it simple, maybe our movies had a field director that contained a reference to a person. It could look something like this: 61 | 62 | [code block] 63 | 64 | Remember Sanity-specific fields are prefixed with an underscore, and an object containing a _ref key appearing anywhere in the document becomes a hard reference. 65 | 66 | Expanding references 67 | 68 | Now we can do a number of useful things with this reference. The most basic thing is expanding the reference in place. Let’s revisit our movie-queries from the introduction. 69 | 70 | [code block] 71 | 72 | Let’s say we wanted to include the director in the returned result. If we didn't know any better, we'd perhaps try something like this: 73 | 74 | [code block] 75 | 76 | But if we just naïvely include the director in like this, we will just get whatever is in the director field on this document, which is the literal reference description: 77 | 78 | [code block] 79 | 80 | This is obviously not what we wanted, we wanted to follow that reference! 
By adding the dereferencing operator -> we ask Sanity to follow the reference and replace the it with the actual content of the document referenced: 81 | 82 | [code block] 83 | 84 | Now, this is useful. We’d get something like this: 85 | 86 | [code block] 87 | 88 | Then maybe we didn’t want all that metadata with our director? We can add a separate projection for our director: 89 | 90 | [code block] 91 | 92 | Our Alien-movie now looks neat like this: 93 | 94 | [code block] 95 | 96 | But we can do one better. We are not limited to the existing fields in the document in our projections, we can actually declare new fields. Let’s say we are building our compact movie list and we wanted just the title, year and director name. We can get minimal cruft by extracting just the name and putting it in a new field, like this: 97 | 98 | [code block] 99 | 100 | Now our Alien movie record is super neat: 101 | 102 | [code block] 103 | 104 | Filtering by references 105 | 106 | When dealing with references, we have a useful function called references() which can be used in filters to select only documents that reference specific other documents. Let’s say we want to list every movie Ridley Scott has been involved in. It’s as simple as this: 107 | 108 | [code block] 109 | 110 | Our first join 111 | 112 | It is time to write our first proper join: Say we wanted to list people and include all the movies they were involved in? We’ll be querying the “person”-type documents, but in the projections for each person, we’ll ask for the movies they have been involved in. To do this we have to briefly cover the parent-operator ^. Let’s look at the query first: 113 | 114 | [code block] 115 | 116 | In a join, the parent operator is a way to reference the “parent” document. In this example the outer query for “person”-type documents fetches a bunch of people, and for each person it returns the _id and name. Then we want to fetch the movies referencing that person. Now we declare the new field “movies” where we start a new query for “movie”-type documents, but for each person we want to limit our movie-query to movies referencing that person. To achieve this we need the _id of the person, but if we just wrote _id in the movies-query we’d reference the _id of the movie. To get to the fields of the person record we go “up” one level using the parent operator ^. So ^ means the specific “person”-document that our movie-query is about, and then ^._id is the _id of that person, just as ^.name would be her name. So when we say references(^._id) in the query above, we limit our movies to movies referencing the current person. 117 | 118 | Naked projections 119 | 120 | There is one more new thing we haven’t talked about in this query. We could have written the movies-sub-query like this: 121 | 122 | [code block] 123 | 124 | Our list of movies would have looked something like this: 125 | 126 | [code block] 127 | 128 | Since we just wanted the titles, we can use a “naked projection”. By just naming the field we want, like this: 129 | 130 | [code block] 131 | 132 | We get a nice, simple array of values, like this: 133 | 134 | [code block] 135 | 136 | So, for completeness, the result of the full person w/movies query above could look something like this: 137 | 138 | [code block] 139 | 140 | More ways to filter 141 | 142 | Sanity supports a growing number of ways to filter your documents. We have shown simple attribute comparisions with _type == ‘movie’ and releaseYear > 1979. 
We have shown filtering by references using the references()-function. In addition we support: 143 | 144 | Text search using the match operator, e.g. *[title match \\"Alien*\\"] 145 | 146 | Filtering by the presence of a field, e.g. *[defined(status)] which only match document that have the status property set to any value. 147 | 148 | The in-operator which matches values in arrays, as in *[\\"sci-fi\\" in genres], that matches all documents where genres is an array and that array contains the value \\"sci-fi\\". 149 | 150 | You can of course combine these filters using the boolean operators && (and), || (or), ! (not), like this *[_type == \\"movie\\" && (!(\\"sci-fi\\" in genres) || releaseYear > 1979)]. 151 | 152 | We are working on a full reference for the GROQ feature set. In the mean time you'll find a comprehensive set of examples in the cheat sheet. 153 | 154 | Queries in projections 155 | 156 | A useful thing in GROQ is that filtering and projections also can be used inside your projections. Let’s say you work for an architect and every project has a number of milestones. A document might look something like this: 157 | 158 | [code block] 159 | 160 | And let’s say the view we are producing is about showing the current status of the project. We could achieve this by finding the latest milestone and extracting its status tag. This can be done in GROQ like this: 161 | 162 | [code block] 163 | 164 | Let’s pick apart the status query milestones|order(year desc)[0].status in some detail: 165 | 166 | First we take the field milestones which contain the (potentially unordered) list of milestones for the project. Using the pipe-operator | we send the contents of this array to the order function with is instructed to sort the array by year in descending order order(year desc). Then We take only the first element [0] (which is the latest milestone) and return the value of it’s status field. So now our project list would look something like this: 167 | 168 | [code block] 169 | 170 | Let’s try another clever trick querying the contents of this object. Instead of a status field, we just want a boolean flag telling whether the project is completed. We could achieve this like this: 171 | 172 | [code block] 173 | 174 | Here we take the milestones, but select only the ones having the status “completed”. Then we count() the number of milestones matching this constraint. If that count is > 0 the result is true. So now our result would look something like this: 175 | 176 | [code block] 177 | 178 | Some comments on the pipe-operator 179 | 180 | In the project-status example above we used the pipe operator | for a second time. Let's explore that in some detail: 181 | 182 | [code block] 183 | 184 | The pipe operator takes the output from its left hand side and sends it to the operation to its right. \\"But isn’t this what all GROQ statements does?\\", I hear you ask. And you’d be right. Actually, if you prefer, you can use the pipe operator a lot more. These queries are the same: 185 | 186 | [code block] 187 | 188 | To make basic GROQ statements appear simpler we automatically insert the pipe operator when it is obvious. Basically this happens when the {} or [] characters are used. In one sense they are always parsed like this |{}and |[]. 189 | 190 | In some situations, like in the project-status example, we needed an explicit pipe-operator because there were no way for the GROQ parser to infer it. 
milestones order(year desc) would be a syntax error, so in this instance we have to explicitly state the pipe operator, like this: milestones|order(year desc). As a simple rule of thumb you always need the | in front of order() and in the future any other function that handle document streams like order() does. 191 | 192 | When programatically building queries in the front end, the pipe-operator can be very handy. You can chain several statements together using the pipe-operator knowing that you never create an ambiguous statement. Something like this: 193 | 194 | [code block] 195 | 196 | Which would build the query: 197 | 198 | [code block] 199 | 200 | Which is equivalent to: 201 | 202 | [code block] 203 | 204 | Some fine points on arrays and projections 205 | 206 | Let’s consider this document with some deep structure: 207 | 208 | [code block] 209 | 210 | So we have a movie with a poster image, and an array of other images. Each image has some metadata represented here by a caption, then a reference to an asset record containing all the metadata on the specific image including its url. A simplified asset record could look something like this: 211 | 212 | [code block] 213 | 214 | Now we can easily retrieve the poster image url and attach it to our result for each movies like this: 215 | 216 | [code block] 217 | 218 | But what if we wanted to do the same thing for the other images? Since the images field is an array, we can’t just images.asset->url. We somehow have to apply the asset->url-part to each member of the array. This is accomplished by adding a blank filter, like this: images[].asset->url which will return the image urls as a simple array. So the full query would look like this: 219 | 220 | [code block] 221 | 222 | This would yield something like this: 223 | 224 | [code block] 225 | 226 | If you wanted a richer data-set with your images you could use a normal projection like this (taking care to add the blank filter to apply the projection to every array member): 227 | 228 | [code block] 229 | 230 | Now your result looks something like this: 231 | 232 | [code block] 233 | 234 | The ellipsis operator 235 | 236 | Sometimes you might want to compute some properties of a document, but still want the entire set of attributes returned. This can be a problem since the moment you specify a projection, you'll have to list all the fields you want included. Let's say we wanted to count the actors in a movie doing something like this: 237 | 238 | [code block] 239 | 240 | There is an obvious problem with this. We just wanted to add a custom field, but since we needed a projection to do it, now all we got is something like this: 241 | 242 | [code block] 243 | 244 | What we wanted was our custom field in addition to the normal fields. This can be achieved with the ellipsis operator. By appending it like this, we effectively say we want the fields we just specified, but also everything else: 245 | 246 | [code block] 247 | 248 | Which brings us a result that could look something like this: 249 | 250 | [code block] 251 | 252 | Queries that don't start with an * 253 | 254 | We said initially that most GROQ queries start with the asterisk, but they don't have to. Any valid GROQ expression can be the entire query. This is a valid query: 255 | 256 | [code block] 257 | 258 | It will return the number of documents in the dataset. This is also valid: 259 | 260 | [code block] 261 | 262 | It will return true if any document in the entire dataset has a name-field containing the word \\"sigourney\\". 
263 | 264 | More usefully, you can actually have a projection be your outer statement. Like this: 265 | 266 | [code block] 267 | 268 | This combines three completely separate queries into one query and returns an object containing the result of all of them. This can be a useful way to speed up page loads. By combining queries in this manner you can often get all of the core content for a web page to load in a single, cacheable query. 269 | 270 | Finally 271 | 272 | So there you go, this should cover 99% of what you need to understand in day-to-day use of GROQ. Reference documentation is imminent, but while we're writing it you should partake our Query Cheat Sheet which contain examples of all operators and functions currently supported." 273 | `; 274 | 275 | exports[`converts blocks to text while removing non-text blocks by default 1`] = ` 276 | "The idea behind our query language GROQ is to be able to describe exactly what information your application needs, potentially joining together information from several sets of documents, then stitching together a very specific response with only the exact fields you need. 277 | 278 | If you need help setting up a client to perform these queries in your front end, you should check out the documentation for the client for javascript or PHP. 279 | 280 | 281 | 282 | Introduction 283 | 284 | There are loads of advanced topics to cover, but let us start with the basics. We will take this simple query and pick it apart: 285 | 286 | 287 | 288 | A query typically starts with *. This asterisk represents every document in your dataset. To do any useful work this is typically followed by a filter in brackets. The filter above has two terms: 289 | 290 | The filter 291 | 292 | First we filter by document type. Every document in Sanity is required to have a type, and the type is always in the _type field. (We prefix any Sanity-specific fields with an underscore in an attempt to avoid clashing with any of your field names.) So _type == 'movie' limits this query to documents of the type ‘movie’. && is the operator “and” of course. 293 | 294 | The second term releaseYear > 1979 assumes that the movies have a field called releaseYear contain numbers. It will match any document where this number is larger than or equal to 1979. 295 | 296 | Projections 297 | 298 | So if we run this query, the result will be an array containing all movies from the year 1979 onwards in the dataset. Nice! However in a typical application movies might be huge documents containing information on actors, staff, posters, tag-lines, show-times, ratings and what not. If our goal is to render a list of movies in an overview, we are wasting bandwidth. Projections to the rescue. 299 | 300 | The typical projection is wrapped in braces and describe the data we want to see for each movie. A nice and simple projection for this query would give us the id, title and release year for each movie. It could look like this: {_id, title, releaseYear}. Putting it all together: 301 | 302 | 303 | 304 | Basic sorting 305 | 306 | Now there is another problem. Our movies appear in some unspecified order. Let’s say we want to sort our movies by year. For this we use the order-function. Order takes a number of fields and sort directions and orders your documents accordingly. We wanted to sort our movies by releaseYear. This is easily accomplished with order(releaseYear), like this: 307 | 308 | 309 | 310 | (We need the | operator here in front of the order()-function, we'll discuss that more later.) 
311 | 312 | We think of GROQ statements as describing a data flow from left to right. First everything (*) flows through the filter [_type == 'movie' && …], then all those movies flow through the order()-function which are then all mapped through the projection {_id, title, ...} which picks out the bits we want returned. 313 | 314 | The order-function accepts a list of fields, and optionally you can specify the sort direction for each field. If you wanted to sort the movies by year, and then within each year we want them alphabetical by title, we could use this ordering: order(releaseYear, title) And if we wanted the newest movies first, we could reverse the direction like this: order(releaseYear desc, title). 315 | 316 | (asc means “ascending” and desc means descending in this context. If you leave out the sort-direction, Sanity will assume you want the ascending order.) 317 | 318 | Slicing the result set 319 | 320 | This brings us to our final problem for this query: There are many movies in the world. Maybe our dataset contains tens of thousands. We need a way to describe which slice of that list we want to show. This is done using a selector. Let’s say we just wanted the first movie, we could add a [0] at the end. This works exactly like an array accessor and would return only the first element. If we want a slice, we can add the range operator like this: [0...100] this would return the first hundred movies from index 0 through 99. Obviously we can just as well ask for [1023...1048] or any other slice we desire. So there we are, our first basic query with filtering, ordering, projections and selector: 321 | 322 | 323 | 324 | References and joins 325 | 326 | A reference in Sanity is a link from one document to another. Standard references are “hard” meaning when a document references another document, the target document must exist, and is actually prevented from being deleted until the reference is removed. (There is also weak-references that do not \\"hold on to\\" the target. You make them by adding a _weak-key to the reference object like this: {_ref: \\"\\", _weak: true}) 327 | 328 | Let’s say we have “person”-type documents that looks something like this: 329 | 330 | 331 | 332 | Keeping it simple, maybe our movies had a field director that contained a reference to a person. It could look something like this: 333 | 334 | 335 | 336 | Remember Sanity-specific fields are prefixed with an underscore, and an object containing a _ref key appearing anywhere in the document becomes a hard reference. 337 | 338 | Expanding references 339 | 340 | Now we can do a number of useful things with this reference. The most basic thing is expanding the reference in place. Let’s revisit our movie-queries from the introduction. 341 | 342 | 343 | 344 | Let’s say we wanted to include the director in the returned result. If we didn't know any better, we'd perhaps try something like this: 345 | 346 | 347 | 348 | But if we just naïvely include the director in like this, we will just get whatever is in the director field on this document, which is the literal reference description: 349 | 350 | 351 | 352 | This is obviously not what we wanted, we wanted to follow that reference! By adding the dereferencing operator -> we ask Sanity to follow the reference and replace the it with the actual content of the document referenced: 353 | 354 | 355 | 356 | Now, this is useful. We’d get something like this: 357 | 358 | 359 | 360 | Then maybe we didn’t want all that metadata with our director? 
We can add a separate projection for our director: 361 | 362 | 363 | 364 | Our Alien-movie now looks neat like this: 365 | 366 | 367 | 368 | But we can do one better. We are not limited to the existing fields in the document in our projections, we can actually declare new fields. Let’s say we are building our compact movie list and we wanted just the title, year and director name. We can get minimal cruft by extracting just the name and putting it in a new field, like this: 369 | 370 | 371 | 372 | Now our Alien movie record is super neat: 373 | 374 | 375 | 376 | Filtering by references 377 | 378 | When dealing with references, we have a useful function called references() which can be used in filters to select only documents that reference specific other documents. Let’s say we want to list every movie Ridley Scott has been involved in. It’s as simple as this: 379 | 380 | 381 | 382 | Our first join 383 | 384 | It is time to write our first proper join: Say we wanted to list people and include all the movies they were involved in? We’ll be querying the “person”-type documents, but in the projections for each person, we’ll ask for the movies they have been involved in. To do this we have to briefly cover the parent-operator ^. Let’s look at the query first: 385 | 386 | 387 | 388 | In a join, the parent operator is a way to reference the “parent” document. In this example the outer query for “person”-type documents fetches a bunch of people, and for each person it returns the _id and name. Then we want to fetch the movies referencing that person. Now we declare the new field “movies” where we start a new query for “movie”-type documents, but for each person we want to limit our movie-query to movies referencing that person. To achieve this we need the _id of the person, but if we just wrote _id in the movies-query we’d reference the _id of the movie. To get to the fields of the person record we go “up” one level using the parent operator ^. So ^ means the specific “person”-document that our movie-query is about, and then ^._id is the _id of that person, just as ^.name would be her name. So when we say references(^._id) in the query above, we limit our movies to movies referencing the current person. 389 | 390 | Naked projections 391 | 392 | There is one more new thing we haven’t talked about in this query. We could have written the movies-sub-query like this: 393 | 394 | 395 | 396 | Our list of movies would have looked something like this: 397 | 398 | 399 | 400 | Since we just wanted the titles, we can use a “naked projection”. By just naming the field we want, like this: 401 | 402 | 403 | 404 | We get a nice, simple array of values, like this: 405 | 406 | 407 | 408 | So, for completeness, the result of the full person w/movies query above could look something like this: 409 | 410 | 411 | 412 | More ways to filter 413 | 414 | Sanity supports a growing number of ways to filter your documents. We have shown simple attribute comparisions with _type == ‘movie’ and releaseYear > 1979. We have shown filtering by references using the references()-function. In addition we support: 415 | 416 | Text search using the match operator, e.g. *[title match \\"Alien*\\"] 417 | 418 | Filtering by the presence of a field, e.g. *[defined(status)] which only match document that have the status property set to any value. 419 | 420 | The in-operator which matches values in arrays, as in *[\\"sci-fi\\" in genres], that matches all documents where genres is an array and that array contains the value \\"sci-fi\\". 
421 | 422 | You can of course combine these filters using the boolean operators && (and), || (or), ! (not), like this *[_type == \\"movie\\" && (!(\\"sci-fi\\" in genres) || releaseYear > 1979)]. 423 | 424 | We are working on a full reference for the GROQ feature set. In the mean time you'll find a comprehensive set of examples in the cheat sheet. 425 | 426 | Queries in projections 427 | 428 | A useful thing in GROQ is that filtering and projections also can be used inside your projections. Let’s say you work for an architect and every project has a number of milestones. A document might look something like this: 429 | 430 | 431 | 432 | And let’s say the view we are producing is about showing the current status of the project. We could achieve this by finding the latest milestone and extracting its status tag. This can be done in GROQ like this: 433 | 434 | 435 | 436 | Let’s pick apart the status query milestones|order(year desc)[0].status in some detail: 437 | 438 | First we take the field milestones which contain the (potentially unordered) list of milestones for the project. Using the pipe-operator | we send the contents of this array to the order function with is instructed to sort the array by year in descending order order(year desc). Then We take only the first element [0] (which is the latest milestone) and return the value of it’s status field. So now our project list would look something like this: 439 | 440 | 441 | 442 | Let’s try another clever trick querying the contents of this object. Instead of a status field, we just want a boolean flag telling whether the project is completed. We could achieve this like this: 443 | 444 | 445 | 446 | Here we take the milestones, but select only the ones having the status “completed”. Then we count() the number of milestones matching this constraint. If that count is > 0 the result is true. So now our result would look something like this: 447 | 448 | 449 | 450 | Some comments on the pipe-operator 451 | 452 | In the project-status example above we used the pipe operator | for a second time. Let's explore that in some detail: 453 | 454 | 455 | 456 | The pipe operator takes the output from its left hand side and sends it to the operation to its right. \\"But isn’t this what all GROQ statements does?\\", I hear you ask. And you’d be right. Actually, if you prefer, you can use the pipe operator a lot more. These queries are the same: 457 | 458 | 459 | 460 | To make basic GROQ statements appear simpler we automatically insert the pipe operator when it is obvious. Basically this happens when the {} or [] characters are used. In one sense they are always parsed like this |{}and |[]. 461 | 462 | In some situations, like in the project-status example, we needed an explicit pipe-operator because there were no way for the GROQ parser to infer it. milestones order(year desc) would be a syntax error, so in this instance we have to explicitly state the pipe operator, like this: milestones|order(year desc). As a simple rule of thumb you always need the | in front of order() and in the future any other function that handle document streams like order() does. 463 | 464 | When programatically building queries in the front end, the pipe-operator can be very handy. You can chain several statements together using the pipe-operator knowing that you never create an ambiguous statement. 
Something like this: 465 | 466 | 467 | 468 | Which would build the query: 469 | 470 | 471 | 472 | Which is equivalent to: 473 | 474 | 475 | 476 | Some fine points on arrays and projections 477 | 478 | Let’s consider this document with some deep structure: 479 | 480 | 481 | 482 | So we have a movie with a poster image, and an array of other images. Each image has some metadata represented here by a caption, then a reference to an asset record containing all the metadata on the specific image including its url. A simplified asset record could look something like this: 483 | 484 | 485 | 486 | Now we can easily retrieve the poster image url and attach it to our result for each movies like this: 487 | 488 | 489 | 490 | But what if we wanted to do the same thing for the other images? Since the images field is an array, we can’t just images.asset->url. We somehow have to apply the asset->url-part to each member of the array. This is accomplished by adding a blank filter, like this: images[].asset->url which will return the image urls as a simple array. So the full query would look like this: 491 | 492 | 493 | 494 | This would yield something like this: 495 | 496 | 497 | 498 | If you wanted a richer data-set with your images you could use a normal projection like this (taking care to add the blank filter to apply the projection to every array member): 499 | 500 | 501 | 502 | Now your result looks something like this: 503 | 504 | 505 | 506 | The ellipsis operator 507 | 508 | Sometimes you might want to compute some properties of a document, but still want the entire set of attributes returned. This can be a problem since the moment you specify a projection, you'll have to list all the fields you want included. Let's say we wanted to count the actors in a movie doing something like this: 509 | 510 | 511 | 512 | There is an obvious problem with this. We just wanted to add a custom field, but since we needed a projection to do it, now all we got is something like this: 513 | 514 | 515 | 516 | What we wanted was our custom field in addition to the normal fields. This can be achieved with the ellipsis operator. By appending it like this, we effectively say we want the fields we just specified, but also everything else: 517 | 518 | 519 | 520 | Which brings us a result that could look something like this: 521 | 522 | 523 | 524 | Queries that don't start with an * 525 | 526 | We said initially that most GROQ queries start with the asterisk, but they don't have to. Any valid GROQ expression can be the entire query. This is a valid query: 527 | 528 | 529 | 530 | It will return the number of documents in the dataset. This is also valid: 531 | 532 | 533 | 534 | It will return true if any document in the entire dataset has a name-field containing the word \\"sigourney\\". 535 | 536 | More usefully, you can actually have a projection be your outer statement. Like this: 537 | 538 | 539 | 540 | This combines three completely separate queries into one query and returns an object containing the result of all of them. This can be a useful way to speed up page loads. By combining queries in this manner you can often get all of the core content for a web page to load in a single, cacheable query. 541 | 542 | Finally 543 | 544 | So there you go, this should cover 99% of what you need to understand in day-to-day use of GROQ. Reference documentation is imminent, but while we're writing it you should partake our Query Cheat Sheet which contain examples of all operators and functions currently supported." 
545 | `; 546 | -------------------------------------------------------------------------------- /test/__snapshots__/convertQuotationMarks.test.js.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`can converts quotation marks to specific characters 1`] = ` 4 | Array [ 5 | Object { 6 | "_type": "block", 7 | "children": Array [ 8 | Object { 9 | "_type": "span", 10 | "marks": Array [], 11 | "text": "Warning: “", 12 | }, 13 | Object { 14 | "_type": "span", 15 | "marks": Array [ 16 | "strong", 17 | ], 18 | "text": "Sanity”", 19 | }, 20 | Object { 21 | "_type": "span", 22 | "marks": Array [ 23 | "strong", 24 | ], 25 | "text": " is", 26 | }, 27 | Object { 28 | "_type": "span", 29 | "marks": Array [ 30 | "em", 31 | ], 32 | "text": " “addictive”.", 33 | }, 34 | ], 35 | }, 36 | Object { 37 | "_type": "block", 38 | "children": Array [ 39 | Object { 40 | "_type": "span", 41 | "marks": Array [ 42 | "strong", 43 | ], 44 | "text": "Unmatched quotation marks? “", 45 | }, 46 | Object { 47 | "_type": "span", 48 | "marks": Array [ 49 | "strong", 50 | ], 51 | "text": "sure, why not?", 52 | }, 53 | ], 54 | }, 55 | ] 56 | `; 57 | 58 | exports[`can converts single quotation marks 1`] = ` 59 | Array [ 60 | Object { 61 | "_type": "block", 62 | "children": Array [ 63 | Object { 64 | "_type": "span", 65 | "marks": Array [], 66 | "text": "Warning: «", 67 | }, 68 | Object { 69 | "_type": "span", 70 | "marks": Array [ 71 | "strong", 72 | ], 73 | "text": "Sanity»", 74 | }, 75 | Object { 76 | "_type": "span", 77 | "marks": Array [ 78 | "strong", 79 | ], 80 | "text": " is", 81 | }, 82 | Object { 83 | "_type": "span", 84 | "marks": Array [ 85 | "em", 86 | ], 87 | "text": " «addictive».", 88 | }, 89 | ], 90 | }, 91 | Object { 92 | "_type": "block", 93 | "children": Array [ 94 | Object { 95 | "_type": "span", 96 | "marks": Array [ 97 | "strong", 98 | ], 99 | "text": "Unmatched quotation marks? «", 100 | }, 101 | Object { 102 | "_type": "span", 103 | "marks": Array [ 104 | "strong", 105 | ], 106 | "text": "sure, why not?", 107 | }, 108 | ], 109 | }, 110 | ] 111 | `; 112 | 113 | exports[`converts quotation marks to guillemets by default 1`] = ` 114 | Array [ 115 | Object { 116 | "_type": "block", 117 | "children": Array [ 118 | Object { 119 | "_type": "span", 120 | "marks": Array [], 121 | "text": "Warning: «", 122 | }, 123 | Object { 124 | "_type": "span", 125 | "marks": Array [ 126 | "strong", 127 | ], 128 | "text": "Sanity»", 129 | }, 130 | Object { 131 | "_type": "span", 132 | "marks": Array [ 133 | "strong", 134 | ], 135 | "text": " is", 136 | }, 137 | Object { 138 | "_type": "span", 139 | "marks": Array [ 140 | "em", 141 | ], 142 | "text": " «addictive».", 143 | }, 144 | ], 145 | }, 146 | Object { 147 | "_type": "block", 148 | "children": Array [ 149 | Object { 150 | "_type": "span", 151 | "marks": Array [ 152 | "strong", 153 | ], 154 | "text": "Unmatched quotation marks? 
«", 155 | }, 156 | Object { 157 | "_type": "span", 158 | "marks": Array [ 159 | "strong", 160 | ], 161 | "text": "sure, why not?", 162 | }, 163 | ], 164 | }, 165 | ] 166 | `; 167 | -------------------------------------------------------------------------------- /test/blocksToText.test.js: -------------------------------------------------------------------------------- 1 | const blocksToText = require('../snippets/blocksToText') 2 | const manyBlocks = require('./fixtures/manyBlocks.fixture') 3 | 4 | test('converts blocks to text while removing non-text blocks by default', () => { 5 | expect(blocksToText(manyBlocks)).toMatchSnapshot() 6 | }) 7 | 8 | test('converts blocks to text while adding placeholders for non-text blocks', () => { 9 | expect(blocksToText(manyBlocks, {nonTextBehavior: 'placeholder'})).toMatchSnapshot() 10 | }) 11 | -------------------------------------------------------------------------------- /test/convertQuotationMarks.test.js: -------------------------------------------------------------------------------- 1 | const convertQuotationMarks = require('../snippets/convertQuotationMarks') 2 | const singleQuotationMarks = require('./fixtures/singleQuotationMarks.fixture') 3 | const doubleQuotationMarks = require('./fixtures/doubleQuotationMarks.fixture') 4 | 5 | test('converts quotation marks to guillemets by default', () => { 6 | expect(convertQuotationMarks(doubleQuotationMarks)).toMatchSnapshot() 7 | }) 8 | 9 | test('can converts quotation marks to specific characters', () => { 10 | expect(convertQuotationMarks(doubleQuotationMarks, {open: '“', close: '”'})).toMatchSnapshot() 11 | }) 12 | 13 | test('can converts single quotation marks', () => { 14 | expect(convertQuotationMarks(singleQuotationMarks, {find: "'"})).toMatchSnapshot() 15 | }) 16 | 17 | test('can converts single quotation marks to specific characters', () => { 18 | expect(convertQuotationMarks(singleQuotationMarks, {find: "'", open: '"', close: '"'})).toEqual( 19 | doubleQuotationMarks 20 | ) 21 | }) 22 | -------------------------------------------------------------------------------- /test/fixtures/doubleQuotationMarks.fixture.js: -------------------------------------------------------------------------------- 1 | module.exports = [ 2 | { 3 | _type: 'block', 4 | children: [ 5 | { 6 | _type: 'span', 7 | marks: [], 8 | text: `Warning: "` 9 | }, 10 | { 11 | _type: 'span', 12 | marks: ['strong'], 13 | text: `Sanity"` 14 | }, 15 | { 16 | _type: 'span', 17 | marks: ['strong'], 18 | text: ' is' 19 | }, 20 | { 21 | _type: 'span', 22 | marks: ['em'], 23 | text: ` "addictive".` 24 | } 25 | ] 26 | }, 27 | { 28 | _type: 'block', 29 | children: [ 30 | { 31 | _type: 'span', 32 | marks: ['strong'], 33 | text: `Unmatched quotation marks? "` 34 | }, 35 | { 36 | _type: 'span', 37 | marks: ['strong'], 38 | text: 'sure, why not?' 39 | } 40 | ] 41 | } 42 | ] 43 | -------------------------------------------------------------------------------- /test/fixtures/manyBlocks.fixture.js: -------------------------------------------------------------------------------- 1 | module.exports = [ 2 | { 3 | _key: '2efd1a392b97', 4 | _type: 'block', 5 | children: [ 6 | { 7 | _key: '2efd1a392b970', 8 | _type: 'span', 9 | marks: [], 10 | text: 11 | 'The idea behind our query language GROQ is to be able to describe exactly what information your application needs, potentially joining together information from several sets of documents, then stitching together a very specific response with only the exact fields you need.' 
12 | } 13 | ], 14 | markDefs: [], 15 | style: 'normal' 16 | }, 17 | { 18 | _key: '12b466082a1d', 19 | _type: 'block', 20 | children: [ 21 | { 22 | _key: '12b466082a1d0', 23 | _type: 'span', 24 | marks: [], 25 | text: 26 | 'If you need help setting up a client to perform these queries in your front end, you should check out the documentation for the client for ' 27 | }, 28 | { 29 | _key: '12b466082a1d1', 30 | _type: 'span', 31 | marks: ['bb6bbab9104'], 32 | text: 'javascript' 33 | }, 34 | { 35 | _key: '12b466082a1d2', 36 | _type: 'span', 37 | marks: [], 38 | text: ' or ' 39 | }, 40 | { 41 | _key: '12b466082a1d3', 42 | _type: 'span', 43 | marks: ['27eda499105'], 44 | text: 'PHP' 45 | }, 46 | { 47 | _key: '12b466082a1d4', 48 | _type: 'span', 49 | marks: [], 50 | text: '.' 51 | } 52 | ], 53 | markDefs: [ 54 | { 55 | _key: 'bb6bbab9104', 56 | _ref: '1395f31b-6663-4f1c-bf00-7910d7ec6b8f', 57 | _type: 'internalLink', 58 | _weak: true 59 | }, 60 | { 61 | _key: '27eda499105', 62 | _ref: '96d29265-3ee7-487a-9900-9084171d9889', 63 | _type: 'internalLink', 64 | _weak: true 65 | } 66 | ], 67 | style: 'normal' 68 | }, 69 | { 70 | _key: 'f45e4adb051a', 71 | _type: 'image', 72 | asset: { 73 | _ref: 'image-SVmRparvDX5NJwj1kTe3OSl2-1600x800-png', 74 | _type: 'reference' 75 | } 76 | }, 77 | { 78 | _key: '741625421a76', 79 | _type: 'block', 80 | children: [ 81 | { 82 | _key: '741625421a760', 83 | _type: 'span', 84 | marks: [], 85 | text: 'Introduction' 86 | } 87 | ], 88 | markDefs: [], 89 | style: 'h2' 90 | }, 91 | { 92 | _key: 'e086c62ee5ff', 93 | _type: 'block', 94 | children: [ 95 | { 96 | _key: 'e086c62ee5ff0', 97 | _type: 'span', 98 | marks: [], 99 | text: 100 | 'There are loads of advanced topics to cover, but let us start with the basics. We will take this simple query and pick it apart:' 101 | } 102 | ], 103 | markDefs: [], 104 | style: 'normal' 105 | }, 106 | { 107 | _key: '9436ab6ac88f', 108 | _type: 'code', 109 | code: "*[_type == 'movie' && releaseYear > 1979] \n", 110 | language: 'text' 111 | }, 112 | { 113 | _key: '78f4405b6852', 114 | _type: 'block', 115 | children: [ 116 | { 117 | _key: '78f4405b68520', 118 | _type: 'span', 119 | marks: [], 120 | text: 'A query typically starts with ' 121 | }, 122 | { 123 | _key: '78f4405b68521', 124 | _type: 'span', 125 | marks: ['code'], 126 | text: '*' 127 | }, 128 | { 129 | _key: '78f4405b68522', 130 | _type: 'span', 131 | marks: [], 132 | text: 133 | '. This asterisk represents every document in your dataset. To do any useful work this is typically followed by a ' 134 | }, 135 | { 136 | _key: '78f4405b68523', 137 | _type: 'span', 138 | marks: ['em'], 139 | text: 'filter' 140 | }, 141 | { 142 | _key: '78f4405b68524', 143 | _type: 'span', 144 | marks: [], 145 | text: ' in brackets. The filter above has two terms:' 146 | } 147 | ], 148 | markDefs: [], 149 | style: 'normal' 150 | }, 151 | { 152 | _key: 'e9a260f5e979', 153 | _type: 'block', 154 | children: [ 155 | { 156 | _key: 'e9a260f5e9790', 157 | _type: 'span', 158 | marks: [], 159 | text: 'The filter' 160 | } 161 | ], 162 | markDefs: [], 163 | style: 'h3' 164 | }, 165 | { 166 | _key: '8de2f515c7c7', 167 | _type: 'block', 168 | children: [ 169 | { 170 | _key: '8de2f515c7c70', 171 | _type: 'span', 172 | marks: [], 173 | text: 174 | 'First we filter by document type. 
Every document in Sanity is required to have a type, and the type is always in the ' 175 | }, 176 | { 177 | _key: '8de2f515c7c71', 178 | _type: 'span', 179 | marks: ['code'], 180 | text: '_type' 181 | }, 182 | { 183 | _key: '8de2f515c7c72', 184 | _type: 'span', 185 | marks: [], 186 | text: 187 | ' field. (We prefix any Sanity-specific fields with an underscore in an attempt to avoid clashing with any of ' 188 | }, 189 | { 190 | _key: '8de2f515c7c73', 191 | _type: 'span', 192 | marks: ['em'], 193 | text: 'your ' 194 | }, 195 | { 196 | _key: '8de2f515c7c74', 197 | _type: 'span', 198 | marks: [], 199 | text: 'field names.) So ' 200 | }, 201 | { 202 | _key: '8de2f515c7c75', 203 | _type: 'span', 204 | marks: ['code'], 205 | text: "_type == 'movie'" 206 | }, 207 | { 208 | _key: '8de2f515c7c76', 209 | _type: 'span', 210 | marks: [], 211 | text: ' limits this query to documents of the type ‘movie’. ' 212 | }, 213 | { 214 | _key: '8de2f515c7c77', 215 | _type: 'span', 216 | marks: ['code'], 217 | text: '&&' 218 | }, 219 | { 220 | _key: '8de2f515c7c78', 221 | _type: 'span', 222 | marks: [], 223 | text: ' is the operator “and” of course.' 224 | } 225 | ], 226 | markDefs: [], 227 | style: 'normal' 228 | }, 229 | { 230 | _key: 'c9e568845521', 231 | _type: 'block', 232 | children: [ 233 | { 234 | _key: 'c9e5688455210', 235 | _type: 'span', 236 | marks: [], 237 | text: 'The second term ' 238 | }, 239 | { 240 | _key: 'c9e5688455211', 241 | _type: 'span', 242 | marks: ['code'], 243 | text: 'releaseYear > 1979' 244 | }, 245 | { 246 | _key: 'c9e5688455212', 247 | _type: 'span', 248 | marks: [], 249 | text: ' assumes that the movies have a field called ' 250 | }, 251 | { 252 | _key: 'c9e5688455213', 253 | _type: 'span', 254 | marks: ['code'], 255 | text: 'releaseYear' 256 | }, 257 | { 258 | _key: 'c9e5688455214', 259 | _type: 'span', 260 | marks: [], 261 | text: 262 | ' contain numbers. It will match any document where this number is larger than or equal to 1979.' 263 | } 264 | ], 265 | markDefs: [], 266 | style: 'normal' 267 | }, 268 | { 269 | _key: '727ecb6f5e15', 270 | _type: 'block', 271 | children: [ 272 | { 273 | _key: '727ecb6f5e150', 274 | _type: 'span', 275 | marks: [], 276 | text: 'Projections' 277 | } 278 | ], 279 | markDefs: [], 280 | style: 'h3' 281 | }, 282 | { 283 | _key: 'af716e064e70', 284 | _type: 'block', 285 | children: [ 286 | { 287 | _key: 'af716e064e700', 288 | _type: 'span', 289 | marks: [], 290 | text: 291 | 'So if we run this query, the result will be an array containing all movies from the year 1979 onwards in the dataset. Nice! However in a typical application movies might be huge documents containing information on actors, staff, posters, tag-lines, show-times, ratings and what not. If our goal is to render a list of movies in an overview, we are wasting bandwidth. ' 292 | }, 293 | { 294 | _key: 'af716e064e701', 295 | _type: 'span', 296 | marks: ['em'], 297 | text: 'Projections' 298 | }, 299 | { 300 | _key: 'af716e064e702', 301 | _type: 'span', 302 | marks: [], 303 | text: ' to the rescue.' 304 | } 305 | ], 306 | markDefs: [], 307 | style: 'normal' 308 | }, 309 | { 310 | _key: '0cad788a7fbc', 311 | _type: 'block', 312 | children: [ 313 | { 314 | _key: '0cad788a7fbc0', 315 | _type: 'span', 316 | marks: [], 317 | text: 318 | 'The typical projection is wrapped in braces and describe the data we want to see for each movie. A nice and simple projection for this query would give us the id, title and release year for each movie. 
It could look like this: ' 319 | }, 320 | { 321 | _key: '0cad788a7fbc1', 322 | _type: 'span', 323 | marks: ['code'], 324 | text: '{_id, title, releaseYear}' 325 | }, 326 | { 327 | _key: '0cad788a7fbc2', 328 | _type: 'span', 329 | marks: [], 330 | text: '. Putting it all together:' 331 | } 332 | ], 333 | markDefs: [], 334 | style: 'normal' 335 | }, 336 | { 337 | _key: 'd3f644ea34db', 338 | _type: 'code', 339 | code: "*[_type == 'movie' && releaseYear >= 1979]{ _id, title, releaseYear } ", 340 | language: 'text' 341 | }, 342 | { 343 | _key: '935c175cf329', 344 | _type: 'block', 345 | children: [ 346 | { 347 | _key: '935c175cf3290', 348 | _type: 'span', 349 | marks: [], 350 | text: 'Basic sorting' 351 | } 352 | ], 353 | markDefs: [], 354 | style: 'h3' 355 | }, 356 | { 357 | _key: '1bed3aca2a6b', 358 | _type: 'block', 359 | children: [ 360 | { 361 | _key: '1bed3aca2a6b0', 362 | _type: 'span', 363 | marks: [], 364 | text: 365 | 'Now there is another problem. Our movies appear in some unspecified order. Let’s say we want to sort our movies by year. For this we use the ' 366 | }, 367 | { 368 | _key: '1bed3aca2a6b1', 369 | _type: 'span', 370 | marks: ['code'], 371 | text: 'order' 372 | }, 373 | { 374 | _key: '1bed3aca2a6b2', 375 | _type: 'span', 376 | marks: [], 377 | text: 378 | '-function. Order takes a number of fields and sort directions and orders your documents accordingly. We wanted to sort our movies by ' 379 | }, 380 | { 381 | _key: '1bed3aca2a6b3', 382 | _type: 'span', 383 | marks: ['code'], 384 | text: 'releaseYear' 385 | }, 386 | { 387 | _key: '1bed3aca2a6b4', 388 | _type: 'span', 389 | marks: [], 390 | text: '. This is easily accomplished with ' 391 | }, 392 | { 393 | _key: '1bed3aca2a6b5', 394 | _type: 'span', 395 | marks: ['code'], 396 | text: 'order(releaseYear)' 397 | }, 398 | { 399 | _key: '1bed3aca2a6b6', 400 | _type: 'span', 401 | marks: [], 402 | text: ', like this:' 403 | } 404 | ], 405 | markDefs: [], 406 | style: 'normal' 407 | }, 408 | { 409 | _key: '2b049ab11490', 410 | _type: 'code', 411 | code: 412 | "*[_type == 'movie' && releaseYear >= 1979] | order(releaseYear) {\n _id, title, releaseYear \n} \n", 413 | language: 'text' 414 | }, 415 | { 416 | _key: 'bd4ad9601345', 417 | _type: 'block', 418 | children: [ 419 | { 420 | _key: 'bd4ad96013450', 421 | _type: 'span', 422 | marks: ['em'], 423 | text: '(We need the ' 424 | }, 425 | { 426 | _key: 'bd4ad96013451', 427 | _type: 'span', 428 | marks: ['code', 'em'], 429 | text: '|' 430 | }, 431 | { 432 | _key: 'bd4ad96013452', 433 | _type: 'span', 434 | marks: ['em'], 435 | text: ' operator here in front of the ' 436 | }, 437 | { 438 | _key: 'bd4ad96013453', 439 | _type: 'span', 440 | marks: ['code', 'em'], 441 | text: 'order()' 442 | }, 443 | { 444 | _key: 'bd4ad96013454', 445 | _type: 'span', 446 | marks: ['em'], 447 | text: "-function, we'll discuss that more later.)" 448 | } 449 | ], 450 | markDefs: [], 451 | style: 'normal' 452 | }, 453 | { 454 | _key: '82053d9206d2', 455 | _type: 'block', 456 | children: [ 457 | { 458 | _key: '82053d9206d20', 459 | _type: 'span', 460 | marks: [], 461 | text: 462 | 'We think of GROQ statements as describing a data flow from left to right. 
First everything (' 463 | }, 464 | { 465 | _key: '82053d9206d21', 466 | _type: 'span', 467 | marks: ['code'], 468 | text: '*' 469 | }, 470 | { 471 | _key: '82053d9206d22', 472 | _type: 'span', 473 | marks: [], 474 | text: ') flows through the filter' 475 | }, 476 | { 477 | _key: '82053d9206d23', 478 | _type: 'span', 479 | marks: ['code'], 480 | text: " [_type == 'movie' && …]" 481 | }, 482 | { 483 | _key: '82053d9206d24', 484 | _type: 'span', 485 | marks: [], 486 | text: ', then all those movies flow through the ' 487 | }, 488 | { 489 | _key: '82053d9206d25', 490 | _type: 'span', 491 | marks: ['code'], 492 | text: 'order()' 493 | }, 494 | { 495 | _key: '82053d9206d26', 496 | _type: 'span', 497 | marks: [], 498 | text: '-function which are then all mapped through the projection ' 499 | }, 500 | { 501 | _key: '82053d9206d27', 502 | _type: 'span', 503 | marks: ['code'], 504 | text: '{_id, title, ...}' 505 | }, 506 | { 507 | _key: '82053d9206d28', 508 | _type: 'span', 509 | marks: [], 510 | text: ' which picks out the bits we want returned.' 511 | } 512 | ], 513 | markDefs: [], 514 | style: 'normal' 515 | }, 516 | { 517 | _key: '35425e1bb232', 518 | _type: 'block', 519 | children: [ 520 | { 521 | _key: '35425e1bb2320', 522 | _type: 'span', 523 | marks: [], 524 | text: 525 | 'The order-function accepts a list of fields, and optionally you can specify the sort direction for each field. If you wanted to sort the movies by year, and then within each year we want them alphabetical by title, we could use this ordering: ' 526 | }, 527 | { 528 | _key: '35425e1bb2321', 529 | _type: 'span', 530 | marks: ['code'], 531 | text: 'order(releaseYear, title)' 532 | }, 533 | { 534 | _key: '35425e1bb2322', 535 | _type: 'span', 536 | marks: [], 537 | text: 538 | ' And if we wanted the newest movies first, we could reverse the direction like this: ' 539 | }, 540 | { 541 | _key: '35425e1bb2323', 542 | _type: 'span', 543 | marks: ['code'], 544 | text: 'order(releaseYear desc, title)' 545 | }, 546 | { 547 | _key: '35425e1bb2324', 548 | _type: 'span', 549 | marks: [], 550 | text: '.' 551 | } 552 | ], 553 | markDefs: [], 554 | style: 'normal' 555 | }, 556 | { 557 | _key: '76e0c1d8aa92', 558 | _type: 'block', 559 | children: [ 560 | { 561 | _key: '76e0c1d8aa920', 562 | _type: 'span', 563 | marks: [], 564 | text: '(' 565 | }, 566 | { 567 | _key: '76e0c1d8aa921', 568 | _type: 'span', 569 | marks: ['code'], 570 | text: 'asc' 571 | }, 572 | { 573 | _key: '76e0c1d8aa922', 574 | _type: 'span', 575 | marks: [], 576 | text: ' means “ascending” and ' 577 | }, 578 | { 579 | _key: '76e0c1d8aa923', 580 | _type: 'span', 581 | marks: ['code'], 582 | text: 'desc' 583 | }, 584 | { 585 | _key: '76e0c1d8aa924', 586 | _type: 'span', 587 | marks: [], 588 | text: 589 | ' means descending in this context. If you leave out the sort-direction, Sanity will assume you want the ascending order.)' 590 | } 591 | ], 592 | markDefs: [], 593 | style: 'normal' 594 | }, 595 | { 596 | _key: '16b34b599b2f', 597 | _type: 'block', 598 | children: [ 599 | { 600 | _key: '16b34b599b2f0', 601 | _type: 'span', 602 | marks: [], 603 | text: 'Slicing the result set' 604 | } 605 | ], 606 | markDefs: [], 607 | style: 'h3' 608 | }, 609 | { 610 | _key: 'f40a6c179299', 611 | _type: 'block', 612 | children: [ 613 | { 614 | _key: 'f40a6c1792990', 615 | _type: 'span', 616 | marks: [], 617 | text: 618 | 'This brings us to our final problem for this query: There are many movies in the world. Maybe our dataset contains tens of thousands. 
We need a way to describe which slice of that list we want to show. This is done using a ' 619 | }, 620 | { 621 | _key: 'f40a6c1792991', 622 | _type: 'span', 623 | marks: ['em'], 624 | text: 'selector' 625 | }, 626 | { 627 | _key: 'f40a6c1792992', 628 | _type: 'span', 629 | marks: [], 630 | text: '. Let’s say we just wanted the first movie, we could add a ' 631 | }, 632 | { 633 | _key: 'f40a6c1792993', 634 | _type: 'span', 635 | marks: ['code'], 636 | text: '[0]' 637 | }, 638 | { 639 | _key: 'f40a6c1792994', 640 | _type: 'span', 641 | marks: [], 642 | text: 643 | ' at the end. This works exactly like an array accessor and would return only the first element. If we want a slice, we can add the range operator like this: ' 644 | }, 645 | { 646 | _key: 'f40a6c1792995', 647 | _type: 'span', 648 | marks: ['code'], 649 | text: '[0...100]' 650 | }, 651 | { 652 | _key: 'f40a6c1792996', 653 | _type: 'span', 654 | marks: [], 655 | text: 656 | ' this would return the first hundred movies from index 0 through 99. Obviously we can just as well ask for ' 657 | }, 658 | { 659 | _key: 'f40a6c1792997', 660 | _type: 'span', 661 | marks: ['code'], 662 | text: '[1023...1048] ' 663 | }, 664 | { 665 | _key: 'f40a6c1792998', 666 | _type: 'span', 667 | marks: [], 668 | text: 669 | 'or any other slice we desire. So there we are, our first basic query with filtering, ordering, projections and selector:' 670 | } 671 | ], 672 | markDefs: [], 673 | style: 'normal' 674 | }, 675 | { 676 | _key: '752f6a5eb13d', 677 | _type: 'code', 678 | code: 679 | "*[_type == 'movie' && releaseYear >= 1979] | order(releaseYear) {\n _id, title, releaseYear\n}[0...100]\n", 680 | language: 'text' 681 | }, 682 | { 683 | _key: 'db43dfd18d7d', 684 | _type: 'block', 685 | children: [ 686 | { 687 | _key: 'db43dfd18d7d0', 688 | _type: 'span', 689 | marks: [], 690 | text: 'References and joins' 691 | } 692 | ], 693 | markDefs: [], 694 | style: 'h3' 695 | }, 696 | { 697 | _key: '6526f7185fda', 698 | _type: 'block', 699 | children: [ 700 | { 701 | _key: '6526f7185fda0', 702 | _type: 'span', 703 | marks: [], 704 | text: 705 | 'A reference in Sanity is a link from one document to another. Standard references are “hard” meaning when a document references another document, the target document ' 706 | }, 707 | { 708 | _key: '6526f7185fda1', 709 | _type: 'span', 710 | marks: ['em'], 711 | text: 'must' 712 | }, 713 | { 714 | _key: '6526f7185fda2', 715 | _type: 'span', 716 | marks: [], 717 | text: 718 | ' exist, and is actually prevented from being deleted until the reference is removed. (There is also weak-references that do not "hold on to" the target. 
You make them by adding a ' 719 | }, 720 | { 721 | _key: '6526f7185fda3', 722 | _type: 'span', 723 | marks: ['code'], 724 | text: '_weak' 725 | }, 726 | { 727 | _key: '6526f7185fda4', 728 | _type: 'span', 729 | marks: [], 730 | text: '-key to the reference object like this: ' 731 | }, 732 | { 733 | _key: '6526f7185fda5', 734 | _type: 'span', 735 | marks: ['code'], 736 | text: '{_ref: "", _weak: true}' 737 | }, 738 | { 739 | _key: '6526f7185fda6', 740 | _type: 'span', 741 | marks: [], 742 | text: ')' 743 | } 744 | ], 745 | markDefs: [], 746 | style: 'normal' 747 | }, 748 | { 749 | _key: '750dfade8a31', 750 | _type: 'block', 751 | children: [ 752 | { 753 | _key: '750dfade8a310', 754 | _type: 'span', 755 | marks: [], 756 | text: 'Let’s say we have “person”-type documents that looks something like this:' 757 | } 758 | ], 759 | markDefs: [], 760 | style: 'normal' 761 | }, 762 | { 763 | _key: '3469639b0249', 764 | _type: 'code', 765 | code: '{\n _id: "ridley-scott",\n _type: "person",\n name: "Ridley Scott"\n}\n', 766 | language: 'javascript' 767 | }, 768 | { 769 | _key: '8dac9131e52e', 770 | _type: 'block', 771 | children: [ 772 | { 773 | _key: '8dac9131e52e0', 774 | _type: 'span', 775 | marks: [], 776 | text: 'Keeping it simple, maybe our movies had a field ' 777 | }, 778 | { 779 | _key: '8dac9131e52e1', 780 | _type: 'span', 781 | marks: ['code'], 782 | text: 'director' 783 | }, 784 | { 785 | _key: '8dac9131e52e2', 786 | _type: 'span', 787 | marks: [], 788 | text: ' that contained a reference to a person. It could look something like this:' 789 | } 790 | ], 791 | markDefs: [], 792 | style: 'normal' 793 | }, 794 | { 795 | _key: '26056a399708', 796 | _type: 'code', 797 | code: 798 | '{\n _id: "alien",\n _type: "movie",\n title: "Alien",\n releaseYear: 1979,\n director: { _ref: "ridley-scott" }\n}', 799 | language: 'javascript' 800 | }, 801 | { 802 | _key: 'b7ec9e433bec', 803 | _type: 'block', 804 | children: [ 805 | { 806 | _key: 'b7ec9e433bec0', 807 | _type: 'span', 808 | marks: [], 809 | text: 810 | 'Remember Sanity-specific fields are prefixed with an underscore, and an object containing a _ref key appearing anywhere in the document becomes a hard reference.' 811 | } 812 | ], 813 | markDefs: [], 814 | style: 'normal' 815 | }, 816 | { 817 | _key: '8ca3cefc3a31', 818 | _type: 'block', 819 | children: [ 820 | { 821 | _key: '8ca3cefc3a310', 822 | _type: 'span', 823 | marks: [], 824 | text: 'Expanding references' 825 | } 826 | ], 827 | markDefs: [], 828 | style: 'h3' 829 | }, 830 | { 831 | _key: '362d6b2a8c7b', 832 | _type: 'block', 833 | children: [ 834 | { 835 | _key: '362d6b2a8c7b0', 836 | _type: 'span', 837 | marks: [], 838 | text: 839 | 'Now we can do a number of useful things with this reference. The most basic thing is expanding the reference in place. Let’s revisit our movie-queries from the introduction.' 840 | } 841 | ], 842 | markDefs: [], 843 | style: 'normal' 844 | }, 845 | { 846 | _key: '49b4ad74a05f', 847 | _type: 'code', 848 | code: "*[_type == 'movie' && releaseYear >= 1979]{\n _id, title, releaseYear\n}\n", 849 | language: 'text' 850 | }, 851 | { 852 | _key: 'fbbcb1db0cb9', 853 | _type: 'block', 854 | children: [ 855 | { 856 | _key: 'fbbcb1db0cb90', 857 | _type: 'span', 858 | marks: [], 859 | text: 860 | "Let’s say we wanted to include the director in the returned result. 
If we didn't know any better, we'd perhaps try something like this:" 861 | } 862 | ], 863 | markDefs: [], 864 | style: 'normal' 865 | }, 866 | { 867 | _key: '708da0561b32', 868 | _type: 'code', 869 | code: 870 | "*[_type == 'movie' && releaseYear >= 1979]{\n _id, title, releaseYear,\n director\n}\n", 871 | highlightedLines: [3], 872 | language: 'text' 873 | }, 874 | { 875 | _key: '2a6eee5406f0', 876 | _type: 'block', 877 | children: [ 878 | { 879 | _key: '2a6eee5406f00', 880 | _type: 'span', 881 | marks: [], 882 | text: 883 | 'But if we just naïvely include the director in like this, we will just get whatever is in the director field on this document, which is the literal reference description:' 884 | } 885 | ], 886 | markDefs: [], 887 | style: 'normal' 888 | }, 889 | { 890 | _key: '675b1e1fd34c', 891 | _type: 'code', 892 | code: 893 | '[\n {\n _id: "alien",\n title: "Alien",\n releaseYear: "1979",\n director: {\n _ref: "ridley-scott"\n }\n },\n … (more movies)\n]\n\n', 894 | highlightedLines: [7, 8, 6], 895 | language: 'javascript' 896 | }, 897 | { 898 | _key: '0555bca6cec9', 899 | _type: 'block', 900 | children: [ 901 | { 902 | _key: '0555bca6cec90', 903 | _type: 'span', 904 | marks: [], 905 | text: 906 | 'This is obviously not what we wanted, we wanted to follow that reference! By adding the dereferencing operator ' 907 | }, 908 | { 909 | _key: '0555bca6cec91', 910 | _type: 'span', 911 | marks: ['code'], 912 | text: '->' 913 | }, 914 | { 915 | _key: '0555bca6cec92', 916 | _type: 'span', 917 | marks: [], 918 | text: 919 | ' we ask Sanity to follow the reference and replace the it with the actual content of the document referenced:' 920 | } 921 | ], 922 | markDefs: [], 923 | style: 'normal' 924 | }, 925 | { 926 | _key: '47dccf2d291b', 927 | _type: 'code', 928 | code: 929 | "*[_type == 'movie' && releaseYear >= 1979]{\n _id, title, releaseYear,\n director->\n}\n", 930 | highlightedLines: [3], 931 | language: 'text' 932 | }, 933 | { 934 | _key: 'e922324ab678', 935 | _type: 'block', 936 | children: [ 937 | { 938 | _key: 'e922324ab6780', 939 | _type: 'span', 940 | marks: [], 941 | text: 'Now, this is useful. We’d get something like this:' 942 | } 943 | ], 944 | markDefs: [], 945 | style: 'normal' 946 | }, 947 | { 948 | _key: 'd51dfeeecd1c', 949 | _type: 'code', 950 | code: 951 | '[\n {\n _id: "alien",\n title: "Alien",\n releaseYear: "1979",\n director: {\n _id: "ridley-scott",\n _type: "person",\n name: "Ridley Scott"\n }\n },\n … (more movies)\n]\n\n', 952 | highlightedLines: [7, 8, 9, 10, 6], 953 | language: 'javascript' 954 | }, 955 | { 956 | _key: 'c53b42035abb', 957 | _type: 'block', 958 | children: [ 959 | { 960 | _key: 'c53b42035abb0', 961 | _type: 'span', 962 | marks: [], 963 | text: 964 | 'Then maybe we didn’t want all that metadata with our director? 
We can add a separate projection for our director:' 965 | } 966 | ], 967 | markDefs: [], 968 | style: 'normal' 969 | }, 970 | { 971 | _key: 'ca589ec52606', 972 | _type: 'code', 973 | code: 974 | "*[_type == 'movie' && releaseYear >= 1979]{\n _id, title, releaseYear,\n director->{name}\n}\n", 975 | highlightedLines: [3], 976 | language: 'text' 977 | }, 978 | { 979 | _key: 'd0d02703116d', 980 | _type: 'block', 981 | children: [ 982 | { 983 | _key: 'd0d02703116d0', 984 | _type: 'span', 985 | marks: [], 986 | text: 'Our Alien-movie now looks neat like this:' 987 | } 988 | ], 989 | markDefs: [], 990 | style: 'normal' 991 | }, 992 | { 993 | _key: '075a19f72d17', 994 | _type: 'code', 995 | code: 996 | '{\n _id: "alien",\n title: "Alien",\n releaseYear: "1979",\n director: {\n name: "Ridley Scott"\n }\n}\n', 997 | highlightedLines: [6, 7, 5], 998 | language: 'javascript' 999 | }, 1000 | { 1001 | _key: 'd15dc9064cc0', 1002 | _type: 'block', 1003 | children: [ 1004 | { 1005 | _key: 'd15dc9064cc00', 1006 | _type: 'span', 1007 | marks: [], 1008 | text: 1009 | 'But we can do one better. We are not limited to the existing fields in the document in our projections, we can actually declare new fields. Let’s say we are building our compact movie list and we wanted just the title, year and director name. We can get minimal cruft by extracting just the name and putting it in a new field, like this:' 1010 | } 1011 | ], 1012 | markDefs: [], 1013 | style: 'normal' 1014 | }, 1015 | { 1016 | _key: '709f203619da', 1017 | _type: 'code', 1018 | code: 1019 | '*[_type == \'movie\' && releaseYear >= 1979]{\n _id, title, releaseYear,\n "directorName": director->name\n}\n', 1020 | highlightedLines: [3], 1021 | language: 'text' 1022 | }, 1023 | { 1024 | _key: '0e9533c1cb58', 1025 | _type: 'block', 1026 | children: [ 1027 | { 1028 | _key: '0e9533c1cb580', 1029 | _type: 'span', 1030 | marks: [], 1031 | text: 'Now our Alien movie record is super neat:' 1032 | } 1033 | ], 1034 | markDefs: [], 1035 | style: 'normal' 1036 | }, 1037 | { 1038 | _key: '1350ef054af7', 1039 | _type: 'code', 1040 | code: 1041 | '{\n _id: "alien",\n title: "Alien",\n releaseYear: "1979",\n directorName: "Ridley Scott"\n}\n', 1042 | highlightedLines: [5], 1043 | language: 'javascript' 1044 | }, 1045 | { 1046 | _key: '584ed2426ff5', 1047 | _type: 'block', 1048 | children: [ 1049 | { 1050 | _key: '584ed2426ff50', 1051 | _type: 'span', 1052 | marks: [], 1053 | text: 'Filtering by references' 1054 | } 1055 | ], 1056 | markDefs: [], 1057 | style: 'h3' 1058 | }, 1059 | { 1060 | _key: '8c36f553fb70', 1061 | _type: 'block', 1062 | children: [ 1063 | { 1064 | _key: '8c36f553fb700', 1065 | _type: 'span', 1066 | marks: [], 1067 | text: 'When dealing with references, we have a useful function called ' 1068 | }, 1069 | { 1070 | _key: '8c36f553fb701', 1071 | _type: 'span', 1072 | marks: ['code'], 1073 | text: 'references()' 1074 | }, 1075 | { 1076 | _key: '8c36f553fb702', 1077 | _type: 'span', 1078 | marks: [], 1079 | text: 1080 | ' which can be used in filters to select only documents that reference specific other documents. Let’s say we want to list every movie Ridley Scott has been involved in. 
It’s as simple as this:' 1081 | } 1082 | ], 1083 | markDefs: [], 1084 | style: 'normal' 1085 | }, 1086 | { 1087 | _key: '36302b6a223e', 1088 | _type: 'code', 1089 | code: "*[_type == 'movie' && references('ridley-scott')]", 1090 | language: 'text' 1091 | }, 1092 | { 1093 | _key: '52023b22ca05', 1094 | _type: 'block', 1095 | children: [ 1096 | { 1097 | _key: '52023b22ca050', 1098 | _type: 'span', 1099 | marks: [], 1100 | text: 'Our first join' 1101 | } 1102 | ], 1103 | markDefs: [], 1104 | style: 'h3' 1105 | }, 1106 | { 1107 | _key: '9c63d1e15cf9', 1108 | _type: 'block', 1109 | children: [ 1110 | { 1111 | _key: '9c63d1e15cf90', 1112 | _type: 'span', 1113 | marks: [], 1114 | text: 1115 | 'It is time to write our first proper join: Say we wanted to list people and include all the movies they were involved in? We’ll be querying the “person”-type documents, but in the projections for each person, we’ll ask for the movies they have been involved in. To do this we have to briefly cover the parent-operator ' 1116 | }, 1117 | { 1118 | _key: '9c63d1e15cf91', 1119 | _type: 'span', 1120 | marks: ['code'], 1121 | text: '^' 1122 | }, 1123 | { 1124 | _key: '9c63d1e15cf92', 1125 | _type: 'span', 1126 | marks: [], 1127 | text: '. Let’s look at the query first:' 1128 | } 1129 | ], 1130 | markDefs: [], 1131 | style: 'normal' 1132 | }, 1133 | { 1134 | _key: '7ed0c87251a8', 1135 | _type: 'code', 1136 | code: 1137 | '*[_type == "person"]{\n _id, name,\n "movies": *[_type == "movie" && references(^._id)].title\n}\n', 1138 | language: 'text' 1139 | }, 1140 | { 1141 | _key: '41bf6dd443f7', 1142 | _type: 'block', 1143 | children: [ 1144 | { 1145 | _key: '41bf6dd443f70', 1146 | _type: 'span', 1147 | marks: [], 1148 | text: 1149 | 'In a join, the parent operator is a way to reference the “parent” document. In this example the outer query for “person”-type documents fetches a bunch of people, and for each person it returns the ' 1150 | }, 1151 | { 1152 | _key: '41bf6dd443f71', 1153 | _type: 'span', 1154 | marks: ['code'], 1155 | text: '_id' 1156 | }, 1157 | { 1158 | _key: '41bf6dd443f72', 1159 | _type: 'span', 1160 | marks: [], 1161 | text: ' and ' 1162 | }, 1163 | { 1164 | _key: '41bf6dd443f73', 1165 | _type: 'span', 1166 | marks: ['code'], 1167 | text: 'name' 1168 | }, 1169 | { 1170 | _key: '41bf6dd443f74', 1171 | _type: 'span', 1172 | marks: [], 1173 | text: 1174 | '. Then we want to fetch the movies referencing that person. Now we declare the new field “movies” where we start a new query for “movie”-type documents, but for each person we want to limit our movie-query to movies referencing that person. To achieve this we need the _id of the person, but if we just wrote ' 1175 | }, 1176 | { 1177 | _key: '41bf6dd443f75', 1178 | _type: 'span', 1179 | marks: ['code'], 1180 | text: '_id' 1181 | }, 1182 | { 1183 | _key: '41bf6dd443f76', 1184 | _type: 'span', 1185 | marks: [], 1186 | text: 1187 | ' in the movies-query we’d reference the _id of the movie. To get to the fields of the person record we go “up” one level using the parent operator ' 1188 | }, 1189 | { 1190 | _key: '41bf6dd443f77', 1191 | _type: 'span', 1192 | marks: ['code'], 1193 | text: '^' 1194 | }, 1195 | { 1196 | _key: '41bf6dd443f78', 1197 | _type: 'span', 1198 | marks: [], 1199 | text: '. 
So ' 1200 | }, 1201 | { 1202 | _key: '41bf6dd443f79', 1203 | _type: 'span', 1204 | marks: ['code'], 1205 | text: '^' 1206 | }, 1207 | { 1208 | _key: '41bf6dd443f710', 1209 | _type: 'span', 1210 | marks: [], 1211 | text: ' means the specific “person”-document that our movie-query is about, and then ' 1212 | }, 1213 | { 1214 | _key: '41bf6dd443f711', 1215 | _type: 'span', 1216 | marks: ['code'], 1217 | text: '^._id' 1218 | }, 1219 | { 1220 | _key: '41bf6dd443f712', 1221 | _type: 'span', 1222 | marks: [], 1223 | text: ' is the _id of that person, just as ' 1224 | }, 1225 | { 1226 | _key: '41bf6dd443f713', 1227 | _type: 'span', 1228 | marks: ['code'], 1229 | text: '^.name' 1230 | }, 1231 | { 1232 | _key: '41bf6dd443f714', 1233 | _type: 'span', 1234 | marks: [], 1235 | text: ' would be her name. So when we say ' 1236 | }, 1237 | { 1238 | _key: '41bf6dd443f715', 1239 | _type: 'span', 1240 | marks: ['code'], 1241 | text: 'references(^._id)' 1242 | }, 1243 | { 1244 | _key: '41bf6dd443f716', 1245 | _type: 'span', 1246 | marks: [], 1247 | text: ' in the query above, we limit our movies to movies referencing the current person.' 1248 | } 1249 | ], 1250 | markDefs: [], 1251 | style: 'normal' 1252 | }, 1253 | { 1254 | _key: 'dd66cae5ed8f', 1255 | _type: 'block', 1256 | children: [ 1257 | { 1258 | _key: 'dd66cae5ed8f0', 1259 | _type: 'span', 1260 | marks: [], 1261 | text: 'Naked projections' 1262 | } 1263 | ], 1264 | markDefs: [], 1265 | style: 'h3' 1266 | }, 1267 | { 1268 | _key: '9cff648d5a1f', 1269 | _type: 'block', 1270 | children: [ 1271 | { 1272 | _key: '9cff648d5a1f0', 1273 | _type: 'span', 1274 | marks: [], 1275 | text: 1276 | 'There is one more new thing we haven’t talked about in this query. We could have written the movies-sub-query like this:' 1277 | } 1278 | ], 1279 | markDefs: [], 1280 | style: 'normal' 1281 | }, 1282 | { 1283 | _key: 'ae75769cc905', 1284 | _type: 'code', 1285 | code: '*[_type == "movie" && references(^._id)]{title}', 1286 | language: 'text' 1287 | }, 1288 | { 1289 | _key: '2e5bd1930851', 1290 | _type: 'block', 1291 | children: [ 1292 | { 1293 | _key: '2e5bd19308510', 1294 | _type: 'span', 1295 | marks: [], 1296 | text: 'Our list of movies would have looked something like this:' 1297 | } 1298 | ], 1299 | markDefs: [], 1300 | style: 'normal' 1301 | }, 1302 | { 1303 | _key: 'bebba90a2467', 1304 | _type: 'code', 1305 | code: '”movies”: [{title: “Alien”}, {title: “Blade Runner”}, …]', 1306 | language: 'javascript' 1307 | }, 1308 | { 1309 | _key: 'dfb7c6128dd7', 1310 | _type: 'block', 1311 | children: [ 1312 | { 1313 | _key: 'dfb7c6128dd70', 1314 | _type: 'span', 1315 | marks: [], 1316 | text: 1317 | 'Since we just wanted the titles, we can use a “naked projection”. 
By just naming the field we want, like this:' 1318 | } 1319 | ], 1320 | markDefs: [], 1321 | style: 'normal' 1322 | }, 1323 | { 1324 | _key: 'b598ba4e1bc8', 1325 | _type: 'code', 1326 | code: '*[_type == "movie" && references(^._id)].title ', 1327 | language: 'text' 1328 | }, 1329 | { 1330 | _key: '2d28c1d970c1', 1331 | _type: 'block', 1332 | children: [ 1333 | { 1334 | _key: '2d28c1d970c10', 1335 | _type: 'span', 1336 | marks: [], 1337 | text: 'We get a nice, simple array of values, like this:' 1338 | } 1339 | ], 1340 | markDefs: [], 1341 | style: 'normal' 1342 | }, 1343 | { 1344 | _key: '4004cd48e0db', 1345 | _type: 'code', 1346 | code: '”movies”: [“Alien”, “Blade Runner”, …]', 1347 | language: 'javascript' 1348 | }, 1349 | { 1350 | _key: '75145a6998f8', 1351 | _type: 'block', 1352 | children: [ 1353 | { 1354 | _key: '75145a6998f80', 1355 | _type: 'span', 1356 | marks: [], 1357 | text: 1358 | 'So, for completeness, the result of the full person w/movies query above could look something like this:' 1359 | } 1360 | ], 1361 | markDefs: [], 1362 | style: 'normal' 1363 | }, 1364 | { 1365 | _key: '923ccf2273cc', 1366 | _type: 'code', 1367 | code: 1368 | '[\n {\n _id: "river-phoenix",\n name: "River Phoenix",\n movies: ["My Own Private Idaho", "Stand By Me", …]\n },\n {\n _id: "ridley-scott",\n name: "Ridley Scott",\n movies: ["Alien", "Blade Runner", …]\n },\n …\n]\n', 1369 | language: 'javascript' 1370 | }, 1371 | { 1372 | _key: '99fd60e475f2', 1373 | _type: 'block', 1374 | children: [ 1375 | { 1376 | _key: '99fd60e475f20', 1377 | _type: 'span', 1378 | marks: [], 1379 | text: 'More ways to filter' 1380 | } 1381 | ], 1382 | markDefs: [], 1383 | style: 'h2' 1384 | }, 1385 | { 1386 | _key: '2c2edd160dfb', 1387 | _type: 'block', 1388 | children: [ 1389 | { 1390 | _key: '2c2edd160dfb0', 1391 | _type: 'span', 1392 | marks: [], 1393 | text: 1394 | 'Sanity supports a growing number of ways to filter your documents. We have shown simple attribute comparisions with ' 1395 | }, 1396 | { 1397 | _key: '2c2edd160dfb1', 1398 | _type: 'span', 1399 | marks: ['code'], 1400 | text: '_type == ‘movie’' 1401 | }, 1402 | { 1403 | _key: '2c2edd160dfb2', 1404 | _type: 'span', 1405 | marks: [], 1406 | text: ' and ' 1407 | }, 1408 | { 1409 | _key: '2c2edd160dfb3', 1410 | _type: 'span', 1411 | marks: ['code'], 1412 | text: 'releaseYear > 1979' 1413 | }, 1414 | { 1415 | _key: '2c2edd160dfb4', 1416 | _type: 'span', 1417 | marks: [], 1418 | text: '. We have shown filtering by references using the ' 1419 | }, 1420 | { 1421 | _key: '2c2edd160dfb5', 1422 | _type: 'span', 1423 | marks: ['code'], 1424 | text: 'references()' 1425 | }, 1426 | { 1427 | _key: '2c2edd160dfb6', 1428 | _type: 'span', 1429 | marks: [], 1430 | text: '-function. In addition we support:' 1431 | } 1432 | ], 1433 | markDefs: [], 1434 | style: 'normal' 1435 | }, 1436 | { 1437 | _key: '1d8187ad8f03', 1438 | _type: 'block', 1439 | children: [ 1440 | { 1441 | _key: '1d8187ad8f030', 1442 | _type: 'span', 1443 | marks: [], 1444 | text: 'Text search using the match operator, e.g. ' 1445 | }, 1446 | { 1447 | _key: '1d8187ad8f031', 1448 | _type: 'span', 1449 | marks: ['code'], 1450 | text: '*[title match "Alien*"]' 1451 | } 1452 | ], 1453 | listItem: 'bullet', 1454 | markDefs: [], 1455 | style: 'normal' 1456 | }, 1457 | { 1458 | _key: 'db0a34df9ee0', 1459 | _type: 'block', 1460 | children: [ 1461 | { 1462 | _key: 'db0a34df9ee00', 1463 | _type: 'span', 1464 | marks: [], 1465 | text: 'Filtering by the presence of a field, e.g. 
' 1466 | }, 1467 | { 1468 | _key: 'db0a34df9ee01', 1469 | _type: 'span', 1470 | marks: ['code'], 1471 | text: '*[defined(status)]' 1472 | }, 1473 | { 1474 | _key: 'db0a34df9ee02', 1475 | _type: 'span', 1476 | marks: [], 1477 | text: ' which only match document that have the status property set to any value.' 1478 | } 1479 | ], 1480 | listItem: 'bullet', 1481 | markDefs: [], 1482 | style: 'normal' 1483 | }, 1484 | { 1485 | _key: 'b9721d670370', 1486 | _type: 'block', 1487 | children: [ 1488 | { 1489 | _key: 'b9721d6703700', 1490 | _type: 'span', 1491 | marks: [], 1492 | text: 'The ' 1493 | }, 1494 | { 1495 | _key: 'b9721d6703701', 1496 | _type: 'span', 1497 | marks: ['code'], 1498 | text: 'in' 1499 | }, 1500 | { 1501 | _key: 'b9721d6703702', 1502 | _type: 'span', 1503 | marks: [], 1504 | text: '-operator which matches values in arrays, as in ' 1505 | }, 1506 | { 1507 | _key: 'b9721d6703703', 1508 | _type: 'span', 1509 | marks: ['code'], 1510 | text: '*["sci-fi" in genres]' 1511 | }, 1512 | { 1513 | _key: 'b9721d6703704', 1514 | _type: 'span', 1515 | marks: [], 1516 | text: ', that matches all documents where ' 1517 | }, 1518 | { 1519 | _key: 'b9721d6703705', 1520 | _type: 'span', 1521 | marks: ['code'], 1522 | text: 'genres' 1523 | }, 1524 | { 1525 | _key: 'b9721d6703706', 1526 | _type: 'span', 1527 | marks: [], 1528 | text: ' is an array and that array contains the value ' 1529 | }, 1530 | { 1531 | _key: 'b9721d6703707', 1532 | _type: 'span', 1533 | marks: ['code'], 1534 | text: '"sci-fi"' 1535 | }, 1536 | { 1537 | _key: 'b9721d6703708', 1538 | _type: 'span', 1539 | marks: [], 1540 | text: '.' 1541 | } 1542 | ], 1543 | listItem: 'bullet', 1544 | markDefs: [], 1545 | style: 'normal' 1546 | }, 1547 | { 1548 | _key: 'c86c4450b10e', 1549 | _type: 'block', 1550 | children: [ 1551 | { 1552 | _key: 'c86c4450b10e0', 1553 | _type: 'span', 1554 | marks: [], 1555 | text: 'You can of course combine these filters using the boolean operators ' 1556 | }, 1557 | { 1558 | _key: 'c86c4450b10e1', 1559 | _type: 'span', 1560 | marks: ['code'], 1561 | text: '&&' 1562 | }, 1563 | { 1564 | _key: 'c86c4450b10e2', 1565 | _type: 'span', 1566 | marks: [], 1567 | text: ' (and), ' 1568 | }, 1569 | { 1570 | _key: 'c86c4450b10e3', 1571 | _type: 'span', 1572 | marks: ['code'], 1573 | text: '|| ' 1574 | }, 1575 | { 1576 | _key: 'c86c4450b10e4', 1577 | _type: 'span', 1578 | marks: [], 1579 | text: '(or), ' 1580 | }, 1581 | { 1582 | _key: 'c86c4450b10e5', 1583 | _type: 'span', 1584 | marks: ['code'], 1585 | text: '!' 1586 | }, 1587 | { 1588 | _key: 'c86c4450b10e6', 1589 | _type: 'span', 1590 | marks: [], 1591 | text: ' (not), like this ' 1592 | }, 1593 | { 1594 | _key: 'c86c4450b10e7', 1595 | _type: 'span', 1596 | marks: ['code'], 1597 | text: '*[_type == "movie" && (!("sci-fi" in genres) || releaseYear > 1979)]' 1598 | }, 1599 | { 1600 | _key: 'c86c4450b10e8', 1601 | _type: 'span', 1602 | marks: [], 1603 | text: '.' 1604 | } 1605 | ], 1606 | listItem: 'bullet', 1607 | markDefs: [], 1608 | style: 'normal' 1609 | }, 1610 | { 1611 | _key: 'bead2408ec96', 1612 | _type: 'block', 1613 | children: [ 1614 | { 1615 | _key: 'bead2408ec960', 1616 | _type: 'span', 1617 | marks: [], 1618 | text: 1619 | "We are working on a full reference for the GROQ feature set. 
In the mean time you'll find a comprehensive set of examples in the " 1620 | }, 1621 | { 1622 | _key: 'bead2408ec961', 1623 | _type: 'span', 1624 | marks: ['eb9019d3'], 1625 | text: 'cheat sheet' 1626 | }, 1627 | { 1628 | _key: 'bead2408ec962', 1629 | _type: 'span', 1630 | marks: [], 1631 | text: '.' 1632 | } 1633 | ], 1634 | markDefs: [ 1635 | { 1636 | _key: 'eb9019d3', 1637 | _ref: '81b839a4-2fc1-4769-941a-ec4de9276492', 1638 | _type: 'internalLink' 1639 | } 1640 | ], 1641 | style: 'normal' 1642 | }, 1643 | { 1644 | _key: '0fbcc468c3af', 1645 | _type: 'block', 1646 | children: [ 1647 | { 1648 | _key: '0fbcc468c3af0', 1649 | _type: 'span', 1650 | marks: [], 1651 | text: 'Queries in projections' 1652 | } 1653 | ], 1654 | markDefs: [], 1655 | style: 'h2' 1656 | }, 1657 | { 1658 | _key: 'c6eb29a8b6ce', 1659 | _type: 'block', 1660 | children: [ 1661 | { 1662 | _key: 'c6eb29a8b6ce0', 1663 | _type: 'span', 1664 | marks: [], 1665 | text: 1666 | 'A useful thing in GROQ is that filtering and projections also can be used inside your projections. Let’s say you work for an architect and every project has a number of milestones. A document might look something like this:' 1667 | } 1668 | ], 1669 | markDefs: [], 1670 | style: 'normal' 1671 | }, 1672 | { 1673 | _key: '21f09ac51a7e', 1674 | _type: 'code', 1675 | code: 1676 | '{\n _id: "timmerhuis"\n _type: "project",\n title: "Timmerhuis",\n milestones: [\n {status: "competition", year: 2009},\n {status: "design-development", year: 2011},\n {status: "breaking-ground", year: 2013},\n {status: "completed", year: 2015}\n ]\n}\n', 1677 | language: 'javascript' 1678 | }, 1679 | { 1680 | _key: 'd4977454af38', 1681 | _type: 'block', 1682 | children: [ 1683 | { 1684 | _key: 'd4977454af380', 1685 | _type: 'span', 1686 | marks: [], 1687 | text: 1688 | 'And let’s say the view we are producing is about showing the current status of the project. We could achieve this by finding the latest milestone and extracting its status tag. This can be done in GROQ like this:' 1689 | } 1690 | ], 1691 | markDefs: [], 1692 | style: 'normal' 1693 | }, 1694 | { 1695 | _key: '4b995002ec5b', 1696 | _type: 'code', 1697 | code: 1698 | '*[_type == "project"]{\n _id, title,\n "status": milestones|order(year desc)[0].status\n}\n', 1699 | language: 'text' 1700 | }, 1701 | { 1702 | _key: 'd5c04ea46176', 1703 | _type: 'block', 1704 | children: [ 1705 | { 1706 | _key: 'd5c04ea461760', 1707 | _type: 'span', 1708 | marks: [], 1709 | text: 'Let’s pick apart the status query ' 1710 | }, 1711 | { 1712 | _key: 'd5c04ea461761', 1713 | _type: 'span', 1714 | marks: ['code'], 1715 | text: 'milestones|order(year desc)[0].status' 1716 | }, 1717 | { 1718 | _key: 'd5c04ea461762', 1719 | _type: 'span', 1720 | marks: [], 1721 | text: ' in some detail:' 1722 | } 1723 | ], 1724 | markDefs: [], 1725 | style: 'normal' 1726 | }, 1727 | { 1728 | _key: 'ed2e73a57e2f', 1729 | _type: 'block', 1730 | children: [ 1731 | { 1732 | _key: 'ed2e73a57e2f0', 1733 | _type: 'span', 1734 | marks: [], 1735 | text: 'First we take the field ' 1736 | }, 1737 | { 1738 | _key: 'ed2e73a57e2f1', 1739 | _type: 'span', 1740 | marks: ['code'], 1741 | text: 'milestones' 1742 | }, 1743 | { 1744 | _key: 'ed2e73a57e2f2', 1745 | _type: 'span', 1746 | marks: [], 1747 | text: 1748 | ' which contain the (potentially unordered) list of milestones for the project. 
Using the pipe-operator ' 1749 | }, 1750 | { 1751 | _key: 'ed2e73a57e2f3', 1752 | _type: 'span', 1753 | marks: ['code'], 1754 | text: '|' 1755 | }, 1756 | { 1757 | _key: 'ed2e73a57e2f4', 1758 | _type: 'span', 1759 | marks: [], 1760 | text: 1761 | ' we send the contents of this array to the order function with is instructed to sort the array by year in descending order ' 1762 | }, 1763 | { 1764 | _key: 'ed2e73a57e2f5', 1765 | _type: 'span', 1766 | marks: ['code'], 1767 | text: 'order(year desc)' 1768 | }, 1769 | { 1770 | _key: 'ed2e73a57e2f6', 1771 | _type: 'span', 1772 | marks: [], 1773 | text: '. Then We take only the first element ' 1774 | }, 1775 | { 1776 | _key: 'ed2e73a57e2f7', 1777 | _type: 'span', 1778 | marks: ['code'], 1779 | text: '[0]' 1780 | }, 1781 | { 1782 | _key: 'ed2e73a57e2f8', 1783 | _type: 'span', 1784 | marks: [], 1785 | text: ' (which is the latest milestone) and return the value of it’s ' 1786 | }, 1787 | { 1788 | _key: 'ed2e73a57e2f9', 1789 | _type: 'span', 1790 | marks: ['code'], 1791 | text: 'status' 1792 | }, 1793 | { 1794 | _key: 'ed2e73a57e2f10', 1795 | _type: 'span', 1796 | marks: [], 1797 | text: ' field. So now our project list would look something like this:' 1798 | } 1799 | ], 1800 | markDefs: [], 1801 | style: 'normal' 1802 | }, 1803 | { 1804 | _key: '72378ea2e966', 1805 | _type: 'code', 1806 | code: 1807 | '[\n {\n _id: "timmerhuis",\n title: "Timmerhuis",\n status: "completed"\n },\n …\n]\n', 1808 | language: 'javascript' 1809 | }, 1810 | { 1811 | _key: '34af8a5dec6a', 1812 | _type: 'block', 1813 | children: [ 1814 | { 1815 | _key: '34af8a5dec6a0', 1816 | _type: 'span', 1817 | marks: [], 1818 | text: 1819 | 'Let’s try another clever trick querying the contents of this object. Instead of a status field, we just want a boolean flag telling whether the project is completed. We could achieve this like this:' 1820 | } 1821 | ], 1822 | markDefs: [], 1823 | style: 'normal' 1824 | }, 1825 | { 1826 | _key: '23bbc19f7ad3', 1827 | _type: 'code', 1828 | code: 1829 | '*[_type == "project"]{\n _id, title,\n "completed": count(milestones[status == \'completed\']) > 0\n}\n', 1830 | language: 'text' 1831 | }, 1832 | { 1833 | _key: 'a0e5578b1732', 1834 | _type: 'block', 1835 | children: [ 1836 | { 1837 | _key: 'a0e5578b17320', 1838 | _type: 'span', 1839 | marks: [], 1840 | text: 1841 | 'Here we take the milestones, but select only the ones having the status “completed”. Then we ' 1842 | }, 1843 | { 1844 | _key: 'a0e5578b17321', 1845 | _type: 'span', 1846 | marks: ['code'], 1847 | text: 'count()' 1848 | }, 1849 | { 1850 | _key: 'a0e5578b17322', 1851 | _type: 'span', 1852 | marks: [], 1853 | text: ' the number of milestones matching this constraint. If that count is ' 1854 | }, 1855 | { 1856 | _key: 'a0e5578b17323', 1857 | _type: 'span', 1858 | marks: ['code'], 1859 | text: '> 0' 1860 | }, 1861 | { 1862 | _key: 'a0e5578b17324', 1863 | _type: 'span', 1864 | marks: [], 1865 | text: ' the result is ' 1866 | }, 1867 | { 1868 | _key: 'a0e5578b17325', 1869 | _type: 'span', 1870 | marks: ['code'], 1871 | text: 'true' 1872 | }, 1873 | { 1874 | _key: 'a0e5578b17326', 1875 | _type: 'span', 1876 | marks: [], 1877 | text: '. 
So now our result would look something like this:' 1878 | } 1879 | ], 1880 | markDefs: [], 1881 | style: 'normal' 1882 | }, 1883 | { 1884 | _key: '75883e8ac28b', 1885 | _type: 'code', 1886 | code: 1887 | '[\n {\n _id: "timmerhuis",\n title: "Timmerhuis",\n completed: true\n },\n …\n]\n', 1888 | language: 'javascript' 1889 | }, 1890 | { 1891 | _key: '6591a17c0bf9', 1892 | _type: 'block', 1893 | children: [ 1894 | { 1895 | _key: '6591a17c0bf90', 1896 | _type: 'span', 1897 | marks: [], 1898 | text: 'Some comments on the pipe-operator' 1899 | } 1900 | ], 1901 | markDefs: [], 1902 | style: 'h2' 1903 | }, 1904 | { 1905 | _key: 'c332f5858e91', 1906 | _type: 'block', 1907 | children: [ 1908 | { 1909 | _key: 'c332f5858e910', 1910 | _type: 'span', 1911 | marks: [], 1912 | text: 'In the project-status example above we used the pipe operator ' 1913 | }, 1914 | { 1915 | _key: 'c332f5858e911', 1916 | _type: 'span', 1917 | marks: ['code'], 1918 | text: '|' 1919 | }, 1920 | { 1921 | _key: 'c332f5858e912', 1922 | _type: 'span', 1923 | marks: [], 1924 | text: " for a second time. Let's explore that in some detail:" 1925 | } 1926 | ], 1927 | markDefs: [], 1928 | style: 'normal' 1929 | }, 1930 | { 1931 | _key: 'bf6e387045d9', 1932 | _type: 'code', 1933 | code: 1934 | '*[_type == "project"]{\n _id, title,\n "status": milestones|order(year desc)[0].status\n}\n', 1935 | language: 'text' 1936 | }, 1937 | { 1938 | _key: '22cdfe048562', 1939 | _type: 'block', 1940 | children: [ 1941 | { 1942 | _key: '22cdfe0485620', 1943 | _type: 'span', 1944 | marks: [], 1945 | text: 1946 | 'The pipe operator takes the output from its left hand side and sends it to the operation to its right. "But isn’t this what all GROQ statements does?", I hear you ask. And you’d be right. Actually, if you prefer, you can use the pipe operator a lot more. These queries are the same:' 1947 | } 1948 | ], 1949 | markDefs: [], 1950 | style: 'normal' 1951 | }, 1952 | { 1953 | _key: '7c5561baef34', 1954 | _type: 'code', 1955 | code: 1956 | '*[_type == "movie"] | order(year) {title, body}\n\t\n* | [_type == "movie"] | order(year) | {title, body}\n', 1957 | language: 'text' 1958 | }, 1959 | { 1960 | _key: 'fcbdb8d884b9', 1961 | _type: 'block', 1962 | children: [ 1963 | { 1964 | _key: 'fcbdb8d884b90', 1965 | _type: 'span', 1966 | marks: [], 1967 | text: 1968 | 'To make basic GROQ statements appear simpler we automatically insert the pipe operator when it is obvious. Basically this happens when the ' 1969 | }, 1970 | { 1971 | _key: 'fcbdb8d884b91', 1972 | _type: 'span', 1973 | marks: ['code'], 1974 | text: '{}' 1975 | }, 1976 | { 1977 | _key: 'fcbdb8d884b92', 1978 | _type: 'span', 1979 | marks: [], 1980 | text: ' or ' 1981 | }, 1982 | { 1983 | _key: 'fcbdb8d884b93', 1984 | _type: 'span', 1985 | marks: ['code'], 1986 | text: '[]' 1987 | }, 1988 | { 1989 | _key: 'fcbdb8d884b94', 1990 | _type: 'span', 1991 | marks: [], 1992 | text: ' characters are used. In one sense they are always parsed like this ' 1993 | }, 1994 | { 1995 | _key: 'fcbdb8d884b95', 1996 | _type: 'span', 1997 | marks: ['code'], 1998 | text: '|{}' 1999 | }, 2000 | { 2001 | _key: 'fcbdb8d884b96', 2002 | _type: 'span', 2003 | marks: [], 2004 | text: 'and ' 2005 | }, 2006 | { 2007 | _key: 'fcbdb8d884b97', 2008 | _type: 'span', 2009 | marks: ['code'], 2010 | text: '|[]' 2011 | }, 2012 | { 2013 | _key: 'fcbdb8d884b98', 2014 | _type: 'span', 2015 | marks: [], 2016 | text: '.' 
2017 | } 2018 | ], 2019 | markDefs: [], 2020 | style: 'normal' 2021 | }, 2022 | { 2023 | _key: 'a5b33a77d042', 2024 | _type: 'block', 2025 | children: [ 2026 | { 2027 | _key: 'a5b33a77d0420', 2028 | _type: 'span', 2029 | marks: [], 2030 | text: 2031 | 'In some situations, like in the project-status example, we needed an explicit pipe-operator because there were no way for the GROQ parser to infer it. ' 2032 | }, 2033 | { 2034 | _key: 'a5b33a77d0421', 2035 | _type: 'span', 2036 | marks: ['code'], 2037 | text: 'milestones order(year desc)' 2038 | }, 2039 | { 2040 | _key: 'a5b33a77d0422', 2041 | _type: 'span', 2042 | marks: [], 2043 | text: 2044 | ' would be a syntax error, so in this instance we have to explicitly state the pipe operator, like this: ' 2045 | }, 2046 | { 2047 | _key: 'a5b33a77d0423', 2048 | _type: 'span', 2049 | marks: ['code'], 2050 | text: 'milestones|order(year desc)' 2051 | }, 2052 | { 2053 | _key: 'a5b33a77d0424', 2054 | _type: 'span', 2055 | marks: [], 2056 | text: '. As a simple rule of thumb you always need the ' 2057 | }, 2058 | { 2059 | _key: 'a5b33a77d0425', 2060 | _type: 'span', 2061 | marks: ['code'], 2062 | text: '|' 2063 | }, 2064 | { 2065 | _key: 'a5b33a77d0426', 2066 | _type: 'span', 2067 | marks: [], 2068 | text: ' in front of ' 2069 | }, 2070 | { 2071 | _key: 'a5b33a77d0427', 2072 | _type: 'span', 2073 | marks: ['code'], 2074 | text: 'order()' 2075 | }, 2076 | { 2077 | _key: 'a5b33a77d0428', 2078 | _type: 'span', 2079 | marks: [], 2080 | text: 2081 | ' and in the future any other function that handle document streams like order() does.' 2082 | } 2083 | ], 2084 | markDefs: [], 2085 | style: 'normal' 2086 | }, 2087 | { 2088 | _key: '02a2187cb2f6', 2089 | _type: 'block', 2090 | children: [ 2091 | { 2092 | _key: '02a2187cb2f60', 2093 | _type: 'span', 2094 | marks: [], 2095 | text: 2096 | 'When programatically building queries in the front end, the pipe-operator can be very handy. You can chain several statements together using the pipe-operator knowing that you never create an ambiguous statement. 
Something like this:' 2097 | } 2098 | ], 2099 | markDefs: [], 2100 | style: 'normal' 2101 | }, 2102 | { 2103 | _key: '4415dab102ee', 2104 | _type: 'code', 2105 | code: 2106 | "const filters = ['[_type == \"movie\"]', '[\"sci-fi\" in genres]'\nconst sorts = ['order(title)', 'order(releaseYear desc)']\nconst projection = \"{title, releaseYear}\"\nconst query = ['*'].concat(filters).concat(sorts).concat([projection]).join('|')\n", 2107 | language: 'javascript' 2108 | }, 2109 | { 2110 | _key: '415d15deaa48', 2111 | _type: 'block', 2112 | children: [ 2113 | { 2114 | _key: '415d15deaa480', 2115 | _type: 'span', 2116 | marks: [], 2117 | text: 'Which would build the query:' 2118 | } 2119 | ], 2120 | markDefs: [], 2121 | style: 'normal' 2122 | }, 2123 | { 2124 | _key: 'e6868e61106a', 2125 | _type: 'code', 2126 | code: 2127 | '* |[_type == "movie"]|["sci-fi" in genres]\n |order(title)|order(releaseYear desc)\n |{title, releaseYear}\n', 2128 | language: 'text' 2129 | }, 2130 | { 2131 | _key: '7105bfd8b5af', 2132 | _type: 'block', 2133 | children: [ 2134 | { 2135 | _key: '7105bfd8b5af0', 2136 | _type: 'span', 2137 | marks: [], 2138 | text: 'Which is equivalent to:' 2139 | } 2140 | ], 2141 | markDefs: [], 2142 | style: 'normal' 2143 | }, 2144 | { 2145 | _key: 'f81e8bd8889c', 2146 | _type: 'code', 2147 | code: 2148 | '*[_type == "movie" && "sci-fi" in genres]\n | order(releaseYear desc, title)\n {title, releaseYear}\n', 2149 | language: 'text' 2150 | }, 2151 | { 2152 | _key: '649d43d7d179', 2153 | _type: 'block', 2154 | children: [ 2155 | { 2156 | _key: '649d43d7d1790', 2157 | _type: 'span', 2158 | marks: [], 2159 | text: 'Some fine points on arrays and projections' 2160 | } 2161 | ], 2162 | markDefs: [], 2163 | style: 'h2' 2164 | }, 2165 | { 2166 | _key: '8da82e7852d3', 2167 | _type: 'block', 2168 | children: [ 2169 | { 2170 | _key: '8da82e7852d30', 2171 | _type: 'span', 2172 | marks: [], 2173 | text: 'Let’s consider this document with some deep structure:' 2174 | } 2175 | ], 2176 | markDefs: [], 2177 | style: 'normal' 2178 | }, 2179 | { 2180 | _key: '289cd5eef3e5', 2181 | _type: 'code', 2182 | code: 2183 | '{\n _id: "alien",\n _type: "movie",\n title: "Alien",\n poster: {\n asset: {_ref: "image-1234"}\n },\n images: [\n {\n caption: "Sigourney Weaver and the cat Jones on set",\n asset: {_ref: "image-1235"}\n },\n {\n caption: "Bolaji Badejo suiting up for the role of the Alien",\n asset: {_ref: "image-1236"}\n },\n ]\n}\n', 2184 | language: 'javascript' 2185 | }, 2186 | { 2187 | _key: '247307f69e20', 2188 | _type: 'block', 2189 | children: [ 2190 | { 2191 | _key: '247307f69e200', 2192 | _type: 'span', 2193 | marks: [], 2194 | text: 2195 | 'So we have a movie with a poster image, and an array of other images. Each image has some metadata represented here by a caption, then a reference to an asset record containing all the metadata on the specific image including its url. 
A simplified asset record could look something like this:' 2196 | } 2197 | ], 2198 | markDefs: [], 2199 | style: 'normal' 2200 | }, 2201 | { 2202 | _key: 'f2152f7285db', 2203 | _type: 'code', 2204 | code: 2205 | '{\n _id: "image-1234",\n _type: "sanity.imageAsset",\n url: "http:///cdn.sanity.io/images/…"\n}\n', 2206 | language: 'javascript' 2207 | }, 2208 | { 2209 | _key: '31fee9c914a9', 2210 | _type: 'block', 2211 | children: [ 2212 | { 2213 | _key: '31fee9c914a90', 2214 | _type: 'span', 2215 | marks: [], 2216 | text: 2217 | 'Now we can easily retrieve the poster image url and attach it to our result for each movies like this:' 2218 | } 2219 | ], 2220 | markDefs: [], 2221 | style: 'normal' 2222 | }, 2223 | { 2224 | _key: 'eee12404b55a', 2225 | _type: 'code', 2226 | code: '*[_type == "movie"]{\n title,\n "posterImage": poster.asset->url\n}\n', 2227 | language: 'text' 2228 | }, 2229 | { 2230 | _key: '12039115bd39', 2231 | _type: 'block', 2232 | children: [ 2233 | { 2234 | _key: '12039115bd390', 2235 | _type: 'span', 2236 | marks: [], 2237 | text: 'But what if we wanted to do the same thing for the other images? Since the ' 2238 | }, 2239 | { 2240 | _key: '12039115bd391', 2241 | _type: 'span', 2242 | marks: ['code'], 2243 | text: 'images' 2244 | }, 2245 | { 2246 | _key: '12039115bd392', 2247 | _type: 'span', 2248 | marks: [], 2249 | text: ' field is an array, we can’t just ' 2250 | }, 2251 | { 2252 | _key: '12039115bd393', 2253 | _type: 'span', 2254 | marks: ['code'], 2255 | text: 'images.asset->url' 2256 | }, 2257 | { 2258 | _key: '12039115bd394', 2259 | _type: 'span', 2260 | marks: [], 2261 | text: '. We somehow have to apply the ' 2262 | }, 2263 | { 2264 | _key: '12039115bd395', 2265 | _type: 'span', 2266 | marks: ['code'], 2267 | text: 'asset->url' 2268 | }, 2269 | { 2270 | _key: '12039115bd396', 2271 | _type: 'span', 2272 | marks: [], 2273 | text: 2274 | '-part to each member of the array. This is accomplished by adding a blank filter, like this: ' 2275 | }, 2276 | { 2277 | _key: '12039115bd397', 2278 | _type: 'span', 2279 | marks: ['code'], 2280 | text: 'images[].asset->url' 2281 | }, 2282 | { 2283 | _key: '12039115bd398', 2284 | _type: 'span', 2285 | marks: [], 2286 | text: 2287 | ' which will return the image urls as a simple array. 
So the full query would look like this:' 2288 | } 2289 | ], 2290 | markDefs: [], 2291 | style: 'normal' 2292 | }, 2293 | { 2294 | _key: 'c1fa72dc4e8d', 2295 | _type: 'code', 2296 | code: '*[_type == "movie"]{\n title,\n "imageUrls": images[].asset->url\n}\n', 2297 | language: 'text' 2298 | }, 2299 | { 2300 | _key: '2907c6a88aca', 2301 | _type: 'block', 2302 | children: [ 2303 | { 2304 | _key: '2907c6a88aca0', 2305 | _type: 'span', 2306 | marks: [], 2307 | text: 'This would yield something like this:' 2308 | } 2309 | ], 2310 | markDefs: [], 2311 | style: 'normal' 2312 | }, 2313 | { 2314 | _key: '77bc559b1746', 2315 | _type: 'code', 2316 | code: 2317 | '[\n {\n title: "Alien",\n imageUrls: ["http://cdn.sanity.io/…", "http://cdn.sanity.io/…"]\n },\n …\n]\n', 2318 | language: 'javascript' 2319 | }, 2320 | { 2321 | _key: 'b6681767c081', 2322 | _type: 'block', 2323 | children: [ 2324 | { 2325 | _key: 'b6681767c0810', 2326 | _type: 'span', 2327 | marks: [], 2328 | text: 2329 | 'If you wanted a richer data-set with your images you could use a normal projection like this (taking care to add the blank filter to apply the projection to every array member):' 2330 | } 2331 | ], 2332 | markDefs: [], 2333 | style: 'normal' 2334 | }, 2335 | { 2336 | _key: '1cca9c80c55c', 2337 | _type: 'code', 2338 | code: 2339 | '*[_type == "movie"]{\n title,\n "images": images[]{\n caption\n "url": asset->url,\n }\n}\n', 2340 | language: 'text' 2341 | }, 2342 | { 2343 | _key: '4c8563794f1b', 2344 | _type: 'block', 2345 | children: [ 2346 | { 2347 | _key: '4c8563794f1b0', 2348 | _type: 'span', 2349 | marks: [], 2350 | text: 'Now your result looks something like this:' 2351 | } 2352 | ], 2353 | markDefs: [], 2354 | style: 'normal' 2355 | }, 2356 | { 2357 | _key: '829ff03f5ccb', 2358 | _type: 'code', 2359 | code: 2360 | '[\n {\n title: "Alien",\n images: [\n {\n caption: "Sigourney Weaver and the cat Jones on set",\n url: "http://cdn.sanity.io/…"\n },\n {\n caption: "Bolaji Badejo suiting up for the role of the Alien",\n url: "http://cdn.sanity.io/…"\n }\n ]\n },\n …\n]\n', 2361 | language: 'javascript' 2362 | }, 2363 | { 2364 | _key: '8d57d7b0dccb', 2365 | _type: 'block', 2366 | children: [ 2367 | { 2368 | _key: '8d57d7b0dccb0', 2369 | _type: 'span', 2370 | marks: [], 2371 | text: 'The ellipsis operator' 2372 | } 2373 | ], 2374 | markDefs: [], 2375 | style: 'h2' 2376 | }, 2377 | { 2378 | _key: '0781cf59db38', 2379 | _type: 'block', 2380 | children: [ 2381 | { 2382 | _key: '0781cf59db380', 2383 | _type: 'span', 2384 | marks: [], 2385 | text: 2386 | "Sometimes you might want to compute some properties of a document, but still want the entire set of attributes returned. This can be a problem since the moment you specify a projection, you'll have to list all the fields you want included. Let's say we wanted to count the actors in a movie doing something like this:" 2387 | } 2388 | ], 2389 | markDefs: [], 2390 | style: 'normal' 2391 | }, 2392 | { 2393 | _key: '1a88ebe18ee0', 2394 | _type: 'code', 2395 | code: '*[_type == "movie"]{\n "actorCount": count(actors)\n}', 2396 | language: 'text' 2397 | }, 2398 | { 2399 | _key: '68118622e8ee', 2400 | _type: 'block', 2401 | children: [ 2402 | { 2403 | _key: '68118622e8ee0', 2404 | _type: 'span', 2405 | marks: [], 2406 | text: 2407 | 'There is an obvious problem with this. 
We just wanted to add a custom field, but since we needed a projection to do it, now all we got is something like this:' 2408 | } 2409 | ], 2410 | markDefs: [], 2411 | style: 'normal' 2412 | }, 2413 | { 2414 | _key: '3cc404dc7455', 2415 | _type: 'code', 2416 | code: '[\n {actorCount: 3},\n {actorCount: 27},\n {actorCount: 15}\n]', 2417 | language: 'javascript' 2418 | }, 2419 | { 2420 | _key: '989d44304892', 2421 | _type: 'block', 2422 | children: [ 2423 | { 2424 | _key: '989d443048920', 2425 | _type: 'span', 2426 | marks: [], 2427 | text: 'What we wanted was our custom field in ' 2428 | }, 2429 | { 2430 | _key: '989d443048921', 2431 | _type: 'span', 2432 | marks: ['em'], 2433 | text: 'addition' 2434 | }, 2435 | { 2436 | _key: '989d443048922', 2437 | _type: 'span', 2438 | marks: [], 2439 | text: 2440 | ' to the normal fields. This can be achieved with the ellipsis operator. By appending it like this, we effectively say we want the fields we just specified, but also everything else:' 2441 | } 2442 | ], 2443 | markDefs: [], 2444 | style: 'normal' 2445 | }, 2446 | { 2447 | _key: 'f798100dba58', 2448 | _type: 'code', 2449 | code: '*[_type == "movie"]{\n "actorCount": count(actors),\n ...\n}', 2450 | highlightedLines: [3], 2451 | language: 'text' 2452 | }, 2453 | { 2454 | _key: 'bb9afdbe00b7', 2455 | _type: 'block', 2456 | children: [ 2457 | { 2458 | _key: 'bb9afdbe00b70', 2459 | _type: 'span', 2460 | marks: [], 2461 | text: 'Which brings us a result that could look something like this:' 2462 | } 2463 | ], 2464 | markDefs: [], 2465 | style: 'normal' 2466 | }, 2467 | { 2468 | _key: '576631da8c7c', 2469 | _type: 'code', 2470 | code: 2471 | '{\n {\n title: "Alien",\n releaseYear: 1979,\n actorCount: 23,\n // And loads more fields, probably\n },\n // and many more movies\n}', 2472 | language: 'javascript' 2473 | }, 2474 | { 2475 | _key: 'd8cbfd8dd14a', 2476 | _type: 'block', 2477 | children: [ 2478 | { 2479 | _key: 'd8cbfd8dd14a0', 2480 | _type: 'span', 2481 | marks: [], 2482 | text: "Queries that don't start with an " 2483 | }, 2484 | { 2485 | _key: 'd8cbfd8dd14a1', 2486 | _type: 'span', 2487 | marks: ['code'], 2488 | text: '*' 2489 | } 2490 | ], 2491 | markDefs: [], 2492 | style: 'h2' 2493 | }, 2494 | { 2495 | _key: '882fba59a1cb', 2496 | _type: 'block', 2497 | children: [ 2498 | { 2499 | _key: '882fba59a1cb0', 2500 | _type: 'span', 2501 | marks: [], 2502 | text: 2503 | "We said initially that most GROQ queries start with the asterisk, but they don't have to. Any valid GROQ expression can be the entire query. This is a valid query:" 2504 | } 2505 | ], 2506 | markDefs: [], 2507 | style: 'normal' 2508 | }, 2509 | { 2510 | _key: '5b3af17438a8', 2511 | _type: 'code', 2512 | code: 'count(*)\n', 2513 | language: 'text' 2514 | }, 2515 | { 2516 | _key: '5f4f0d0596fd', 2517 | _type: 'block', 2518 | children: [ 2519 | { 2520 | _key: '5f4f0d0596fd0', 2521 | _type: 'span', 2522 | marks: [], 2523 | text: 'It will return the number of documents in the dataset. 
This is also valid:' 2524 | } 2525 | ], 2526 | markDefs: [], 2527 | style: 'normal' 2528 | }, 2529 | { 2530 | _key: '74659c47b405', 2531 | _type: 'code', 2532 | code: 'count(*[name match "sigourney"]) > 0\n', 2533 | language: 'text' 2534 | }, 2535 | { 2536 | _key: '8117d6349659', 2537 | _type: 'block', 2538 | children: [ 2539 | { 2540 | _key: '8117d63496590', 2541 | _type: 'span', 2542 | marks: [], 2543 | text: 'It will return ' 2544 | }, 2545 | { 2546 | _key: '8117d63496591', 2547 | _type: 'span', 2548 | marks: ['code'], 2549 | text: 'true' 2550 | }, 2551 | { 2552 | _key: '8117d63496592', 2553 | _type: 'span', 2554 | marks: [], 2555 | text: ' if any document in the entire dataset has a ' 2556 | }, 2557 | { 2558 | _key: '8117d63496593', 2559 | _type: 'span', 2560 | marks: ['code'], 2561 | text: 'name' 2562 | }, 2563 | { 2564 | _key: '8117d63496594', 2565 | _type: 'span', 2566 | marks: [], 2567 | text: '-field containing the word "sigourney".' 2568 | } 2569 | ], 2570 | markDefs: [], 2571 | style: 'normal' 2572 | }, 2573 | { 2574 | _key: '44a08ff13784', 2575 | _type: 'block', 2576 | children: [ 2577 | { 2578 | _key: '44a08ff137840', 2579 | _type: 'span', 2580 | marks: [], 2581 | text: 2582 | 'More usefully, you can actually have a projection be your outer statement. Like this:' 2583 | } 2584 | ], 2585 | markDefs: [], 2586 | style: 'normal' 2587 | }, 2588 | { 2589 | _key: '4659c5574f32', 2590 | _type: 'code', 2591 | code: 2592 | '{\n "mainStory": *[_id == "story-1234"],\n "campaign": *[_id == "campaign-1234"],\n "topStories: *[_type == "story"] order(publishAt desc) [0..10]\n}', 2593 | language: 'text' 2594 | }, 2595 | { 2596 | _key: 'b06ed1afd3c9', 2597 | _type: 'block', 2598 | children: [ 2599 | { 2600 | _key: 'b06ed1afd3c90', 2601 | _type: 'span', 2602 | marks: [], 2603 | text: 2604 | 'This combines three completely separate queries into one query and returns an object containing the result of all of them. This can be a useful way to speed up page loads. By combining queries in this manner you can often get all of the core content for a web page to load in a single, cacheable query.' 2605 | } 2606 | ], 2607 | markDefs: [], 2608 | style: 'normal' 2609 | }, 2610 | { 2611 | _key: '51bc589070b8', 2612 | _type: 'block', 2613 | children: [ 2614 | { 2615 | _key: '51bc589070b80', 2616 | _type: 'span', 2617 | marks: [], 2618 | text: 'Finally' 2619 | } 2620 | ], 2621 | markDefs: [], 2622 | style: 'h2' 2623 | }, 2624 | { 2625 | _key: 'aea14216e7b6', 2626 | _type: 'block', 2627 | children: [ 2628 | { 2629 | _key: 'aea14216e7b60', 2630 | _type: 'span', 2631 | marks: [], 2632 | text: 2633 | "So there you go, this should cover 99% of what you need to understand in day-to-day use of GROQ. Reference documentation is imminent, but while we're writing it you should partake our " 2634 | }, 2635 | { 2636 | _key: 'aea14216e7b61', 2637 | _type: 'span', 2638 | marks: ['1a600461'], 2639 | text: 'Query Cheat Sheet' 2640 | }, 2641 | { 2642 | _key: 'aea14216e7b62', 2643 | _type: 'span', 2644 | marks: [], 2645 | text: ' which contain examples of all operators and functions currently supported.' 
2646 | } 2647 | ], 2648 | markDefs: [ 2649 | { 2650 | _key: '1a600461', 2651 | _ref: '81b839a4-2fc1-4769-941a-ec4de9276492', 2652 | _type: 'internalLink', 2653 | _weak: true 2654 | } 2655 | ], 2656 | style: 'normal' 2657 | } 2658 | ] 2659 | -------------------------------------------------------------------------------- /test/fixtures/singleQuotationMarks.fixture.js: -------------------------------------------------------------------------------- 1 | module.exports = [ 2 | { 3 | _type: 'block', 4 | children: [ 5 | { 6 | _type: 'span', 7 | marks: [], 8 | text: `Warning: '` 9 | }, 10 | { 11 | _type: 'span', 12 | marks: ['strong'], 13 | text: `Sanity'` 14 | }, 15 | { 16 | _type: 'span', 17 | marks: ['strong'], 18 | text: ' is' 19 | }, 20 | { 21 | _type: 'span', 22 | marks: ['em'], 23 | text: ` 'addictive'.` 24 | } 25 | ] 26 | }, 27 | { 28 | _type: 'block', 29 | children: [ 30 | { 31 | _type: 'span', 32 | marks: ['strong'], 33 | text: `Unmatched quotation marks? '` 34 | }, 35 | { 36 | _type: 'span', 37 | marks: ['strong'], 38 | text: 'sure, why not?' 39 | } 40 | ] 41 | } 42 | ] 43 | --------------------------------------------------------------------------------