├── .gitignore
├── CMakeLists.txt
├── README.md
├── flashops.c
├── include
│   ├── flashops.h
│   ├── linux-err.h
│   ├── linux-types.h
│   ├── nand.h
│   ├── serprog.h
│   ├── spi-mem-drvs.h
│   ├── spi-mem.h
│   ├── spi.h
│   └── spinand.h
├── main.c
├── spi-mem
│   ├── ch347
│   │   ├── ch347.c
│   │   ├── ch347.h
│   │   └── spi-mem.c
│   ├── spi-mem-drvs.c
│   ├── spi-mem-fx2qspi.c
│   ├── spi-mem-serprog.c
│   └── spi-mem.c
└── spi-nand
    ├── core.c
    ├── gigadevice.c
    ├── macronix.c
    ├── micron.c
    ├── paragon.c
    ├── toshiba.c
    └── winbond.c
/.gitignore:
--------------------------------------------------------------------------------
1 | CMakeLists.txt.user
2 | CMakeCache.txt
3 | CMakeFiles
4 | CMakeScripts
5 | Testing
6 | Makefile
7 | cmake_install.cmake
8 | install_manifest.txt
9 | compile_commands.json
10 | CTestTestfile.cmake
11 | _deps
12 | /build/**
13 | .vscode
14 | 
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.1)
2 | set(EXE_NAME spi-nand-prog)
3 | project(${EXE_NAME} C)
4 | find_package(PkgConfig)
5 | pkg_check_modules(libusb-1.0 REQUIRED libusb-1.0)
6 | 
7 | set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -ggdb -Wall")
8 | 
9 | include_directories(include ${libusb-1.0_INCLUDEDIR})
10 | 
11 | set(SPI_MEM_SRCS
12 | spi-mem/spi-mem.c
13 | spi-mem/spi-mem-drvs.c
14 | spi-mem/spi-mem-fx2qspi.c
15 | spi-mem/spi-mem-serprog.c
16 | spi-mem/ch347/ch347.c
17 | spi-mem/ch347/spi-mem.c
18 | )
19 | 
20 | set(SPI_NAND_SRCS
21 | spi-nand/core.c
22 | spi-nand/gigadevice.c
23 | spi-nand/macronix.c
24 | spi-nand/micron.c
25 | spi-nand/paragon.c
26 | spi-nand/toshiba.c
27 | spi-nand/winbond.c
28 | )
29 | add_executable(${EXE_NAME} ${SPI_MEM_SRCS} ${SPI_NAND_SRCS} main.c flashops.c)
30 | target_link_libraries(${EXE_NAME} ${libusb-1.0_LIBRARIES})
31 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SPI-NAND Programmer
2 | 
3 | ## About
4 | 
5 | A SPI-NAND flash programmer built on the SPI-MEM and SPI-NAND frameworks taken from Linux v5.8.
6 | 
7 | ## Features
8 | 
9 | * Reading/Writing SPI NAND
10 | * Operations with on-die ECC enabled/disabled
11 | * Operations with OOB data included or not
12 | * Skip bad blocks during writing
13 | * Data verification for writing when on-die ECC is enabled
14 | 
15 | ## Supported devices
16 | 
17 | [WCH CH347](https://www.wch.cn/products/CH347.html)
18 | 
19 | The default driver. No extra arguments needed.
20 | 
21 | [dword1511/stm32-vserprog](https://github.com/dword1511/stm32-vserprog)
22 | 
23 | Add the following arguments to select this driver:
24 | 
25 | ```
26 | -d serprog -a /dev/ttyACM0
27 | ```
28 | 
29 | ## Usage
30 | ```
31 | spi-nand-prog <operation> [file name] [arguments]
32 | 
33 | Operations: read/write/erase/scan
34 | Arguments:
35 | -d : hardware driver to be used.
36 | -a : additional argument passed to the selected driver.
37 | -o : flash offset. Must be aligned to a page boundary when reading and a block boundary when writing. Default: 0
38 | -l : read length. Default: flash_size
39 | --no-ecc: disable on-die ECC. This also disables data verification when writing.
40 | --with-oob: include OOB data during operation.
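41 | 
42 | Examples (illustrative invocations; file names and offsets are placeholders):
43 |   spi-nand-prog read dump.bin
44 |   spi-nand-prog write fw.bin -o 0x100000
45 |   spi-nand-prog scan -d serprog -a /dev/ttyACM0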
46 | ```
47 | 
--------------------------------------------------------------------------------
/flashops.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include <stdbool.h>
5 | #include <errno.h>
6 | #include <spinand.h>
7 | #include <flashops.h>
8 | int snand_read(struct spinand_device *snand, size_t offs, size_t len,
9 | bool ecc_enabled, bool read_oob, FILE *fp)
10 | {
11 | struct nand_device *nand = spinand_to_nand(snand);
12 | size_t page_size = nanddev_page_size(nand);
13 | size_t oob_size = nanddev_per_page_oobsize(nand);
14 | size_t fwrite_size;
15 | struct nand_page_io_req io_req;
16 | size_t rdlen = 0;
17 | uint8_t *buf;
18 | int ret;
19 | 
20 | if (offs % page_size) {
21 | fprintf(stderr, "Reading should start at page boundary.\n");
22 | return -EINVAL;
23 | }
24 | 
25 | if (!len)
26 | len = nanddev_size(nand) - offs;
27 | 
28 | buf = malloc(page_size + oob_size);
29 | if (!buf)
30 | return -ENOMEM;
31 | 
32 | memset(&io_req, 0, sizeof(io_req));
33 | io_req.databuf.in = buf;
34 | io_req.datalen = page_size;
35 | io_req.dataoffs = 0;
36 | fwrite_size = page_size;
37 | if (read_oob) {
38 | io_req.oobbuf.in = buf + page_size;
39 | io_req.ooblen = oob_size;
40 | io_req.ooboffs = 0;
41 | fwrite_size += oob_size;
42 | }
43 | nanddev_offs_to_pos(nand, offs, &io_req.pos);
44 | 
45 | while (rdlen < len) {
46 | printf("reading offset (%zX block %u page %u)\r", offs + rdlen,
47 | io_req.pos.eraseblock, io_req.pos.page);
48 | ret = spinand_read_page(snand, &io_req, ecc_enabled);
49 | if (ret > 0) {
50 | printf("\necc corrected %d bitflips.\n", ret);
51 | } else if (ret < 0) {
52 | printf("\nreading failed. errno %d\n", ret);
53 | memset(buf, 0, fwrite_size);
54 | }
55 | fwrite(buf, 1, fwrite_size, fp);
56 | rdlen += page_size;
57 | nanddev_pos_next_page(nand, &io_req.pos);
58 | }
59 | printf("\n\ndone.\n");
60 | free(buf);
61 | return 0;
62 | }
63 | 
64 | bool snand_isbad(struct spinand_device *snand, const struct nand_pos *pos,
65 | size_t bbm_offs, size_t bbm_len)
66 | {
67 | struct nand_device *nand = spinand_to_nand(snand);
68 | size_t page_size = nanddev_page_size(nand);
69 | struct nand_page_io_req req;
70 | size_t i;
71 | 
72 | u8 marker[8] = {};
73 | if (bbm_len > 8) {
74 | fprintf(stderr, "bbm too long.\n");
75 | return true;
76 | }
77 | 
78 | if (!bbm_len) {
79 | bbm_offs = page_size;
80 | bbm_len = 2;
81 | }
82 | 
83 | memset(&req, 0, sizeof(req));
84 | req.pos = *pos;
85 | req.pos.page = 0;
86 | if (bbm_offs < page_size) {
87 | req.databuf.in = marker;
88 | req.datalen = bbm_len;
89 | req.dataoffs = bbm_offs;
90 | } else {
91 | req.oobbuf.in = marker;
92 | req.ooblen = bbm_len;
93 | req.ooboffs = bbm_offs - page_size;
94 | }
95 | spinand_read_page(snand, &req, false);
96 | 
97 | for (i = 0; i < bbm_len; i++)
98 | if (marker[i] != 0xff)
99 | return true;
100 | return false;
101 | }
102 | 
103 | int snand_markbad(struct spinand_device *snand, const struct nand_pos *pos,
104 | size_t bbm_offs, size_t bbm_len)
105 | {
106 | struct nand_device *nand = spinand_to_nand(snand);
107 | size_t page_size = nanddev_page_size(nand);
108 | struct nand_page_io_req req;
109 | u8 marker[8];
110 | if (bbm_len > 8) {
111 | fprintf(stderr, "bbm too long.\n");
112 | return -EINVAL;
113 | }
114 | 
115 | if (!bbm_len) {
116 | bbm_offs = page_size;
117 | bbm_len = 2;
118 | }
119 | 
120 | memset(&req, 0, sizeof(req));
121 | memset(marker, 0, sizeof(marker));
122 | req.pos = *pos;
123 | req.pos.page = 0;
124 | if (bbm_offs < page_size) {
125 | req.databuf.out = marker;
126 | 
req.datalen = bbm_len; 127 | req.dataoffs = bbm_offs; 128 | } else { 129 | req.oobbuf.out = marker; 130 | req.ooblen = bbm_len; 131 | req.ooboffs = bbm_offs - page_size; 132 | } 133 | 134 | return spinand_write_page(snand, &req, false); 135 | } 136 | 137 | int snand_erase_remark(struct spinand_device *snand, const struct nand_pos *pos, 138 | size_t old_bbm_offs, size_t old_bbm_len, size_t bbm_offs, 139 | size_t bbm_len) 140 | { 141 | int ret; 142 | if (snand_isbad(snand, pos, old_bbm_offs, old_bbm_len)) { 143 | printf("bad block: target %u block %u.\n", pos->target, 144 | pos->eraseblock); 145 | goto BAD_BLOCK; 146 | } 147 | 148 | ret = spinand_erase(snand, pos); 149 | if (ret) { 150 | printf("erase failed: target %u block %u. ret: %d\n", 151 | pos->target, pos->eraseblock, ret); 152 | goto BAD_BLOCK; 153 | } 154 | 155 | return 0; 156 | BAD_BLOCK: 157 | snand_markbad(snand, pos, bbm_offs, bbm_len); 158 | return -EIO; 159 | } 160 | 161 | int snand_write(struct spinand_device *snand, size_t offs, bool ecc_enabled, 162 | bool write_oob, bool erase_rest, FILE *fp, size_t old_bbm_offs, 163 | size_t old_bbm_len, size_t bbm_offs, size_t bbm_len) 164 | { 165 | struct nand_device *nand = spinand_to_nand(snand); 166 | size_t page_size = nanddev_page_size(nand); 167 | size_t oob_size = nanddev_per_page_oobsize(nand); 168 | size_t eb_size = nanddev_eraseblock_size(nand); 169 | size_t flash_size = nanddev_size(nand); 170 | size_t fread_len, actual_read_len = 0; 171 | struct nand_page_io_req wr_req, rd_req; 172 | size_t cur_offs = offs, eb_rd_offs = 0; 173 | uint8_t *buf, *rdbuf; 174 | int ret; 175 | 176 | if (offs % eb_size) { 177 | fprintf(stderr, "Writing should start at eb boundary.\n"); 178 | return -EINVAL; 179 | } 180 | 181 | buf = malloc((page_size + oob_size) * 2); 182 | if (!buf) 183 | return -ENOMEM; 184 | 185 | rdbuf = buf + page_size + oob_size; 186 | 187 | memset(&wr_req, 0, sizeof(wr_req)); 188 | wr_req.databuf.out = buf; 189 | wr_req.datalen = page_size; 190 | wr_req.dataoffs = 0; 191 | fread_len = page_size; 192 | if (write_oob) { 193 | wr_req.oobbuf.out = buf + page_size; 194 | wr_req.ooblen = oob_size; 195 | wr_req.ooboffs = 0; 196 | fread_len += oob_size; 197 | } 198 | 199 | if (fp) 200 | actual_read_len = fread_len; // for the EOF check in loop. 
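/* When fp is NULL (erase-only use), actual_read_len stays 0 and never
 * equals fread_len, so the write path below is skipped and the loop only
 * erases blocks (or skips bad ones) as long as erase_rest is set. */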
201 | 
202 | nanddev_offs_to_pos(nand, offs, &wr_req.pos);
203 | 
204 | while (cur_offs < flash_size) {
205 | if (!wr_req.pos.page) {
206 | eb_rd_offs = 0;
207 | printf("erasing %zX (block %u)\r", cur_offs,
208 | wr_req.pos.eraseblock);
209 | ret = snand_erase_remark(snand, &wr_req.pos,
210 | old_bbm_offs, old_bbm_len,
211 | bbm_offs, bbm_len);
212 | if (ret) {
213 | printf("\nskipping current block: %d\n", ret);
214 | cur_offs += eb_size;
215 | nanddev_pos_next_eraseblock(nand, &wr_req.pos);
216 | continue;
217 | }
218 | }
219 | 
220 | if (actual_read_len == fread_len) {
221 | actual_read_len = fread(buf, 1, fread_len, fp);
222 | printf("writing %zu bytes to %zX (block %u page %u)\r",
223 | actual_read_len, cur_offs, wr_req.pos.eraseblock,
224 | wr_req.pos.page);
225 | if (actual_read_len < fread_len)
226 | memset(buf + actual_read_len, 0xff,
227 | fread_len - actual_read_len);
228 | 
229 | eb_rd_offs += actual_read_len;
230 | 
231 | ret = spinand_write_page(snand, &wr_req, ecc_enabled);
232 | if (ret) {
233 | printf("\npage writing failed.\n");
234 | goto BAD_BLOCK;
235 | }
236 | 
237 | if (ecc_enabled && !write_oob) {
238 | rd_req = wr_req;
239 | rd_req.databuf.out = rdbuf;
240 | rd_req.oobbuf.out = rdbuf + page_size;
241 | ret = spinand_read_page(snand, &rd_req,
242 | ecc_enabled);
243 | if (ret > 0) {
244 | printf("\necc corrected %d bitflips.\n",
245 | ret);
246 | } else if (ret < 0) {
247 | printf("\nreading failed. errno %d\n",
248 | ret);
249 | goto BAD_BLOCK;
250 | }
251 | if (memcmp(buf, rdbuf, fread_len)) {
252 | printf("\ndata verification failed.\n");
253 | goto BAD_BLOCK;
254 | }
255 | }
256 | cur_offs += page_size;
257 | nanddev_pos_next_page(nand, &wr_req.pos);
258 | } else if (erase_rest) {
259 | nanddev_pos_next_eraseblock(nand, &wr_req.pos);
260 | cur_offs = nanddev_pos_to_offs(nand, &wr_req.pos);
261 | } else {
262 | break;
263 | }
264 | 
265 | continue;
266 | BAD_BLOCK:
267 | snand_markbad(snand, &wr_req.pos, bbm_offs, bbm_len);
268 | fseek(fp, -(long)eb_rd_offs, SEEK_CUR);
269 | nanddev_pos_next_eraseblock(nand, &wr_req.pos);
270 | cur_offs = nanddev_pos_to_offs(nand, &wr_req.pos);
271 | }
272 | free(buf);
273 | printf("\ndone.\n");
274 | return 0;
275 | }
276 | 
277 | void snand_scan_bbm(struct spinand_device *snand)
278 | {
279 | struct nand_device *nand = spinand_to_nand(snand);
280 | size_t eb_size = nanddev_eraseblock_size(nand);
281 | size_t flash_size = nanddev_size(nand);
282 | size_t offs = 0;
283 | struct nand_pos pos;
284 | nanddev_offs_to_pos(nand, 0, &pos);
285 | while (offs < flash_size) {
286 | printf("scanning block %u\r", pos.eraseblock);
287 | if (snand_isbad(snand, &pos, 0, 0))
288 | printf("\ntarget %u block %u is bad.\n", pos.target,
289 | pos.eraseblock);
290 | nanddev_pos_next_eraseblock(nand, &pos);
291 | offs += eb_size;
292 | }
293 | printf("\ndone.\n");
294 | }
295 | 
--------------------------------------------------------------------------------
/include/flashops.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <stdio.h>
3 | #include <spinand.h>
4 | int snand_read(struct spinand_device *snand, size_t offs, size_t len,
5 | bool ecc_enabled, bool read_oob, FILE *fp);
6 | void snand_scan_bbm(struct spinand_device *snand);
7 | int snand_write(struct spinand_device *snand, size_t offs, bool ecc_enabled,
8 | bool write_oob, bool erase_rest, FILE *fp, size_t old_bbm_offs,
9 | size_t old_bbm_len, size_t bbm_offs, size_t bbm_len);
10 | 
--------------------------------------------------------------------------------
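A minimal sketch of how the flashops helpers compose (not part of the tree; dump_all() and its path argument are hypothetical, and a probed device is assumed, obtained the way main.c does via spi_mem_probe() and spinand_probe()):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <spinand.h>
#include <flashops.h>

/* Dump the whole flash to a file with on-die ECC enabled and OOB excluded;
 * passing offs = 0 and len = 0 makes snand_read() cover the full device. */
static int dump_all(struct spinand_device *snand, const char *path)
{
	FILE *fp = fopen(path, "wb");
	if (!fp)
		return -errno;
	int ret = snand_read(snand, 0, 0, true, false, fp);
	fclose(fp);
	return ret;
}

/include/linux-err.h: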
--------------------------------------------------------------------------------
1 | #pragma once
2 | #define __must_check __attribute__((__warn_unused_result__))
3 | #define __force
4 | 
5 | #define MAX_ERRNO 4095
6 | 
7 | #define IS_ERR_VALUE(x) ((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
8 | 
9 | 
10 | static inline void * __must_check ERR_PTR(long error)
11 | {
12 | return (void *) error;
13 | }
14 | 
15 | static inline long __must_check PTR_ERR(__force const void *ptr)
16 | {
17 | return (long) ptr;
18 | }
19 | 
20 | static inline bool __must_check IS_ERR(__force const void *ptr)
21 | {
22 | return IS_ERR_VALUE((unsigned long)ptr);
23 | }
24 | 
25 | static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
26 | {
27 | return (!ptr) || IS_ERR_VALUE((unsigned long)ptr);
28 | }
29 | 
--------------------------------------------------------------------------------
/include/linux-types.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <linux/types.h>
3 | typedef __s8 s8;
4 | typedef __u8 u8;
5 | typedef __s16 s16;
6 | typedef __u16 u16;
7 | typedef __s32 s32;
8 | typedef __u32 u32;
9 | typedef __s64 s64;
10 | typedef __u64 u64;
11 | 
12 | #define container_of(ptr, type, member) ({ \
13 | void *__mptr = (void *)(ptr); \
14 | ((type *)(__mptr - offsetof(type, member))); })
15 | 
16 | #define BIT(_B) (1 << (_B))
17 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
18 | #define BITS_PER_LONG (sizeof(long) * 8)
19 | #define GENMASK(h, l) \
20 | (((~0LU) - (1LU << (l)) + 1) & \
21 | (~0LU >> (BITS_PER_LONG - 1 - (h))))
22 | 
--------------------------------------------------------------------------------
/include/nand.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0 */
2 | /*
3 | * Copyright 2017 - Free Electrons
4 | *
5 | * Authors:
6 | * Boris Brezillon
7 | * Peter Pan
8 | */
9 | 
10 | #ifndef __LINUX_MTD_NAND_H
11 | #define __LINUX_MTD_NAND_H
12 | 
13 | #include <stdbool.h>
14 | #include <stdint.h>
15 | #include <stddef.h>
16 | #include <errno.h>
17 | #include <sys/types.h>
18 | #include <linux-types.h>
19 | 
20 | #define do_div(n,base) ({ \
21 | uint32_t __base = (base); \
22 | uint32_t __rem; \
23 | __rem = ((uint64_t)(n)) % __base; \
24 | (n) = ((uint64_t)(n)) / __base; \
25 | __rem; \
26 | })
27 | 
28 | struct nand_device;
29 | 
30 | /**
31 | * struct nand_memory_organization - Memory organization structure
32 | * @bits_per_cell: number of bits per NAND cell
33 | * @pagesize: page size
34 | * @oobsize: OOB area size
35 | * @pages_per_eraseblock: number of pages per eraseblock
36 | * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
37 | * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
38 | * @planes_per_lun: number of planes per LUN
39 | * @luns_per_target: number of LUNs per target (target is a synonym for die)
40 | * @ntargets: total number of targets exposed by the NAND device
41 | */
42 | struct nand_memory_organization {
43 | unsigned int bits_per_cell;
44 | unsigned int pagesize;
45 | unsigned int oobsize;
46 | unsigned int pages_per_eraseblock;
47 | unsigned int eraseblocks_per_lun;
48 | unsigned int max_bad_eraseblocks_per_lun;
49 | unsigned int planes_per_lun;
50 | unsigned int luns_per_target;
51 | unsigned int ntargets;
52 | };
53 | 
54 | #define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt) \
55 | { \
56 | .bits_per_cell = (bpc), \
57 | .pagesize = (ps), \
58 | .oobsize = (os), \
59 | .pages_per_eraseblock = (ppe), \
60 | .eraseblocks_per_lun = (epl), \
61 | 
.max_bad_eraseblocks_per_lun = (mbb), \ 62 | .planes_per_lun = (ppl), \ 63 | .luns_per_target = (lpt), \ 64 | .ntargets = (nt), \ 65 | } 66 | 67 | /** 68 | * struct nand_row_converter - Information needed to convert an absolute offset 69 | * into a row address 70 | * @lun_addr_shift: position of the LUN identifier in the row address 71 | * @eraseblock_addr_shift: position of the eraseblock identifier in the row 72 | * address 73 | */ 74 | struct nand_row_converter { 75 | unsigned int lun_addr_shift; 76 | unsigned int eraseblock_addr_shift; 77 | }; 78 | 79 | /** 80 | * struct nand_pos - NAND position object 81 | * @target: the NAND target/die 82 | * @lun: the LUN identifier 83 | * @plane: the plane within the LUN 84 | * @eraseblock: the eraseblock within the LUN 85 | * @page: the page within the LUN 86 | * 87 | * These information are usually used by specific sub-layers to select the 88 | * appropriate target/die and generate a row address to pass to the device. 89 | */ 90 | struct nand_pos { 91 | unsigned int target; 92 | unsigned int lun; 93 | unsigned int plane; 94 | unsigned int eraseblock; 95 | unsigned int page; 96 | }; 97 | 98 | /** 99 | * struct nand_page_io_req - NAND I/O request object 100 | * @pos: the position this I/O request is targeting 101 | * @dataoffs: the offset within the page 102 | * @datalen: number of data bytes to read from/write to this page 103 | * @databuf: buffer to store data in or get data from 104 | * @ooboffs: the OOB offset within the page 105 | * @ooblen: the number of OOB bytes to read from/write to this page 106 | * @oobbuf: buffer to store OOB data in or get OOB data from 107 | * @mode: one of the %MTD_OPS_XXX mode 108 | * 109 | * This object is used to pass per-page I/O requests to NAND sub-layers. This 110 | * way all useful information are already formatted in a useful way and 111 | * specific NAND layers can focus on translating these information into 112 | * specific commands/operations. 113 | */ 114 | struct nand_page_io_req { 115 | struct nand_pos pos; 116 | unsigned int dataoffs; 117 | unsigned int datalen; 118 | union { 119 | const void *out; 120 | void *in; 121 | } databuf; 122 | unsigned int ooboffs; 123 | unsigned int ooblen; 124 | union { 125 | const void *out; 126 | void *in; 127 | } oobbuf; 128 | }; 129 | 130 | /** 131 | * struct nand_ecc_props - NAND ECC properties 132 | * @strength: ECC strength 133 | * @step_size: Number of bytes per step 134 | */ 135 | struct nand_ecc_props { 136 | unsigned int strength; 137 | unsigned int step_size; 138 | }; 139 | 140 | #define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) } 141 | 142 | /** 143 | * struct nand_device - NAND device 144 | * @memorg: memory layout 145 | * @eccreq: ECC requirements 146 | * @rowconv: position to row address converter 147 | * 148 | * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND) 149 | * should declare their own NAND object embedding a nand_device struct (that's 150 | * how inheritance is done). 
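 * For example, struct spinand_device (spinand.h) embeds a nand_device as its @base field.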
151 | * struct nand_device->memorg and struct nand_device->eccreq should be filled
152 | * in at device detection time to reflect the actual NAND device.
153 | */
154 | struct nand_device {
155 | struct nand_memory_organization memorg;
156 | struct nand_ecc_props eccreq;
157 | struct nand_row_converter rowconv;
158 | };
159 | 
160 | /**
161 | * struct nand_io_iter - NAND I/O iterator
162 | * @req: current I/O request
163 | * @oobbytes_per_page: maximum number of OOB bytes per page
164 | * @dataleft: remaining number of data bytes to read/write
165 | * @oobleft: remaining number of OOB bytes to read/write
166 | * 
167 | * Can be used by specialized NAND layers to iterate over all pages covered
168 | * by an MTD I/O request, which should greatly simplify the boiler-plate
169 | * code needed to read/write data from/to a NAND device.
170 | */
171 | struct nand_io_iter {
172 | struct nand_page_io_req req;
173 | unsigned int oobbytes_per_page;
174 | unsigned int dataleft;
175 | unsigned int oobleft;
176 | };
177 | 
178 | /**
179 | * nanddev_bits_per_cell() - Get the number of bits per cell
180 | * @nand: NAND device
181 | * 
182 | * Return: the number of bits per cell.
183 | */
184 | static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
185 | {
186 | return nand->memorg.bits_per_cell;
187 | }
188 | 
189 | /**
190 | * nanddev_page_size() - Get NAND page size
191 | * @nand: NAND device
192 | * 
193 | * Return: the page size.
194 | */
195 | static inline size_t nanddev_page_size(const struct nand_device *nand)
196 | {
197 | return nand->memorg.pagesize;
198 | }
199 | 
200 | /**
201 | * nanddev_per_page_oobsize() - Get NAND OOB size
202 | * @nand: NAND device
203 | * 
204 | * Return: the OOB size.
205 | */
206 | static inline unsigned int
207 | nanddev_per_page_oobsize(const struct nand_device *nand)
208 | {
209 | return nand->memorg.oobsize;
210 | }
211 | 
212 | /**
213 | * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
214 | * @nand: NAND device
215 | * 
216 | * Return: the number of pages per eraseblock.
217 | */
218 | static inline unsigned int
219 | nanddev_pages_per_eraseblock(const struct nand_device *nand)
220 | {
221 | return nand->memorg.pages_per_eraseblock;
222 | }
223 | 
224 | /**
225 | * nanddev_pages_per_target() - Get the number of pages per target
226 | * @nand: NAND device
227 | * 
228 | * Return: the number of pages per target.
229 | */
230 | static inline unsigned int
231 | nanddev_pages_per_target(const struct nand_device *nand)
232 | {
233 | return nand->memorg.pages_per_eraseblock *
234 | nand->memorg.eraseblocks_per_lun *
235 | nand->memorg.luns_per_target;
236 | }
237 | 
238 | /**
239 | * nanddev_eraseblock_size() - Get NAND eraseblock size
240 | * @nand: NAND device
241 | * 
242 | * Return: the eraseblock size.
243 | */
244 | static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
245 | {
246 | return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
247 | }
248 | 
249 | /**
250 | * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
251 | * @nand: NAND device
252 | * 
253 | * Return: the number of eraseblocks per LUN.
254 | */
255 | static inline unsigned int
256 | nanddev_eraseblocks_per_lun(const struct nand_device *nand)
257 | {
258 | return nand->memorg.eraseblocks_per_lun;
259 | }
260 | 
261 | /**
262 | * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
263 | * @nand: NAND device
264 | * 
265 | * Return: the number of eraseblocks per target.
266 | */
267 | static inline unsigned int
268 | nanddev_eraseblocks_per_target(const struct nand_device *nand)
269 | {
270 | return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
271 | }
272 | 
273 | /**
274 | * nanddev_target_size() - Get the total size provided by a single target/die
275 | * @nand: NAND device
276 | * 
277 | * Return: the total size exposed by a single target/die in bytes.
278 | */
279 | static inline u64 nanddev_target_size(const struct nand_device *nand)
280 | {
281 | return (u64)nand->memorg.luns_per_target *
282 | nand->memorg.eraseblocks_per_lun *
283 | nand->memorg.pages_per_eraseblock *
284 | nand->memorg.pagesize;
285 | }
286 | 
287 | /**
288 | * nanddev_ntargets() - Get the total number of targets
289 | * @nand: NAND device
290 | * 
291 | * Return: the number of targets/dies exposed by @nand.
292 | */
293 | static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
294 | {
295 | return nand->memorg.ntargets;
296 | }
297 | 
298 | /**
299 | * nanddev_neraseblocks() - Get the total number of eraseblocks
300 | * @nand: NAND device
301 | * 
302 | * Return: the total number of eraseblocks exposed by @nand.
303 | */
304 | static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
305 | {
306 | return nand->memorg.ntargets * nand->memorg.luns_per_target *
307 | nand->memorg.eraseblocks_per_lun;
308 | }
309 | 
310 | /**
311 | * nanddev_size() - Get NAND size
312 | * @nand: NAND device
313 | * 
314 | * Return: the total size (in bytes) exposed by @nand.
315 | */
316 | static inline u64 nanddev_size(const struct nand_device *nand)
317 | {
318 | return nanddev_target_size(nand) * nanddev_ntargets(nand);
319 | }
320 | 
321 | /**
322 | * nanddev_get_memorg() - Extract memory organization info from a NAND device
323 | * @nand: NAND device
324 | * 
325 | * This can be used by the upper layer to fill the memorg info before calling
326 | * nanddev_init().
327 | * 
328 | * Return: the memorg object embedded in the NAND device.
329 | */
330 | static inline struct nand_memory_organization *
331 | nanddev_get_memorg(struct nand_device *nand)
332 | {
333 | return &nand->memorg;
334 | }
335 | 
336 | /**
337 | * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
338 | * @nand: NAND device
339 | * @offs: absolute NAND offset (usually passed by the MTD layer)
340 | * @pos: a NAND position object to fill in
341 | * 
342 | * Converts @offs into a nand_pos representation.
343 | * 
344 | * Return: the offset within the NAND page pointed to by @pos.
345 | */
346 | static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
347 | loff_t offs,
348 | struct nand_pos *pos)
349 | {
350 | unsigned int pageoffs;
351 | u64 tmp = offs;
352 | 
353 | pageoffs = do_div(tmp, nand->memorg.pagesize);
354 | pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
355 | pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
356 | pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
357 | pos->lun = do_div(tmp, nand->memorg.luns_per_target);
358 | pos->target = tmp;
359 | 
360 | return pageoffs;
361 | }
362 | 
363 | /**
364 | * nanddev_pos_cmp() - Compare two NAND positions
365 | * @a: First NAND position
366 | * @b: Second NAND position
367 | * 
368 | * Compares two NAND positions.
369 | * 
370 | * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
371 | */ 372 | static inline int nanddev_pos_cmp(const struct nand_pos *a, 373 | const struct nand_pos *b) 374 | { 375 | if (a->target != b->target) 376 | return a->target < b->target ? -1 : 1; 377 | 378 | if (a->lun != b->lun) 379 | return a->lun < b->lun ? -1 : 1; 380 | 381 | if (a->eraseblock != b->eraseblock) 382 | return a->eraseblock < b->eraseblock ? -1 : 1; 383 | 384 | if (a->page != b->page) 385 | return a->page < b->page ? -1 : 1; 386 | 387 | return 0; 388 | } 389 | 390 | /** 391 | * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset 392 | * @nand: NAND device 393 | * @pos: the NAND position to convert 394 | * 395 | * Converts @pos NAND position into an absolute offset. 396 | * 397 | * Return: the absolute offset. Note that @pos points to the beginning of a 398 | * page, if one wants to point to a specific offset within this page 399 | * the returned offset has to be adjusted manually. 400 | */ 401 | static inline loff_t nanddev_pos_to_offs(struct nand_device *nand, 402 | const struct nand_pos *pos) 403 | { 404 | unsigned int npages; 405 | 406 | npages = pos->page + 407 | ((pos->eraseblock + 408 | (pos->lun + 409 | (pos->target * nand->memorg.luns_per_target)) * 410 | nand->memorg.eraseblocks_per_lun) * 411 | nand->memorg.pages_per_eraseblock); 412 | 413 | return (loff_t)npages * nand->memorg.pagesize; 414 | } 415 | 416 | /** 417 | * nanddev_pos_to_row() - Extract a row address from a NAND position 418 | * @nand: NAND device 419 | * @pos: the position to convert 420 | * 421 | * Converts a NAND position into a row address that can then be passed to the 422 | * device. 423 | * 424 | * Return: the row address extracted from @pos. 425 | */ 426 | static inline unsigned int nanddev_pos_to_row(struct nand_device *nand, 427 | const struct nand_pos *pos) 428 | { 429 | return (pos->lun << nand->rowconv.lun_addr_shift) | 430 | (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) | 431 | pos->page; 432 | } 433 | 434 | /** 435 | * nanddev_pos_next_target() - Move a position to the next target/die 436 | * @nand: NAND device 437 | * @pos: the position to update 438 | * 439 | * Updates @pos to point to the start of the next target/die. Useful when you 440 | * want to iterate over all targets/dies of a NAND device. 441 | */ 442 | static inline void nanddev_pos_next_target(struct nand_device *nand, 443 | struct nand_pos *pos) 444 | { 445 | pos->page = 0; 446 | pos->plane = 0; 447 | pos->eraseblock = 0; 448 | pos->lun = 0; 449 | pos->target++; 450 | } 451 | 452 | /** 453 | * nanddev_pos_next_lun() - Move a position to the next LUN 454 | * @nand: NAND device 455 | * @pos: the position to update 456 | * 457 | * Updates @pos to point to the start of the next LUN. Useful when you want to 458 | * iterate over all LUNs of a NAND device. 459 | */ 460 | static inline void nanddev_pos_next_lun(struct nand_device *nand, 461 | struct nand_pos *pos) 462 | { 463 | if (pos->lun >= nand->memorg.luns_per_target - 1) 464 | return nanddev_pos_next_target(nand, pos); 465 | 466 | pos->lun++; 467 | pos->page = 0; 468 | pos->plane = 0; 469 | pos->eraseblock = 0; 470 | } 471 | 472 | /** 473 | * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock 474 | * @nand: NAND device 475 | * @pos: the position to update 476 | * 477 | * Updates @pos to point to the start of the next eraseblock. Useful when you 478 | * want to iterate over all eraseblocks of a NAND device. 
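 * flashops.c combines this with nanddev_offs_to_pos() and nanddev_pos_to_offs() to walk a flash device block by block.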
479 | */
480 | static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
481 | struct nand_pos *pos)
482 | {
483 | if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
484 | return nanddev_pos_next_lun(nand, pos);
485 | 
486 | pos->eraseblock++;
487 | pos->page = 0;
488 | pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
489 | }
490 | 
491 | /**
492 | * nanddev_pos_next_page() - Move a position to the next page
493 | * @nand: NAND device
494 | * @pos: the position to update
495 | * 
496 | * Updates @pos to point to the start of the next page. Useful when you want to
497 | * iterate over all pages of a NAND device.
498 | */
499 | static inline void nanddev_pos_next_page(struct nand_device *nand,
500 | struct nand_pos *pos)
501 | {
502 | if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
503 | return nanddev_pos_next_eraseblock(nand, pos);
504 | 
505 | pos->page++;
506 | }
507 | 
508 | static __always_inline int fls(int x)
509 | {
510 | int r = 32;
511 | 
512 | if (!x)
513 | return 0;
514 | if (!(x & 0xffff0000u)) {
515 | x <<= 16;
516 | r -= 16;
517 | }
518 | if (!(x & 0xff000000u)) {
519 | x <<= 8;
520 | r -= 8;
521 | }
522 | if (!(x & 0xf0000000u)) {
523 | x <<= 4;
524 | r -= 4;
525 | }
526 | if (!(x & 0xc0000000u)) {
527 | x <<= 2;
528 | r -= 2;
529 | }
530 | if (!(x & 0x80000000u)) {
531 | x <<= 1;
532 | r -= 1;
533 | }
534 | return r;
535 | }
536 | 
537 | static inline int nanddev_init(struct nand_device *nand)
538 | {
539 | struct nand_memory_organization *memorg;
540 | 
541 | if (!nand)
542 | return -EINVAL;
543 | memorg = nanddev_get_memorg(nand);
544 | if (!memorg->bits_per_cell || !memorg->pagesize ||
545 | !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
546 | !memorg->planes_per_lun || !memorg->luns_per_target ||
547 | !memorg->ntargets)
548 | return -EINVAL;
549 | 
550 | nand->rowconv.eraseblock_addr_shift =
551 | fls(memorg->pages_per_eraseblock - 1);
552 | nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
553 | nand->rowconv.eraseblock_addr_shift;
554 | return 0;
555 | }
556 | #endif /* __LINUX_MTD_NAND_H */
557 | 
--------------------------------------------------------------------------------
/include/serprog.h:
--------------------------------------------------------------------------------
1 | /* According to Serial Flasher Protocol Specification - version 1 */
2 | #define S_ACK 0x06
3 | #define S_NAK 0x15
4 | #define S_CMD_NOP 0x00 /* No operation */
5 | #define S_CMD_Q_IFACE 0x01 /* Query interface version */
6 | #define S_CMD_Q_CMDMAP 0x02 /* Query supported commands bitmap */
7 | #define S_CMD_Q_PGMNAME 0x03 /* Query programmer name */
8 | #define S_CMD_Q_SERBUF 0x04 /* Query Serial Buffer Size */
9 | #define S_CMD_Q_BUSTYPE 0x05 /* Query supported bustypes */
10 | #define S_CMD_Q_CHIPSIZE 0x06 /* Query supported chipsize (2^n format) */
11 | #define S_CMD_Q_OPBUF 0x07 /* Query operation buffer size */
12 | #define S_CMD_Q_WRNMAXLEN 0x08 /* Query Write to opbuf: Write-N maximum length */
13 | #define S_CMD_R_BYTE 0x09 /* Read a single byte */
14 | #define S_CMD_R_NBYTES 0x0A /* Read n bytes */
15 | #define S_CMD_O_INIT 0x0B /* Initialize operation buffer */
16 | #define S_CMD_O_WRITEB 0x0C /* Write opbuf: Write byte with address */
17 | #define S_CMD_O_WRITEN 0x0D /* Write to opbuf: Write-N */
18 | #define S_CMD_O_DELAY 0x0E /* Write opbuf: udelay */
19 | #define S_CMD_O_EXEC 0x0F /* Execute operation buffer */
20 | #define S_CMD_SYNCNOP 0x10 /* Special no-operation that returns NAK+ACK */
21 | #define S_CMD_Q_RDNMAXLEN 0x11 /* Query read-n maximum length */
22 | #define S_CMD_S_BUSTYPE 0x12 /* Set used bustype(s). */
23 | #define S_CMD_O_SPIOP 0x13 /* Perform SPI operation. */
24 | #define S_CMD_S_SPI_FREQ 0x14 /* Set SPI clock frequency */
25 | #define S_CMD_S_PIN_STATE 0x15 /* Enable/disable output drivers */
26 | 
--------------------------------------------------------------------------------
/include/spi-mem-drvs.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <spi-mem.h>
3 | 
4 | struct spi_mem *spi_mem_probe(const char *drv, const char *drvarg);
5 | void spi_mem_remove(const char *drv, struct spi_mem *mem);
6 | struct spi_mem *fx2qspi_probe(void);
7 | void fx2qspi_remove(struct spi_mem *mem);
8 | struct spi_mem *serprog_probe(const char *devpath);
9 | void serprog_remove(struct spi_mem *mem);
10 | struct spi_mem *ch347_probe(void);
11 | void ch347_remove(struct spi_mem *mem);
12 | 
--------------------------------------------------------------------------------
/include/spi-mem.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0+ */
2 | /*
3 | * This is based on include/linux/spi/spi-mem.h in Linux
4 | * Original file header:
5 | *
6 | * Copyright (C) 2018 Exceet Electronics GmbH
7 | * Copyright (C) 2018 Bootlin
8 | *
9 | * Author:
10 | * Peter Pan
11 | * Boris Brezillon
12 | */
13 | 
14 | #ifndef __LINUX_SPI_MEM_H
15 | #define __LINUX_SPI_MEM_H
16 | #include <stddef.h>
17 | #include <stdbool.h>
18 | #include <linux-types.h>
19 | #include <spi.h>
20 | 
21 | typedef long ssize_t;
22 | struct spi_controller_mem_ops;
23 | 
24 | #define SPI_MEM_OP_CMD(__opcode, __buswidth) \
25 | { \
26 | .buswidth = __buswidth, \
27 | .opcode = __opcode, \
28 | }
29 | 
30 | #define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \
31 | { \
32 | .nbytes = __nbytes, \
33 | .val = __val, \
34 | .buswidth = __buswidth, \
35 | }
36 | 
37 | #define SPI_MEM_OP_NO_ADDR { }
38 | 
39 | #define SPI_MEM_OP_DUMMY(__nbytes, __buswidth) \
40 | { \
41 | .nbytes = __nbytes, \
42 | .buswidth = __buswidth, \
43 | }
44 | 
45 | #define SPI_MEM_OP_NO_DUMMY { }
46 | 
47 | #define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth) \
48 | { \
49 | .dir = SPI_MEM_DATA_IN, \
50 | .nbytes = __nbytes, \
51 | .buf.in = __buf, \
52 | .buswidth = __buswidth, \
53 | }
54 | 
55 | #define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth) \
56 | { \
57 | .dir = SPI_MEM_DATA_OUT, \
58 | .nbytes = __nbytes, \
59 | .buf.out = __buf, \
60 | .buswidth = __buswidth, \
61 | }
62 | 
63 | #define SPI_MEM_OP_NO_DATA { }
64 | 
65 | /**
66 | * enum spi_mem_data_dir - describes the direction of a SPI memory data
67 | * transfer from the controller perspective
68 | * @SPI_MEM_NO_DATA: no data transferred
69 | * @SPI_MEM_DATA_IN: data coming from the SPI memory
70 | * @SPI_MEM_DATA_OUT: data sent to the SPI memory
71 | */
72 | enum spi_mem_data_dir {
73 | SPI_MEM_NO_DATA,
74 | SPI_MEM_DATA_IN,
75 | SPI_MEM_DATA_OUT,
76 | };
77 | 
78 | /**
79 | * struct spi_mem_op - describes a SPI memory operation
80 | * @cmd.buswidth: number of IO lines used to transmit the command
81 | * @cmd.opcode: operation opcode
82 | * @addr.nbytes: number of address bytes to send. Can be zero if the operation
83 | * does not need to send an address
84 | * @addr.buswidth: number of IO lines used to transmit the address cycles
85 | * @addr.val: address value. This value is always sent MSB first on the bus.
86 | * Note that only @addr.nbytes are taken into account in this 87 | * address value, so users should make sure the value fits in the 88 | * assigned number of bytes. 89 | * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can 90 | * be zero if the operation does not require dummy bytes 91 | * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes 92 | * @data.buswidth: number of IO lanes used to send/receive the data 93 | * @data.dir: direction of the transfer 94 | * @data.nbytes: number of data bytes to send/receive. Can be zero if the 95 | * operation does not involve transferring data 96 | * @data.buf.in: input buffer (must be DMA-able) 97 | * @data.buf.out: output buffer (must be DMA-able) 98 | */ 99 | struct spi_mem_op { 100 | struct { 101 | u8 buswidth; 102 | u8 opcode; 103 | } cmd; 104 | 105 | struct { 106 | u8 nbytes; 107 | u8 buswidth; 108 | u64 val; 109 | } addr; 110 | 111 | struct { 112 | u8 nbytes; 113 | u8 buswidth; 114 | } dummy; 115 | 116 | struct { 117 | u8 buswidth; 118 | enum spi_mem_data_dir dir; 119 | unsigned int nbytes; 120 | union { 121 | void *in; 122 | const void *out; 123 | } buf; 124 | } data; 125 | }; 126 | 127 | #define SPI_MEM_OP(__cmd, __addr, __dummy, __data) \ 128 | { \ 129 | .cmd = __cmd, \ 130 | .addr = __addr, \ 131 | .dummy = __dummy, \ 132 | .data = __data, \ 133 | } 134 | 135 | /** 136 | * struct spi_mem_dirmap_info - Direct mapping information 137 | * @op_tmpl: operation template that should be used by the direct mapping when 138 | * the memory device is accessed 139 | * @offset: absolute offset this direct mapping is pointing to 140 | * @length: length in byte of this direct mapping 141 | * 142 | * These information are used by the controller specific implementation to know 143 | * the portion of memory that is directly mapped and the spi_mem_op that should 144 | * be used to access the device. 145 | * A direct mapping is only valid for one direction (read or write) and this 146 | * direction is directly encoded in the ->op_tmpl.data.dir field. 147 | */ 148 | struct spi_mem_dirmap_info { 149 | struct spi_mem_op op_tmpl; 150 | u64 offset; 151 | u64 length; 152 | }; 153 | 154 | /** 155 | * struct spi_mem_dirmap_desc - Direct mapping descriptor 156 | * @mem: the SPI memory device this direct mapping is attached to 157 | * @info: information passed at direct mapping creation time 158 | * @nodirmap: set to 1 if the SPI controller does not implement 159 | * ->mem_ops->dirmap_create() or when this function returned an 160 | * error. If @nodirmap is true, all spi_mem_dirmap_{read,write}() 161 | * calls will use spi_mem_exec_op() to access the memory. This is a 162 | * degraded mode that allows spi_mem drivers to use the same code 163 | * no matter whether the controller supports direct mapping or not 164 | * @priv: field pointing to controller specific data 165 | * 166 | * Common part of a direct mapping descriptor. This object is created by 167 | * spi_mem_dirmap_create() and controller implementation of ->create_dirmap() 168 | * can create/attach direct mapping resources to the descriptor in the ->priv 169 | * field. 
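 * In this tree, struct spinand_dirmap (spinand.h) pairs one such read descriptor with one write descriptor.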
170 | */ 171 | struct spi_mem_dirmap_desc { 172 | struct spi_mem *mem; 173 | struct spi_mem_dirmap_info info; 174 | unsigned int nodirmap; 175 | void *priv; 176 | }; 177 | 178 | /** 179 | * struct spi_mem - describes a SPI memory device 180 | * @spi: the underlying SPI device 181 | * @drvpriv: spi_mem_driver private data 182 | * @name: name of the SPI memory device 183 | * 184 | * Extra information that describe the SPI memory device and may be needed by 185 | * the controller to properly handle this device should be placed here. 186 | * 187 | * One example would be the device size since some controller expose their SPI 188 | * mem devices through a io-mapped region. 189 | */ 190 | struct spi_mem { 191 | const struct spi_controller_mem_ops *ops; 192 | u32 spi_mode; 193 | void *drvpriv; 194 | const char *name; 195 | }; 196 | 197 | /** 198 | * struct spi_mem_set_drvdata() - attach driver private data to a SPI mem 199 | * device 200 | * @mem: memory device 201 | * @data: data to attach to the memory device 202 | */ 203 | static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data) 204 | { 205 | mem->drvpriv = data; 206 | } 207 | 208 | /** 209 | * struct spi_mem_get_drvdata() - get driver private data attached to a SPI mem 210 | * device 211 | * @mem: memory device 212 | * 213 | * Return: the data attached to the mem device. 214 | */ 215 | static inline void *spi_mem_get_drvdata(struct spi_mem *mem) 216 | { 217 | return mem->drvpriv; 218 | } 219 | 220 | /** 221 | * spi_mem_get_name() - Return the SPI mem device name to be used by the 222 | * upper layer if necessary 223 | * @mem: the SPI memory 224 | * 225 | * This function allows SPI mem users to retrieve the SPI mem device name. 226 | * It is useful if the upper layer needs to expose a custom name for 227 | * compatibility reasons. 228 | * 229 | * Return: a string containing the name of the memory device to be used 230 | * by the SPI mem user 231 | */ 232 | static inline const char *spi_mem_get_name(struct spi_mem *mem) 233 | { 234 | return mem->name ?: "spi-mem"; 235 | } 236 | 237 | /** 238 | * struct spi_controller_mem_ops - SPI memory operations 239 | * @adjust_op_size: shrink the data xfer of an operation to match controller's 240 | * limitations (can be alignment of max RX/TX size 241 | * limitations) 242 | * @supports_op: check if an operation is supported by the controller 243 | * @exec_op: execute a SPI memory operation 244 | * @get_name: get a custom name for the SPI mem device from the controller. 245 | * This might be needed if the controller driver has been ported 246 | * to use the SPI mem layer and a custom name is used to keep 247 | * mtdparts compatible. 248 | * Note that if the implementation of this function allocates memory 249 | * dynamically, then it should do so with devm_xxx(), as we don't 250 | * have a ->free_name() function. 251 | * @dirmap_create: create a direct mapping descriptor that can later be used to 252 | * access the memory device. This method is optional 253 | * @dirmap_destroy: destroy a memory descriptor previous created by 254 | * ->dirmap_create() 255 | * @dirmap_read: read data from the memory device using the direct mapping 256 | * created by ->dirmap_create(). The function can return less 257 | * data than requested (for example when the request is crossing 258 | * the currently mapped area), and the caller of 259 | * spi_mem_dirmap_read() is responsible for calling it again in 260 | * this case. 
261 | * @dirmap_write: write data to the memory device using the direct mapping 262 | * created by ->dirmap_create(). The function can return less 263 | * data than requested (for example when the request is crossing 264 | * the currently mapped area), and the caller of 265 | * spi_mem_dirmap_write() is responsible for calling it again in 266 | * this case. 267 | * 268 | * This interface should be implemented by SPI controllers providing an 269 | * high-level interface to execute SPI memory operation, which is usually the 270 | * case for QSPI controllers. 271 | * 272 | * Note on ->dirmap_{read,write}(): drivers should avoid accessing the direct 273 | * mapping from the CPU because doing that can stall the CPU waiting for the 274 | * SPI mem transaction to finish, and this will make real-time maintainers 275 | * unhappy and might make your system less reactive. Instead, drivers should 276 | * use DMA to access this direct mapping. 277 | */ 278 | struct spi_controller_mem_ops { 279 | int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op); 280 | bool (*supports_op)(struct spi_mem *mem, 281 | const struct spi_mem_op *op); 282 | int (*exec_op)(struct spi_mem *mem, 283 | const struct spi_mem_op *op); 284 | const char *(*get_name)(struct spi_mem *mem); 285 | int (*dirmap_create)(struct spi_mem_dirmap_desc *desc); 286 | void (*dirmap_destroy)(struct spi_mem_dirmap_desc *desc); 287 | ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *desc, 288 | u64 offs, size_t len, void *buf); 289 | ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc, 290 | u64 offs, size_t len, const void *buf); 291 | }; 292 | 293 | bool spi_mem_default_supports_op(struct spi_mem *mem, 294 | const struct spi_mem_op *op); 295 | 296 | int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op); 297 | 298 | bool spi_mem_supports_op(struct spi_mem *mem, 299 | const struct spi_mem_op *op); 300 | 301 | int spi_mem_exec_op(struct spi_mem *mem, 302 | const struct spi_mem_op *op); 303 | 304 | 305 | 306 | struct spi_mem_dirmap_desc * 307 | spi_mem_dirmap_create(struct spi_mem *mem, 308 | const struct spi_mem_dirmap_info *info); 309 | void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc); 310 | ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, 311 | u64 offs, size_t len, void *buf); 312 | ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, 313 | u64 offs, size_t len, const void *buf); 314 | #endif 315 | -------------------------------------------------------------------------------- /include/spi.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #define SPI_TX_DUAL 0x100 /* transmit with 2 wires */ 4 | #define SPI_TX_QUAD 0x200 /* transmit with 4 wires */ 5 | #define SPI_RX_DUAL 0x400 /* receive with 2 wires */ 6 | #define SPI_RX_QUAD 0x800 /* receive with 4 wires */ 7 | #define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */ 8 | #define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */ -------------------------------------------------------------------------------- /include/spinand.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | /* 3 | * Copyright (c) 2016-2017 Micron Technology, Inc. 
4 | *
5 | * Authors:
6 | * Peter Pan
7 | */
8 | #ifndef __LINUX_MTD_SPINAND_H
9 | #define __LINUX_MTD_SPINAND_H
10 | 
11 | #include <linux-types.h>
12 | #include <nand.h>
13 | #include <spi-mem.h>
14 | 
15 | /**
16 | * Standard SPI NAND flash operations
17 | */
18 | 
19 | #define SPINAND_RESET_OP \
20 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
21 | SPI_MEM_OP_NO_ADDR, \
22 | SPI_MEM_OP_NO_DUMMY, \
23 | SPI_MEM_OP_NO_DATA)
24 | 
25 | #define SPINAND_WR_EN_DIS_OP(enable) \
26 | SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
27 | SPI_MEM_OP_NO_ADDR, \
28 | SPI_MEM_OP_NO_DUMMY, \
29 | SPI_MEM_OP_NO_DATA)
30 | 
31 | #define SPINAND_READID_OP(naddr, ndummy, buf, len) \
32 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
33 | SPI_MEM_OP_ADDR(naddr, 0, 1), \
34 | SPI_MEM_OP_DUMMY(ndummy, 1), \
35 | SPI_MEM_OP_DATA_IN(len, buf, 1))
36 | 
37 | #define SPINAND_SET_FEATURE_OP(reg, valptr) \
38 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
39 | SPI_MEM_OP_ADDR(1, reg, 1), \
40 | SPI_MEM_OP_NO_DUMMY, \
41 | SPI_MEM_OP_DATA_OUT(1, valptr, 1))
42 | 
43 | #define SPINAND_GET_FEATURE_OP(reg, valptr) \
44 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
45 | SPI_MEM_OP_ADDR(1, reg, 1), \
46 | SPI_MEM_OP_NO_DUMMY, \
47 | SPI_MEM_OP_DATA_IN(1, valptr, 1))
48 | 
49 | #define SPINAND_BLK_ERASE_OP(addr) \
50 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
51 | SPI_MEM_OP_ADDR(3, addr, 1), \
52 | SPI_MEM_OP_NO_DUMMY, \
53 | SPI_MEM_OP_NO_DATA)
54 | 
55 | #define SPINAND_PAGE_READ_OP(addr) \
56 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
57 | SPI_MEM_OP_ADDR(3, addr, 1), \
58 | SPI_MEM_OP_NO_DUMMY, \
59 | SPI_MEM_OP_NO_DATA)
60 | 
61 | #define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
62 | SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
63 | SPI_MEM_OP_ADDR(2, addr, 1), \
64 | SPI_MEM_OP_DUMMY(ndummy, 1), \
65 | SPI_MEM_OP_DATA_IN(len, buf, 1))
66 | 
67 | #define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
68 | SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 
0x0b : 0x03, 1), \ 69 | SPI_MEM_OP_ADDR(3, addr, 1), \ 70 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 71 | SPI_MEM_OP_DATA_IN(len, buf, 1)) 72 | 73 | #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \ 74 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ 75 | SPI_MEM_OP_ADDR(2, addr, 1), \ 76 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 77 | SPI_MEM_OP_DATA_IN(len, buf, 2)) 78 | 79 | #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \ 80 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ 81 | SPI_MEM_OP_ADDR(3, addr, 1), \ 82 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 83 | SPI_MEM_OP_DATA_IN(len, buf, 2)) 84 | 85 | #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \ 86 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ 87 | SPI_MEM_OP_ADDR(2, addr, 1), \ 88 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 89 | SPI_MEM_OP_DATA_IN(len, buf, 4)) 90 | 91 | #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \ 92 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ 93 | SPI_MEM_OP_ADDR(3, addr, 1), \ 94 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 95 | SPI_MEM_OP_DATA_IN(len, buf, 4)) 96 | 97 | #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \ 98 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ 99 | SPI_MEM_OP_ADDR(2, addr, 2), \ 100 | SPI_MEM_OP_DUMMY(ndummy, 2), \ 101 | SPI_MEM_OP_DATA_IN(len, buf, 2)) 102 | 103 | #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \ 104 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ 105 | SPI_MEM_OP_ADDR(3, addr, 2), \ 106 | SPI_MEM_OP_DUMMY(ndummy, 2), \ 107 | SPI_MEM_OP_DATA_IN(len, buf, 2)) 108 | 109 | #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \ 110 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ 111 | SPI_MEM_OP_ADDR(2, addr, 4), \ 112 | SPI_MEM_OP_DUMMY(ndummy, 4), \ 113 | SPI_MEM_OP_DATA_IN(len, buf, 4)) 114 | 115 | #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \ 116 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ 117 | SPI_MEM_OP_ADDR(3, addr, 4), \ 118 | SPI_MEM_OP_DUMMY(ndummy, 4), \ 119 | SPI_MEM_OP_DATA_IN(len, buf, 4)) 120 | 121 | #define SPINAND_PROG_EXEC_OP(addr) \ 122 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \ 123 | SPI_MEM_OP_ADDR(3, addr, 1), \ 124 | SPI_MEM_OP_NO_DUMMY, \ 125 | SPI_MEM_OP_NO_DATA) 126 | 127 | #define SPINAND_PROG_LOAD(reset, addr, buf, len) \ 128 | SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \ 129 | SPI_MEM_OP_ADDR(2, addr, 1), \ 130 | SPI_MEM_OP_NO_DUMMY, \ 131 | SPI_MEM_OP_DATA_OUT(len, buf, 1)) 132 | 133 | #define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \ 134 | SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 
0x32 : 0x34, 1), \
135 | SPI_MEM_OP_ADDR(2, addr, 1), \
136 | SPI_MEM_OP_NO_DUMMY, \
137 | SPI_MEM_OP_DATA_OUT(len, buf, 4))
138 | 
139 | /**
140 | * Standard SPI NAND flash commands
141 | */
142 | #define SPINAND_CMD_PROG_LOAD_X4 0x32
143 | #define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
144 | 
145 | /* feature register */
146 | #define REG_BLOCK_LOCK 0xa0
147 | #define BL_ALL_UNLOCKED 0x00
148 | 
149 | /* configuration register */
150 | #define REG_CFG 0xb0
151 | #define CFG_OTP_ENABLE BIT(6)
152 | #define CFG_ECC_ENABLE BIT(4)
153 | #define CFG_QUAD_ENABLE BIT(0)
154 | 
155 | /* status register */
156 | #define REG_STATUS 0xc0
157 | #define STATUS_BUSY BIT(0)
158 | #define STATUS_ERASE_FAILED BIT(2)
159 | #define STATUS_PROG_FAILED BIT(3)
160 | #define STATUS_ECC_MASK GENMASK(5, 4)
161 | #define STATUS_ECC_NO_BITFLIPS (0 << 4)
162 | #define STATUS_ECC_HAS_BITFLIPS (1 << 4)
163 | #define STATUS_ECC_UNCOR_ERROR (2 << 4)
164 | 
165 | struct spinand_op;
166 | struct spinand_device;
167 | 
168 | #define SPINAND_MAX_ID_LEN 4
169 | 
170 | /**
171 | * struct spinand_id - SPI NAND id structure
172 | * @data: buffer containing the id bytes. Currently 4 bytes large, but can
173 | * be extended if required
174 | * @len: ID length
175 | */
176 | struct spinand_id {
177 | u8 data[SPINAND_MAX_ID_LEN];
178 | int len;
179 | };
180 | 
181 | enum spinand_readid_method {
182 | SPINAND_READID_METHOD_OPCODE,
183 | SPINAND_READID_METHOD_OPCODE_ADDR,
184 | SPINAND_READID_METHOD_OPCODE_DUMMY,
185 | };
186 | 
187 | /**
188 | * struct spinand_devid - SPI NAND device id structure
189 | * @id: device id of current chip
190 | * @len: number of bytes in device id
191 | * @method: method to read chip id
192 | * There are 3 possible variants:
193 | * SPINAND_READID_METHOD_OPCODE: chip id is returned immediately
194 | * after read_id opcode.
195 | * SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after
196 | * read_id opcode + 1-byte address.
197 | * SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after
198 | * read_id opcode + 1 dummy byte.
199 | */
200 | struct spinand_devid {
201 | const u8 *id;
202 | const u8 len;
203 | const enum spinand_readid_method method;
204 | };
205 | 
206 | /**
207 | * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
208 | * @init: initialize a SPI NAND device
209 | * @cleanup: cleanup a SPI NAND device
210 | * 
211 | * Each SPI NAND manufacturer driver should implement this interface so that
212 | * NAND chips coming from this vendor can be initialized properly.
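 * See spi-nand/gigadevice.c, macronix.c, micron.c, paragon.c, toshiba.c and winbond.c for the implementations in this tree.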
213 | */
214 | struct spinand_manufacturer_ops {
215 | int (*init)(struct spinand_device *spinand);
216 | void (*cleanup)(struct spinand_device *spinand);
217 | };
218 | 
219 | /**
220 | * struct spinand_manufacturer - SPI NAND manufacturer instance
221 | * @id: manufacturer ID
222 | * @name: manufacturer name
223 | * (device IDs and their lengths are described per chip by struct spinand_devid)
224 | * @chips: supported SPI NANDs under current manufacturer
225 | * @nchips: number of SPI NANDs available in chips array
226 | * @ops: manufacturer operations
227 | */
228 | struct spinand_manufacturer {
229 | u8 id;
230 | char *name;
231 | const struct spinand_info *chips;
232 | const size_t nchips;
233 | const struct spinand_manufacturer_ops *ops;
234 | };
235 | 
236 | /* SPI NAND manufacturers */
237 | extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
238 | extern const struct spinand_manufacturer macronix_spinand_manufacturer;
239 | extern const struct spinand_manufacturer micron_spinand_manufacturer;
240 | extern const struct spinand_manufacturer paragon_spinand_manufacturer;
241 | extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
242 | extern const struct spinand_manufacturer winbond_spinand_manufacturer;
243 | 
244 | /**
245 | * struct spinand_op_variants - SPI NAND operation variants
246 | * @ops: the list of variants for a given operation
247 | * @nops: the number of variants
248 | * 
249 | * Some operations like read-from-cache/write-to-cache have several variants
250 | * depending on the number of IO lines you use to transfer data or address
251 | * cycles. This structure is a way to describe the different variants supported
252 | * by a chip and let the core pick the best one based on the SPI mem controller
253 | * capabilities.
254 | */
255 | struct spinand_op_variants {
256 | const struct spi_mem_op *ops;
257 | unsigned int nops;
258 | };
259 | 
260 | #define SPINAND_OP_VARIANTS(name, ...) \
261 | const struct spinand_op_variants name = { \
262 | .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \
263 | .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \
264 | sizeof(struct spi_mem_op), \
265 | }
266 | 
267 | /**
268 | * spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND
269 | * chip
270 | * @get_status: get the ECC status. Should return a positive number encoding
271 | * the number of corrected bitflips if correction was possible or
272 | * -EBADMSG if there are uncorrectable errors. It can also return
273 | * other negative error codes if the error is not caused by
274 | * uncorrectable bitflips
275 | */
276 | struct spinand_ecc_info {
277 | int (*get_status)(struct spinand_device *spinand, u8 status);
278 | };
279 | 
280 | #define SPINAND_HAS_QE_BIT BIT(0)
281 | #define SPINAND_HAS_CR_FEAT_BIT BIT(1)
282 | 
283 | /**
284 | * struct spinand_info - Structure used to describe SPI NAND chips
285 | * @model: model name
286 | * @devid: device ID
287 | * @flags: OR-ing of the SPINAND_XXX flags
288 | * @memorg: memory organization
289 | * @eccreq: ECC requirements
290 | * @eccinfo: on-die ECC info
291 | * @op_variants: operations variants
292 | * @op_variants.read_cache: variants of the read-cache operation
293 | * @op_variants.write_cache: variants of the write-cache operation
294 | * @op_variants.update_cache: variants of the update-cache operation
295 | * @select_target: function used to select a target/die.
Required only for 296 | * multi-die chips 297 | * 298 | * Each SPI NAND manufacturer driver should have a spinand_info table 299 | * describing all the chips supported by the driver. 300 | */ 301 | struct spinand_info { 302 | const char *model; 303 | struct spinand_devid devid; 304 | u32 flags; 305 | struct nand_memory_organization memorg; 306 | struct nand_ecc_props eccreq; 307 | struct spinand_ecc_info eccinfo; 308 | struct { 309 | const struct spinand_op_variants *read_cache; 310 | const struct spinand_op_variants *write_cache; 311 | const struct spinand_op_variants *update_cache; 312 | } op_variants; 313 | int (*select_target)(struct spinand_device *spinand, 314 | unsigned int target); 315 | }; 316 | 317 | #define SPINAND_ID(__method, ...) \ 318 | { \ 319 | .id = (const u8[]){ __VA_ARGS__ }, \ 320 | .len = sizeof((u8[]){ __VA_ARGS__ }), \ 321 | .method = __method, \ 322 | } 323 | 324 | #define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \ 325 | { \ 326 | .read_cache = __read, \ 327 | .write_cache = __write, \ 328 | .update_cache = __update, \ 329 | } 330 | 331 | #define SPINAND_ECCINFO(__get_status) \ 332 | .eccinfo = { \ 333 | .get_status = __get_status, \ 334 | } 335 | 336 | #define SPINAND_SELECT_TARGET(__func) \ 337 | .select_target = __func, 338 | 339 | #define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \ 340 | __flags, ...) \ 341 | { \ 342 | .model = __model, \ 343 | .devid = __id, \ 344 | .memorg = __memorg, \ 345 | .eccreq = __eccreq, \ 346 | .op_variants = __op_variants, \ 347 | .flags = __flags, \ 348 | __VA_ARGS__ \ 349 | } 350 | 351 | struct spinand_dirmap { 352 | struct spi_mem_dirmap_desc *wdesc; 353 | struct spi_mem_dirmap_desc *rdesc; 354 | }; 355 | 356 | /** 357 | * struct spinand_device - SPI NAND device instance 358 | * @base: NAND device instance 359 | * @spimem: pointer to the SPI mem object 360 | * @id: NAND ID as returned by READ_ID 361 | * @flags: NAND flags 362 | * @op_templates: various SPI mem op templates 363 | * @op_templates.read_cache: read cache op template 364 | * @op_templates.write_cache: write cache op template 365 | * @op_templates.update_cache: update cache op template 366 | * @select_target: select a specific target/die. Usually called before sending 367 | * a command addressing a page or an eraseblock embedded in 368 | * this die. Only required if your chip exposes several dies 369 | * @cur_target: currently selected target/die 370 | * @eccinfo: on-die ECC information 371 | * @cfg_cache: config register cache. One entry per die 372 | * @databuf: bounce buffer for data 373 | * @oobbuf: bounce buffer for OOB data 374 | * @scratchbuf: buffer used for everything but page accesses. 
This is needed 375 | * because the spi-mem interface explicitly requests that buffers 376 | * passed in spi_mem_op be DMA-able, so we can't base the bufs on 377 | * the stack 378 | * @manufacturer: SPI NAND manufacturer information 379 | * @priv: manufacturer private data 380 | */ 381 | struct spinand_device { 382 | struct nand_device base; 383 | struct spi_mem *spimem; 384 | struct spinand_id id; 385 | u32 flags; 386 | 387 | struct { 388 | const struct spi_mem_op *read_cache; 389 | const struct spi_mem_op *write_cache; 390 | const struct spi_mem_op *update_cache; 391 | } op_templates; 392 | 393 | struct spinand_dirmap *dirmaps; 394 | 395 | int (*select_target)(struct spinand_device *spinand, 396 | unsigned int target); 397 | unsigned int cur_target; 398 | 399 | struct spinand_ecc_info eccinfo; 400 | 401 | u8 *cfg_cache; 402 | u8 *databuf; 403 | u8 *oobbuf; 404 | u8 *scratchbuf; 405 | const struct spinand_manufacturer *manufacturer; 406 | void *priv; 407 | }; 408 | 409 | /** 410 | * nand_to_spinand() - Get the SPI NAND device embedding a NAND object 411 | * @nand: NAND object 412 | * 413 | * Return: the SPI NAND device embedding @nand. 414 | */ 415 | static inline struct spinand_device *nand_to_spinand(struct nand_device *nand) 416 | { 417 | return container_of(nand, struct spinand_device, base); 418 | } 419 | 420 | /** 421 | * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object 422 | * @spinand: SPI NAND device 423 | * 424 | * Return: the NAND device embedded in @spinand. 425 | */ 426 | static inline struct nand_device * 427 | spinand_to_nand(struct spinand_device *spinand) 428 | { 429 | return &spinand->base; 430 | } 431 | 432 | int spinand_match_and_init(struct spinand_device *spinand, 433 | const struct spinand_info *table, 434 | unsigned int table_size, 435 | enum spinand_readid_method rdid_method); 436 | 437 | int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); 438 | int spinand_ecc_enable(struct spinand_device *spinand, bool enable); 439 | int spinand_select_target(struct spinand_device *spinand, unsigned int target); 440 | 441 | int spinand_read_page(struct spinand_device *spinand, 442 | const struct nand_page_io_req *req, 443 | bool ecc_enabled); 444 | int spinand_write_page(struct spinand_device *spinand, 445 | const struct nand_page_io_req *req, bool ecc_enabled); 446 | int spinand_erase(struct spinand_device *spinand, const struct nand_pos *pos); 447 | 448 | struct spinand_device *spinand_probe(struct spi_mem *mem); 449 | void spinand_remove(struct spinand_device *spinand); 450 | #endif /* __LINUX_MTD_SPINAND_H */ 451 | -------------------------------------------------------------------------------- /main.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | static int no_ecc = 0; 9 | static int with_oob = 0; 10 | static int erase_rest = 0; 11 | static size_t offs = 0; 12 | static size_t length = 0; 13 | static const char *drv = "ch347"; 14 | static const char *drvarg = NULL; 15 | static const struct option long_opts[] = { 16 | { "no-ecc", no_argument, &no_ecc, 1 }, 17 | { "with-oob", no_argument, &with_oob, 1 }, 18 | { "erase-rest", no_argument, &erase_rest, 1 }, 19 | { "offset", required_argument, NULL, 'o' }, 20 | { "length", required_argument, NULL, 'l' }, 21 | { "driver", required_argument, NULL, 'd' }, 22 | { "driver-arg", required_argument, NULL, 'a' }, 23 | { 0, 0, NULL, 0 }, 24 | }; 25 | 26 | int main(int argc, char 
*argv[]) 27 | { 28 | int ret = 0; 29 | const char *fpath = NULL; 30 | FILE *fp = NULL; 31 | int opt; /* getopt_long() returns int; a plain char breaks the >= 0 check where char is unsigned */ 32 | int long_optind = 0; 33 | int left_argc; 34 | struct spinand_device *snand; 35 | struct spi_mem *mem; 36 | 37 | while ((opt = getopt_long(argc, argv, "o:l:d:a:", long_opts, 38 | &long_optind)) >= 0) { 39 | switch (opt) { 40 | case 'o': 41 | offs = strtoul(optarg, NULL, 0); 42 | break; 43 | case 'l': 44 | length = strtoul(optarg, NULL, 0); 45 | break; 46 | case 'd': 47 | drv = optarg; 48 | break; 49 | case 'a': 50 | drvarg = optarg; 51 | break; 52 | case '?': 53 | puts("unknown option."); 54 | return -1; 55 | default: 56 | break; 57 | } 58 | } 59 | 60 | left_argc = argc - optind; 61 | if (left_argc < 1) { 62 | puts("missing action."); 63 | return -1; 64 | } 65 | 66 | //reuse opt here. It now holds the actual action. 67 | opt = argv[optind][0]; 68 | 69 | switch (opt) { 70 | case 'r': 71 | case 'w': 72 | if (left_argc < 2) { 73 | puts("missing filename."); 74 | return -1; 75 | } 76 | fpath = argv[optind + 1]; 77 | break; 78 | case 'e': 79 | case 's': 80 | break; 81 | default: 82 | puts("unknown operation."); 83 | return -1; 84 | } 85 | 86 | mem = spi_mem_probe(drv, drvarg); 87 | if (!mem) { 88 | fprintf(stderr, "device not found.\n"); 89 | return -1; 90 | } 91 | 92 | snand = spinand_probe(mem); 93 | if (!snand) { 94 | fprintf(stderr, "unknown SPI NAND.\n"); 95 | goto CLEANUP1; 96 | } 97 | if (fpath) { 98 | fp = fopen(fpath, opt == 'r' ? "wb" : "rb"); 99 | if (!fp) { 100 | perror("failed to open file"); 101 | goto CLEANUP2; 102 | } 103 | } 104 | switch (opt) { 105 | case 'r': 106 | snand_read(snand, offs, length, !no_ecc, with_oob, fp); 107 | break; 108 | case 'w': 109 | snand_write(snand, offs, !no_ecc, with_oob, erase_rest, fp, 0, 110 | 0, 0, 0); 111 | break; 112 | case 'e': 113 | snand_write(snand, offs, false, false, true, NULL, 0, 0, 0, 0); 114 | break; 115 | case 's': 116 | snand_scan_bbm(snand); 117 | break; 118 | } 119 | if (fp) 120 | fclose(fp); 121 | 122 | CLEANUP2: 123 | spinand_remove(snand); 124 | CLEANUP1: 125 | spi_mem_remove(drv, mem); 126 | return ret; 127 | } 128 | -------------------------------------------------------------------------------- /spi-mem/ch347/ch347.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-1-Clause 2 | /* 3 | * Copyright (C) 2022 Chuanhong Guo 4 | * 5 | * CH347 SPI library using libusb. Protocol reverse-engineered from WCH linux library. 6 | * FIXME: Every number used in the USB protocol should be little-endian. 7 | */ 8 | 9 | #include "ch347.h" 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) 16 | #error You need to convert every USB communication to little endian before this library will work. 
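/* Hedged sketch of such a conversion (an assumption, not implemented in this tree): wrap the 16-bit packet length in ch347_spi_write_packet()/ch347_spi_read_packet() with htole16()/le16toh() and the 32-bit block-read length in ch347_spi_rx() with htole32(), using the glibc-style <endian.h> helpers; all of these compile to no-ops on little-endian hosts. */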
17 | #endif 18 | 19 | int ch347_spi_write_packet(struct ch347_priv *priv, uint8_t cmd, const void *tx, int len) { 20 | uint8_t *ptr; 21 | int cur_len; 22 | int err, transferred; 23 | if (len > CH347_SPI_MAX_TRX) 24 | return -EINVAL; 25 | 26 | priv->tmpbuf[0] = cmd; 27 | priv->tmpbuf[1] = len & 0xff; 28 | priv->tmpbuf[2] = len >> 8; 29 | cur_len = sizeof(priv->tmpbuf) - 3; 30 | if (len < cur_len) 31 | cur_len = len; 32 | memcpy(priv->tmpbuf + 3, tx, cur_len); 33 | err = libusb_bulk_transfer(priv->handle, CH347_EPOUT, priv->tmpbuf, cur_len + 3, &transferred, 1000); 34 | if (err) { 35 | fprintf(stderr, "ch347: libusb: failed to send packet: %d\n", err); 36 | return err; 37 | } 38 | if (cur_len < len) { 39 | /* This discards the const qualifier. However, libusb won't be writing to it. */ 40 | ptr = (uint8_t *) (tx + cur_len); 41 | err = libusb_bulk_transfer(priv->handle, CH347_EPOUT, ptr, len - cur_len, &transferred, 1000); 42 | if (err) { 43 | fprintf(stderr, "ch347: libusb: failed to send packet: %d\n", err); 44 | return err; 45 | } 46 | } 47 | return 0; 48 | } 49 | 50 | int ch347_spi_read_packet(struct ch347_priv *priv, uint8_t cmd, void *rx, int len, int *actual_len) { 51 | int cur_len, rxlen, rx_received; 52 | int err, transferred; 53 | 54 | err = libusb_bulk_transfer(priv->handle, CH347_EPIN, priv->tmpbuf, sizeof(priv->tmpbuf), &transferred, 1000); 55 | if (err) { 56 | fprintf(stderr, "ch347: libusb: failed to receive packet: %d\n", err); 57 | return err; 58 | } 59 | 60 | if (priv->tmpbuf[0] != cmd) { 61 | fprintf(stderr, "ch347: unexpected packet cmd: expecting 0x%02x but we got 0x%02x.\n", cmd, priv->tmpbuf[0]); 62 | return -EINVAL; 63 | } 64 | 65 | rxlen = priv->tmpbuf[1] | priv->tmpbuf[2] << 8; 66 | if (rxlen > len) { 67 | fprintf(stderr, "ch347: packet too big.\n"); 68 | return -EINVAL; 69 | } 70 | 71 | cur_len = transferred - 3; 72 | if (rxlen < cur_len) 73 | cur_len = rxlen; 74 | memcpy(rx, priv->tmpbuf + 3, cur_len); 75 | rx_received = cur_len; 76 | while (rx_received < rxlen) { 77 | /* The leftover data length is known so we don't need to deal with packet overflow using tmpbuf. */ 78 | err = libusb_bulk_transfer(priv->handle, CH347_EPIN, rx + rx_received, rxlen - rx_received, &transferred, 1000); 79 | if (err) { 80 | fprintf(stderr, "ch347: libusb: failed to receive packet: %d\n", err); 81 | return err; 82 | } 83 | rx_received += transferred; 84 | } 85 | 86 | *actual_len = rx_received; 87 | return 0; 88 | } 89 | 90 | int ch347_get_hw_config(struct ch347_priv *priv) { 91 | int err, transferred; 92 | uint8_t unknown_data = 0x01; 93 | 94 | err = ch347_spi_write_packet(priv, CH347_CMD_INFO_RD, &unknown_data, 1); 95 | if (err) 96 | return err; 97 | 98 | err = ch347_spi_read_packet(priv, CH347_CMD_INFO_RD, &priv->cfg, sizeof(priv->cfg), &transferred); 99 | if (err) 100 | return err; 101 | 102 | if (transferred != sizeof(priv->cfg)) { 103 | fprintf(stderr, "ch347: config returned isn't long enough.\n"); 104 | return -EINVAL; 105 | } 106 | 107 | return 0; 108 | } 109 | 110 | int ch347_commit_settings(struct ch347_priv *priv) { 111 | int err, transferred; 112 | uint8_t unknown_data; 113 | err = ch347_spi_write_packet(priv, CH347_CMD_SPI_INIT, &priv->cfg, sizeof(priv->cfg)); 114 | if (err) 115 | return err; 116 | 117 | return ch347_spi_read_packet(priv, CH347_CMD_SPI_INIT, &unknown_data, 1, &transferred); 118 | } 119 | 120 | int ch347_set_cs(struct ch347_priv *priv, int cs, int val, uint16_t autodeactive_us) { 121 | uint8_t buf[10] = {}; 122 | uint8_t *entry = cs ? 
buf + 5 : buf; 123 | 124 | entry[0] = val ? 0xc0 : 0x80; 125 | if(autodeactive_us) { 126 | entry[0] |= 0x20; 127 | entry[3] = autodeactive_us & 0xff; 128 | entry[4] = autodeactive_us >> 8; 129 | } 130 | 131 | return ch347_spi_write_packet(priv, CH347_CMD_SPI_CONTROL, buf, 10); 132 | } 133 | 134 | int ch347_set_spi_freq(struct ch347_priv *priv, int *clk_khz) { 135 | int freq = CH347_SPI_MAX_FREQ; 136 | int prescaler; 137 | for (prescaler = 0; prescaler < CH347_SPI_MAX_PRESCALER; prescaler++) { 138 | if (freq <= *clk_khz) 139 | break; 140 | freq /= 2; 141 | } 142 | if (freq > *clk_khz) 143 | return -EINVAL; 144 | priv->cfg.SPI_BaudRatePrescaler = prescaler * 8; 145 | *clk_khz = freq; 146 | return ch347_commit_settings(priv); 147 | } 148 | 149 | int ch347_setup_spi(struct ch347_priv *priv, int spi_mode, bool lsb_first, bool cs0_active_high, bool cs1_active_high) { 150 | priv->cfg.SPI_Direction = SPI_Direction_2Lines_FullDuplex; 151 | priv->cfg.SPI_Mode = SPI_Mode_Master; 152 | priv->cfg.SPI_DataSize = SPI_DataSize_8b; 153 | priv->cfg.SPI_CPOL = (spi_mode & 2) ? SPI_CPOL_High : SPI_CPOL_Low; 154 | priv->cfg.SPI_CPHA = (spi_mode & 1) ? SPI_CPHA_2Edge : SPI_CPHA_1Edge; 155 | priv->cfg.SPI_NSS = SPI_NSS_Software; 156 | priv->cfg.SPI_FirstBit = lsb_first ? SPI_FirstBit_LSB : SPI_FirstBit_MSB; 157 | priv->cfg.SPI_WriteReadInterval = 0; 158 | priv->cfg.SPI_OutDefaultData = 0; 159 | 160 | if (cs0_active_high) 161 | priv->cfg.OtherCfg |= 0x80; 162 | else 163 | priv->cfg.OtherCfg &= 0x7f; 164 | if (cs1_active_high) 165 | priv->cfg.OtherCfg |= 0x40; 166 | else 167 | priv->cfg.OtherCfg &= 0xbf; 168 | 169 | return ch347_commit_settings(priv); 170 | } 171 | 172 | static int ch347_spi_trx_full_duplex_one(struct ch347_priv *priv, void *buf, uint32_t len) { 173 | int err, transferred; 174 | 175 | err = ch347_spi_write_packet(priv, CH347_CMD_SPI_RD_WR, buf, len); 176 | if (err) 177 | return err; 178 | 179 | err = ch347_spi_read_packet(priv, CH347_CMD_SPI_RD_WR, buf, len, &transferred); 180 | if (err) 181 | return err; 182 | 183 | if (transferred != len) { 184 | fprintf(stderr, "ch347: not enough data received."); 185 | return -EINVAL; 186 | } 187 | return 0; 188 | } 189 | 190 | int ch347_spi_trx_full_duplex(struct ch347_priv *priv, void *buf, uint32_t len) { 191 | int err; 192 | while (len > CH347_SPI_MAX_TRX) { 193 | err = ch347_spi_trx_full_duplex_one(priv, buf, CH347_SPI_MAX_TRX); 194 | if (err) 195 | return err; 196 | len -= CH347_SPI_MAX_TRX; 197 | } 198 | return ch347_spi_trx_full_duplex_one(priv, buf, len); 199 | } 200 | 201 | int ch347_spi_tx(struct ch347_priv *priv, const void *tx, uint32_t len) { 202 | int err, transferred; 203 | uint8_t unknown_data; 204 | const void *ptr = tx; 205 | while (len) { 206 | int cur_len = len > CH347_SPI_MAX_TRX ? CH347_SPI_MAX_TRX : len; 207 | err = ch347_spi_write_packet(priv, CH347_CMD_SPI_BLCK_WR, ptr, cur_len); 208 | if (err) 209 | return err; 210 | err = ch347_spi_read_packet(priv, CH347_CMD_SPI_BLCK_WR, &unknown_data, 1, &transferred); 211 | if (err) 212 | return err; 213 | ptr += cur_len; 214 | len -= cur_len; 215 | } 216 | return 0; 217 | } 218 | 219 | int ch347_spi_rx(struct ch347_priv *priv, void *rx, uint32_t len) { 220 | int err, transferred; 221 | void *ptr = rx; 222 | uint32_t rxlen = 0; 223 | /* FIXME: len should be little endian! 
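A minimal fix, assuming a glibc-style <endian.h> is available: uint32_t lelen = htole32(len); then pass &lelen instead of &len below. 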
*/ 224 | err = ch347_spi_write_packet(priv, CH347_CMD_SPI_BLCK_RD, &len, sizeof(len)); 225 | if (err) 226 | return err; 227 | while(rxlen < len) { 228 | uint32_t cur_rx = len - rxlen; 229 | if(cur_rx > CH347_SPI_MAX_TRX) 230 | cur_rx = CH347_SPI_MAX_TRX; 231 | err = ch347_spi_read_packet(priv, CH347_CMD_SPI_BLCK_RD, ptr, (int)cur_rx, &transferred); 232 | if (err) 233 | return err; 234 | rxlen += transferred; 235 | ptr += transferred; 236 | } 237 | return 0; 238 | } 239 | 240 | struct ch347_priv *ch347_open() { 241 | struct ch347_priv *priv = calloc(1, sizeof(struct ch347_priv)); 242 | int ret; 243 | 244 | if (!priv) { 245 | fprintf(stderr, "ch347: failed to allocate memory.\n"); 246 | return NULL; 247 | } 248 | ret = libusb_init(&priv->ctx); 249 | if (ret < 0) { 250 | perror("ch347: libusb: init"); 251 | goto ERR_0; 252 | } 253 | 254 | libusb_set_option(priv->ctx, LIBUSB_OPTION_LOG_LEVEL, LIBUSB_LOG_LEVEL_INFO); 255 | priv->handle = libusb_open_device_with_vid_pid(priv->ctx, CH347_SPI_VID, CH347_SPI_PID); 256 | if (!priv->handle) { 257 | perror("ch347: libusb: open"); 258 | goto ERR_1; 259 | } 260 | 261 | libusb_set_auto_detach_kernel_driver(priv->handle, 1); 262 | 263 | ret = libusb_claim_interface(priv->handle, CH347_SPI_IF); 264 | if (ret < 0) { 265 | perror("ch347: libusb: claim_if"); 266 | goto ERR_2; 267 | } 268 | 269 | if (ch347_get_hw_config(priv)) 270 | goto ERR_3; 271 | 272 | return priv; 273 | 274 | ERR_3: 275 | libusb_release_interface(priv->handle, CH347_SPI_IF); 276 | ERR_2: 277 | libusb_close(priv->handle); 278 | ERR_1: 279 | libusb_exit(priv->ctx); 280 | ERR_0: 281 | free(priv); 282 | return NULL; 283 | } 284 | 285 | void ch347_close(struct ch347_priv *priv) { 286 | libusb_release_interface(priv->handle, CH347_SPI_IF); 287 | libusb_close(priv->handle); 288 | libusb_exit(priv->ctx); 289 | free(priv); 290 | } -------------------------------------------------------------------------------- /spi-mem/ch347/ch347.h: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-1-Clause 2 | /* 3 | * Copyright (C) 2022 Chuanhong Guo 4 | * 5 | * CH347 SPI library using libusb. Protocol reverse-engineered from WCH linux library. 6 | * FIXME: Every number used in the USB protocol should be little-endian. 
7 | */ 8 | 9 | #ifndef CH347_H 10 | #define CH347_H 11 | 12 | #ifdef __cplusplus 13 | extern "C" { 14 | #endif 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | #define CH347_SPI_VID 0x1a86 22 | #define CH347_SPI_PID 0x55db 23 | #define CH347_SPI_IF 2 24 | #define CH347_EPOUT (6 | LIBUSB_ENDPOINT_OUT) 25 | #define CH347_EPIN (6 | LIBUSB_ENDPOINT_IN) 26 | 27 | #define CH347_SPI_MAX_FREQ 60000 28 | #define CH347_SPI_MAX_PRESCALER 7 29 | #define CH347_SPI_MAX_TRX 4096 30 | 31 | /* SPI_data_direction */ 32 | #define SPI_Direction_2Lines_FullDuplex 0x0000 33 | #define SPI_Direction_2Lines_RxOnly 0x0400 34 | #define SPI_Direction_1Line_Rx 0x8000 35 | #define SPI_Direction_1Line_Tx 0xC000 36 | 37 | /* SPI_mode */ 38 | #define SPI_Mode_Master 0x0104 39 | #define SPI_Mode_Slave 0x0000 40 | 41 | /* SPI_data_size */ 42 | #define SPI_DataSize_16b 0x0800 43 | #define SPI_DataSize_8b 0x0000 44 | 45 | /* SPI_Clock_Polarity */ 46 | #define SPI_CPOL_Low 0x0000 47 | #define SPI_CPOL_High 0x0002 48 | 49 | /* SPI_Clock_Phase */ 50 | #define SPI_CPHA_1Edge 0x0000 51 | #define SPI_CPHA_2Edge 0x0001 52 | 53 | /* SPI_Slave_Select_management */ 54 | #define SPI_NSS_Software 0x0200 55 | #define SPI_NSS_Hardware 0x0000 56 | 57 | /* SPI_MSB_LSB_transmission */ 58 | #define SPI_FirstBit_MSB 0x0000 59 | #define SPI_FirstBit_LSB 0x0080 60 | 61 | /* CH347 commands */ 62 | #define CH347_CMD_SPI_INIT 0xC0 63 | #define CH347_CMD_SPI_CONTROL 0xC1 64 | #define CH347_CMD_SPI_RD_WR 0xC2 65 | #define CH347_CMD_SPI_BLCK_RD 0xC3 66 | #define CH347_CMD_SPI_BLCK_WR 0xC4 67 | #define CH347_CMD_INFO_RD 0xCA 68 | 69 | struct ch347_spi_hw_config { 70 | uint16_t SPI_Direction; 71 | uint16_t SPI_Mode; 72 | uint16_t SPI_DataSize; 73 | uint16_t SPI_CPOL; 74 | uint16_t SPI_CPHA; 75 | uint16_t SPI_NSS; /* hardware or software managed CS */ 76 | uint16_t SPI_BaudRatePrescaler; /* prescaler = x * 8. x: 0=60MHz, 1=30MHz, 2=15MHz, 3=7.5MHz, 4=3.75MHz, 5=1.875MHz, 6=937.5KHz,7=468.75KHz */ 77 | uint16_t SPI_FirstBit; /* MSB or LSB first */ 78 | uint16_t SPI_CRCPolynomial; /* polynomial used for the CRC calculation. */ 79 | uint16_t SPI_WriteReadInterval; /* No idea what this is... 
Original comment from WCH (translated): interval, in microseconds, for the regular SPI read/write data command (DEF_CMD_SPI_RD_WR). */ 80 | uint8_t SPI_OutDefaultData; /* Data to output on MOSI during SPI reading */ 81 | /* 82 | * Miscellaneous settings: 83 | * Bit 7: CS0 polarity 84 | * Bit 6: CS1 polarity 85 | * Bit 5: Enable I2C clock stretching 86 | * Bit 4: NACK on last I2C reading 87 | * Bit 3-0: reserved 88 | */ 89 | uint8_t OtherCfg; 90 | 91 | uint8_t Reserved[4]; 92 | }; 93 | 94 | struct ch347_priv { 95 | struct ch347_spi_hw_config cfg; 96 | libusb_context *ctx; 97 | libusb_device_handle *handle; 98 | uint8_t tmpbuf[512]; 99 | }; 100 | 101 | struct ch347_priv *ch347_open(); 102 | 103 | void ch347_close(struct ch347_priv *priv); 104 | 105 | int ch347_commit_settings(struct ch347_priv *priv); 106 | 107 | int ch347_set_cs(struct ch347_priv *priv, int cs, int val, uint16_t autodeactive_us); 108 | 109 | int ch347_set_spi_freq(struct ch347_priv *priv, int *clk_khz); 110 | 111 | int ch347_setup_spi(struct ch347_priv *priv, int spi_mode, bool lsb_first, bool cs0_active_high, bool cs1_active_high); 112 | 113 | int ch347_spi_trx_full_duplex(struct ch347_priv *priv, void *buf, uint32_t len); 114 | 115 | int ch347_spi_tx(struct ch347_priv *priv, const void *tx, uint32_t len); 116 | 117 | int ch347_spi_rx(struct ch347_priv *priv, void *rx, uint32_t len); 118 | 119 | #ifdef __cplusplus 120 | } 121 | #endif 122 | 123 | #endif //CH347_H 124 | -------------------------------------------------------------------------------- /spi-mem/ch347/spi-mem.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "ch347.h" 7 | 8 | static int ch347_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) { 9 | size_t left_data = CH347_SPI_MAX_TRX - 1 - op->addr.nbytes - op->dummy.nbytes; 10 | if (op->data.nbytes > left_data) 11 | op->data.nbytes = left_data; 12 | return 0; 13 | } 14 | 15 | static int ch347_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { 16 | struct ch347_priv *priv = mem->drvpriv; 17 | uint8_t buf[16]; 18 | int p; 19 | int i, ret; 20 | 21 | buf[0] = op->cmd.opcode; 22 | 23 | if (op->addr.nbytes > 4) 24 | return -EINVAL; 25 | if (op->addr.nbytes) { 26 | uint32_t tmp = op->addr.val; 27 | for (i = op->addr.nbytes; i; i--) { 28 | buf[i] = tmp & 0xff; 29 | tmp >>= 8; 30 | } 31 | } 32 | 33 | p = op->addr.nbytes + 1; 34 | 35 | for (i = 0; i < op->dummy.nbytes; i++) 36 | buf[p++] = 0; 37 | 38 | if (sizeof(buf) - p >= op->data.nbytes) { 39 | ch347_set_cs(priv, 0, 0, 1); 40 | uint8_t *data_ptr = buf + p; 41 | if (op->data.dir == SPI_MEM_DATA_OUT && op->data.nbytes) { 42 | const uint8_t *ptr = op->data.buf.out; 43 | for (i = 0; i < op->data.nbytes; i++) 44 | buf[p++] = ptr[i]; 45 | } else if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) { 46 | for (i = 0; i < op->data.nbytes; i++) 47 | buf[p++] = 0; 48 | } 49 | ret = ch347_spi_trx_full_duplex(priv, buf, p); 50 | if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) { 51 | uint8_t *ptr = op->data.buf.in; 52 | for (i = 0; i < op->data.nbytes; i++) 53 | ptr[i] = data_ptr[i]; 54 | } 55 | } else { 56 | ch347_set_cs(priv, 0, 0, 0); 57 | ret = ch347_spi_tx(priv, buf, p); 58 | if (ret) 59 | return ret; 60 | if (op->data.dir == SPI_MEM_DATA_OUT && op->data.nbytes) 61 | ret = ch347_spi_tx(priv, op->data.buf.out, op->data.nbytes); 62 | else if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) 63 | ret = ch347_spi_rx(priv, op->data.buf.in, op->data.nbytes); 64 | ch347_set_cs(priv, 0, 1, 0); 65 | 
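/* Added note: ops whose header + payload fit the 16-byte buf[] take the branch above and run as a single full-duplex exchange with the 1 us CS auto-deassert requested through ch347_set_cs(); anything larger lands here, where CS is asserted manually, the command and data are streamed with ch347_spi_tx()/ch347_spi_rx(), and CS is deasserted once the transfer completes. */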
} 66 | 67 | 68 | return ret; 69 | } 70 | 71 | static const struct spi_controller_mem_ops ch347_mem_ops = { 72 | .adjust_op_size = ch347_adjust_op_size, 73 | .exec_op = ch347_mem_exec_op, 74 | }; 75 | 76 | static struct spi_mem ch347_mem = { 77 | .ops = &ch347_mem_ops, 78 | .spi_mode = 0, 79 | .name = "ch347", 80 | .drvpriv = NULL, 81 | }; 82 | 83 | struct spi_mem *ch347_probe() { 84 | struct ch347_priv *priv; 85 | int ret; 86 | priv = ch347_open(); 87 | if (!priv) 88 | return NULL; 89 | ret = ch347_setup_spi(priv, 3, false, false, false); 90 | if (ret) { 91 | ch347_close(priv); return NULL; } 92 | int freq = 30000; 93 | ch347_mem.drvpriv = priv; 94 | ret = ch347_set_spi_freq(priv, &freq); 95 | if (ret) { ch347_close(priv); return NULL; } return &ch347_mem; 96 | } 97 | 98 | void ch347_remove(struct spi_mem *mem) { 99 | ch347_close((struct ch347_priv *) mem->drvpriv); 100 | } -------------------------------------------------------------------------------- /spi-mem/spi-mem-drvs.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | struct spi_mem *spi_mem_probe(const char *drv, const char *drvarg) { 6 | if (!strcmp(drv, "ch347")) 7 | return ch347_probe(); 8 | if (!strcmp(drv, "fx2qspi")) 9 | return fx2qspi_probe(); 10 | if (!strcmp(drv, "serprog")) 11 | return serprog_probe(drvarg); 12 | return NULL; 13 | } 14 | 15 | void spi_mem_remove(const char *drv, struct spi_mem *mem) { 16 | if (!strcmp(drv, "ch347")) 17 | return ch347_remove(mem); 18 | if (!strcmp(drv, "fx2qspi")) 19 | return fx2qspi_remove(mem); 20 | if (!strcmp(drv, "serprog")) 21 | return serprog_remove(mem); 22 | } 23 | -------------------------------------------------------------------------------- /spi-mem/spi-mem-fx2qspi.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #define FX2_BUF_SIZE 512 9 | #define FX2_VID 0x1209 10 | #define FX2_PID 0x0001 11 | #define FX2_EPOUT (2 | LIBUSB_ENDPOINT_OUT) 12 | #define FX2_EPIN (6 | LIBUSB_ENDPOINT_IN) 13 | #define FX2_MAX_TRANSFER 0xfc0000 14 | 15 | #define FX2QSPI_CS 0x80 16 | #define FX2QSPI_QUAD 0x40 17 | #define FX2QSPI_DUAL 0x20 18 | #define FX2QSPI_READ 0x10 19 | 20 | static u8 fx2_op_buffer[FX2_BUF_SIZE]; 21 | typedef struct { 22 | libusb_context *ctx; 23 | libusb_device_handle *handle; 24 | } fx2qspi_priv; 25 | 26 | static fx2qspi_priv _priv; 27 | 28 | static int fx2qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) 29 | { 30 | if (op->data.nbytes > FX2_MAX_TRANSFER) 31 | op->data.nbytes = FX2_MAX_TRANSFER; 32 | return 0; 33 | } 34 | 35 | static void fx2qspi_fill_op(u8 buswidth, bool is_read, u16 len, size_t *ptr) 36 | { 37 | 38 | if (buswidth == 4) 39 | fx2_op_buffer[*ptr] = FX2QSPI_CS | FX2QSPI_QUAD; 40 | else if (buswidth == 2) 41 | fx2_op_buffer[*ptr] = FX2QSPI_CS | FX2QSPI_DUAL; 42 | else 43 | fx2_op_buffer[*ptr] = FX2QSPI_CS; 44 | if (is_read) 45 | fx2_op_buffer[*ptr] |= FX2QSPI_READ; 46 | fx2_op_buffer[(*ptr)++] |= ((len >> 8) & 0xff); 47 | fx2_op_buffer[(*ptr)++] = len & 0xff; 48 | } 49 | 50 | static int fx2qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 51 | { 52 | size_t ptr = 0; 53 | int i, llen, alen, ret; 54 | fx2qspi_priv *priv = spi_mem_get_drvdata(mem); 55 | fx2qspi_fill_op(op->cmd.buswidth, false, 1, &ptr); 56 | fx2_op_buffer[ptr++] = op->cmd.opcode; 57 | if (op->addr.nbytes) { 58 | fx2qspi_fill_op(op->addr.buswidth, false, op->addr.nbytes, 59 | &ptr); 60 | for (i = op->addr.nbytes - 1; i >= 0; 
i--) 61 | fx2_op_buffer[ptr++] = (op->addr.val >> (i * 8)) & 0xff; 62 | } 63 | if (op->dummy.nbytes) { 64 | fx2qspi_fill_op(op->dummy.buswidth, false, op->dummy.nbytes, 65 | &ptr); 66 | for (i = 0; i < op->dummy.nbytes; i++) 67 | fx2_op_buffer[ptr++] = 0; 68 | } 69 | if (op->data.nbytes) { 70 | fx2qspi_fill_op(op->data.buswidth, 71 | op->data.dir == SPI_MEM_DATA_IN, 72 | op->data.nbytes, &ptr); 73 | } 74 | 75 | ret = libusb_bulk_transfer(priv->handle, FX2_EPOUT, fx2_op_buffer, ptr, 76 | &alen, 10); 77 | if (ret) 78 | return ret; 79 | 80 | if (op->data.nbytes) { 81 | if (op->data.dir == SPI_MEM_DATA_OUT) { 82 | ret = libusb_bulk_transfer(priv->handle, FX2_EPOUT, 83 | (unsigned char *)op->data.buf.out, 84 | op->data.nbytes, &alen, 20); 85 | if (ret) 86 | return ret; 87 | } else if (op->data.dir == SPI_MEM_DATA_IN) { 88 | llen = op->data.nbytes; 89 | ptr = 0; 90 | while (llen) { 91 | if (llen >= FX2_BUF_SIZE) 92 | ret = libusb_bulk_transfer( 93 | priv->handle, FX2_EPIN, 94 | op->data.buf.in + ptr, 95 | FX2_BUF_SIZE, &alen, 20); 96 | else 97 | ret = libusb_bulk_transfer( 98 | priv->handle, FX2_EPIN, 99 | fx2_op_buffer, FX2_BUF_SIZE, 100 | &alen, 20); 101 | if (ret) 102 | return ret; 103 | if (llen < FX2_BUF_SIZE) 104 | memcpy(op->data.buf.in + ptr, 105 | fx2_op_buffer, alen); 106 | ptr += alen; 107 | llen -= alen; 108 | } 109 | } 110 | } 111 | 112 | fx2_op_buffer[0] = 0; 113 | return libusb_bulk_transfer(priv->handle, FX2_EPOUT, fx2_op_buffer, 1, 114 | &alen, 20) ? 115 | -ETIMEDOUT : 116 | 0; 117 | } 118 | 119 | static const struct spi_controller_mem_ops _fx2qspi_mem_ops = { 120 | .adjust_op_size = fx2qspi_adjust_op_size, 121 | .exec_op = fx2qspi_exec_op, 122 | }; 123 | 124 | static struct spi_mem _fx2qspi_mem = { 125 | .ops = &_fx2qspi_mem_ops, 126 | .spi_mode = SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD, 127 | .name = "fx2qspi", 128 | .drvpriv = &_priv, 129 | }; 130 | 131 | static int fx2qspi_reset(fx2qspi_priv *priv) 132 | { 133 | int i, actual_len, ret; 134 | memset(fx2_op_buffer, 0, sizeof(fx2_op_buffer)); 135 | // write 2048 bytes of 0 (4 x 512-byte packets) 136 | for (i = 0; i < 4; i++) { 137 | ret = libusb_bulk_transfer(priv->handle, FX2_EPOUT, 138 | fx2_op_buffer, FX2_BUF_SIZE, 139 | &actual_len, 5); 140 | if (ret) 141 | return ret; 142 | } 143 | // tell fx2 to send garbage data back 144 | fx2_op_buffer[0] = 0x60; 145 | fx2_op_buffer[1] = 0x60; 146 | ret = libusb_bulk_transfer(priv->handle, FX2_EPOUT, fx2_op_buffer, 3, 147 | &actual_len, 1); 148 | if (ret) 149 | return ret; 150 | return libusb_bulk_transfer(priv->handle, FX2_EPIN, fx2_op_buffer, 151 | FX2_BUF_SIZE, &actual_len, 1); 152 | } 153 | 154 | struct spi_mem *fx2qspi_probe() 155 | { 156 | int ret; 157 | fx2qspi_priv *priv = &_priv; 158 | 159 | ret = libusb_init(&priv->ctx); 160 | if (ret < 0) { 161 | perror("libusb: init"); 162 | return NULL; 163 | } 164 | 165 | libusb_set_option(priv->ctx, LIBUSB_OPTION_LOG_LEVEL, 166 | LIBUSB_LOG_LEVEL_INFO); 167 | priv->handle = 168 | libusb_open_device_with_vid_pid(priv->ctx, FX2_VID, FX2_PID); 169 | if (!priv->handle) { 170 | perror("libusb: open"); 171 | goto ERR_1; 172 | } 173 | 174 | libusb_set_auto_detach_kernel_driver(priv->handle, 1); 175 | 176 | ret = libusb_claim_interface(priv->handle, 0); 177 | if (ret < 0) { 178 | perror("libusb: claim_if"); 179 | goto ERR_2; 180 | } 181 | 182 | if (fx2qspi_reset(priv)) 183 | goto ERR_3; 184 | 185 | return &_fx2qspi_mem; 186 | ERR_3: 187 | libusb_release_interface(priv->handle, 0); 188 | ERR_2: 189 | libusb_close(priv->handle); 190 | ERR_1: 191 | 
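/* Added note: the labels above unwind in reverse order of acquisition (a reset failure releases the interface, then the handle, then the context), the usual goto-based cleanup ladder for C drivers. */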
libusb_exit(priv->ctx); 192 | return NULL; 193 | } 194 | 195 | void fx2qspi_remove(struct spi_mem *mem) 196 | { 197 | fx2qspi_priv *priv = spi_mem_get_drvdata(mem); 198 | libusb_release_interface(priv->handle, 0); 199 | libusb_close(priv->handle); 200 | libusb_exit(priv->ctx); 201 | } 202 | -------------------------------------------------------------------------------- /spi-mem/spi-mem-serprog.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | static int serial_fd; 14 | u8 zero_buf[4]; 15 | 16 | static int serial_config(int fd, int speed) 17 | { 18 | struct termios tty; 19 | if (tcgetattr(fd, &tty) != 0) { 20 | perror("serial: tcgetattr"); 21 | return -1; 22 | } 23 | 24 | cfsetospeed(&tty, speed); 25 | cfsetispeed(&tty, speed); 26 | 27 | tty.c_cflag &= ~(PARENB | CSTOPB | CSIZE); 28 | tty.c_cflag |= (CS8 | CLOCAL | CREAD); 29 | tty.c_lflag &= ~(ICANON | ECHO | ECHOE | ISIG | IEXTEN); 30 | tty.c_iflag &= ~(IXON | IXOFF | IXANY | ICRNL | IGNCR | INLCR); 31 | tty.c_oflag &= ~OPOST; 32 | 33 | if (tcsetattr(fd, TCSANOW, &tty) != 0) { 34 | perror("serial: tcsetattr"); 35 | return -1; 36 | } 37 | return 0; 38 | } 39 | 40 | static int serial_init(const char *devpath) 41 | { 42 | int ret; 43 | 44 | // Use O_NDELAY to ignore DCD state 45 | serial_fd = open(devpath, O_RDWR | O_NOCTTY | O_NDELAY); 46 | if (serial_fd < 0) { 47 | perror("serial: open"); 48 | return -EINVAL; 49 | } 50 | 51 | /* Ensure that we use blocking I/O */ 52 | ret = fcntl(serial_fd, F_GETFL); 53 | if (ret == -1) { 54 | perror("serial: fcntl_getfl"); 55 | goto ERR; 56 | } 57 | 58 | ret = fcntl(serial_fd, F_SETFL, ret & ~O_NONBLOCK); 59 | if (ret != 0) { 60 | perror("serial: fcntl_setfl"); 61 | goto ERR; 62 | } 63 | 64 | if (serial_config(serial_fd, B4000000) != 0) { 65 | ret = -EINVAL; 66 | goto ERR; 67 | } 68 | ret = tcflush(serial_fd, TCIOFLUSH); 69 | if (ret != 0) { 70 | perror("serial: flush"); 71 | goto ERR; 72 | } 73 | return 0; 74 | ERR: 75 | close(serial_fd); 76 | return ret; 77 | } 78 | 79 | static int serprog_sync() 80 | { 81 | char c; 82 | int ret; 83 | c = S_CMD_SYNCNOP; 84 | write(serial_fd, &c, 1); 85 | ret = read(serial_fd, &c, 1); 86 | if (ret != 1) { 87 | perror("serprog: sync r1"); 88 | return -EINVAL; 89 | } 90 | if (c != S_NAK) { 91 | fprintf(stderr, "serprog: sync NAK failed.\n"); 92 | return -EINVAL; 93 | } 94 | ret = read(serial_fd, &c, 1); 95 | if (ret != 1) { 96 | perror("serprog: sync r2"); 97 | return -EINVAL; 98 | } 99 | if (c != S_ACK) { 100 | fprintf(stderr, "serprog: sync ACK failed.\n"); 101 | return -EINVAL; 102 | } 103 | return 0; 104 | } 105 | 106 | static int serprog_check_ack() 107 | { 108 | unsigned char c; 109 | if (read(serial_fd, &c, 1) <= 0) { 110 | perror("serprog: exec_op: read status"); 111 | return errno; 112 | } 113 | if (c == S_NAK) { 114 | fprintf(stderr, "serprog: exec_op: NAK\n"); 115 | return -EINVAL; 116 | } 117 | if (c != S_ACK) { 118 | fprintf(stderr, 119 | "serprog: exec_op: invalid response 0x%02X from device.\n", 120 | c); 121 | return -EINVAL; 122 | } 123 | return 0; 124 | } 125 | 126 | static int serprog_exec_op(u8 command, u32 parmlen, u8 *params, 127 | u32 retlen, void *retparms) 128 | { 129 | if (write(serial_fd, &command, 1) < 0) { 130 | perror("serprog: exec_op: write cmd"); 131 | return errno; 132 | } 133 | if (write(serial_fd, params, parmlen) < 0) { 134 | perror("serprog: 
exec_op: write param"); 135 | return errno; 136 | } 137 | if (serprog_check_ack() < 0) 138 | return -EINVAL; 139 | if (retlen) { 140 | if (read(serial_fd, retparms, retlen) != retlen) { 141 | perror("serprog: exec_op: read return buffer"); 142 | return 1; 143 | } 144 | } 145 | return 0; 146 | } 147 | 148 | static int serprog_get_cmdmap(u32 *cmdmap) 149 | { 150 | u8 buf[32]; 151 | 152 | if (serprog_exec_op(S_CMD_Q_CMDMAP, 0, NULL, 32, buf) < 0) 153 | return -EINVAL; 154 | 155 | *cmdmap = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); 156 | return 0; 157 | } 158 | 159 | static int serprog_set_spi_speed(u32 speed) 160 | { 161 | u8 buf[4]; 162 | u32 cmdmap = 0; 163 | int ret; 164 | 165 | ret = serprog_get_cmdmap(&cmdmap); 166 | if (ret < 0) 167 | return ret; 168 | 169 | if (!(cmdmap & (1 << S_CMD_S_SPI_FREQ))) { 170 | printf("serprog: programmer do not support set SPI clock freq.\n"); 171 | return 0; 172 | } 173 | 174 | buf[0] = speed & 0xff; 175 | buf[1] = (speed >> (1 * 8)) & 0xff; 176 | buf[2] = (speed >> (2 * 8)) & 0xff; 177 | buf[3] = (speed >> (3 * 8)) & 0xff; 178 | 179 | if (serprog_exec_op(S_CMD_S_SPI_FREQ, 4, buf, 4, buf) < 0) 180 | return -EINVAL; 181 | 182 | speed = buf[0]; 183 | speed |= buf[1] << (1 * 8); 184 | speed |= buf[2] << (2 * 8); 185 | speed |= buf[3] << (3 * 8); 186 | printf("serprog: SPI clock frequency is set to %u Hz.\n", speed); 187 | return 0; 188 | } 189 | 190 | static int serprog_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) 191 | { 192 | size_t left_data = 0xffffff - 1 - op->addr.nbytes - op->dummy.nbytes; 193 | if (op->data.nbytes > left_data) 194 | op->data.nbytes = left_data; 195 | return 0; 196 | } 197 | 198 | static int serprog_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 199 | { 200 | size_t i; 201 | u32 wrlen, rdlen, tmp; 202 | u8 buf[10]; 203 | ssize_t rwdone, rwpending, rwsize; 204 | 205 | wrlen = 1 + op->addr.nbytes + op->dummy.nbytes; 206 | 207 | if (op->data.dir == SPI_MEM_DATA_OUT) 208 | wrlen += op->data.nbytes; 209 | if (op->data.dir == SPI_MEM_DATA_IN) 210 | rdlen = op->data.nbytes; 211 | else 212 | rdlen = 0; 213 | 214 | if (wrlen & 0xff000000) { 215 | fprintf(stderr, "serprog: too much data to send.\n"); 216 | return -E2BIG; 217 | } 218 | 219 | if (rdlen & 0xff000000) { 220 | fprintf(stderr, "serprog: too much data to receive.\n"); 221 | return -E2BIG; 222 | } 223 | 224 | buf[0] = S_CMD_O_SPIOP; 225 | buf[1] = wrlen & 0xff; 226 | buf[2] = (wrlen >> 8) & 0xff; 227 | buf[3] = (wrlen >> 16) & 0xff; 228 | buf[4] = rdlen & 0xff; 229 | buf[5] = (rdlen >> 8) & 0xff; 230 | buf[6] = (rdlen >> 16) & 0xff; 231 | 232 | if (write(serial_fd, buf, 7) != 7) { 233 | perror("serprog: spimem_exec_op: write serprog cmd"); 234 | return errno; 235 | } 236 | 237 | buf[0] = op->cmd.opcode; 238 | if (write(serial_fd, buf, 1) != 1) { 239 | perror("serprog: spimem_exec_op: write opcode"); 240 | return errno; 241 | } 242 | 243 | if (op->addr.nbytes > 4) 244 | return -EINVAL; 245 | if (op->addr.nbytes) { 246 | tmp = op->addr.val; 247 | for (i = op->addr.nbytes; i; i--) { 248 | buf[i - 1] = tmp & 0xff; 249 | tmp >>= 8; 250 | } 251 | if (write(serial_fd, buf, op->addr.nbytes) != op->addr.nbytes) { 252 | perror("serprog: spimem_exec_op: write addr"); 253 | return errno; 254 | } 255 | } 256 | 257 | if (op->dummy.nbytes) { 258 | buf[0] = 0; 259 | for (i = 0; i < op->dummy.nbytes; i++) { 260 | if (write(serial_fd, buf, 1) != 1) { 261 | perror("serprog: spimem_exec_op: write dummy"); 262 | return errno; 263 | } 264 | } 265 | } 266 | 267 | 
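/* Added note: the frame sent above is one serprog S_CMD_O_SPIOP transaction: a command byte, a 24-bit little-endian write length and a 24-bit little-endian read length, followed by the raw SPI write stream (opcode, address, dummy bytes, then any output data); the programmer answers with an ACK before returning the read bytes, which is what the loops below implement. */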
if (op->data.dir == SPI_MEM_DATA_OUT && op->data.nbytes) { 268 | rwpending = op->data.nbytes; 269 | rwdone = 0; 270 | while (rwpending) { 271 | rwsize = write(serial_fd, op->data.buf.out + rwdone, rwpending); 272 | if (rwsize < 0) { 273 | perror("serprog: spimem_exec_op: write data"); 274 | return errno; 275 | } 276 | rwpending -= rwsize; 277 | rwdone += rwsize; 278 | } 279 | } 280 | 281 | if (serprog_check_ack() < 0) 282 | return -EINVAL; 283 | if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) { 284 | rwpending = op->data.nbytes; 285 | rwdone = 0; 286 | while (rwpending) { 287 | rwsize = read(serial_fd, op->data.buf.in + rwdone, rwpending); 288 | if (rwsize < 0) { 289 | perror("serprog: spimem_exec_op: read data"); 290 | return errno; 291 | } 292 | rwpending -= rwsize; 293 | rwdone += rwsize; 294 | } 295 | } 296 | return 0; 297 | } 298 | 299 | static const struct spi_controller_mem_ops _serprog_mem_ops = { 300 | .adjust_op_size = serprog_adjust_op_size, 301 | .exec_op = serprog_mem_exec_op, 302 | }; 303 | 304 | static struct spi_mem _serprog_mem = { 305 | .ops = &_serprog_mem_ops, 306 | .spi_mode = 0, 307 | .name = "serprog", 308 | .drvpriv = NULL, 309 | }; 310 | 311 | static int serprog_init(const char *devpath, u32 speed) 312 | { 313 | int ret; 314 | ret = serial_init(devpath); 315 | if (ret < 0) 316 | return ret; 317 | ret = serprog_sync(); 318 | if (ret < 0) 319 | goto ERR; 320 | ret = serprog_set_spi_speed(speed); 321 | if (ret < 0) 322 | goto ERR; 323 | return 0; 324 | ERR: 325 | close(serial_fd); 326 | return ret; 327 | } 328 | 329 | struct spi_mem *serprog_probe(const char *devpath) 330 | { 331 | return serprog_init(devpath, 24000000) ? NULL : &_serprog_mem; 332 | } 333 | 334 | void serprog_remove(struct spi_mem *mem) 335 | { 336 | close(serial_fd); 337 | } 338 | -------------------------------------------------------------------------------- /spi-mem/spi-mem.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0+ 2 | /* 3 | * This is based on include/linux/spi/spi-mem.h in Linux 4 | * Original file header: 5 | * 6 | * Copyright (C) 2018 Exceet Electronics GmbH 7 | * Copyright (C) 2018 Bootlin 8 | * 9 | * Author: Boris Brezillon 10 | */ 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx) 18 | { 19 | u32 mode = mem->spi_mode; 20 | 21 | switch (buswidth) { 22 | case 1: 23 | return 0; 24 | 25 | case 2: 26 | if ((tx && 27 | (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) || 28 | (!tx && 29 | (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))) 30 | return 0; 31 | 32 | break; 33 | 34 | case 4: 35 | if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) || 36 | (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))) 37 | return 0; 38 | 39 | break; 40 | 41 | case 8: 42 | if ((tx && (mode & SPI_TX_OCTAL)) || 43 | (!tx && (mode & SPI_RX_OCTAL))) 44 | return 0; 45 | 46 | break; 47 | 48 | default: 49 | break; 50 | } 51 | 52 | return -EOPNOTSUPP; 53 | } 54 | 55 | bool spi_mem_default_supports_op(struct spi_mem *mem, 56 | const struct spi_mem_op *op) 57 | { 58 | if (spi_check_buswidth_req(mem, op->cmd.buswidth, true)) 59 | return false; 60 | 61 | if (op->addr.nbytes && 62 | spi_check_buswidth_req(mem, op->addr.buswidth, true)) 63 | return false; 64 | 65 | if (op->dummy.nbytes && 66 | spi_check_buswidth_req(mem, op->dummy.buswidth, true)) 67 | return false; 68 | 69 | if (op->data.dir != SPI_MEM_NO_DATA && 70 | 
spi_check_buswidth_req(mem, op->data.buswidth, 71 | op->data.dir == SPI_MEM_DATA_OUT)) 72 | return false; 73 | 74 | return true; 75 | } 76 | 77 | static bool spi_mem_buswidth_is_valid(u8 buswidth) 78 | { 79 | if ((buswidth != 0) && (buswidth != 1) && (buswidth != 2) && 80 | (buswidth != 4) && (buswidth != 8)) 81 | return false; 82 | 83 | return true; 84 | } 85 | 86 | static int spi_mem_check_op(const struct spi_mem_op *op) 87 | { 88 | if (!op->cmd.buswidth) 89 | return -EINVAL; 90 | 91 | if ((op->addr.nbytes && !op->addr.buswidth) || 92 | (op->dummy.nbytes && !op->dummy.buswidth) || 93 | (op->data.nbytes && !op->data.buswidth)) 94 | return -EINVAL; 95 | 96 | if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) || 97 | !spi_mem_buswidth_is_valid(op->addr.buswidth) || 98 | !spi_mem_buswidth_is_valid(op->dummy.buswidth) || 99 | !spi_mem_buswidth_is_valid(op->data.buswidth)) 100 | return -EINVAL; 101 | 102 | return 0; 103 | } 104 | 105 | static bool spi_mem_internal_supports_op(struct spi_mem *mem, 106 | const struct spi_mem_op *op) 107 | { 108 | if (mem->ops->supports_op) 109 | return mem->ops->supports_op(mem, op); 110 | 111 | return spi_mem_default_supports_op(mem, op); 112 | } 113 | 114 | /** 115 | * spi_mem_supports_op() - Check if a memory device and the controller it is 116 | * connected to support a specific memory operation 117 | * @mem: the SPI memory 118 | * @op: the memory operation to check 119 | * 120 | * Some controllers are only supporting Single or Dual IOs, others might only 121 | * support specific opcodes, or it can even be that the controller and device 122 | * both support Quad IOs but the hardware prevents you from using it because 123 | * only 2 IO lines are connected. 124 | * 125 | * This function checks whether a specific operation is supported. 126 | * 127 | * Return: true if @op is supported, false otherwise. 128 | */ 129 | bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) 130 | { 131 | if (spi_mem_check_op(op)) 132 | return false; 133 | 134 | return spi_mem_internal_supports_op(mem, op); 135 | } 136 | 137 | /** 138 | * spi_mem_exec_op() - Execute a memory operation 139 | * @mem: the SPI memory 140 | * @op: the memory operation to execute 141 | * 142 | * Executes a memory operation. 143 | * 144 | * This function first checks that @op is supported and then tries to execute 145 | * it. 146 | * 147 | * Return: 0 in case of success, a negative error code otherwise. 148 | */ 149 | int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 150 | { 151 | int ret; 152 | 153 | ret = spi_mem_check_op(op); 154 | if (ret) 155 | return ret; 156 | 157 | if (!spi_mem_internal_supports_op(mem, op)) 158 | return -EOPNOTSUPP; 159 | 160 | return mem->ops->exec_op(mem, op); 161 | } 162 | 163 | /** 164 | * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to 165 | * match controller limitations 166 | * @mem: the SPI memory 167 | * @op: the operation to adjust 168 | * 169 | * Some controllers have FIFO limitations and must split a data transfer 170 | * operation into multiple ones, others require a specific alignment for 171 | * optimized accesses. This function allows SPI mem drivers to split a single 172 | * operation into multiple sub-operations when required. 173 | * 174 | * Return: a negative error code if the controller can't properly adjust @op, 175 | * 0 otherwise. Note that @op->data.nbytes will be updated if @op 176 | * can't be handled in a single step. 
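* An illustrative caller pattern (not part of the original header): set op.data.nbytes to the remaining length, call spi_mem_adjust_op_size() then spi_mem_exec_op(), advance the buffer by the possibly shrunken op.data.nbytes, and repeat until nothing is left — spi_mem_no_dirmap_read() below performs one such chunk. 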
177 | */ 178 | int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) 179 | { 180 | if (mem->ops->adjust_op_size) 181 | return mem->ops->adjust_op_size(mem, op); 182 | 183 | return 0; 184 | } 185 | 186 | static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc, 187 | u64 offs, size_t len, void *buf) 188 | { 189 | struct spi_mem_op op = desc->info.op_tmpl; 190 | int ret; 191 | 192 | op.addr.val = desc->info.offset + offs; 193 | op.data.buf.in = buf; 194 | op.data.nbytes = len; 195 | ret = spi_mem_adjust_op_size(desc->mem, &op); 196 | if (ret) 197 | return ret; 198 | 199 | ret = spi_mem_exec_op(desc->mem, &op); 200 | if (ret) 201 | return ret; 202 | 203 | return op.data.nbytes; 204 | } 205 | 206 | static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc, 207 | u64 offs, size_t len, const void *buf) 208 | { 209 | struct spi_mem_op op = desc->info.op_tmpl; 210 | int ret; 211 | 212 | op.addr.val = desc->info.offset + offs; 213 | op.data.buf.out = buf; 214 | op.data.nbytes = len; 215 | ret = spi_mem_adjust_op_size(desc->mem, &op); 216 | if (ret) 217 | return ret; 218 | 219 | ret = spi_mem_exec_op(desc->mem, &op); 220 | if (ret) 221 | return ret; 222 | 223 | return op.data.nbytes; 224 | } 225 | 226 | /** 227 | * spi_mem_dirmap_create() - Create a direct mapping descriptor 228 | * @mem: SPI mem device this direct mapping should be created for 229 | * @info: direct mapping information 230 | * 231 | * This function is creating a direct mapping descriptor which can then be used 232 | * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write(). 233 | * If the SPI controller driver does not support direct mapping, this function 234 | * falls back to an implementation using spi_mem_exec_op(), so that the caller 235 | * doesn't have to bother implementing a fallback on his own. 236 | * 237 | * Return: a valid pointer in case of success, and ERR_PTR() otherwise. 238 | */ 239 | struct spi_mem_dirmap_desc * 240 | spi_mem_dirmap_create(struct spi_mem *mem, 241 | const struct spi_mem_dirmap_info *info) 242 | { 243 | struct spi_mem_dirmap_desc *desc; 244 | int ret = -EOPNOTSUPP; 245 | 246 | /* Make sure the number of address cycles is between 1 and 8 bytes. */ 247 | if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8) 248 | return ERR_PTR(-EINVAL); 249 | 250 | /* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */ 251 | if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA) 252 | return ERR_PTR(-EINVAL); 253 | 254 | desc = calloc(1, sizeof(*desc)); 255 | if (!desc) 256 | return ERR_PTR(-ENOMEM); 257 | 258 | desc->mem = mem; 259 | desc->info = *info; 260 | if (mem->ops->dirmap_create) 261 | ret = mem->ops->dirmap_create(desc); 262 | 263 | if (ret) { 264 | desc->nodirmap = true; 265 | if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl)) 266 | ret = -EOPNOTSUPP; 267 | else 268 | ret = 0; 269 | } 270 | 271 | if (ret) { 272 | free(desc); 273 | return ERR_PTR(ret); 274 | } 275 | 276 | return desc; 277 | } 278 | 279 | /** 280 | * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor 281 | * @desc: the direct mapping descriptor to destroy 282 | * 283 | * This function destroys a direct mapping descriptor previously created by 284 | * spi_mem_dirmap_create(). 
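* It is also safe to call for descriptors that fell back to the exec_op path at creation time: the nodirmap flag checked below skips the controller dirmap_destroy() hook in that case. 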
285 | */ 286 | void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc) 287 | { 288 | if (!desc->nodirmap && desc->mem->ops->dirmap_destroy) 289 | desc->mem->ops->dirmap_destroy(desc); 290 | 291 | free(desc); 292 | } 293 | 294 | /** 295 | * spi_mem_dirmap_read() - Read data through a direct mapping 296 | * @desc: direct mapping descriptor 297 | * @offs: offset to start reading from. Note that this is not an absolute 298 | * offset, but the offset within the direct mapping which already has 299 | * its own offset 300 | * @len: length in bytes 301 | * @buf: destination buffer. This buffer must be DMA-able 302 | * 303 | * This function reads data from a memory device using a direct mapping 304 | * previously instantiated with spi_mem_dirmap_create(). 305 | * 306 | * Return: the amount of data read from the memory device or a negative error 307 | * code. Note that the returned size might be smaller than @len, and the caller 308 | * is responsible for calling spi_mem_dirmap_read() again when that happens. 309 | */ 310 | ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, 311 | u64 offs, size_t len, void *buf) 312 | { 313 | ssize_t ret; 314 | 315 | if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) 316 | return -EINVAL; 317 | 318 | if (!len) 319 | return 0; 320 | 321 | if (desc->nodirmap) { 322 | ret = spi_mem_no_dirmap_read(desc, offs, len, buf); 323 | } else if (desc->mem->ops->dirmap_read) { 324 | ret = desc->mem->ops->dirmap_read(desc, offs, len, buf); 325 | } else { 326 | ret = -EOPNOTSUPP; 327 | } 328 | 329 | return ret; 330 | } 331 | 332 | /** 333 | * spi_mem_dirmap_write() - Write data through a direct mapping 334 | * @desc: direct mapping descriptor 335 | * @offs: offset to start writing from. Note that this is not an absolute 336 | * offset, but the offset within the direct mapping which already has 337 | * its own offset 338 | * @len: length in bytes 339 | * @buf: source buffer. This buffer must be DMA-able 340 | * 341 | * This function writes data to a memory device using a direct mapping 342 | * previously instantiated with spi_mem_dirmap_create(). 343 | * 344 | * Return: the amount of data written to the memory device or a negative error 345 | * code. Note that the returned size might be smaller than @len, and the caller 346 | * is responsible for calling spi_mem_dirmap_write() again when that happens. 347 | */ 348 | ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, 349 | u64 offs, size_t len, const void *buf) 350 | { 351 | ssize_t ret; 352 | 353 | if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT) 354 | return -EINVAL; 355 | 356 | if (!len) 357 | return 0; 358 | 359 | if (desc->nodirmap) { 360 | ret = spi_mem_no_dirmap_write(desc, offs, len, buf); 361 | } else if (desc->mem->ops->dirmap_write) { 362 | ret = desc->mem->ops->dirmap_write(desc, offs, len, buf); 363 | } else { 364 | ret = -EOPNOTSUPP; 365 | } 366 | 367 | return ret; 368 | } 369 | -------------------------------------------------------------------------------- /spi-nand/core.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (C) 2016-2017 Micron Technology, Inc. 
4 | * 5 | * Authors: 6 | * Peter Pan 7 | * Boris Brezillon 8 | */ 9 | 10 | #define pr_fmt(fmt) "spi-nand: " fmt 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) 20 | { 21 | struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg, 22 | spinand->scratchbuf); 23 | int ret; 24 | 25 | ret = spi_mem_exec_op(spinand->spimem, &op); 26 | if (ret) 27 | return ret; 28 | 29 | *val = *spinand->scratchbuf; 30 | return 0; 31 | } 32 | 33 | static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val) 34 | { 35 | struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg, 36 | spinand->scratchbuf); 37 | 38 | *spinand->scratchbuf = val; 39 | return spi_mem_exec_op(spinand->spimem, &op); 40 | } 41 | 42 | static int spinand_read_status(struct spinand_device *spinand, u8 *status) 43 | { 44 | return spinand_read_reg_op(spinand, REG_STATUS, status); 45 | } 46 | 47 | static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg) 48 | { 49 | struct nand_device *nand = spinand_to_nand(spinand); 50 | 51 | if (spinand->cur_target < 0 || 52 | spinand->cur_target >= nand->memorg.ntargets) 53 | return -EINVAL; 54 | 55 | *cfg = spinand->cfg_cache[spinand->cur_target]; 56 | return 0; 57 | } 58 | 59 | static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg) 60 | { 61 | struct nand_device *nand = spinand_to_nand(spinand); 62 | int ret; 63 | 64 | if (spinand->cur_target < 0 || 65 | spinand->cur_target >= nand->memorg.ntargets) 66 | return -EINVAL; 67 | 68 | if (spinand->cfg_cache[spinand->cur_target] == cfg) 69 | return 0; 70 | 71 | ret = spinand_write_reg_op(spinand, REG_CFG, cfg); 72 | if (ret) 73 | return ret; 74 | 75 | spinand->cfg_cache[spinand->cur_target] = cfg; 76 | return 0; 77 | } 78 | 79 | /** 80 | * spinand_upd_cfg() - Update the configuration register 81 | * @spinand: the spinand device 82 | * @mask: the mask encoding the bits to update in the config reg 83 | * @val: the new value to apply 84 | * 85 | * Update the configuration register. 86 | * 87 | * Return: 0 on success, a negative error code otherwise. 88 | */ 89 | int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val) 90 | { 91 | int ret; 92 | u8 cfg; 93 | 94 | ret = spinand_get_cfg(spinand, &cfg); 95 | if (ret) 96 | return ret; 97 | 98 | cfg &= ~mask; 99 | cfg |= val; 100 | 101 | return spinand_set_cfg(spinand, cfg); 102 | } 103 | 104 | /** 105 | * spinand_select_target() - Select a specific NAND target/die 106 | * @spinand: the spinand device 107 | * @target: the target/die to select 108 | * 109 | * Select a new target/die. If chip only has one die, this function is a NOOP. 110 | * 111 | * Return: 0 on success, a negative error code otherwise. 
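* Callers in this file invoke it right before issuing a page- or block-addressed operation; see spinand_read_page(), spinand_write_page() and spinand_erase() further down. 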
112 | */ 113 | int spinand_select_target(struct spinand_device *spinand, unsigned int target) 114 | { 115 | struct nand_device *nand = spinand_to_nand(spinand); 116 | int ret; 117 | 118 | if (target >= nand->memorg.ntargets) 119 | return -EINVAL; 120 | 121 | if (spinand->cur_target == target) 122 | return 0; 123 | 124 | if (nand->memorg.ntargets == 1) { 125 | spinand->cur_target = target; 126 | return 0; 127 | } 128 | 129 | ret = spinand->select_target(spinand, target); 130 | if (ret) 131 | return ret; 132 | 133 | spinand->cur_target = target; 134 | return 0; 135 | } 136 | 137 | static int spinand_init_cfg_cache(struct spinand_device *spinand) 138 | { 139 | struct nand_device *nand = spinand_to_nand(spinand); 140 | unsigned int target; 141 | int ret; 142 | 143 | spinand->cfg_cache = 144 | calloc(nand->memorg.ntargets, sizeof(*spinand->cfg_cache)); 145 | if (!spinand->cfg_cache) 146 | return -ENOMEM; 147 | 148 | for (target = 0; target < nand->memorg.ntargets; target++) { 149 | ret = spinand_select_target(spinand, target); 150 | if (ret) 151 | return ret; 152 | 153 | /* 154 | * We use spinand_read_reg_op() instead of spinand_get_cfg() 155 | * here to bypass the config cache. 156 | */ 157 | ret = spinand_read_reg_op(spinand, REG_CFG, 158 | &spinand->cfg_cache[target]); 159 | if (ret) 160 | return ret; 161 | } 162 | 163 | return 0; 164 | } 165 | 166 | static int spinand_init_quad_enable(struct spinand_device *spinand) 167 | { 168 | bool enable = false; 169 | 170 | if (!(spinand->flags & SPINAND_HAS_QE_BIT)) 171 | return 0; 172 | 173 | if (spinand->op_templates.read_cache->data.buswidth == 4 || 174 | spinand->op_templates.write_cache->data.buswidth == 4 || 175 | spinand->op_templates.update_cache->data.buswidth == 4) 176 | enable = true; 177 | 178 | return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE, 179 | enable ? CFG_QUAD_ENABLE : 0); 180 | } 181 | 182 | int spinand_ecc_enable(struct spinand_device *spinand, 183 | bool enable) 184 | { 185 | return spinand_upd_cfg(spinand, CFG_ECC_ENABLE, 186 | enable ? 
CFG_ECC_ENABLE : 0); 187 | } 188 | 189 | static int spinand_write_enable_op(struct spinand_device *spinand) 190 | { 191 | struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true); 192 | 193 | return spi_mem_exec_op(spinand->spimem, &op); 194 | } 195 | 196 | static int spinand_load_page_op(struct spinand_device *spinand, 197 | const struct nand_page_io_req *req) 198 | { 199 | struct nand_device *nand = spinand_to_nand(spinand); 200 | unsigned int row = nanddev_pos_to_row(nand, &req->pos); 201 | struct spi_mem_op op = SPINAND_PAGE_READ_OP(row); 202 | 203 | return spi_mem_exec_op(spinand->spimem, &op); 204 | } 205 | 206 | static int spinand_read_from_cache_op(struct spinand_device *spinand, 207 | const struct nand_page_io_req *req) 208 | { 209 | struct nand_device *nand = spinand_to_nand(spinand); 210 | struct spi_mem_dirmap_desc *rdesc; 211 | unsigned int nbytes = 0; 212 | void *buf = NULL; 213 | u16 column = 0; 214 | ssize_t ret; 215 | 216 | if (req->datalen) { 217 | buf = spinand->databuf; 218 | nbytes = nanddev_page_size(nand); 219 | column = 0; 220 | } 221 | 222 | if (req->ooblen) { 223 | nbytes += nanddev_per_page_oobsize(nand); 224 | if (!buf) { 225 | buf = spinand->oobbuf; 226 | column = nanddev_page_size(nand); 227 | } 228 | } 229 | 230 | rdesc = spinand->dirmaps[req->pos.plane].rdesc; 231 | 232 | while (nbytes) { 233 | ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf); 234 | if (ret < 0) 235 | return ret; 236 | 237 | if (!ret || ret > nbytes) 238 | return -EIO; 239 | 240 | nbytes -= ret; 241 | column += ret; 242 | buf += ret; 243 | } 244 | 245 | if (req->datalen) 246 | memcpy(req->databuf.in, spinand->databuf + req->dataoffs, 247 | req->datalen); 248 | 249 | if (req->ooblen) 250 | memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, 251 | req->ooblen); 252 | 253 | return 0; 254 | } 255 | 256 | static int spinand_write_to_cache_op(struct spinand_device *spinand, 257 | const struct nand_page_io_req *req) 258 | { 259 | struct nand_device *nand = spinand_to_nand(spinand); 260 | struct spi_mem_dirmap_desc *wdesc; 261 | unsigned int nbytes, column = 0; 262 | void *buf = spinand->databuf; 263 | ssize_t ret; 264 | 265 | /* 266 | * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset 267 | * the cache content to 0xFF (depends on vendor implementation), so we 268 | * must fill the page cache entirely even if we only want to program 269 | * the data portion of the page, otherwise we might corrupt the BBM or 270 | * user data previously programmed in OOB area. 
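* Hence the memset of the whole page + OOB bounce buffer to 0xff right below, done before the requested data and OOB regions are copied in. 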
271 | */ 272 | nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); 273 | memset(spinand->databuf, 0xff, nbytes); 274 | 275 | if (req->datalen) 276 | memcpy(spinand->databuf + req->dataoffs, req->databuf.out, 277 | req->datalen); 278 | 279 | if (req->ooblen) 280 | memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, 281 | req->ooblen); 282 | 283 | wdesc = spinand->dirmaps[req->pos.plane].wdesc; 284 | 285 | while (nbytes) { 286 | ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf); 287 | if (ret < 0) 288 | return ret; 289 | 290 | if (!ret || ret > nbytes) 291 | return -EIO; 292 | 293 | nbytes -= ret; 294 | column += ret; 295 | buf += ret; 296 | } 297 | 298 | return 0; 299 | } 300 | 301 | static int spinand_program_op(struct spinand_device *spinand, 302 | const struct nand_page_io_req *req) 303 | { 304 | struct nand_device *nand = spinand_to_nand(spinand); 305 | unsigned int row = nanddev_pos_to_row(nand, &req->pos); 306 | struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row); 307 | 308 | return spi_mem_exec_op(spinand->spimem, &op); 309 | } 310 | 311 | static int spinand_erase_op(struct spinand_device *spinand, 312 | const struct nand_pos *pos) 313 | { 314 | struct nand_device *nand = spinand_to_nand(spinand); 315 | unsigned int row = nanddev_pos_to_row(nand, pos); 316 | struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row); 317 | 318 | return spi_mem_exec_op(spinand->spimem, &op); 319 | } 320 | 321 | static int spinand_wait(struct spinand_device *spinand, u8 *s) 322 | { 323 | time_t ctime, otime; 324 | u8 status; 325 | int ret; 326 | time(&otime); 327 | do { 328 | ret = spinand_read_status(spinand, &status); 329 | if (ret) 330 | return ret; 331 | 332 | if (!(status & STATUS_BUSY)) 333 | goto out; 334 | 335 | time(&ctime); 336 | } while (ctime - otime < 3); 337 | 338 | /* 339 | * Extra read, just in case the STATUS_BUSY bit has changed 340 | * since our last check. 341 | */ 342 | ret = spinand_read_status(spinand, &status); 343 | if (ret) 344 | return ret; 345 | 346 | out: 347 | if (s) 348 | *s = status; 349 | 350 | return status & STATUS_BUSY ? 
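/*
 * The polling loop above uses time(), which has one-second resolution
 * and follows the wall clock. A drop-in sketch with a monotonic clock,
 * assuming POSIX clock_gettime() is available on the host:
 *
 *   struct timespec t0, t1;
 *
 *   clock_gettime(CLOCK_MONOTONIC, &t0);
 *   do {
 *           ret = spinand_read_status(spinand, &status);
 *           if (ret)
 *                   return ret;
 *           if (!(status & STATUS_BUSY))
 *                   goto out;
 *           clock_gettime(CLOCK_MONOTONIC, &t1);
 *   } while (t1.tv_sec - t0.tv_sec < 3);
 */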
-ETIMEDOUT : 0; 351 | } 352 | 353 | static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr, 354 | u8 ndummy, u8 *buf) 355 | { 356 | struct spi_mem_op op = SPINAND_READID_OP( 357 | naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN); 358 | int ret; 359 | 360 | ret = spi_mem_exec_op(spinand->spimem, &op); 361 | if (!ret) 362 | memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN); 363 | 364 | return ret; 365 | } 366 | 367 | static int spinand_reset_op(struct spinand_device *spinand) 368 | { 369 | struct spi_mem_op op = SPINAND_RESET_OP; 370 | int ret; 371 | 372 | ret = spi_mem_exec_op(spinand->spimem, &op); 373 | if (ret) 374 | return ret; 375 | 376 | return spinand_wait(spinand, NULL); 377 | } 378 | 379 | static int spinand_lock_block(struct spinand_device *spinand, u8 lock) 380 | { 381 | return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock); 382 | } 383 | 384 | static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status) 385 | { 386 | struct nand_device *nand = spinand_to_nand(spinand); 387 | 388 | if (spinand->eccinfo.get_status) 389 | return spinand->eccinfo.get_status(spinand, status); 390 | 391 | switch (status & STATUS_ECC_MASK) { 392 | case STATUS_ECC_NO_BITFLIPS: 393 | return 0; 394 | 395 | case STATUS_ECC_HAS_BITFLIPS: 396 | /* 397 | * We have no way to know exactly how many bitflips have been 398 | * fixed, so let's return the maximum possible value so that 399 | * wear-leveling layers move the data immediately. 400 | */ 401 | return nand->eccreq.strength; 402 | 403 | case STATUS_ECC_UNCOR_ERROR: 404 | return -EBADMSG; 405 | 406 | default: 407 | break; 408 | } 409 | 410 | return -EINVAL; 411 | } 412 | 413 | int spinand_read_page(struct spinand_device *spinand, 414 | const struct nand_page_io_req *req, 415 | bool ecc_enabled) 416 | { 417 | u8 status; 418 | int ret; 419 | 420 | ret = spinand_select_target(spinand, req->pos.target); 421 | if (ret) 422 | return ret; 423 | 424 | ret = spinand_ecc_enable(spinand, ecc_enabled); 425 | if (ret) 426 | return ret; 427 | 428 | ret = spinand_load_page_op(spinand, req); 429 | if (ret) 430 | return ret; 431 | 432 | ret = spinand_wait(spinand, &status); 433 | if (ret < 0) 434 | return ret; 435 | 436 | ret = spinand_read_from_cache_op(spinand, req); 437 | if (ret) 438 | return ret; 439 | 440 | if (!ecc_enabled) 441 | return 0; 442 | 443 | return spinand_check_ecc_status(spinand, status); 444 | } 445 | 446 | int spinand_write_page(struct spinand_device *spinand, 447 | const struct nand_page_io_req *req, bool ecc_enabled) 448 | { 449 | u8 status; 450 | int ret; 451 | 452 | ret = spinand_select_target(spinand, req->pos.target); 453 | if (ret) 454 | return ret; 455 | 456 | ret = spinand_ecc_enable(spinand, ecc_enabled); 457 | if (ret) 458 | return ret; 459 | 460 | ret = spinand_write_enable_op(spinand); 461 | if (ret) 462 | return ret; 463 | 464 | ret = spinand_write_to_cache_op(spinand, req); 465 | if (ret) 466 | return ret; 467 | 468 | ret = spinand_program_op(spinand, req); 469 | if (ret) 470 | return ret; 471 | 472 | ret = spinand_wait(spinand, &status); 473 | if (!ret && (status & STATUS_PROG_FAILED)) 474 | ret = -EIO; 475 | 476 | return ret; 477 | } 478 | 479 | int spinand_erase(struct spinand_device *spinand, const struct nand_pos *pos) 480 | { 481 | u8 status; 482 | int ret; 483 | 484 | ret = spinand_select_target(spinand, pos->target); 485 | if (ret) 486 | return ret; 487 | 488 | ret = spinand_write_enable_op(spinand); 489 | if (ret) 490 | return ret; 491 | 492 | ret = spinand_erase_op(spinand, pos); 
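/*
 * A minimal erase-by-offset wrapper around spinand_erase(), shown as an
 * illustrative sketch and assuming offs is eraseblock-aligned:
 *
 *   int snand_erase_at(struct spinand_device *spinand, size_t offs)
 *   {
 *           struct nand_device *nand = spinand_to_nand(spinand);
 *           struct nand_pos pos;
 *
 *           nanddev_offs_to_pos(nand, offs, &pos);
 *           return spinand_erase(spinand, &pos);
 *   }
 */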
; 493 | if (ret) 494 | return ret; 495 | 496 | ret = spinand_wait(spinand, &status); 497 | if (!ret && (status & STATUS_ERASE_FAILED)) 498 | ret = -EIO; 499 | 500 | return ret; 501 | } 502 | 503 | static int spinand_create_dirmap(struct spinand_device *spinand, 504 | unsigned int plane) 505 | { 506 | struct nand_device *nand = spinand_to_nand(spinand); 507 | struct spi_mem_dirmap_info info = { 508 | .length = nanddev_page_size(nand) + 509 | nanddev_per_page_oobsize(nand), 510 | }; 511 | struct spi_mem_dirmap_desc *desc; 512 | 513 | /* The plane number is passed in MSB just above the column address */ 514 | info.offset = plane << fls(nand->memorg.pagesize); 515 | 516 | info.op_tmpl = *spinand->op_templates.update_cache; 517 | desc = spi_mem_dirmap_create(spinand->spimem, &info); 518 | if (IS_ERR(desc)) 519 | return PTR_ERR(desc); 520 | 521 | spinand->dirmaps[plane].wdesc = desc; 522 | 523 | info.op_tmpl = *spinand->op_templates.read_cache; 524 | desc = spi_mem_dirmap_create(spinand->spimem, &info); 525 | if (IS_ERR(desc)) 526 | return PTR_ERR(desc); 527 | 528 | spinand->dirmaps[plane].rdesc = desc; 529 | 530 | return 0; 531 | } 532 | 533 | static int spinand_create_dirmaps(struct spinand_device *spinand) 534 | { 535 | struct nand_device *nand = spinand_to_nand(spinand); 536 | int i, ret; 537 | 538 | spinand->dirmaps = calloc(nand->memorg.planes_per_lun, 539 | sizeof(*spinand->dirmaps)); 540 | if (!spinand->dirmaps) 541 | return -ENOMEM; 542 | 543 | for (i = 0; i < nand->memorg.planes_per_lun; i++) { 544 | ret = spinand_create_dirmap(spinand, i); 545 | if (ret) 546 | return ret; 547 | } 548 | 549 | return 0; 550 | } 551 | 552 | static const struct spinand_manufacturer *spinand_manufacturers[] = { 553 | &gigadevice_spinand_manufacturer, 554 | &macronix_spinand_manufacturer, 555 | &micron_spinand_manufacturer, 556 | &paragon_spinand_manufacturer, 557 | &toshiba_spinand_manufacturer, 558 | &winbond_spinand_manufacturer, 559 | }; 560 | 561 | static int spinand_manufacturer_match(struct spinand_device *spinand, 562 | enum spinand_readid_method rdid_method) 563 | { 564 | u8 *id = spinand->id.data; 565 | unsigned int i; 566 | int ret; 567 | 568 | for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) { 569 | const struct spinand_manufacturer *manufacturer = 570 | spinand_manufacturers[i]; 571 | 572 | if (id[0] != manufacturer->id) 573 | continue; 574 | 575 | ret = spinand_match_and_init(spinand, 576 | manufacturer->chips, 577 | manufacturer->nchips, 578 | rdid_method); 579 | if (ret < 0) 580 | continue; 581 | 582 | spinand->manufacturer = manufacturer; 583 | return 0; 584 | } 585 | return -EOPNOTSUPP; 586 | } 587 | 588 | static int spinand_id_detect(struct spinand_device *spinand) 589 | { 590 | u8 *id = spinand->id.data; 591 | int ret; 592 | 593 | ret = spinand_read_id_op(spinand, 0, 0, id); 594 | if (ret) 595 | return ret; 596 | ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE); 597 | if (!ret) 598 | return 0; 599 | 600 | ret = spinand_read_id_op(spinand, 1, 0, id); 601 | if (ret) 602 | return ret; 603 | ret = spinand_manufacturer_match(spinand, 604 | SPINAND_READID_METHOD_OPCODE_ADDR); 605 | if (!ret) 606 | return 0; 607 | 608 | ret = spinand_read_id_op(spinand, 0, 1, id); 609 | if (ret) 610 | return ret; 611 | ret = spinand_manufacturer_match(spinand, 612 | SPINAND_READID_METHOD_OPCODE_DUMMY); 613 | 614 | return ret; 615 | } 616 | 617 | static int spinand_manufacturer_init(struct spinand_device *spinand) 618 | { 619 | if (spinand->manufacturer->ops->init) 620 | return 
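/*
 * Worked example for the dirmap offset above: with a 2048-byte page,
 * fls(2048) is 12, so the column address (page plus OOB) fits in bits
 * [11:0] and plane 1 yields info.offset = 1 << 12 = 0x1000, one bit
 * above the highest column address:
 *
 *   pagesize = 2048         ->  column address uses bits [11:0]
 *   plane << fls(pagesize)  ->  plane select lands in bit 12
 */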
spinand->manufacturer->ops->init(spinand); 621 | 622 | return 0; 623 | } 624 | 625 | static void spinand_manufacturer_cleanup(struct spinand_device *spinand) 626 | { 627 | /* Release manufacturer private data */ 628 | if (spinand->manufacturer->ops->cleanup) 629 | return spinand->manufacturer->ops->cleanup(spinand); 630 | } 631 | 632 | static const struct spi_mem_op * 633 | spinand_select_op_variant(struct spinand_device *spinand, 634 | const struct spinand_op_variants *variants) 635 | { 636 | struct nand_device *nand = spinand_to_nand(spinand); 637 | unsigned int i; 638 | 639 | for (i = 0; i < variants->nops; i++) { 640 | struct spi_mem_op op = variants->ops[i]; 641 | unsigned int nbytes; 642 | int ret; 643 | 644 | nbytes = nanddev_per_page_oobsize(nand) + 645 | nanddev_page_size(nand); 646 | 647 | while (nbytes) { 648 | op.data.nbytes = nbytes; 649 | ret = spi_mem_adjust_op_size(spinand->spimem, &op); 650 | if (ret) 651 | break; 652 | 653 | if (!spi_mem_supports_op(spinand->spimem, &op)) 654 | break; 655 | 656 | nbytes -= op.data.nbytes; 657 | } 658 | 659 | if (!nbytes) 660 | return &variants->ops[i]; 661 | } 662 | 663 | return NULL; 664 | } 665 | 666 | /** 667 | * spinand_match_and_init() - Try to find a match between a device ID and an 668 | * entry in a spinand_info table 669 | * @spinand: SPI NAND object 670 | * @table: SPI NAND device description table 671 | * @table_size: size of the device description table 672 | * @rdid_method: read id method to match 673 | * 674 | * Match between a device ID retrieved through the READ_ID command and an 675 | * entry in the SPI NAND description table. If a match is found, the spinand 676 | * object will be initialized with information provided by the matching 677 | * spinand_info entry. 678 | * 679 | * Return: 0 on success, a negative error code otherwise. 
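 *
 * A typical caller walks one manufacturer table per detected vendor ID,
 * exactly as spinand_manufacturer_match() does above:
 *
 *   ret = spinand_match_and_init(spinand, manufacturer->chips,
 *                                manufacturer->nchips, rdid_method);
 *   if (ret < 0)
 *           continue;   // no entry matched, try the next manufacturer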
680 | */ 681 | int spinand_match_and_init(struct spinand_device *spinand, 682 | const struct spinand_info *table, 683 | unsigned int table_size, 684 | enum spinand_readid_method rdid_method) 685 | { 686 | u8 *id = spinand->id.data; 687 | struct nand_device *nand = spinand_to_nand(spinand); 688 | unsigned int i; 689 | 690 | for (i = 0; i < table_size; i++) { 691 | const struct spinand_info *info = &table[i]; 692 | const struct spi_mem_op *op; 693 | 694 | if (rdid_method != info->devid.method) 695 | continue; 696 | 697 | if (memcmp(id + 1, info->devid.id, info->devid.len)) 698 | continue; 699 | 700 | nand->memorg = table[i].memorg; 701 | nand->eccreq = table[i].eccreq; 702 | spinand->eccinfo = table[i].eccinfo; 703 | spinand->flags = table[i].flags; 704 | spinand->id.len = 1 + table[i].devid.len; 705 | spinand->select_target = table[i].select_target; 706 | 707 | op = spinand_select_op_variant(spinand, 708 | info->op_variants.read_cache); 709 | if (!op) 710 | return -EOPNOTSUPP; 711 | 712 | spinand->op_templates.read_cache = op; 713 | 714 | op = spinand_select_op_variant(spinand, 715 | info->op_variants.write_cache); 716 | if (!op) 717 | return -EOPNOTSUPP; 718 | 719 | spinand->op_templates.write_cache = op; 720 | 721 | op = spinand_select_op_variant(spinand, 722 | info->op_variants.update_cache); if (!op) return -EOPNOTSUPP; 723 | spinand->op_templates.update_cache = op; 724 | 725 | printf("Found SPI NAND model: %s\n", table[i].model); 726 | 727 | return 0; 728 | } 729 | 730 | return -EOPNOTSUPP; 731 | } 732 | 733 | static int spinand_detect(struct spinand_device *spinand) 734 | { 735 | struct nand_device *nand = spinand_to_nand(spinand); 736 | int ret; 737 | 738 | ret = spinand_reset_op(spinand); 739 | if (ret) 740 | return ret; 741 | 742 | ret = spinand_id_detect(spinand); 743 | if (ret) { 744 | fprintf(stderr, "unknown raw ID %hhX %hhX %hhX %hhX\n", 745 | spinand->id.data[0], spinand->id.data[1], 746 | spinand->id.data[2], spinand->id.data[3]); 747 | return ret; 748 | } 749 | 750 | if (nand->memorg.ntargets > 1 && !spinand->select_target) { 751 | fprintf(stderr, 752 | "SPI NANDs with more than one die must implement ->select_target()\n"); 753 | return -EINVAL; 754 | } 755 | 756 | printf("%s SPI NAND was found.\n", spinand->manufacturer->name); 757 | printf("%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n", 758 | nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10, 759 | nanddev_page_size(nand), nanddev_per_page_oobsize(nand)); 760 | 761 | return 0; 762 | } 763 | 764 | static int spinand_init(struct spinand_device *spinand) 765 | { 766 | struct nand_device *nand = spinand_to_nand(spinand); 767 | int ret, i; 768 | 769 | /* 770 | * We need a scratch buffer because the spi_mem interface requires that 771 | * buf passed in spi_mem_op->data.buf be DMA-able. 772 | */ 773 | spinand->scratchbuf = calloc(SPINAND_MAX_ID_LEN, 1); 774 | if (!spinand->scratchbuf) 775 | return -ENOMEM; 776 | 777 | ret = spinand_detect(spinand); 778 | if (ret) 779 | goto err_free_bufs; 780 | 781 | /* 782 | * Allocate this buffer with a plain malloc() (the kernel driver uses 783 | * kzalloc() rather than devm_kzalloc()), because some drivers may use 784 | * it for DMA access: devm_-managed memory does not guarantee DMA-safe alignment. 
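 *
 * The allocation below is one contiguous page-plus-OOB buffer:
 *
 *   databuf                            oobbuf
 *   |<----- page_size bytes ----->|<-- oob_size bytes -->|
 *
 * which lets the cache read/write paths move page and OOB data in a
 * single pass.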
785 | */ 786 | spinand->databuf = malloc(nanddev_page_size(nand) + 787 | nanddev_per_page_oobsize(nand)); 788 | if (!spinand->databuf) { 789 | ret = -ENOMEM; 790 | goto err_free_bufs; 791 | } 792 | 793 | spinand->oobbuf = spinand->databuf + nanddev_page_size(nand); 794 | 795 | ret = spinand_init_cfg_cache(spinand); 796 | if (ret) 797 | goto err_free_bufs; 798 | 799 | ret = spinand_init_quad_enable(spinand); 800 | if (ret) 801 | goto err_free_bufs; 802 | 803 | ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0); 804 | if (ret) 805 | goto err_free_bufs; 806 | 807 | ret = spinand_manufacturer_init(spinand); 808 | if (ret) { 809 | fprintf(stderr, 810 | "Failed to initialize the SPI NAND chip (err = %d)\n", 811 | ret); 812 | goto err_free_bufs; 813 | } 814 | 815 | ret = spinand_create_dirmaps(spinand); 816 | if (ret) { 817 | fprintf(stderr, 818 | "Failed to create direct mappings for read/write operations (err = %d)\n", 819 | ret); 820 | goto err_manuf_cleanup; 821 | } 822 | 823 | /* After power up, all blocks are locked, so unlock them here. */ 824 | for (i = 0; i < nand->memorg.ntargets; i++) { 825 | ret = spinand_select_target(spinand, i); 826 | if (ret) 827 | goto err_manuf_cleanup; 828 | 829 | ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); 830 | if (ret) 831 | goto err_manuf_cleanup; 832 | } 833 | 834 | ret = nanddev_init(nand); 835 | if (ret) 836 | goto err_manuf_cleanup; 837 | return 0; 838 | 839 | err_manuf_cleanup: 840 | spinand_manufacturer_cleanup(spinand); 841 | 842 | err_free_bufs: 843 | free(spinand->databuf); 844 | free(spinand->scratchbuf); 845 | return ret; 846 | } 847 | 848 | static void spinand_cleanup(struct spinand_device *spinand) 849 | { 850 | spinand_manufacturer_cleanup(spinand); 851 | free(spinand->databuf); 852 | free(spinand->scratchbuf); 853 | } 854 | 855 | struct spinand_device _spinand; 856 | 857 | struct spinand_device *spinand_probe(struct spi_mem *mem) 858 | { 859 | struct spinand_device *spinand; 860 | int ret; 861 | 862 | spinand = &_spinand; 863 | 864 | spinand->spimem = mem; 865 | ret = spinand_init(spinand); 866 | if (ret) 867 | return NULL; 868 | return spinand; 869 | } 870 | 871 | void spinand_remove(struct spinand_device *spinand) 872 | { 873 | spinand_cleanup(spinand); 874 | } 875 | -------------------------------------------------------------------------------- /spi-nand/gigadevice.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Author: 4 | * Chuanhong Guo 5 | */ 6 | 7 | #include 8 | 9 | #define SPINAND_MFR_GIGADEVICE 0xC8 10 | 11 | #define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4) 12 | #define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4) 13 | 14 | #define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS (1 << 4) 15 | #define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS (3 << 4) 16 | 17 | #define GD5FXGQXXEXXG_REG_STATUS2 0xf0 18 | 19 | #define GD5FXGQ4UXFXXG_STATUS_ECC_MASK (7 << 4) 20 | #define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS (0 << 4) 21 | #define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4) 22 | #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4) 23 | 24 | static SPINAND_OP_VARIANTS(read_cache_variants, 25 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), 26 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 27 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 28 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 29 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 30 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 31 | 32 | static 
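/*
 * The variant lists in this file are ordered fastest-first:
 * spinand_select_op_variant() in core.c walks each list in order and
 * keeps the first template the controller can execute, so a plain
 * single-wire controller falls through to the x1 read at the end, e.g.:
 *
 *   op = spinand_select_op_variant(spinand,
 *                                  info->op_variants.read_cache);
 *   // quad I/O when supported, x1 read as the last resort
 */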
SPINAND_OP_VARIANTS(read_cache_variants_f, 33 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), 34 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0), 35 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 36 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0), 37 | SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0), 38 | SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0)); 39 | 40 | static SPINAND_OP_VARIANTS(read_cache_variants_1gq5, 41 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 42 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 43 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 44 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 45 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 46 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 47 | 48 | static SPINAND_OP_VARIANTS(read_cache_variants_2gq5, 49 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0), 50 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 51 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0), 52 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 53 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 54 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 55 | 56 | static SPINAND_OP_VARIANTS(write_cache_variants, 57 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 58 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 59 | 60 | static SPINAND_OP_VARIANTS(update_cache_variants, 61 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 62 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 63 | 64 | static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand, 65 | u8 status) 66 | { 67 | switch (status & STATUS_ECC_MASK) { 68 | case STATUS_ECC_NO_BITFLIPS: 69 | return 0; 70 | 71 | case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS: 72 | /* 1-7 bits are flipped. return the maximum. */ 73 | return 7; 74 | 75 | case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS: 76 | return 8; 77 | 78 | case STATUS_ECC_UNCOR_ERROR: 79 | return -EBADMSG; 80 | 81 | default: 82 | break; 83 | } 84 | 85 | return -EINVAL; 86 | } 87 | 88 | static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, 89 | u8 status) 90 | { 91 | u8 status2; 92 | struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, 93 | &status2); 94 | int ret; 95 | 96 | switch (status & STATUS_ECC_MASK) { 97 | case STATUS_ECC_NO_BITFLIPS: 98 | return 0; 99 | 100 | case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS: 101 | /* 102 | * Read status2 register to determine a more fine grained 103 | * bit error status 104 | */ 105 | ret = spi_mem_exec_op(spinand->spimem, &op); 106 | if (ret) 107 | return ret; 108 | 109 | /* 110 | * 4 ... 
7 bits are flipped (1..4 can't be detected, so 111 | * report the maximum of 4 in this case 112 | */ 113 | /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */ 114 | return ((status & STATUS_ECC_MASK) >> 2) | 115 | ((status2 & STATUS_ECC_MASK) >> 4); 116 | 117 | case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS: 118 | return 8; 119 | 120 | case STATUS_ECC_UNCOR_ERROR: 121 | return -EBADMSG; 122 | 123 | default: 124 | break; 125 | } 126 | 127 | return -EINVAL; 128 | } 129 | 130 | static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand, 131 | u8 status) 132 | { 133 | u8 status2; 134 | struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, 135 | &status2); 136 | int ret; 137 | 138 | switch (status & STATUS_ECC_MASK) { 139 | case STATUS_ECC_NO_BITFLIPS: 140 | return 0; 141 | 142 | case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS: 143 | /* 144 | * Read status2 register to determine a more fine grained 145 | * bit error status 146 | */ 147 | ret = spi_mem_exec_op(spinand->spimem, &op); 148 | if (ret) 149 | return ret; 150 | 151 | /* 152 | * 1 ... 4 bits are flipped (and corrected) 153 | */ 154 | /* bits sorted this way (1...0): ECCSE1, ECCSE0 */ 155 | return ((status2 & STATUS_ECC_MASK) >> 4) + 1; 156 | 157 | case STATUS_ECC_UNCOR_ERROR: 158 | return -EBADMSG; 159 | 160 | default: 161 | break; 162 | } 163 | 164 | return -EINVAL; 165 | } 166 | 167 | static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand, 168 | u8 status) 169 | { 170 | switch (status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) { 171 | case GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS: 172 | return 0; 173 | 174 | case GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS: 175 | return 3; 176 | 177 | case GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR: 178 | return -EBADMSG; 179 | 180 | default: /* (2 << 4) through (6 << 4) are 4-8 corrected errors */ 181 | return ((status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4) + 2; 182 | } 183 | 184 | return -EINVAL; 185 | } 186 | 187 | static const struct spinand_info gigadevice_spinand_table[] = { 188 | SPINAND_INFO("GD5F1GQ4xA", 189 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1), 190 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 191 | NAND_ECCREQ(8, 512), 192 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 193 | &write_cache_variants, 194 | &update_cache_variants), 195 | SPINAND_HAS_QE_BIT, 196 | SPINAND_ECCINFO(gd5fxgq4xa_ecc_get_status)), 197 | SPINAND_INFO("GD5F2GQ4xA", 198 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2), 199 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 200 | NAND_ECCREQ(8, 512), 201 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 202 | &write_cache_variants, 203 | &update_cache_variants), 204 | SPINAND_HAS_QE_BIT, 205 | SPINAND_ECCINFO(gd5fxgq4xa_ecc_get_status)), 206 | SPINAND_INFO("GD5F4GQ4xA", 207 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4), 208 | NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1), 209 | NAND_ECCREQ(8, 512), 210 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 211 | &write_cache_variants, 212 | &update_cache_variants), 213 | SPINAND_HAS_QE_BIT, 214 | SPINAND_ECCINFO(gd5fxgq4xa_ecc_get_status)), 215 | SPINAND_INFO("GD5F4GQ4RC", 216 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xa4, 0x68), 217 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 218 | NAND_ECCREQ(8, 512), 219 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f, 220 | &write_cache_variants, 221 | &update_cache_variants), 222 | SPINAND_HAS_QE_BIT, 223 | SPINAND_ECCINFO(gd5fxgq4ufxxg_ecc_get_status)), 224 | SPINAND_INFO("GD5F4GQ4UC", 225 | 
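/*
 * Worked example for the ECCS/ECCSE arithmetic in
 * gd5fxgq4uexxg_ecc_get_status() above: in that branch status carries
 * ECCS = 01b, so (status & STATUS_ECC_MASK) >> 2 equals 4, and status2
 * contributes ECCSE = 0..3 from bits [5:4], for a total of 4..7:
 *
 *   status  = 0x10  ->  (0x10 & 0x30) >> 2 = 4
 *   status2 = 0x20  ->  (0x20 & 0x30) >> 4 = 2   =>  4 | 2 = 6 bitflips
 */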
SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb4, 0x68), 226 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 227 | NAND_ECCREQ(8, 512), 228 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f, 229 | &write_cache_variants, 230 | &update_cache_variants), 231 | SPINAND_HAS_QE_BIT, 232 | SPINAND_ECCINFO(gd5fxgq4ufxxg_ecc_get_status)), 233 | SPINAND_INFO("GD5F1GQ4UExxG", 234 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1), 235 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 236 | NAND_ECCREQ(8, 512), 237 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 238 | &write_cache_variants, 239 | &update_cache_variants), 240 | SPINAND_HAS_QE_BIT, 241 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 242 | SPINAND_INFO("GD5F1GQ4RExxG", 243 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xc1), 244 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 245 | NAND_ECCREQ(8, 512), 246 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 247 | &write_cache_variants, 248 | &update_cache_variants), 249 | SPINAND_HAS_QE_BIT, 250 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 251 | SPINAND_INFO("GD5F2GQ4UExxG", 252 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd2), 253 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 254 | NAND_ECCREQ(8, 512), 255 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 256 | &write_cache_variants, 257 | &update_cache_variants), 258 | SPINAND_HAS_QE_BIT, 259 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 260 | SPINAND_INFO("GD5F2GQ4RExxG", 261 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xc2), 262 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 263 | NAND_ECCREQ(8, 512), 264 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 265 | &write_cache_variants, 266 | &update_cache_variants), 267 | SPINAND_HAS_QE_BIT, 268 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 269 | SPINAND_INFO("GD5F1GQ4UFxxG", 270 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48), 271 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 272 | NAND_ECCREQ(8, 512), 273 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f, 274 | &write_cache_variants, 275 | &update_cache_variants), 276 | SPINAND_HAS_QE_BIT, 277 | SPINAND_ECCINFO(gd5fxgq4ufxxg_ecc_get_status)), 278 | SPINAND_INFO("GD5F1GQ5UExxG", 279 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x51), 280 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 281 | NAND_ECCREQ(4, 512), 282 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 283 | &write_cache_variants, 284 | &update_cache_variants), 285 | SPINAND_HAS_QE_BIT, 286 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 287 | SPINAND_INFO("GD5F1GQ5RExxG", 288 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x41), 289 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 290 | NAND_ECCREQ(4, 512), 291 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 292 | &write_cache_variants, 293 | &update_cache_variants), 294 | SPINAND_HAS_QE_BIT, 295 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 296 | SPINAND_INFO("GD5F2GQ5UExxG", 297 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x52), 298 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 299 | NAND_ECCREQ(4, 512), 300 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, 301 | &write_cache_variants, 302 | &update_cache_variants), 303 | SPINAND_HAS_QE_BIT, 304 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 305 | SPINAND_INFO("GD5F2GQ5RExxG", 306 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x42), 307 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 308 | NAND_ECCREQ(4, 512), 309 | 
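/*
 * Field order in the NAND_MEMORG() entries here (as defined in the
 * Linux v5.8 headers this code was lifted from): bits_per_cell,
 * pagesize, oobsize, pages_per_eraseblock, eraseblocks_per_lun,
 * max_bad_eraseblocks_per_lun, planes_per_lun, luns_per_target,
 * ntargets. Capacity check for the 1Gbit parts:
 *
 *   2048 B/page * 64 pages/block * 1024 blocks = 128 MiB = 1 Gbit
 */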
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, 310 | &write_cache_variants, 311 | &update_cache_variants), 312 | SPINAND_HAS_QE_BIT, 313 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 314 | SPINAND_INFO("GD5F4GQ6UExxG", 315 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x55), 316 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 2, 1), 317 | NAND_ECCREQ(4, 512), 318 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, 319 | &write_cache_variants, 320 | &update_cache_variants), 321 | SPINAND_HAS_QE_BIT, 322 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 323 | SPINAND_INFO("GD5F4GQ6RExxG", 324 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x45), 325 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 2, 1), 326 | NAND_ECCREQ(4, 512), 327 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, 328 | &write_cache_variants, 329 | &update_cache_variants), 330 | SPINAND_HAS_QE_BIT, 331 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 332 | SPINAND_INFO("GD5F1GM7UExxG", 333 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x91), 334 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 335 | NAND_ECCREQ(8, 512), 336 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 337 | &write_cache_variants, 338 | &update_cache_variants), 339 | SPINAND_HAS_QE_BIT, 340 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 341 | SPINAND_INFO("GD5F1GM7RExxG", 342 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81), 343 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 344 | NAND_ECCREQ(8, 512), 345 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 346 | &write_cache_variants, 347 | &update_cache_variants), 348 | SPINAND_HAS_QE_BIT, 349 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 350 | SPINAND_INFO("GD5F2GM7UExxG", 351 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92), 352 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 353 | NAND_ECCREQ(8, 512), 354 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 355 | &write_cache_variants, 356 | &update_cache_variants), 357 | SPINAND_HAS_QE_BIT, 358 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 359 | SPINAND_INFO("GD5F2GM7RExxG", 360 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x82), 361 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 362 | NAND_ECCREQ(8, 512), 363 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 364 | &write_cache_variants, 365 | &update_cache_variants), 366 | SPINAND_HAS_QE_BIT, 367 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 368 | SPINAND_INFO("GD5F4GM8UExxG", 369 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x95), 370 | NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1), 371 | NAND_ECCREQ(8, 512), 372 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 373 | &write_cache_variants, 374 | &update_cache_variants), 375 | SPINAND_HAS_QE_BIT, 376 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 377 | SPINAND_INFO("GD5F4GM8RExxG", 378 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x85), 379 | NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1), 380 | NAND_ECCREQ(8, 512), 381 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 382 | &write_cache_variants, 383 | &update_cache_variants), 384 | SPINAND_HAS_QE_BIT, 385 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 386 | }; 387 | 388 | static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = { 389 | }; 390 | 391 | const struct spinand_manufacturer gigadevice_spinand_manufacturer = { 392 | .id = SPINAND_MFR_GIGADEVICE, 393 | .name = "GigaDevice", 394 | .chips = gigadevice_spinand_table, 395 | .nchips = 
ARRAY_SIZE(gigadevice_spinand_table), 396 | .ops = &gigadevice_spinand_manuf_ops, 397 | }; 398 | -------------------------------------------------------------------------------- /spi-nand/macronix.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2018 Macronix 4 | * 5 | * Author: Boris Brezillon 6 | */ 7 | 8 | #include 9 | 10 | #define SPINAND_MFR_MACRONIX 0xC2 11 | #define MACRONIX_ECCSR_MASK 0x0F 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 18 | 19 | static SPINAND_OP_VARIANTS(write_cache_variants, 20 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 21 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 22 | 23 | static SPINAND_OP_VARIANTS(update_cache_variants, 24 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 25 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 26 | 27 | static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr) 28 | { 29 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1), 30 | SPI_MEM_OP_NO_ADDR, 31 | SPI_MEM_OP_DUMMY(1, 1), 32 | SPI_MEM_OP_DATA_IN(1, eccsr, 1)); 33 | 34 | int ret = spi_mem_exec_op(spinand->spimem, &op); 35 | if (ret) 36 | return ret; 37 | 38 | *eccsr &= MACRONIX_ECCSR_MASK; 39 | return 0; 40 | } 41 | 42 | static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, 43 | u8 status) 44 | { 45 | struct nand_device *nand = spinand_to_nand(spinand); 46 | u8 eccsr; 47 | 48 | switch (status & STATUS_ECC_MASK) { 49 | case STATUS_ECC_NO_BITFLIPS: 50 | return 0; 51 | 52 | case STATUS_ECC_UNCOR_ERROR: 53 | return -EBADMSG; 54 | 55 | case STATUS_ECC_HAS_BITFLIPS: 56 | /* 57 | * Let's try to retrieve the real maximum number of bitflips 58 | * in order to avoid forcing the wear-leveling layer to move 59 | * data around if it's not necessary. 
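 *
 * The ECCSR fetched by mx35lf1ge4ab_get_eccsr() above is a single byte
 * read with opcode 0x7c after one dummy byte; only the low nibble is
 * meaningful, e.g.:
 *
 *   raw eccsr = 0xa3  ->  0xa3 & MACRONIX_ECCSR_MASK = 3 bitflips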
60 | */ 61 | if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr)) 62 | return nand->eccreq.strength; 63 | 64 | if (eccsr > nand->eccreq.strength || !eccsr) 65 | return nand->eccreq.strength; 66 | 67 | return eccsr; 68 | 69 | default: 70 | break; 71 | } 72 | 73 | return -EINVAL; 74 | } 75 | 76 | static const struct spinand_info macronix_spinand_table[] = { 77 | SPINAND_INFO("MX35LF1GE4AB", 78 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12), 79 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 80 | NAND_ECCREQ(4, 512), 81 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 82 | &write_cache_variants, 83 | &update_cache_variants), 84 | SPINAND_HAS_QE_BIT, 85 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 86 | SPINAND_INFO("MX35LF2GE4AB", 87 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22), 88 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), 89 | NAND_ECCREQ(4, 512), 90 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 91 | &write_cache_variants, 92 | &update_cache_variants), 93 | SPINAND_HAS_QE_BIT, 94 | SPINAND_ECCINFO(NULL)), 95 | }; 96 | 97 | static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = { 98 | }; 99 | 100 | const struct spinand_manufacturer macronix_spinand_manufacturer = { 101 | .id = SPINAND_MFR_MACRONIX, 102 | .name = "Macronix", 103 | .chips = macronix_spinand_table, 104 | .nchips = ARRAY_SIZE(macronix_spinand_table), 105 | .ops = &macronix_spinand_manuf_ops, 106 | }; 107 | -------------------------------------------------------------------------------- /spi-nand/micron.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2016-2017 Micron Technology, Inc. 4 | * 5 | * Authors: 6 | * Peter Pan 7 | */ 8 | 9 | #include 10 | 11 | #define SPINAND_MFR_MICRON 0x2c 12 | 13 | #define MICRON_STATUS_ECC_MASK GENMASK(7, 4) 14 | #define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4) 15 | #define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4) 16 | #define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4) 17 | #define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4) 18 | 19 | #define MICRON_CFG_CR BIT(0) 20 | 21 | /* 22 | * As per datasheet, die selection is done by the 6th bit of Die 23 | * Select Register (Address 0xD0). 
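 *
 * Selecting die 1 therefore writes MICRON_SELECT_DIE(1) == 0x40 (bit 6
 * set) to register 0xD0, which is exactly what micron_select_target()
 * below does:
 *
 *   *spinand->scratchbuf = MICRON_SELECT_DIE(target);
 *   spi_mem_exec_op(spinand->spimem, &op);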
24 | */ 25 | #define MICRON_DIE_SELECT_REG 0xD0 26 | 27 | #define MICRON_SELECT_DIE(x) ((x) << 6) 28 | 29 | static SPINAND_OP_VARIANTS(read_cache_variants, 30 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 31 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 32 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 33 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 34 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 35 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 36 | 37 | static SPINAND_OP_VARIANTS(write_cache_variants, 38 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 39 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 40 | 41 | static SPINAND_OP_VARIANTS(update_cache_variants, 42 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 43 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 44 | 45 | static int micron_select_target(struct spinand_device *spinand, 46 | unsigned int target) 47 | { 48 | struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG, 49 | spinand->scratchbuf); 50 | 51 | if (target > 1) 52 | return -EINVAL; 53 | 54 | *spinand->scratchbuf = MICRON_SELECT_DIE(target); 55 | 56 | return spi_mem_exec_op(spinand->spimem, &op); 57 | } 58 | 59 | static int micron_8_ecc_get_status(struct spinand_device *spinand, 60 | u8 status) 61 | { 62 | switch (status & MICRON_STATUS_ECC_MASK) { 63 | case STATUS_ECC_NO_BITFLIPS: 64 | return 0; 65 | 66 | case STATUS_ECC_UNCOR_ERROR: 67 | return -EBADMSG; 68 | 69 | case MICRON_STATUS_ECC_1TO3_BITFLIPS: 70 | return 3; 71 | 72 | case MICRON_STATUS_ECC_4TO6_BITFLIPS: 73 | return 6; 74 | 75 | case MICRON_STATUS_ECC_7TO8_BITFLIPS: 76 | return 8; 77 | 78 | default: 79 | break; 80 | } 81 | 82 | return -EINVAL; 83 | } 84 | 85 | static const struct spinand_info micron_spinand_table[] = { 86 | /* M79A 2Gb 3.3V */ 87 | SPINAND_INFO("MT29F2G01ABAGD", 88 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24), 89 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), 90 | NAND_ECCREQ(8, 512), 91 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 92 | &write_cache_variants, 93 | &update_cache_variants), 94 | 0, 95 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 96 | /* M79A 2Gb 1.8V */ 97 | SPINAND_INFO("MT29F2G01ABBGD", 98 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25), 99 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), 100 | NAND_ECCREQ(8, 512), 101 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 102 | &write_cache_variants, 103 | &update_cache_variants), 104 | 0, 105 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 106 | /* M78A 1Gb 3.3V */ 107 | SPINAND_INFO("MT29F1G01ABAFD", 108 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14), 109 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 110 | NAND_ECCREQ(8, 512), 111 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 112 | &write_cache_variants, 113 | &update_cache_variants), 114 | 0, 115 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 116 | /* M78A 1Gb 1.8V */ 117 | SPINAND_INFO("MT29F1G01ABAFD", 118 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15), 119 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 120 | NAND_ECCREQ(8, 512), 121 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 122 | &write_cache_variants, 123 | &update_cache_variants), 124 | 0, 125 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 126 | /* M79A 4Gb 3.3V */ 127 | SPINAND_INFO("MT29F4G01ADAGD", 128 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36), 129 | NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2), 130 | NAND_ECCREQ(8, 512), 131 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 132 | &write_cache_variants, 
133 | &update_cache_variants), 134 | 0, 135 | SPINAND_ECCINFO(micron_8_ecc_get_status), 136 | SPINAND_SELECT_TARGET(micron_select_target)), 137 | /* M70A 4Gb 3.3V */ 138 | SPINAND_INFO("MT29F4G01ABAFD", 139 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34), 140 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 141 | NAND_ECCREQ(8, 512), 142 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 143 | &write_cache_variants, 144 | &update_cache_variants), 145 | SPINAND_HAS_CR_FEAT_BIT, 146 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 147 | /* M70A 4Gb 1.8V */ 148 | SPINAND_INFO("MT29F4G01ABBFD", 149 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35), 150 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 151 | NAND_ECCREQ(8, 512), 152 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 153 | &write_cache_variants, 154 | &update_cache_variants), 155 | SPINAND_HAS_CR_FEAT_BIT, 156 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 157 | /* M70A 8Gb 3.3V */ 158 | SPINAND_INFO("MT29F8G01ADAFD", 159 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46), 160 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2), 161 | NAND_ECCREQ(8, 512), 162 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 163 | &write_cache_variants, 164 | &update_cache_variants), 165 | SPINAND_HAS_CR_FEAT_BIT, 166 | SPINAND_ECCINFO(micron_8_ecc_get_status), 167 | SPINAND_SELECT_TARGET(micron_select_target)), 168 | /* M70A 8Gb 1.8V */ 169 | SPINAND_INFO("MT29F8G01ADBFD", 170 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47), 171 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2), 172 | NAND_ECCREQ(8, 512), 173 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 174 | &write_cache_variants, 175 | &update_cache_variants), 176 | SPINAND_HAS_CR_FEAT_BIT, 177 | SPINAND_ECCINFO(micron_8_ecc_get_status), 178 | SPINAND_SELECT_TARGET(micron_select_target)), 179 | }; 180 | 181 | static int micron_spinand_init(struct spinand_device *spinand) 182 | { 183 | /* 184 | * M70A device series enable Continuous Read feature at Power-up, 185 | * which is not supported. Disable this bit to avoid any possible 186 | * failure. 
187 | */ 188 | if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT) 189 | return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0); 190 | 191 | return 0; 192 | } 193 | 194 | static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = { 195 | .init = micron_spinand_init, 196 | }; 197 | 198 | const struct spinand_manufacturer micron_spinand_manufacturer = { 199 | .id = SPINAND_MFR_MICRON, 200 | .name = "Micron", 201 | .chips = micron_spinand_table, 202 | .nchips = ARRAY_SIZE(micron_spinand_table), 203 | .ops = &micron_spinand_manuf_ops, 204 | }; 205 | -------------------------------------------------------------------------------- /spi-nand/paragon.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (C) 2019 Jeff Kletsky 4 | * 5 | * Author: Jeff Kletsky 6 | */ 7 | 8 | #include 9 | 10 | 11 | #define SPINAND_MFR_PARAGON 0xa1 12 | 13 | 14 | #define PN26G0XA_STATUS_ECC_BITMASK (3 << 4) 15 | 16 | #define PN26G0XA_STATUS_ECC_NONE_DETECTED (0 << 4) 17 | #define PN26G0XA_STATUS_ECC_1_7_CORRECTED (1 << 4) 18 | #define PN26G0XA_STATUS_ECC_ERRORED (2 << 4) 19 | #define PN26G0XA_STATUS_ECC_8_CORRECTED (3 << 4) 20 | 21 | 22 | static SPINAND_OP_VARIANTS(read_cache_variants, 23 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 24 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 25 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 26 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 27 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 28 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 29 | 30 | static SPINAND_OP_VARIANTS(write_cache_variants, 31 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 32 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 33 | 34 | static SPINAND_OP_VARIANTS(update_cache_variants, 35 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 36 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 37 | 38 | static int pn26g0xa_ecc_get_status(struct spinand_device *spinand, 39 | u8 status) 40 | { 41 | switch (status & PN26G0XA_STATUS_ECC_BITMASK) { 42 | case PN26G0XA_STATUS_ECC_NONE_DETECTED: 43 | return 0; 44 | 45 | case PN26G0XA_STATUS_ECC_1_7_CORRECTED: 46 | return 7; /* Return upper limit by convention */ 47 | 48 | case PN26G0XA_STATUS_ECC_8_CORRECTED: 49 | return 8; 50 | 51 | case PN26G0XA_STATUS_ECC_ERRORED: 52 | return -EBADMSG; 53 | 54 | default: 55 | break; 56 | } 57 | 58 | return -EINVAL; 59 | } 60 | 61 | static const struct spinand_info paragon_spinand_table[] = { 62 | SPINAND_INFO("PN26G01A", 63 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe1), 64 | NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1), 65 | NAND_ECCREQ(8, 512), 66 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 67 | &write_cache_variants, 68 | &update_cache_variants), 69 | 0, 70 | SPINAND_ECCINFO(pn26g0xa_ecc_get_status)), 71 | SPINAND_INFO("PN26G02A", 72 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe2), 73 | NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1), 74 | NAND_ECCREQ(8, 512), 75 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 76 | &write_cache_variants, 77 | &update_cache_variants), 78 | 0, 79 | SPINAND_ECCINFO(pn26g0xa_ecc_get_status)), 80 | }; 81 | 82 | static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = { 83 | }; 84 | 85 | const struct spinand_manufacturer paragon_spinand_manufacturer = { 86 | .id = SPINAND_MFR_PARAGON, 87 | .name = "Paragon", 88 | .chips = paragon_spinand_table, 89 | .nchips = ARRAY_SIZE(paragon_spinand_table), 90 | .ops = &paragon_spinand_manuf_ops, 91 | }; 92 | 
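/*
 * Every vendor driver in this tree follows the same shape as the
 * Paragon one above: an info table, a (possibly empty) ops struct, and
 * a spinand_manufacturer referenced from spinand_manufacturers[] in
 * spi-nand/core.c. Skeleton for a hypothetical new vendor (all names
 * below are placeholders):
 *
 *   static const struct spinand_info newvendor_spinand_table[] = {
 *           // SPINAND_INFO(...) entries go here
 *   };
 *
 *   static const struct spinand_manufacturer_ops newvendor_spinand_manuf_ops = {
 *   };
 *
 *   const struct spinand_manufacturer newvendor_spinand_manufacturer = {
 *           .id = SPINAND_MFR_NEWVENDOR,
 *           .name = "NewVendor",
 *           .chips = newvendor_spinand_table,
 *           .nchips = ARRAY_SIZE(newvendor_spinand_table),
 *           .ops = &newvendor_spinand_manuf_ops,
 *   };
 *
 * plus an entry in spinand_manufacturers[].
 */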
-------------------------------------------------------------------------------- /spi-nand/toshiba.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2018 exceet electronics GmbH 4 | * Copyright (c) 2018 Kontron Electronics GmbH 5 | * 6 | * Author: Frieder Schrempf 7 | */ 8 | 9 | #include 10 | 11 | /* Kioxia is new name of Toshiba memory. */ 12 | #define SPINAND_MFR_TOSHIBA 0x98 13 | #define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4) 14 | 15 | static SPINAND_OP_VARIANTS(read_cache_variants, 16 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_x4_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | static SPINAND_OP_VARIANTS(update_cache_x4_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 28 | 29 | /** 30 | * Backward compatibility for 1st generation Serial NAND devices 31 | * which don't support Quad Program Load operation. 32 | */ 33 | static SPINAND_OP_VARIANTS(write_cache_variants, 34 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 35 | 36 | static SPINAND_OP_VARIANTS(update_cache_variants, 37 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 38 | 39 | static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, 40 | u8 status) 41 | { 42 | struct nand_device *nand = spinand_to_nand(spinand); 43 | u8 mbf = 0; 44 | struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf); 45 | 46 | switch (status & STATUS_ECC_MASK) { 47 | case STATUS_ECC_NO_BITFLIPS: 48 | return 0; 49 | 50 | case STATUS_ECC_UNCOR_ERROR: 51 | return -EBADMSG; 52 | 53 | case STATUS_ECC_HAS_BITFLIPS: 54 | case TOSH_STATUS_ECC_HAS_BITFLIPS_T: 55 | /* 56 | * Let's try to retrieve the real maximum number of bitflips 57 | * in order to avoid forcing the wear-leveling layer to move 58 | * data around if it's not necessary. 
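 *
 * The maximum bitflip count (MBF) is returned in the high nibble of
 * feature register 0x30, hence the mbf >>= 4 below, e.g.:
 *
 *   GET_FEATURE(0x30) = 0x40  ->  mbf = 4 corrected bitflips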
59 | */ 60 | if (spi_mem_exec_op(spinand->spimem, &op)) 61 | return nand->eccreq.strength; 62 | 63 | mbf >>= 4; 64 | 65 | if (mbf > nand->eccreq.strength || !mbf) 66 | return nand->eccreq.strength; 67 | 68 | return mbf; 69 | 70 | default: 71 | break; 72 | } 73 | 74 | return -EINVAL; 75 | } 76 | 77 | static const struct spinand_info toshiba_spinand_table[] = { 78 | /* 3.3V 1Gb (1st generation) */ 79 | SPINAND_INFO("TC58CVG0S3HRAIG", 80 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC2), 81 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 82 | NAND_ECCREQ(8, 512), 83 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 84 | &write_cache_variants, 85 | &update_cache_variants), 86 | 0, 87 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 88 | /* 3.3V 2Gb (1st generation) */ 89 | SPINAND_INFO("TC58CVG1S3HRAIG", 90 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCB), 91 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 92 | NAND_ECCREQ(8, 512), 93 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 94 | &write_cache_variants, 95 | &update_cache_variants), 96 | 0, 97 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 98 | /* 3.3V 4Gb (1st generation) */ 99 | SPINAND_INFO("TC58CVG2S0HRAIG", 100 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCD), 101 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 102 | NAND_ECCREQ(8, 512), 103 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 104 | &write_cache_variants, 105 | &update_cache_variants), 106 | 0, 107 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 108 | /* 1.8V 1Gb (1st generation) */ 109 | SPINAND_INFO("TC58CYG0S3HRAIG", 110 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB2), 111 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 112 | NAND_ECCREQ(8, 512), 113 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 114 | &write_cache_variants, 115 | &update_cache_variants), 116 | 0, 117 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 118 | /* 1.8V 2Gb (1st generation) */ 119 | SPINAND_INFO("TC58CYG1S3HRAIG", 120 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBB), 121 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 122 | NAND_ECCREQ(8, 512), 123 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 124 | &write_cache_variants, 125 | &update_cache_variants), 126 | 0, 127 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 128 | /* 1.8V 4Gb (1st generation) */ 129 | SPINAND_INFO("TC58CYG2S0HRAIG", 130 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBD), 131 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 132 | NAND_ECCREQ(8, 512), 133 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 134 | &write_cache_variants, 135 | &update_cache_variants), 136 | 0, 137 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 138 | 139 | /* 140 | * 2nd generation serial nand has HOLD_D which is equivalent to 141 | * QE_BIT. 
142 | */ 143 | /* 3.3V 1Gb (2nd generation) */ 144 | SPINAND_INFO("TC58CVG0S3HRAIJ", 145 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE2), 146 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 147 | NAND_ECCREQ(8, 512), 148 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 149 | &write_cache_x4_variants, 150 | &update_cache_x4_variants), 151 | SPINAND_HAS_QE_BIT, 152 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 153 | /* 3.3V 2Gb (2nd generation) */ 154 | SPINAND_INFO("TC58CVG1S3HRAIJ", 155 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEB), 156 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 157 | NAND_ECCREQ(8, 512), 158 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 159 | &write_cache_x4_variants, 160 | &update_cache_x4_variants), 161 | SPINAND_HAS_QE_BIT, 162 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 163 | /* 3.3V 4Gb (2nd generation) */ 164 | SPINAND_INFO("TC58CVG2S0HRAIJ", 165 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xED), 166 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 167 | NAND_ECCREQ(8, 512), 168 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 169 | &write_cache_x4_variants, 170 | &update_cache_x4_variants), 171 | SPINAND_HAS_QE_BIT, 172 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 173 | /* 3.3V 8Gb (2nd generation) */ 174 | SPINAND_INFO("TH58CVG3S0HRAIJ", 175 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4), 176 | NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1), 177 | NAND_ECCREQ(8, 512), 178 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 179 | &write_cache_x4_variants, 180 | &update_cache_x4_variants), 181 | SPINAND_HAS_QE_BIT, 182 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 183 | /* 1.8V 1Gb (2nd generation) */ 184 | SPINAND_INFO("TC58CYG0S3HRAIJ", 185 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD2), 186 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 187 | NAND_ECCREQ(8, 512), 188 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 189 | &write_cache_x4_variants, 190 | &update_cache_x4_variants), 191 | SPINAND_HAS_QE_BIT, 192 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 193 | /* 1.8V 2Gb (2nd generation) */ 194 | SPINAND_INFO("TC58CYG1S3HRAIJ", 195 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDB), 196 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 197 | NAND_ECCREQ(8, 512), 198 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 199 | &write_cache_x4_variants, 200 | &update_cache_x4_variants), 201 | SPINAND_HAS_QE_BIT, 202 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 203 | /* 1.8V 4Gb (2nd generation) */ 204 | SPINAND_INFO("TC58CYG2S0HRAIJ", 205 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDD), 206 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 207 | NAND_ECCREQ(8, 512), 208 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 209 | &write_cache_x4_variants, 210 | &update_cache_x4_variants), 211 | SPINAND_HAS_QE_BIT, 212 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 213 | /* 1.8V 8Gb (2nd generation) */ 214 | SPINAND_INFO("TH58CYG3S0HRAIJ", 215 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD4), 216 | NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1), 217 | NAND_ECCREQ(8, 512), 218 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 219 | &write_cache_x4_variants, 220 | &update_cache_x4_variants), 221 | SPINAND_HAS_QE_BIT, 222 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 223 | }; 224 | 225 | static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = { 226 | }; 227 | 228 | const struct spinand_manufacturer toshiba_spinand_manufacturer = { 229 | .id 
= SPINAND_MFR_TOSHIBA, 230 | .name = "Toshiba", 231 | .chips = toshiba_spinand_table, 232 | .nchips = ARRAY_SIZE(toshiba_spinand_table), 233 | .ops = &toshiba_spinand_manuf_ops, 234 | }; 235 | -------------------------------------------------------------------------------- /spi-nand/winbond.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2017 exceet electronics GmbH 4 | * 5 | * Authors: 6 | * Frieder Schrempf 7 | * Boris Brezillon 8 | */ 9 | 10 | #include 11 | 12 | #define SPINAND_MFR_WINBOND 0xEF 13 | 14 | #define WINBOND_CFG_BUF_READ BIT(3) 15 | 16 | static SPINAND_OP_VARIANTS(read_cache_variants, 17 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 20 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 21 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 22 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 23 | 24 | static SPINAND_OP_VARIANTS(write_cache_variants, 25 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 26 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 27 | 28 | static SPINAND_OP_VARIANTS(update_cache_variants, 29 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 30 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 31 | 32 | static int w25m02gv_select_target(struct spinand_device *spinand, 33 | unsigned int target) 34 | { 35 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0xc2, 1), 36 | SPI_MEM_OP_NO_ADDR, 37 | SPI_MEM_OP_NO_DUMMY, 38 | SPI_MEM_OP_DATA_OUT(1, 39 | spinand->scratchbuf, 40 | 1)); 41 | 42 | *spinand->scratchbuf = target; 43 | return spi_mem_exec_op(spinand->spimem, &op); 44 | } 45 | 46 | static const struct spinand_info winbond_spinand_table[] = { 47 | SPINAND_INFO("W25M02GV", 48 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab), 49 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2), 50 | NAND_ECCREQ(1, 512), 51 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 52 | &write_cache_variants, 53 | &update_cache_variants), 54 | 0, 55 | SPINAND_ECCINFO(NULL), 56 | SPINAND_SELECT_TARGET(w25m02gv_select_target)), 57 | SPINAND_INFO("W25N01GV", 58 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa), 59 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 60 | NAND_ECCREQ(1, 512), 61 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 62 | &write_cache_variants, 63 | &update_cache_variants), 64 | 0, 65 | SPINAND_ECCINFO(NULL)), 66 | }; 67 | 68 | static int winbond_spinand_init(struct spinand_device *spinand) 69 | { 70 | struct nand_device *nand = spinand_to_nand(spinand); 71 | unsigned int i; 72 | 73 | /* 74 | * Make sure all dies are in buffer read mode and not continuous read 75 | * mode. 76 | */ 77 | for (i = 0; i < nand->memorg.ntargets; i++) { 78 | spinand_select_target(spinand, i); 79 | spinand_upd_cfg(spinand, WINBOND_CFG_BUF_READ, 80 | WINBOND_CFG_BUF_READ); 81 | } 82 | 83 | return 0; 84 | } 85 | 86 | static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = { 87 | .init = winbond_spinand_init, 88 | }; 89 | 90 | const struct spinand_manufacturer winbond_spinand_manufacturer = { 91 | .id = SPINAND_MFR_WINBOND, 92 | .name = "Winbond", 93 | .chips = winbond_spinand_table, 94 | .nchips = ARRAY_SIZE(winbond_spinand_table), 95 | .ops = &winbond_spinand_manuf_ops, 96 | }; 97 | --------------------------------------------------------------------------------
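Note that winbond_spinand_init() above ignores the return values of spinand_select_target() and spinand_upd_cfg(), just like the upstream kernel driver it was lifted from. A stricter variant, shown here purely as an illustrative sketch, would propagate the first failure:

static int winbond_spinand_init_strict(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;
	int ret;

	/* Put every die in buffer read mode, failing fast on any error. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			return ret;

		ret = spinand_upd_cfg(spinand, WINBOND_CFG_BUF_READ,
				      WINBOND_CFG_BUF_READ);
		if (ret)
			return ret;
	}

	return 0;
}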