├── udev └── 99-ch347.rules ├── ch347-hardware ├── ch347-prog-v1.2-sch.jpg ├── ch347-prog-v1.2-3dpcb.jpg └── ch347-prog-v1.2-kicad_pro.zip ├── .gitignore ├── include ├── spi.h ├── spi-mem-drvs.h ├── flashops.h ├── linux-err.h ├── linux-types.h ├── serprog.h ├── spi-mem.h ├── spinand.h └── nand.h ├── spi-mem ├── spi-mem-drvs.c ├── ch347 │ ├── spi-mem.c │ ├── ch347.h │ └── ch347.c ├── spi-mem-fx2qspi.c ├── spi-mem.c └── spi-mem-serprog.c ├── CMakeLists.txt ├── spi-nand ├── silicongo.c ├── gsto.c ├── unim.c ├── jsc.c ├── biwin.c ├── fmsh.c ├── paragon.c ├── skyhigh.c ├── esmt.c ├── foresee.c ├── hyf.c ├── etron.c ├── dosilicon.c ├── xtx.c ├── winbond.c ├── micron.c ├── toshiba.c ├── macronix.c └── gigadevice.c ├── README.md ├── main.c └── flashops.c /udev/99-ch347.rules: -------------------------------------------------------------------------------- 1 | SUBSYSTEM=="usb", ATTRS{idProduct}=="55db", ATTRS{idVendor}=="1a86", MODE="0666" 2 | -------------------------------------------------------------------------------- /ch347-hardware/ch347-prog-v1.2-sch.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hanwckf/spi-nand-prog/HEAD/ch347-hardware/ch347-prog-v1.2-sch.jpg -------------------------------------------------------------------------------- /ch347-hardware/ch347-prog-v1.2-3dpcb.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hanwckf/spi-nand-prog/HEAD/ch347-hardware/ch347-prog-v1.2-3dpcb.jpg -------------------------------------------------------------------------------- /ch347-hardware/ch347-prog-v1.2-kicad_pro.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hanwckf/spi-nand-prog/HEAD/ch347-hardware/ch347-prog-v1.2-kicad_pro.zip -------------------------------------------------------------------------------- /.gitignore:
-------------------------------------------------------------------------------- 1 | CMakeLists.txt.user 2 | CMakeCache.txt 3 | CMakeFiles 4 | CMakeScripts 5 | Testing 6 | Makefile 7 | cmake_install.cmake 8 | install_manifest.txt 9 | compile_commands.json 10 | CTestTestfile.cmake 11 | _deps 12 | /build/** 13 | .vscode 14 | -------------------------------------------------------------------------------- /include/spi.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #define SPI_TX_DUAL 0x100 /* transmit with 2 wires */ 4 | #define SPI_TX_QUAD 0x200 /* transmit with 4 wires */ 5 | #define SPI_RX_DUAL 0x400 /* receive with 2 wires */ 6 | #define SPI_RX_QUAD 0x800 /* receive with 4 wires */ 7 | #define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */ 8 | #define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */ -------------------------------------------------------------------------------- /include/spi-mem-drvs.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | struct spi_mem *spi_mem_probe(const char *drv, const char *drvarg); 5 | void spi_mem_remove(const char *drv, struct spi_mem *mem); 6 | struct spi_mem *fx2qspi_probe(void); 7 | void fx2qspi_remove(struct spi_mem *mem); 8 | struct spi_mem *serprog_probe(const char *devpath); 9 | void serprog_remove(struct spi_mem *mem); 10 | struct spi_mem *ch347_probe(const char *drvarg); 11 | void ch347_remove(struct spi_mem *mem); 12 | -------------------------------------------------------------------------------- /include/flashops.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | int snand_read(struct spinand_device *snand, size_t offs, size_t len, 5 | bool ecc_enabled, bool read_oob, FILE *fp); 6 | void snand_scan_bbm(struct spinand_device *snand); 7 | int snand_write(struct spinand_device *snand, size_t offs, bool ecc_enabled, 8 | bool
write_oob, bool erase_rest, FILE *fp, size_t old_bbm_offs, 9 | size_t old_bbm_len, size_t bbm_offs, size_t bbm_len); 10 | -------------------------------------------------------------------------------- /spi-mem/spi-mem-drvs.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | struct spi_mem *spi_mem_probe(const char *drv, const char *drvarg) { 6 | if (!strcmp(drv, "ch347")) 7 | return ch347_probe(drvarg); 8 | if (!strcmp(drv, "fx2qspi")) 9 | return fx2qspi_probe(); 10 | if (!strcmp(drv, "serprog")) 11 | return serprog_probe(drvarg); 12 | return NULL; 13 | } 14 | 15 | void spi_mem_remove(const char *drv, struct spi_mem *mem) { 16 | if (!strcmp(drv, "ch347")) 17 | return ch347_remove(mem); 18 | if (!strcmp(drv, "fx2qspi")) 19 | return fx2qspi_remove(mem); 20 | if (!strcmp(drv, "serprog")) 21 | return serprog_remove(mem); 22 | } 23 | -------------------------------------------------------------------------------- /include/linux-err.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #define __must_check __attribute__((__warn_unused_result__)) 3 | #define __force 4 | 5 | #ifndef _WIN32 6 | #include 7 | #endif 8 | 9 | #define MAX_ERRNO 4095 10 | 11 | #define IS_ERR_VALUE(x) ((uintptr_t)(void *)(x) >= (uintptr_t)-MAX_ERRNO) 12 | 13 | 14 | static inline void * __must_check ERR_PTR(intptr_t error) 15 | { 16 | return (void *) error; 17 | } 18 | 19 | static inline intptr_t __must_check PTR_ERR(__force const void *ptr) 20 | { 21 | return (intptr_t) ptr; 22 | } 23 | 24 | static inline bool __must_check IS_ERR(__force const void *ptr) 25 | { 26 | return IS_ERR_VALUE((uintptr_t)ptr); 27 | } 28 | 29 | static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr) 30 | { 31 | return (!ptr) || IS_ERR_VALUE((uintptr_t)ptr); 32 | } 33 | -------------------------------------------------------------------------------- /include/linux-types.h: 
-------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #ifdef _WIN32 4 | #include 5 | typedef int8_t s8; 6 | typedef uint8_t u8; 7 | typedef int16_t s16; 8 | typedef uint16_t u16; 9 | typedef int32_t s32; 10 | typedef uint32_t u32; 11 | typedef int64_t s64; 12 | typedef uint64_t u64; 13 | typedef s64 loff_t; 14 | #else 15 | #include 16 | typedef __s8 s8; 17 | typedef __u8 u8; 18 | typedef __s16 s16; 19 | typedef __u16 u16; 20 | typedef __s32 s32; 21 | typedef __u32 u32; 22 | typedef __s64 s64; 23 | typedef __u64 u64; 24 | #endif 25 | 26 | #define container_of(ptr, type, member) ({ \ 27 | void *__mptr = (void *)(ptr); \ 28 | ((type *)(__mptr - offsetof(type, member))); }) 29 | 30 | #define BIT(_B) (1 << (_B)) 31 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) 32 | #define BITS_PER_LONG (sizeof(intptr_t) * 8) 33 | #define GENMASK(h, l) \ 34 | ((uintptr_t)(((~0ULL) - (1ULL << (l)) + 1) & \ 35 | (~0ULL >> (BITS_PER_LONG - 1 - (h))))) 36 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.1) 2 | set(EXE_NAME spi-nand-prog) 3 | project(${EXE_NAME} C) 4 | find_package(PkgConfig) 5 | pkg_check_modules(libusb-1.0 REQUIRED libusb-1.0) 6 | 7 | set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -ggdb -Wall") 8 | 9 | include_directories(include ${libusb-1.0_INCLUDEDIR}) 10 | 11 | set(SPI_MEM_SRCS 12 | spi-mem/spi-mem.c 13 | spi-mem/spi-mem-drvs.c 14 | spi-mem/spi-mem-fx2qspi.c 15 | spi-mem/spi-mem-serprog.c 16 | spi-mem/ch347/ch347.c 17 | spi-mem/ch347/spi-mem.c 18 | ) 19 | 20 | set(SPI_NAND_SRCS 21 | spi-nand/core.c 22 | spi-nand/gigadevice.c 23 | spi-nand/macronix.c 24 | spi-nand/micron.c 25 | spi-nand/paragon.c 26 | spi-nand/toshiba.c 27 | spi-nand/winbond.c 28 | spi-nand/esmt.c 29 | spi-nand/xtx.c 30 | spi-nand/biwin.c 31 | spi-nand/dosilicon.c 32 | 
spi-nand/etron.c 33 | spi-nand/fmsh.c 34 | spi-nand/foresee.c 35 | spi-nand/gsto.c 36 | spi-nand/hyf.c 37 | spi-nand/jsc.c 38 | spi-nand/silicongo.c 39 | spi-nand/skyhigh.c 40 | spi-nand/unim.c 41 | ) 42 | add_executable(${EXE_NAME} ${SPI_MEM_SRCS} ${SPI_NAND_SRCS} main.c flashops.c) 43 | target_link_libraries(${EXE_NAME} ${libusb-1.0_LIBRARIES}) 44 | -------------------------------------------------------------------------------- /include/serprog.h: -------------------------------------------------------------------------------- 1 | /* According to Serial Flasher Protocol Specification - version 1 */ 2 | #define S_ACK 0x06 3 | #define S_NAK 0x15 4 | #define S_CMD_NOP 0x00 /* No operation */ 5 | #define S_CMD_Q_IFACE 0x01 /* Query interface version */ 6 | #define S_CMD_Q_CMDMAP 0x02 /* Query supported commands bitmap */ 7 | #define S_CMD_Q_PGMNAME 0x03 /* Query programmer name */ 8 | #define S_CMD_Q_SERBUF 0x04 /* Query Serial Buffer Size */ 9 | #define S_CMD_Q_BUSTYPE 0x05 /* Query supported bustypes */ 10 | #define S_CMD_Q_CHIPSIZE 0x06 /* Query supported chipsize (2^n format) */ 11 | #define S_CMD_Q_OPBUF 0x07 /* Query operation buffer size */ 12 | #define S_CMD_Q_WRNMAXLEN 0x08 /* Query Write to opbuf: Write-N maximum length */ 13 | #define S_CMD_R_BYTE 0x09 /* Read a single byte */ 14 | #define S_CMD_R_NBYTES 0x0A /* Read n bytes */ 15 | #define S_CMD_O_INIT 0x0B /* Initialize operation buffer */ 16 | #define S_CMD_O_WRITEB 0x0C /* Write opbuf: Write byte with address */ 17 | #define S_CMD_O_WRITEN 0x0D /* Write to opbuf: Write-N */ 18 | #define S_CMD_O_DELAY 0x0E /* Write opbuf: udelay */ 19 | #define S_CMD_O_EXEC 0x0F /* Execute operation buffer */ 20 | #define S_CMD_SYNCNOP 0x10 /* Special no-operation that returns NAK+ACK */ 21 | #define S_CMD_Q_RDNMAXLEN 0x11 /* Query read-n maximum length */ 22 | #define S_CMD_S_BUSTYPE 0x12 /* Set used bustype(s). */ 23 | #define S_CMD_O_SPIOP 0x13 /* Perform SPI operation. 
*/ 24 | #define S_CMD_S_SPI_FREQ 0x14 /* Set SPI clock frequency */ 25 | #define S_CMD_S_PIN_STATE 0x15 /* Enable/disable output drivers */ 26 | -------------------------------------------------------------------------------- /spi-nand/silicongo.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2021 Rockchip Electronics Co., Ltd 4 | * 5 | * Authors: 6 | * Dingqiang Lin 7 | */ 8 | 9 | #include 10 | 11 | #define SPINAND_MFR_SILICONGO 0xEA 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | static SPINAND_OP_VARIANTS(update_cache_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 28 | 29 | static const struct spinand_info silicongo_spinand_table[] = { 30 | SPINAND_INFO("SGM7000I-S24W1GH", 31 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC1), 32 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 33 | NAND_ECCREQ(4, 512), 34 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 35 | &write_cache_variants, 36 | &update_cache_variants), 37 | SPINAND_HAS_QE_BIT, 38 | SPINAND_ECCINFO(NULL)), 39 | }; 40 | 41 | static const struct spinand_manufacturer_ops silicongo_spinand_manuf_ops = { 42 | }; 43 | 44 | const struct spinand_manufacturer silicongo_spinand_manufacturer = { 45 | .id = SPINAND_MFR_SILICONGO, 46 | .name = "silicongo", 47 | .chips = silicongo_spinand_table, 48 | .nchips = ARRAY_SIZE(silicongo_spinand_table), 49 
| .ops = &silicongo_spinand_manuf_ops, 50 | }; 51 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SPI-NAND Programmer 2 | 3 | ## About 4 | 5 | A SPI-NAND flash programmer software botched together using SPI-MEM and SPI-NAND framework taken from Linux v5.8. 6 | 7 | ## Features 8 | 9 | * Reading/Writing SPI NAND 10 | * Operations with on-die ECC enabled/disabled 11 | * Operations with OOB data included or not 12 | * Skip bad blocks during writing 13 | * Data verification for writing when on-die ECC is enabled 14 | 15 | ## Supported devices 16 | 17 | [WCH CH347](https://www.wch.cn/products/CH347.html) 18 | 19 | The default driver. 20 | 21 | add the arguments to set CH347 SPI clock (KHz), default is 30000: 22 | 23 | ```shell 24 | # set CH347 SPI clock to 15MHz 25 | -a 15000 26 | ``` 27 | 28 | [dword1511/stm32-vserprog](https://github.com/dword1511/stm32-vserprog) 29 | 30 | add the following arguments to select this driver: 31 | 32 | ```shell 33 | -d serprog -a /dev/ttyACM0 34 | ``` 35 | 36 | ## CH347 programmer hardware 37 | 38 | CH347 dual voltage (1.8V/3.3V) SPI/I2C/UART programmer 39 | 40 | **Note1: max speed of SPI clock is 30MHz** 41 | 42 | **Note2: CFG0 resistor is not connected** 43 | 44 | kicad project and gerber file: ch347-hardware/ch347-prog-v1.2-kicad_pro.zip 45 | 46 | ![](ch347-hardware/ch347-prog-v1.2-3dpcb.jpg) 47 | 48 | ![](ch347-hardware/ch347-prog-v1.2-sch.jpg) 49 | 50 | ## Usage 51 | ``` 52 | spi-nand-prog [file name] [arguments] 53 | 54 | Operations: read/write/erase/scan 55 | Arguments: 56 | -d : hardware driver to be used. 57 | -a : additional argument provided to current driver. 58 | -o : Flash offset. Should be aligned to page boundary when reading and block boundary when writing. default: 0 59 | -l : read length. default: flash_size 60 | --no-ecc: disable on-die ECC. 
This also disables data verification when writing. 61 | --with-oob: include OOB data during operation. 62 | ``` 63 | -------------------------------------------------------------------------------- /spi-nand/gsto.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2023 Rockchip Electronics Co., Ltd 4 | * 5 | * Authors: 6 | * Dingqiang Lin 7 | */ 8 | 9 | #include 10 | 11 | #define SPINAND_MFR_GSTO 0x52 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | static SPINAND_OP_VARIANTS(update_cache_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 28 | 29 | static const struct spinand_info gsto_spinand_table[] = { 30 | SPINAND_INFO("GSS01GSAK1", 31 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBA, 0x13), 32 | NAND_MEMORG(1, 2048, 64, 64, 1024, 10, 1, 1, 1), 33 | NAND_ECCREQ(4, 512), 34 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 35 | &write_cache_variants, 36 | &update_cache_variants), 37 | 0, 38 | SPINAND_ECCINFO(NULL)), 39 | SPINAND_INFO("GSS02GSAK1", 40 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBA, 0x23), 41 | NAND_MEMORG(1, 2048, 64, 64, 2048, 20, 1, 1, 1), 42 | NAND_ECCREQ(4, 512), 43 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 44 | &write_cache_variants, 45 | &update_cache_variants), 46 | 0, 47 | SPINAND_ECCINFO(NULL)), 48 | }; 49 | 50 | static const struct spinand_manufacturer_ops 
gsto_spinand_manuf_ops = { 51 | }; 52 | 53 | const struct spinand_manufacturer gsto_spinand_manufacturer = { 54 | .id = SPINAND_MFR_GSTO, 55 | .name = "GSTO", 56 | .chips = gsto_spinand_table, 57 | .nchips = ARRAY_SIZE(gsto_spinand_table), 58 | .ops = &gsto_spinand_manuf_ops, 59 | }; 60 | -------------------------------------------------------------------------------- /spi-nand/unim.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2021 Rockchip Electronics Co., Ltd 4 | * 5 | * Authors: 6 | * Dingqiang Lin 7 | */ 8 | 9 | #include 10 | 11 | #define SPINAND_MFR_UNIM 0xA1 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | static SPINAND_OP_VARIANTS(update_cache_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 28 | /* 29 | * ecc bits: 0xC0[4,6] 30 | * [0b000], No bit errors were detected; 31 | * [0b001, 0b011], 1~3 Bit errors were detected and corrected. Not 32 | * reach Flipping Bits; 33 | * [0b100], Bit error count equals the bit flip 34 | * detection threshold 35 | * others, Reserved. 
36 | */ 37 | static int tx25g01_ecc_get_status(struct spinand_device *spinand, 38 | u8 status) 39 | { 40 | u8 eccsr = (status & GENMASK(6, 4)) >> 2; 41 | 42 | if (eccsr <= 7) 43 | return eccsr; 44 | else if (eccsr == 12) 45 | return 8; 46 | else 47 | return -EBADMSG; 48 | } 49 | 50 | static const struct spinand_info unim_spinand_table[] = { 51 | SPINAND_INFO("TX25G01", 52 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xF1), 53 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 54 | NAND_ECCREQ(4, 512), 55 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 56 | &write_cache_variants, 57 | &update_cache_variants), 58 | SPINAND_HAS_QE_BIT, 59 | SPINAND_ECCINFO(tx25g01_ecc_get_status)), 60 | }; 61 | 62 | static const struct spinand_manufacturer_ops unim_spinand_manuf_ops = { 63 | }; 64 | 65 | const struct spinand_manufacturer unim_spinand_manufacturer = { 66 | .id = SPINAND_MFR_UNIM, 67 | .name = "UNIM", 68 | .chips = unim_spinand_table, 69 | .nchips = ARRAY_SIZE(unim_spinand_table), 70 | .ops = &unim_spinand_manuf_ops, 71 | }; 72 | -------------------------------------------------------------------------------- /spi-nand/jsc.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2021 Rockchip Electronics Co., Ltd 4 | * 5 | * Authors: 6 | * Dingqiang Lin 7 | */ 8 | 9 | #include 10 | 11 | #define SPINAND_MFR_JSC 0xBF 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | 
static SPINAND_OP_VARIANTS(update_cache_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 28 | 29 | /* 30 | * ecc bits: 0xC0[4,6] 31 | * [0b000], No bit errors were detected; 32 | * [0b001, 0b011], 1~3 Bit errors were detected and corrected. Not 33 | * reach Flipping Bits; 34 | * [0b100], Bit error count equals the bit flip 35 | * detection threshold 36 | * others, Reserved. 37 | */ 38 | static int js28u1gqscahg_ecc_get_status(struct spinand_device *spinand, 39 | u8 status) 40 | { 41 | u8 eccsr = (status & GENMASK(6, 4)) >> 2; 42 | 43 | if (eccsr <= 7) 44 | return eccsr; 45 | else if (eccsr == 12) 46 | return 8; 47 | else 48 | return -EBADMSG; 49 | } 50 | 51 | static const struct spinand_info jsc_spinand_table[] = { 52 | SPINAND_INFO("JS28U1GQSCAHG-83", 53 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x21), 54 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 55 | NAND_ECCREQ(4, 512), 56 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 57 | &write_cache_variants, 58 | &update_cache_variants), 59 | SPINAND_HAS_QE_BIT, 60 | SPINAND_ECCINFO(js28u1gqscahg_ecc_get_status)), 61 | }; 62 | 63 | static const struct spinand_manufacturer_ops jsc_spinand_manuf_ops = { 64 | }; 65 | 66 | const struct spinand_manufacturer jsc_spinand_manufacturer = { 67 | .id = SPINAND_MFR_JSC, 68 | .name = "JSC", 69 | .chips = jsc_spinand_table, 70 | .nchips = ARRAY_SIZE(jsc_spinand_table), 71 | .ops = &jsc_spinand_manuf_ops, 72 | }; 73 | -------------------------------------------------------------------------------- /spi-nand/biwin.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: (GPL-2.0+ OR MIT) 2 | /* 3 | * Copyright (c) 2021 Rockchip Electronics Co., Ltd. 
4 | */ 5 | 6 | #include 7 | 8 | #define SPINAND_MFR_BIWIN 0xBC 9 | 10 | #define BIWIN_CFG_BUF_READ BIT(3) 11 | #define BIWIN_STATUS_ECC_HAS_BITFLIPS_T (3 << 4) 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | static SPINAND_OP_VARIANTS(update_cache_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 28 | 29 | static int bwjx08k_ecc_get_status(struct spinand_device *spinand, 30 | u8 status) 31 | { 32 | struct nand_device *nand = spinand_to_nand(spinand); 33 | 34 | switch (status & STATUS_ECC_MASK) { 35 | case STATUS_ECC_NO_BITFLIPS: 36 | return 0; 37 | 38 | case STATUS_ECC_UNCOR_ERROR: 39 | return -EBADMSG; 40 | 41 | case STATUS_ECC_HAS_BITFLIPS: 42 | return 1; 43 | 44 | default: 45 | return nand->eccreq.strength; 46 | } 47 | 48 | return -EINVAL; 49 | } 50 | 51 | /* Another set for the same id[2] devices in one series */ 52 | static const struct spinand_info biwin_spinand_table[] = { 53 | SPINAND_INFO("BWJX08K", 54 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB3), 55 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 56 | NAND_ECCREQ(8, 512), 57 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 58 | &write_cache_variants, 59 | &update_cache_variants), 60 | SPINAND_HAS_QE_BIT, 61 | SPINAND_ECCINFO(bwjx08k_ecc_get_status)), 62 | }; 63 | 64 | static const struct spinand_manufacturer_ops biwin_spinand_manuf_ops = { 65 | }; 66 | 67 | const struct spinand_manufacturer biwin_spinand_manufacturer = { 68 | .id = 
SPINAND_MFR_BIWIN, 69 | .name = "BIWIN", 70 | .chips = biwin_spinand_table, 71 | .nchips = ARRAY_SIZE(biwin_spinand_table), 72 | .ops = &biwin_spinand_manuf_ops, 73 | }; 74 | -------------------------------------------------------------------------------- /spi-nand/fmsh.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2020-2021 Rockchip Electronics Co., Ltd 4 | * 5 | * Authors: 6 | * Dingqiang Lin 7 | */ 8 | 9 | #include 10 | 11 | #define SPINAND_MFR_FMSH 0xA1 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | static SPINAND_OP_VARIANTS(update_cache_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 28 | 29 | static const struct spinand_info fmsh_spinand_table[] = { 30 | SPINAND_INFO("FM25S01A", 31 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4), 32 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 33 | NAND_ECCREQ(1, 512), 34 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 35 | &write_cache_variants, 36 | &update_cache_variants), 37 | 0, 38 | SPINAND_ECCINFO(NULL)), 39 | SPINAND_INFO("FM25S02A", 40 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE5), 41 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), 42 | NAND_ECCREQ(1, 512), 43 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 44 | &write_cache_variants, 45 | &update_cache_variants), 46 | 1, 47 | SPINAND_ECCINFO(NULL)), 48 | 
SPINAND_INFO("FM25S01", 49 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA1), 50 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 51 | NAND_ECCREQ(1, 512), 52 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 53 | &write_cache_variants, 54 | &update_cache_variants), 55 | 0, 56 | SPINAND_ECCINFO(NULL)), 57 | SPINAND_INFO("FM25LS01", 58 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA5), 59 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 60 | NAND_ECCREQ(1, 512), 61 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 62 | &write_cache_variants, 63 | &update_cache_variants), 64 | 0, 65 | SPINAND_ECCINFO(NULL)), 66 | }; 67 | 68 | static const struct spinand_manufacturer_ops fmsh_spinand_manuf_ops = { 69 | }; 70 | 71 | const struct spinand_manufacturer fmsh_spinand_manufacturer = { 72 | .id = SPINAND_MFR_FMSH, 73 | .name = "FMSH", 74 | .chips = fmsh_spinand_table, 75 | .nchips = ARRAY_SIZE(fmsh_spinand_table), 76 | .ops = &fmsh_spinand_manuf_ops, 77 | }; 78 | -------------------------------------------------------------------------------- /spi-nand/paragon.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (C) 2019 Jeff Kletsky 4 | * 5 | * Author: Jeff Kletsky 6 | */ 7 | 8 | #include 9 | 10 | 11 | #define SPINAND_MFR_PARAGON 0xa1 12 | 13 | 14 | #define PN26G0XA_STATUS_ECC_BITMASK (3 << 4) 15 | 16 | #define PN26G0XA_STATUS_ECC_NONE_DETECTED (0 << 4) 17 | #define PN26G0XA_STATUS_ECC_1_7_CORRECTED (1 << 4) 18 | #define PN26G0XA_STATUS_ECC_ERRORED (2 << 4) 19 | #define PN26G0XA_STATUS_ECC_8_CORRECTED (3 << 4) 20 | 21 | 22 | static SPINAND_OP_VARIANTS(read_cache_variants, 23 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 24 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 25 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 26 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 27 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 28 | 
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 29 | 30 | static SPINAND_OP_VARIANTS(write_cache_variants, 31 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 32 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 33 | 34 | static SPINAND_OP_VARIANTS(update_cache_variants, 35 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 36 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 37 | 38 | static int pn26g0xa_ecc_get_status(struct spinand_device *spinand, 39 | u8 status) 40 | { 41 | switch (status & PN26G0XA_STATUS_ECC_BITMASK) { 42 | case PN26G0XA_STATUS_ECC_NONE_DETECTED: 43 | return 0; 44 | 45 | case PN26G0XA_STATUS_ECC_1_7_CORRECTED: 46 | return 7; /* Return upper limit by convention */ 47 | 48 | case PN26G0XA_STATUS_ECC_8_CORRECTED: 49 | return 8; 50 | 51 | case PN26G0XA_STATUS_ECC_ERRORED: 52 | return -EBADMSG; 53 | 54 | default: 55 | break; 56 | } 57 | 58 | return -EINVAL; 59 | } 60 | 61 | static const struct spinand_info paragon_spinand_table[] = { 62 | SPINAND_INFO("PN26G01A", 63 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe1), 64 | NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1), 65 | NAND_ECCREQ(8, 512), 66 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 67 | &write_cache_variants, 68 | &update_cache_variants), 69 | 0, 70 | SPINAND_ECCINFO(pn26g0xa_ecc_get_status)), 71 | SPINAND_INFO("PN26G02A", 72 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe2), 73 | NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1), 74 | NAND_ECCREQ(8, 512), 75 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 76 | &write_cache_variants, 77 | &update_cache_variants), 78 | 0, 79 | SPINAND_ECCINFO(pn26g0xa_ecc_get_status)), 80 | }; 81 | 82 | static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = { 83 | }; 84 | 85 | const struct spinand_manufacturer paragon_spinand_manufacturer = { 86 | .id = SPINAND_MFR_PARAGON, 87 | .name = "Paragon", 88 | .chips = paragon_spinand_table, 89 | .nchips = ARRAY_SIZE(paragon_spinand_table), 90 | .ops = ¶gon_spinand_manuf_ops, 91 | }; 92 | 
-------------------------------------------------------------------------------- /main.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | static int no_ecc = 0; 9 | static int with_oob = 0; 10 | static int erase_rest = 0; 11 | static size_t offs = 0; 12 | static size_t length = 0; 13 | static const char *drv = "ch347"; 14 | static const char *drvarg = NULL; 15 | static const struct option long_opts[] = { 16 | { "no-ecc", no_argument, &no_ecc, 1 }, 17 | { "with-oob", no_argument, &with_oob, 1 }, 18 | { "erase-rest", no_argument, &erase_rest, 1 }, 19 | { "offset", required_argument, NULL, 'o' }, 20 | { "length", required_argument, NULL, 'l' }, 21 | { "driver", required_argument, NULL, 'd' }, 22 | { "driver-arg", required_argument, NULL, 'a' }, 23 | { 0, 0, NULL, 0 }, 24 | }; 25 | 26 | int main(int argc, char *argv[]) 27 | { 28 | int ret = 0; 29 | const char *fpath = NULL; 30 | FILE *fp = NULL; 31 | int opt; 32 | int long_optind = 0; 33 | int left_argc; 34 | struct spinand_device *snand; 35 | struct spi_mem *mem; 36 | 37 | while ((opt = getopt_long(argc, argv, "o:l:d:a:", long_opts, 38 | &long_optind)) >= 0) { 39 | switch (opt) { 40 | case 'o': 41 | offs = strtoul(optarg, NULL, 0); 42 | break; 43 | case 'l': 44 | length = strtoul(optarg, NULL, 0); 45 | break; 46 | case 'd': 47 | drv = optarg; 48 | break; 49 | case 'a': 50 | drvarg = optarg; 51 | break; 52 | case '?': 53 | puts("???"); 54 | return -1; 55 | default: 56 | break; 57 | } 58 | } 59 | 60 | left_argc = argc - optind; 61 | if (left_argc < 1) { 62 | puts("missing action."); 63 | return -1; 64 | } 65 | 66 | //reuse opt here. It's now actual action. 
67 | opt = argv[optind][0]; 68 | 69 | switch (opt) { 70 | case 'r': 71 | case 'w': 72 | if (left_argc < 2) { 73 | puts("missing filename."); 74 | return -1; 75 | } 76 | fpath = argv[optind + 1]; 77 | break; 78 | case 'e': 79 | case 's': 80 | break; 81 | default: 82 | puts("unknown operation."); 83 | return -1; 84 | } 85 | 86 | mem = spi_mem_probe(drv, drvarg); 87 | if (!mem) { 88 | fprintf(stderr, "device not found.\n"); 89 | return -1; 90 | } 91 | 92 | snand = spinand_probe(mem); 93 | if (!snand) { 94 | fprintf(stderr, "unknown SPI NAND.\n"); 95 | goto CLEANUP1; 96 | } 97 | if (fpath) { 98 | fp = fopen(fpath, opt == 'r' ? "wb" : "rb"); 99 | if (!fp) { 100 | perror("failed to open file"); 101 | goto CLEANUP2; 102 | } 103 | } 104 | switch (opt) { 105 | case 'r': 106 | snand_read(snand, offs, length, !no_ecc, with_oob, fp); 107 | break; 108 | case 'w': 109 | snand_write(snand, offs, !no_ecc, with_oob, erase_rest, fp, 0, 110 | 0, 0, 0); 111 | break; 112 | case 'e': 113 | snand_write(snand, offs, false, false, true, NULL, 0, 0, 0, 0); 114 | break; 115 | case 's': 116 | snand_scan_bbm(snand); 117 | break; 118 | } 119 | if (fp) 120 | fclose(fp); 121 | 122 | CLEANUP2: 123 | spinand_remove(snand); 124 | CLEANUP1: 125 | spi_mem_remove(drv, mem); 126 | return ret; 127 | } 128 | -------------------------------------------------------------------------------- /spi-nand/skyhigh.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: (GPL-2.0+ OR MIT) 2 | /* 3 | * Copyright (c) 2021 Rockchip Electronics Co., Ltd. 
4 | */ 5 | 6 | #include 7 | 8 | #define SPINAND_MFR_SKYHIGH 0x01 9 | 10 | #define SKYHIGH_STATUS_ECC_1_2_BITFLIPS (1 << 4) 11 | #define SKYHIGH_STATUS_ECC_3_4_BITFLIPS (2 << 4) 12 | #define SKYHIGH_STATUS_ECC_UNCOR_ERROR (3 << 4) 13 | 14 | static SPINAND_OP_VARIANTS(read_cache_variants, 15 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 20 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 21 | 22 | static SPINAND_OP_VARIANTS(write_cache_variants, 23 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 24 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 25 | 26 | static SPINAND_OP_VARIANTS(update_cache_variants, 27 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 28 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 29 | 30 | static int s35ml0xg3_ecc_get_status(struct spinand_device *spinand, 31 | u8 status) 32 | { 33 | struct nand_device *nand = spinand_to_nand(spinand); 34 | 35 | switch (status & STATUS_ECC_MASK) { 36 | case STATUS_ECC_NO_BITFLIPS: 37 | return 0; 38 | 39 | case SKYHIGH_STATUS_ECC_UNCOR_ERROR: 40 | return -EBADMSG; 41 | 42 | case SKYHIGH_STATUS_ECC_1_2_BITFLIPS: 43 | return 2; 44 | 45 | default: 46 | return nand->eccreq.strength; 47 | } 48 | 49 | return -EINVAL; 50 | } 51 | 52 | static const struct spinand_info skyhigh_spinand_table[] = { 53 | SPINAND_INFO("S35ML01G3", 54 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15), 55 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 2, 1, 1), 56 | NAND_ECCREQ(4, 512), 57 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 58 | &write_cache_variants, 59 | &update_cache_variants), 60 | SPINAND_HAS_QE_BIT, 61 | SPINAND_ECCINFO(s35ml0xg3_ecc_get_status)), 62 | SPINAND_INFO("S35ML02G3", 63 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25), 64 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), 65 | 
NAND_ECCREQ(4, 512), 66 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 67 | &write_cache_variants, 68 | &update_cache_variants), 69 | SPINAND_HAS_QE_BIT, 70 | SPINAND_ECCINFO(s35ml0xg3_ecc_get_status)), 71 | SPINAND_INFO("S35ML04G3", 72 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35), 73 | NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 2, 1, 1), 74 | NAND_ECCREQ(4, 512), 75 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 76 | &write_cache_variants, 77 | &update_cache_variants), 78 | SPINAND_HAS_QE_BIT, 79 | SPINAND_ECCINFO(s35ml0xg3_ecc_get_status)), 80 | }; 81 | 82 | static const struct spinand_manufacturer_ops skyhigh_spinand_manuf_ops = { 83 | }; 84 | 85 | const struct spinand_manufacturer skyhigh_spinand_manufacturer = { 86 | .id = SPINAND_MFR_SKYHIGH, 87 | .name = "skyhigh", 88 | .chips = skyhigh_spinand_table, 89 | .nchips = ARRAY_SIZE(skyhigh_spinand_table), 90 | .ops = &skyhigh_spinand_manuf_ops, 91 | }; 92 | -------------------------------------------------------------------------------- /spi-mem/ch347/spi-mem.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "ch347.h" 8 | 9 | static int ch347_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) { 10 | size_t left_data = CH347_SPI_MAX_TRX - 1 - op->addr.nbytes - op->dummy.nbytes; 11 | if (op->data.nbytes > left_data) 12 | op->data.nbytes = left_data; 13 | return 0; 14 | } 15 | 16 | static int ch347_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { 17 | struct ch347_priv *priv = mem->drvpriv; 18 | uint8_t buf[16]; 19 | int p; 20 | int i, ret; 21 | 22 | buf[0] = op->cmd.opcode; 23 | 24 | if (op->addr.nbytes > 4) 25 | return -EINVAL; 26 | if (op->addr.nbytes) { 27 | uint32_t tmp = op->addr.val; 28 | for (i = op->addr.nbytes; i; i--) { 29 | buf[i] = tmp & 0xff; 30 | tmp >>= 8; 31 | } 32 | } 33 | 34 | p = op->addr.nbytes + 1; 35 | 36 | for (i = 0; i < 
op->dummy.nbytes; i++) 37 | buf[p++] = 0; 38 | 39 | if (sizeof(buf) - p >= op->data.nbytes) { 40 | ch347_set_cs(priv, 0, 0, 1); 41 | uint8_t *data_ptr = buf + p; 42 | if (op->data.dir == SPI_MEM_DATA_OUT && op->data.nbytes) { 43 | const uint8_t *ptr = op->data.buf.out; 44 | for (i = 0; i < op->data.nbytes; i++) 45 | buf[p++] = ptr[i]; 46 | } else if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) { 47 | for (i = 0; i < op->data.nbytes; i++) 48 | buf[p++] = 0; 49 | } 50 | ret = ch347_spi_trx_full_duplex(priv, buf, p); 51 | if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) { 52 | uint8_t *ptr = op->data.buf.in; 53 | for (i = 0; i < op->data.nbytes; i++) 54 | ptr[i] = data_ptr[i]; 55 | } 56 | } else { 57 | ch347_set_cs(priv, 0, 0, 0); 58 | ret = ch347_spi_tx(priv, buf, p); 59 | if (ret) 60 | return ret; 61 | if (op->data.dir == SPI_MEM_DATA_OUT && op->data.nbytes) 62 | ret = ch347_spi_tx(priv, op->data.buf.out, op->data.nbytes); 63 | else if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) 64 | ret = ch347_spi_rx(priv, op->data.buf.in, op->data.nbytes); 65 | ch347_set_cs(priv, 0, 1, 0); 66 | } 67 | 68 | 69 | return ret; 70 | } 71 | 72 | static const struct spi_controller_mem_ops ch347_mem_ops = { 73 | .adjust_op_size = ch347_adjust_op_size, 74 | .exec_op = ch347_mem_exec_op, 75 | }; 76 | 77 | static struct spi_mem ch347_mem = { 78 | .ops = &ch347_mem_ops, 79 | .spi_mode = 0, 80 | .name = "ch347", 81 | .drvpriv = NULL, 82 | }; 83 | 84 | struct spi_mem *ch347_probe(const char* arg) { 85 | struct ch347_priv *priv; 86 | int ret; 87 | int freq = 30000; 88 | priv = ch347_open(); 89 | if (!priv) 90 | return NULL; 91 | ret = ch347_setup_spi(priv, 3, false, false, false); 92 | if (ret) 93 | return false; 94 | if (arg) 95 | freq = strtoul(arg, NULL, 0); 96 | ch347_mem.drvpriv = priv; 97 | ret = ch347_set_spi_freq(priv, &freq); 98 | printf("ch347: spi freq = %d khz\n", freq); 99 | return ret ? 
NULL : &ch347_mem; 100 | } 101 | 102 | void ch347_remove(struct spi_mem *mem) { 103 | ch347_close((struct ch347_priv *) mem->drvpriv); 104 | } -------------------------------------------------------------------------------- /spi-nand/esmt.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Author: 4 | * Chuanhong Guo - the main driver logic 5 | * Martin Kurbanov - OOB layout 6 | */ 7 | 8 | #include 9 | 10 | /* ESMT uses GigaDevice 0xc8 JECDEC ID on some SPI NANDs */ 11 | #define SPINAND_MFR_ESMT_C8 0xc8 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 18 | 19 | static SPINAND_OP_VARIANTS(write_cache_variants, 20 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 21 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 22 | 23 | static SPINAND_OP_VARIANTS(update_cache_variants, 24 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 25 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 26 | 27 | /* 28 | * OOB spare area map (64 bytes) 29 | * 30 | * Bad Block Markers 31 | * filled by HW and kernel Reserved 32 | * | +-----------------------+-----------------------+ 33 | * | | | | 34 | * | | OOB free data Area |non ECC protected | 35 | * | +-------------|-----+-----------------|-----+-----------------|-----+ 36 | * | | | | | | | | 37 | * +-|---|----------+--|-----|--------------+--|-----|--------------+--|-----|--------------+ 38 | * | | | section0 | | | section1 | | | section2 | | | section3 | 39 | * +-v-+-v-+---+----+--v--+--v--+-----+-----+--v--+--v--+-----+-----+--v--+--v--+-----+-----+ 40 | * | | | | | | | | | | | | | | | | | 41 | * |0:1|2:3|4:7|8:15|16:17|18:19|20:23|24:31|32:33|34:35|36:39|40:47|48:49|50:51|52:55|56:63| 42 | * | | | | | | | | | | | | | | | | | 43 | * 
+---+---+-^-+--^-+-----+-----+--^--+--^--+-----+-----+--^--+--^--+-----+-----+--^--+--^--+ 44 | * | | | | | | | | 45 | * | +----------------|-----+-----------------|-----+-----------------|-----+ 46 | * | ECC Area|(Main + Spare) - filled|by ESMT NAND HW | 47 | * | | | | 48 | * +---------------------+-----------------------+-----------------------+ 49 | * OOB ECC protected Area - not used due to 50 | * partial programming from some filesystems 51 | * (like JFFS2 with cleanmarkers) 52 | */ 53 | 54 | static const struct spinand_info esmt_c8_spinand_table[] = { 55 | SPINAND_INFO("F50L1G41LB", 56 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01), 57 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 58 | NAND_ECCREQ(1, 512), 59 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 60 | &write_cache_variants, 61 | &update_cache_variants), 62 | 0, 63 | SPINAND_ECCINFO(NULL)), 64 | SPINAND_INFO("F50D1G41LB", 65 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11), 66 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 67 | NAND_ECCREQ(1, 512), 68 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 69 | &write_cache_variants, 70 | &update_cache_variants), 71 | 0, 72 | SPINAND_ECCINFO(NULL)), 73 | }; 74 | 75 | static const struct spinand_manufacturer_ops esmt_spinand_manuf_ops = { 76 | }; 77 | 78 | const struct spinand_manufacturer esmt_c8_spinand_manufacturer = { 79 | .id = SPINAND_MFR_ESMT_C8, 80 | .name = "ESMT", 81 | .chips = esmt_c8_spinand_table, 82 | .nchips = ARRAY_SIZE(esmt_c8_spinand_table), 83 | .ops = &esmt_spinand_manuf_ops, 84 | }; 85 | -------------------------------------------------------------------------------- /spi-mem/ch347/ch347.h: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-1-Clause 2 | /* 3 | * Copyright (C) 2022 Chuanhong Guo 4 | * 5 | * CH347 SPI library using libusb. Protocol reverse-engineered from WCH linux library. 
6 | * FIXME: Every numbers used in the USB protocol should be little-endian. 7 | */ 8 | 9 | #ifndef CH347_H 10 | #define CH347_H 11 | 12 | #ifdef __cplusplus 13 | extern "C" { 14 | #endif 15 | 16 | #include 17 | #include 18 | #include 19 | 20 | #define CH347_SPI_VID 0x1a86 21 | #define CH347_SPI_PID 0x55db 22 | #define CH347_SPI_IF 2 23 | #define CH347_EPOUT (6 | LIBUSB_ENDPOINT_OUT) 24 | #define CH347_EPIN (6 | LIBUSB_ENDPOINT_IN) 25 | 26 | #define CH347_SPI_MAX_FREQ 60000 27 | #define CH347_SPI_MAX_PRESCALER 7 28 | #define CH347_SPI_MAX_TRX 4096 29 | #define CH341_MAX_BULK_SIZE 510 30 | 31 | /* SPI_data_direction */ 32 | #define SPI_Direction_2Lines_FullDuplex 0x0000 33 | #define SPI_Direction_2Lines_RxOnly 0x0400 34 | #define SPI_Direction_1Line_Rx 0x8000 35 | #define SPI_Direction_1Line_Tx 0xC000 36 | 37 | /* SPI_mode */ 38 | #define SPI_Mode_Master 0x0104 39 | #define SPI_Mode_Slave 0x0000 40 | 41 | /* SPI_data_size */ 42 | #define SPI_DataSize_16b 0x0800 43 | #define SPI_DataSize_8b 0x0000 44 | 45 | /* SPI_Clock_Polarity */ 46 | #define SPI_CPOL_Low 0x0000 47 | #define SPI_CPOL_High 0x0002 48 | 49 | /* SPI_Clock_Phase */ 50 | #define SPI_CPHA_1Edge 0x0000 51 | #define SPI_CPHA_2Edge 0x0001 52 | 53 | /* SPI_Slave_Select_management */ 54 | #define SPI_NSS_Software 0x0200 55 | #define SPI_NSS_Hardware 0x0000 56 | 57 | /* SPI_MSB_LSB_transmission */ 58 | #define SPI_FirstBit_MSB 0x0000 59 | #define SPI_FirstBit_LSB 0x0080 60 | 61 | /* CH347 commands */ 62 | #define CH347_CMD_SPI_INIT 0xC0 63 | #define CH347_CMD_SPI_CONTROL 0xC1 64 | #define CH347_CMD_SPI_RD_WR 0xC2 65 | #define CH347_CMD_SPI_BLCK_RD 0xC3 66 | #define CH347_CMD_SPI_BLCK_WR 0xC4 67 | #define CH347_CMD_INFO_RD 0xCA 68 | 69 | struct ch347_spi_hw_config { 70 | uint16_t SPI_Direction; 71 | uint16_t SPI_Mode; 72 | uint16_t SPI_DataSize; 73 | uint16_t SPI_CPOL; 74 | uint16_t SPI_CPHA; 75 | uint16_t SPI_NSS; /* hardware or software managed CS */ 76 | uint16_t SPI_BaudRatePrescaler; /* prescaler = x * 8. 
x: 0=60MHz, 1=30MHz, 2=15MHz, 3=7.5MHz, 4=3.75MHz, 5=1.875MHz, 6=937.5KHz,7=468.75KHz */ 77 | uint16_t SPI_FirstBit; /* MSB or LSB first */ 78 | uint16_t SPI_CRCPolynomial; /* polynomial used for the CRC calculation. */ 79 | uint16_t SPI_WriteReadInterval; /* No idea what this is... Original comment from WCH: SPI接口常规读取写入数据命令(DEF_CMD_SPI_RD_WR)),单位为uS */ 80 | uint8_t SPI_OutDefaultData; /* Data to output on MOSI during SPI reading */ 81 | /* 82 | * Miscellaneous settings: 83 | * Bit 7: CS0 polarity 84 | * Bit 6: CS1 polarity 85 | * Bit 5: Enable I2C clock stretching 86 | * Bit 4: NACK on last I2C reading 87 | * Bit 3-0: reserved 88 | */ 89 | uint8_t OtherCfg; 90 | 91 | uint8_t Reserved[4]; 92 | }; 93 | 94 | struct ch347_priv { 95 | struct ch347_spi_hw_config cfg; 96 | libusb_context *ctx; 97 | libusb_device_handle *handle; 98 | uint8_t tmpbuf[CH341_MAX_BULK_SIZE]; 99 | }; 100 | 101 | struct ch347_priv *ch347_open(); 102 | 103 | void ch347_close(struct ch347_priv *priv); 104 | 105 | int ch347_commit_settings(struct ch347_priv *priv); 106 | 107 | int ch347_set_cs(struct ch347_priv *priv, int cs, int val, uint16_t autodeactive_us); 108 | 109 | int ch347_set_spi_freq(struct ch347_priv *priv, int *clk_khz); 110 | 111 | int ch347_setup_spi(struct ch347_priv *priv, int spi_mode, bool lsb_first, bool cs0_active_high, bool cs1_active_high); 112 | 113 | int ch347_spi_trx_full_duplex(struct ch347_priv *priv, void *buf, uint32_t len); 114 | 115 | int ch347_spi_tx(struct ch347_priv *priv, const void *tx, uint32_t len); 116 | 117 | int ch347_spi_rx(struct ch347_priv *priv, void *rx, uint32_t len); 118 | 119 | #ifdef __cplusplus 120 | } 121 | #endif 122 | 123 | #endif //CH347_H 124 | -------------------------------------------------------------------------------- /spi-nand/foresee.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2020 Grandstream Networks, Inc 4 | * 5 | * Authors: 
6 | * Carl 7 | */ 8 | 9 | #include 10 | 11 | #define SPINAND_MFR_FORESEE 0xCD 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | static SPINAND_OP_VARIANTS(update_cache_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 28 | 29 | static const struct spinand_info foresee_spinand_table[] = { 30 | SPINAND_INFO("FS35ND01G-S1Y2", 31 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEA), 32 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 33 | NAND_ECCREQ(4, 512), 34 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 35 | &write_cache_variants, 36 | &update_cache_variants), 37 | 0, 38 | SPINAND_ECCINFO(NULL)), 39 | SPINAND_INFO("FS35ND02G-S3Y2", 40 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEB), 41 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 42 | NAND_ECCREQ(4, 512), 43 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 44 | &write_cache_variants, 45 | &update_cache_variants), 46 | 0, 47 | SPINAND_ECCINFO(NULL)), 48 | SPINAND_INFO("FS35ND04G-S2Y2", 49 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEC), 50 | NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1), 51 | NAND_ECCREQ(4, 512), 52 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 53 | &write_cache_variants, 54 | &update_cache_variants), 55 | 0, 56 | SPINAND_ECCINFO(NULL)), 57 | SPINAND_INFO("F35SQA001G", 58 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71), 59 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 60 | NAND_ECCREQ(1, 512), 61 | 
SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 62 | &write_cache_variants, 63 | &update_cache_variants), 64 | SPINAND_HAS_QE_BIT, 65 | SPINAND_ECCINFO(NULL)), 66 | SPINAND_INFO("F35SQA002G", 67 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x72), 68 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 69 | NAND_ECCREQ(1, 512), 70 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 71 | &write_cache_variants, 72 | &update_cache_variants), 73 | SPINAND_HAS_QE_BIT, 74 | SPINAND_ECCINFO(NULL)), 75 | SPINAND_INFO("F35SQA512M", 76 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x70), 77 | NAND_MEMORG(1, 2048, 64, 64, 512, 20, 1, 1, 1), 78 | NAND_ECCREQ(1, 512), 79 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 80 | &write_cache_variants, 81 | &update_cache_variants), 82 | SPINAND_HAS_QE_BIT, 83 | SPINAND_ECCINFO(NULL)), 84 | SPINAND_INFO("F35UQA512M", 85 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x60), 86 | NAND_MEMORG(1, 2048, 64, 64, 512, 20, 1, 1, 1), 87 | NAND_ECCREQ(1, 512), 88 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 89 | &write_cache_variants, 90 | &update_cache_variants), 91 | SPINAND_HAS_QE_BIT, 92 | SPINAND_ECCINFO(NULL)), 93 | }; 94 | 95 | static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = { 96 | }; 97 | 98 | const struct spinand_manufacturer foresee_spinand_manufacturer = { 99 | .id = SPINAND_MFR_FORESEE, 100 | .name = "foresee", 101 | .chips = foresee_spinand_table, 102 | .nchips = ARRAY_SIZE(foresee_spinand_table), 103 | .ops = &foresee_spinand_manuf_ops, 104 | }; 105 | -------------------------------------------------------------------------------- /spi-nand/hyf.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2020-2021 Rockchip Electronics Co., Ltd 4 | * 5 | * Authors: 6 | * Dingqiang Lin 7 | */ 8 | 9 | #include 10 | 11 | #define SPINAND_MFR_HYF 0xC9 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | 
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | static SPINAND_OP_VARIANTS(update_cache_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 28 | 29 | static int hyf1gq4udacae_ecc_get_status(struct spinand_device *spinand, 30 | u8 status) 31 | { 32 | struct nand_device *nand = spinand_to_nand(spinand); 33 | 34 | switch (status & STATUS_ECC_MASK) { 35 | case STATUS_ECC_NO_BITFLIPS: 36 | return 0; 37 | 38 | case STATUS_ECC_UNCOR_ERROR: 39 | return -EBADMSG; 40 | 41 | case STATUS_ECC_HAS_BITFLIPS: 42 | return 1; 43 | 44 | default: 45 | return nand->eccreq.strength; 46 | } 47 | 48 | return -EINVAL; 49 | } 50 | 51 | static const struct spinand_info hyf_spinand_table[] = { 52 | SPINAND_INFO("HYF1GQ4UPACAE", 53 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xA1), 54 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 55 | NAND_ECCREQ(1, 512), 56 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 57 | &write_cache_variants, 58 | &update_cache_variants), 59 | SPINAND_HAS_QE_BIT, 60 | SPINAND_ECCINFO(NULL)), 61 | SPINAND_INFO("HYF1GQ4UDACAE", 62 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x21), 63 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 64 | NAND_ECCREQ(4, 512), 65 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 66 | &write_cache_variants, 67 | &update_cache_variants), 68 | SPINAND_HAS_QE_BIT, 69 | SPINAND_ECCINFO(hyf1gq4udacae_ecc_get_status)), 70 | SPINAND_INFO("HYF1GQ4UDACAE", 71 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x22), 72 | NAND_MEMORG(1, 2048, 
64, 64, 2048, 40, 1, 1, 1), 73 | NAND_ECCREQ(4, 512), 74 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 75 | &write_cache_variants, 76 | &update_cache_variants), 77 | SPINAND_HAS_QE_BIT, 78 | SPINAND_ECCINFO(hyf1gq4udacae_ecc_get_status)), 79 | SPINAND_INFO("HYF2GQ4UAACAE", 80 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x52), 81 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 82 | NAND_ECCREQ(14, 512), 83 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 84 | &write_cache_variants, 85 | &update_cache_variants), 86 | SPINAND_HAS_QE_BIT, 87 | SPINAND_ECCINFO(hyf1gq4udacae_ecc_get_status)), 88 | SPINAND_INFO("HYF2GQ4UHCCAE", 89 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x5A), 90 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 91 | NAND_ECCREQ(14, 512), 92 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 93 | &write_cache_variants, 94 | &update_cache_variants), 95 | SPINAND_HAS_QE_BIT, 96 | SPINAND_ECCINFO(hyf1gq4udacae_ecc_get_status)), 97 | SPINAND_INFO("HYF4GQ4UAACBE", 98 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xD4), 99 | NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1), 100 | NAND_ECCREQ(4, 512), 101 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 102 | &write_cache_variants, 103 | &update_cache_variants), 104 | SPINAND_HAS_QE_BIT, 105 | SPINAND_ECCINFO(hyf1gq4udacae_ecc_get_status)), 106 | SPINAND_INFO("HYF2GQ4IAACAE", 107 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x82), 108 | NAND_MEMORG(1, 2048, 128, 64, 2048, 20, 1, 1, 1), 109 | NAND_ECCREQ(14, 512), 110 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 111 | &write_cache_variants, 112 | &update_cache_variants), 113 | SPINAND_HAS_QE_BIT, 114 | SPINAND_ECCINFO(hyf1gq4udacae_ecc_get_status)), 115 | SPINAND_INFO("HYF1GQ4IDACAE", 116 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x81), 117 | NAND_MEMORG(1, 2048, 64, 64, 1024, 10, 1, 1, 1), 118 | NAND_ECCREQ(4, 512), 119 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 120 | &write_cache_variants, 121 | &update_cache_variants), 
122 | SPINAND_HAS_QE_BIT, 123 | SPINAND_ECCINFO(hyf1gq4udacae_ecc_get_status)), 124 | }; 125 | 126 | static const struct spinand_manufacturer_ops hyf_spinand_manuf_ops = { 127 | }; 128 | 129 | const struct spinand_manufacturer hyf_spinand_manufacturer = { 130 | .id = SPINAND_MFR_HYF, 131 | .name = "hyf", 132 | .chips = hyf_spinand_table, 133 | .nchips = ARRAY_SIZE(hyf_spinand_table), 134 | .ops = &hyf_spinand_manuf_ops, 135 | }; 136 | -------------------------------------------------------------------------------- /spi-nand/etron.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2020 Etron Technology, Inc. 4 | * 5 | */ 6 | 7 | #include 8 | 9 | #define SPINAND_MFR_ETRON 0xD5 10 | 11 | #define STATUS_ECC_LIMIT_BITFLIPS (3 << 4) 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | static SPINAND_OP_VARIANTS(update_cache_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 28 | 29 | static int etron_ecc_get_status(struct spinand_device *spinand, 30 | u8 status) 31 | { 32 | struct nand_device *nand = spinand_to_nand(spinand); 33 | 34 | switch (status & STATUS_ECC_MASK) { 35 | case STATUS_ECC_NO_BITFLIPS: 36 | return 0; 37 | 38 | case STATUS_ECC_UNCOR_ERROR: 39 | return -EBADMSG; 40 | 41 | case STATUS_ECC_HAS_BITFLIPS: 42 | return nand->eccreq.strength >> 1; 43 | 44 | case STATUS_ECC_LIMIT_BITFLIPS: 45 | return 
nand->eccreq.strength; 46 | 47 | default: 48 | break; 49 | } 50 | 51 | return -EINVAL; 52 | } 53 | 54 | static const struct spinand_info etron_spinand_table[] = { 55 | /* EM73C 1Gb 3.3V */ 56 | SPINAND_INFO("EM73C044VCF", 57 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x25), 58 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 59 | NAND_ECCREQ(4, 512), 60 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 61 | &write_cache_variants, 62 | &update_cache_variants), 63 | SPINAND_HAS_QE_BIT, 64 | SPINAND_ECCINFO(etron_ecc_get_status)), 65 | /* EM7xD 2Gb */ 66 | SPINAND_INFO("EM73D044VCR", 67 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x41), 68 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 69 | NAND_ECCREQ(4, 512), 70 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 71 | &write_cache_variants, 72 | &update_cache_variants), 73 | SPINAND_HAS_QE_BIT, 74 | SPINAND_ECCINFO(etron_ecc_get_status)), 75 | SPINAND_INFO("EM73D044VCO", 76 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x3A), 77 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 78 | NAND_ECCREQ(8, 512), 79 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 80 | &write_cache_variants, 81 | &update_cache_variants), 82 | SPINAND_HAS_QE_BIT, 83 | SPINAND_ECCINFO(etron_ecc_get_status)), 84 | SPINAND_INFO("EM78D044VCM", 85 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x8E), 86 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 87 | NAND_ECCREQ(8, 512), 88 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 89 | &write_cache_variants, 90 | &update_cache_variants), 91 | SPINAND_HAS_QE_BIT, 92 | SPINAND_ECCINFO(etron_ecc_get_status)), 93 | /* EM7xE 4Gb */ 94 | SPINAND_INFO("EM73E044VCE", 95 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x3B), 96 | NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1), 97 | NAND_ECCREQ(8, 512), 98 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 99 | &write_cache_variants, 100 | &update_cache_variants), 101 | SPINAND_HAS_QE_BIT, 102 | SPINAND_ECCINFO(etron_ecc_get_status)), 103 | 
SPINAND_INFO("EM78E044VCD", 104 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x8F), 105 | NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1), 106 | NAND_ECCREQ(8, 512), 107 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 108 | &write_cache_variants, 109 | &update_cache_variants), 110 | SPINAND_HAS_QE_BIT, 111 | SPINAND_ECCINFO(etron_ecc_get_status)), 112 | /* EM7xF044VCA 8Gb */ 113 | SPINAND_INFO("EM73F044VCA", 114 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15), 115 | NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1), 116 | NAND_ECCREQ(8, 512), 117 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 118 | &write_cache_variants, 119 | &update_cache_variants), 120 | SPINAND_HAS_QE_BIT, 121 | SPINAND_ECCINFO(etron_ecc_get_status)), 122 | SPINAND_INFO("EM78F044VCA", 123 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x8D), 124 | NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1), 125 | NAND_ECCREQ(8, 512), 126 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 127 | &write_cache_variants, 128 | &update_cache_variants), 129 | SPINAND_HAS_QE_BIT, 130 | SPINAND_ECCINFO(etron_ecc_get_status)), 131 | }; 132 | 133 | static const struct spinand_manufacturer_ops etron_spinand_manuf_ops = { 134 | }; 135 | 136 | const struct spinand_manufacturer etron_spinand_manufacturer = { 137 | .id = SPINAND_MFR_ETRON, 138 | .name = "Etron", 139 | .chips = etron_spinand_table, 140 | .nchips = ARRAY_SIZE(etron_spinand_table), 141 | .ops = &etron_spinand_manuf_ops, 142 | }; 143 | -------------------------------------------------------------------------------- /spi-mem/spi-mem-fx2qspi.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #define FX2_BUF_SIZE 512 9 | #define FX2_VID 0x1209 10 | #define FX2_PID 0x0001 11 | #define FX2_EPOUT (2 | LIBUSB_ENDPOINT_OUT) 12 | #define FX2_EPIN (6 | LIBUSB_ENDPOINT_IN) 13 | #define FX2_MAX_TRANSFER 0xfc0000 14 | 15 | #define FX2QSPI_CS 
0x80 16 | #define FX2QSPI_QUAD 0x40 17 | #define FX2QSPI_DUAL 0x20 18 | #define FX2QSPI_READ 0x10 19 | 20 | static u8 fx2_op_buffer[FX2_BUF_SIZE]; 21 | typedef struct { 22 | libusb_context *ctx; 23 | libusb_device_handle *handle; 24 | } fx2qspi_priv; 25 | 26 | static fx2qspi_priv _priv; 27 | 28 | static int fx2qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) 29 | { 30 | if (op->data.nbytes > FX2_MAX_TRANSFER) 31 | op->data.nbytes = FX2_MAX_TRANSFER; 32 | return 0; 33 | } 34 | 35 | static void fx2qspi_fill_op(u8 buswidth, bool is_read, u16 len, size_t *ptr) 36 | { 37 | 38 | if (buswidth == 4) 39 | fx2_op_buffer[*ptr] = FX2QSPI_CS | FX2QSPI_QUAD; 40 | else if (buswidth == 2) 41 | fx2_op_buffer[*ptr] = FX2QSPI_CS | FX2QSPI_DUAL; 42 | else 43 | fx2_op_buffer[*ptr] = FX2QSPI_CS; 44 | if (is_read) 45 | fx2_op_buffer[*ptr] |= FX2QSPI_READ; 46 | fx2_op_buffer[(*ptr)++] |= ((len >> 8) & 0xff); 47 | fx2_op_buffer[(*ptr)++] = len & 0xff; 48 | } 49 | 50 | static int fx2qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 51 | { 52 | size_t ptr = 0; 53 | int i, llen, alen, ret; 54 | fx2qspi_priv *priv = spi_mem_get_drvdata(mem); 55 | fx2qspi_fill_op(op->cmd.buswidth, false, 1, &ptr); 56 | fx2_op_buffer[ptr++] = op->cmd.opcode; 57 | if (op->addr.nbytes) { 58 | fx2qspi_fill_op(op->addr.buswidth, false, op->addr.nbytes, 59 | &ptr); 60 | for (i = op->addr.nbytes - 1; i >= 0; i--) 61 | fx2_op_buffer[ptr++] = (op->addr.val >> (i * 8)) & 0xff; 62 | } 63 | if (op->dummy.nbytes) { 64 | fx2qspi_fill_op(op->dummy.buswidth, false, op->dummy.nbytes, 65 | &ptr); 66 | for (i = 0; i < op->dummy.nbytes; i++) 67 | fx2_op_buffer[ptr++] = 0; 68 | } 69 | if (op->data.nbytes) { 70 | fx2qspi_fill_op(op->data.buswidth, 71 | op->data.dir == SPI_MEM_DATA_IN, 72 | op->data.nbytes, &ptr); 73 | } 74 | 75 | ret = libusb_bulk_transfer(priv->handle, FX2_EPOUT, fx2_op_buffer, ptr, 76 | &alen, 10); 77 | if (ret) 78 | return ret; 79 | 80 | if (op->data.nbytes) { 81 | if (op->data.dir == 
SPI_MEM_DATA_OUT) { 82 | ret = libusb_bulk_transfer(priv->handle, FX2_EPOUT, 83 | (unsigned char *)op->data.buf.out, 84 | op->data.nbytes, &alen, 20); 85 | if (ret) 86 | return ret; 87 | } else if (op->data.dir == SPI_MEM_DATA_IN) { 88 | llen = op->data.nbytes; 89 | ptr = 0; 90 | while (llen) { 91 | if (llen >= FX2_BUF_SIZE) 92 | ret = libusb_bulk_transfer( 93 | priv->handle, FX2_EPIN, 94 | op->data.buf.in + ptr, 95 | FX2_BUF_SIZE, &alen, 20); 96 | else 97 | ret = libusb_bulk_transfer( 98 | priv->handle, FX2_EPIN, 99 | fx2_op_buffer, FX2_BUF_SIZE, 100 | &alen, 20); 101 | if (ret) 102 | return ret; 103 | if (llen < FX2_BUF_SIZE) 104 | memcpy(op->data.buf.in + ptr, 105 | fx2_op_buffer, alen); 106 | ptr += alen; 107 | llen -= alen; 108 | } 109 | } 110 | } 111 | 112 | fx2_op_buffer[0] = 0; 113 | return libusb_bulk_transfer(priv->handle, FX2_EPOUT, fx2_op_buffer, 1, 114 | &alen, 20) ? 115 | -ETIMEDOUT : 116 | 0; 117 | } 118 | 119 | static const struct spi_controller_mem_ops _fx2qspi_mem_ops = { 120 | .adjust_op_size = fx2qspi_adjust_op_size, 121 | .exec_op = fx2qspi_exec_op, 122 | }; 123 | 124 | static struct spi_mem _fx2qspi_mem = { 125 | .ops = &_fx2qspi_mem_ops, 126 | .spi_mode = SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD, 127 | .name = "fx2qspi", 128 | .drvpriv = &_priv, 129 | }; 130 | 131 | static int fx2qspi_reset(fx2qspi_priv *priv) 132 | { 133 | int i, actual_len, ret; 134 | memset(fx2_op_buffer, 0, sizeof(fx2_op_buffer)); 135 | // write 4096 bytes of 0 136 | for (i = 0; i < 4; i++) { 137 | ret = libusb_bulk_transfer(priv->handle, FX2_EPOUT, 138 | fx2_op_buffer, FX2_BUF_SIZE, 139 | &actual_len, 5); 140 | if (ret) 141 | return ret; 142 | } 143 | // tell fx2 to send garbage data back 144 | fx2_op_buffer[0] = 0x60; 145 | fx2_op_buffer[1] = 0x60; 146 | ret = libusb_bulk_transfer(priv->handle, FX2_EPOUT, fx2_op_buffer, 3, 147 | &actual_len, 1); 148 | if (ret) 149 | return ret; 150 | return libusb_bulk_transfer(priv->handle, FX2_EPIN, fx2_op_buffer, 151 | 
FX2_BUF_SIZE, &actual_len, 1); 152 | } 153 | 154 | struct spi_mem *fx2qspi_probe() 155 | { 156 | int ret; 157 | fx2qspi_priv *priv = &_priv; 158 | 159 | ret = libusb_init(&priv->ctx); 160 | if (ret < 0) { 161 | perror("libusb: init"); 162 | return NULL; 163 | } 164 | 165 | libusb_set_option(priv->ctx, LIBUSB_OPTION_LOG_LEVEL, 166 | LIBUSB_LOG_LEVEL_INFO); 167 | priv->handle = 168 | libusb_open_device_with_vid_pid(priv->ctx, FX2_VID, FX2_PID); 169 | if (!priv->handle) { 170 | perror("libusb: open"); 171 | goto ERR_1; 172 | } 173 | 174 | libusb_set_auto_detach_kernel_driver(priv->handle, 1); 175 | 176 | ret = libusb_claim_interface(priv->handle, 0); 177 | if (ret < 0) { 178 | perror("libusb: claim_if"); 179 | goto ERR_2; 180 | } 181 | 182 | if (fx2qspi_reset(priv)) 183 | goto ERR_3; 184 | 185 | return &_fx2qspi_mem; 186 | ERR_3: 187 | libusb_release_interface(priv->handle, 0); 188 | ERR_2: 189 | libusb_close(priv->handle); 190 | ERR_1: 191 | libusb_exit(priv->ctx); 192 | return NULL; 193 | } 194 | 195 | void fx2qspi_remove(struct spi_mem *mem) 196 | { 197 | fx2qspi_priv *priv = spi_mem_get_drvdata(mem); 198 | libusb_release_interface(priv->handle, 0); 199 | libusb_close(priv->handle); 200 | libusb_exit(priv->ctx); 201 | } 202 | -------------------------------------------------------------------------------- /spi-nand/dosilicon.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2020 Rockchip Electronics Co., Ltd 4 | */ 5 | 6 | #include 7 | 8 | #define SPINAND_MFR_DOSILICON 0xE5 9 | 10 | #define DOSICON_STATUS_ECC_MASK GENMASK(7, 4) 11 | #define DOSICON_STATUS_ECC_NO_BITFLIPS (0 << 4) 12 | #define DOSICON_STATUS_ECC_1TO3_BITFLIPS (1 << 4) 13 | #define DOSICON_STATUS_ECC_4TO6_BITFLIPS (3 << 4) 14 | #define DOSICON_STATUS_ECC_7TO8_BITFLIPS (5 << 4) 15 | 16 | static SPINAND_OP_VARIANTS(read_cache_variants, 17 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 18 | 
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 20 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 21 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 22 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 23 | 24 | static SPINAND_OP_VARIANTS(write_cache_variants, 25 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 26 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 27 | 28 | static SPINAND_OP_VARIANTS(update_cache_variants, 29 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 30 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 31 | 32 | static int ds35xxgb_ecc_get_status(struct spinand_device *spinand, 33 | u8 status) 34 | { 35 | switch (status & DOSICON_STATUS_ECC_MASK) { 36 | case STATUS_ECC_NO_BITFLIPS: 37 | return 0; 38 | 39 | case STATUS_ECC_UNCOR_ERROR: 40 | return -EBADMSG; 41 | 42 | case DOSICON_STATUS_ECC_1TO3_BITFLIPS: 43 | return 3; 44 | 45 | case DOSICON_STATUS_ECC_4TO6_BITFLIPS: 46 | return 6; 47 | 48 | case DOSICON_STATUS_ECC_7TO8_BITFLIPS: 49 | return 8; 50 | 51 | default: 52 | break; 53 | } 54 | 55 | return -EINVAL; 56 | } 57 | 58 | static const struct spinand_info dosilicon_spinand_table[] = { 59 | SPINAND_INFO("DS35X1GA", 60 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71), 61 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 62 | NAND_ECCREQ(4, 512), 63 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 64 | &write_cache_variants, 65 | &update_cache_variants), 66 | SPINAND_HAS_QE_BIT, 67 | SPINAND_ECCINFO(NULL)), 68 | SPINAND_INFO("DS35Q2GA", 69 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x72), 70 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), 71 | NAND_ECCREQ(4, 512), 72 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 73 | &write_cache_variants, 74 | &update_cache_variants), 75 | SPINAND_HAS_QE_BIT, 76 | SPINAND_ECCINFO(NULL)), 77 | SPINAND_INFO("DS35M1GA", 78 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x21), 79 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 80 | 
NAND_ECCREQ(4, 512), 81 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 82 | &write_cache_variants, 83 | &update_cache_variants), 84 | SPINAND_HAS_QE_BIT, 85 | SPINAND_ECCINFO(NULL)), 86 | SPINAND_INFO("DS35M2GA", 87 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22), 88 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), 89 | NAND_ECCREQ(4, 512), 90 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 91 | &write_cache_variants, 92 | &update_cache_variants), 93 | SPINAND_HAS_QE_BIT, 94 | SPINAND_ECCINFO(NULL)), 95 | SPINAND_INFO("DS35Q2GB", 96 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xF2), 97 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), 98 | NAND_ECCREQ(8, 512), 99 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 100 | &write_cache_variants, 101 | &update_cache_variants), 102 | SPINAND_HAS_QE_BIT, 103 | SPINAND_ECCINFO(ds35xxgb_ecc_get_status)), 104 | SPINAND_INFO("DS35M1GB", 105 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA1), 106 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 107 | NAND_ECCREQ(8, 512), 108 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 109 | &write_cache_variants, 110 | &update_cache_variants), 111 | SPINAND_HAS_QE_BIT, 112 | SPINAND_ECCINFO(ds35xxgb_ecc_get_status)), 113 | SPINAND_INFO("DS35Q1GB", 114 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xF1), 115 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 116 | NAND_ECCREQ(8, 512), 117 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 118 | &write_cache_variants, 119 | &update_cache_variants), 120 | SPINAND_HAS_QE_BIT, 121 | SPINAND_ECCINFO(ds35xxgb_ecc_get_status)), 122 | SPINAND_INFO("DS35Q4GM", 123 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xF4), 124 | NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 2, 1, 1), 125 | NAND_ECCREQ(8, 512), 126 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 127 | &write_cache_variants, 128 | &update_cache_variants), 129 | SPINAND_HAS_QE_BIT, 130 | SPINAND_ECCINFO(ds35xxgb_ecc_get_status)), 131 | SPINAND_INFO("DS35Q12B", 132 | 
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xF5), 133 | NAND_MEMORG(1, 2048, 128, 64, 512, 10, 1, 1, 1), 134 | NAND_ECCREQ(8, 512), 135 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 136 | &write_cache_variants, 137 | &update_cache_variants), 138 | SPINAND_HAS_QE_BIT, 139 | SPINAND_ECCINFO(ds35xxgb_ecc_get_status)), 140 | SPINAND_INFO("DS35M12B", 141 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA5), 142 | NAND_MEMORG(1, 2048, 128, 64, 512, 10, 1, 1, 1), 143 | NAND_ECCREQ(8, 512), 144 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 145 | &write_cache_variants, 146 | &update_cache_variants), 147 | SPINAND_HAS_QE_BIT, 148 | SPINAND_ECCINFO(ds35xxgb_ecc_get_status)), 149 | }; 150 | 151 | static const struct spinand_manufacturer_ops dosilicon_spinand_manuf_ops = { 152 | }; 153 | 154 | const struct spinand_manufacturer dosilicon_spinand_manufacturer = { 155 | .id = SPINAND_MFR_DOSILICON, 156 | .name = "dosilicon", 157 | .chips = dosilicon_spinand_table, 158 | .nchips = ARRAY_SIZE(dosilicon_spinand_table), 159 | .ops = &dosilicon_spinand_manuf_ops, 160 | }; 161 | -------------------------------------------------------------------------------- /spi-nand/xtx.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2020 Rockchip Electronics Co., Ltd 4 | * 5 | * Authors: 6 | * Dingqiang Lin 7 | */ 8 | 9 | #include 10 | 11 | #define SPINAND_MFR_XTX 0x0B 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 18 | 19 | static SPINAND_OP_VARIANTS(write_cache_variants, 20 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 21 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 22 | 23 | static SPINAND_OP_VARIANTS(update_cache_variants, 24 | 
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 25 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 26 | /* 27 | * ecc bits: 0xC0[2,5] 28 | * [0x0000], No bit errors were detected; 29 | * [0x0001, 0x0111], Bit errors were detected and corrected. Not 30 | * reach Flipping Bits; 31 | * [0x1000], Multiple bit errors were detected and 32 | * not corrected. 33 | * [0x1100], Bit error count equals the bit flip 34 | * detectionthreshold 35 | * else, reserved 36 | */ 37 | static int xt26g0xa_ecc_get_status(struct spinand_device *spinand, 38 | u8 status) 39 | { 40 | u8 eccsr = (status & GENMASK(5, 2)) >> 2; 41 | 42 | if (eccsr <= 7) 43 | return eccsr; 44 | else if (eccsr == 12) 45 | return 8; 46 | else 47 | return -EBADMSG; 48 | } 49 | 50 | /* 51 | * ecc bits: 0xC0[4,6] 52 | * [0x0], No bit errors were detected; 53 | * [0x001, 0x011], Bit errors were detected and corrected. Not 54 | * reach Flipping Bits; 55 | * [0x100], Bit error count equals the bit flip 56 | * detectionthreshold 57 | * [0x101, 0x110], Reserved; 58 | * [0x111], Multiple bit errors were detected and 59 | * not corrected. 60 | */ 61 | static int xt26g02b_ecc_get_status(struct spinand_device *spinand, 62 | u8 status) 63 | { 64 | u8 eccsr = (status & GENMASK(6, 4)) >> 4; 65 | 66 | if (eccsr <= 4) 67 | return eccsr; 68 | else 69 | return -EBADMSG; 70 | } 71 | 72 | /* 73 | * ecc bits: 0xC0[4,7] 74 | * [0b0000], No bit errors were detected; 75 | * [0b0001, 0b0111], 1-7 Bit errors were detected and corrected. Not 76 | * reach Flipping Bits; 77 | * [0b1000], 8 Bit errors were detected and corrected. Bit error count 78 | * equals the bit flip detectionthreshold; 79 | * [0b1111], Bit errors greater than ECC capability(8 bits) and not corrected; 80 | * others, Reserved. 
81 | */ 82 | static int xt26g01c_ecc_get_status(struct spinand_device *spinand, 83 | u8 status) 84 | { 85 | u8 eccsr = (status & GENMASK(7, 4)) >> 4; 86 | 87 | if (eccsr <= 8) 88 | return eccsr; 89 | else 90 | return -EBADMSG; 91 | } 92 | 93 | static const struct spinand_info xtx_spinand_table[] = { 94 | SPINAND_INFO("XT26G01A", 95 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE1), 96 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 97 | NAND_ECCREQ(8, 512), 98 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 99 | &write_cache_variants, 100 | &update_cache_variants), 101 | SPINAND_HAS_QE_BIT, 102 | SPINAND_ECCINFO(xt26g0xa_ecc_get_status)), 103 | SPINAND_INFO("XT26G02A", 104 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE2), 105 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 106 | NAND_ECCREQ(8, 512), 107 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 108 | &write_cache_variants, 109 | &update_cache_variants), 110 | SPINAND_HAS_QE_BIT, 111 | SPINAND_ECCINFO(xt26g0xa_ecc_get_status)), 112 | SPINAND_INFO("XT26G04A", 113 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE3), 114 | NAND_MEMORG(1, 2048, 64, 128, 2048, 80, 1, 1, 1), 115 | NAND_ECCREQ(8, 512), 116 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 117 | &write_cache_variants, 118 | &update_cache_variants), 119 | SPINAND_HAS_QE_BIT, 120 | SPINAND_ECCINFO(xt26g0xa_ecc_get_status)), 121 | SPINAND_INFO("XT26G01B", 122 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xF1), 123 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 124 | NAND_ECCREQ(8, 512), 125 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 126 | &write_cache_variants, 127 | &update_cache_variants), 128 | SPINAND_HAS_QE_BIT, 129 | SPINAND_ECCINFO(xt26g0xa_ecc_get_status)), 130 | SPINAND_INFO("XT26G02B", 131 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xF2), 132 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 133 | NAND_ECCREQ(4, 512), 134 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 135 | &write_cache_variants, 136 | 
&update_cache_variants), 137 | SPINAND_HAS_QE_BIT, 138 | SPINAND_ECCINFO(xt26g02b_ecc_get_status)), 139 | SPINAND_INFO("XT26G01C", 140 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11), 141 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 142 | NAND_ECCREQ(8, 512), 143 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 144 | &write_cache_variants, 145 | &update_cache_variants), 146 | SPINAND_HAS_QE_BIT, 147 | SPINAND_ECCINFO(xt26g01c_ecc_get_status)), 148 | SPINAND_INFO("XT26G02C", 149 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x12), 150 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 151 | NAND_ECCREQ(8, 512), 152 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 153 | &write_cache_variants, 154 | &update_cache_variants), 155 | SPINAND_HAS_QE_BIT, 156 | SPINAND_ECCINFO(xt26g01c_ecc_get_status)), 157 | SPINAND_INFO("XT26G04C", 158 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x13), 159 | NAND_MEMORG(1, 4096, 256, 64, 2048, 80, 1, 1, 1), 160 | NAND_ECCREQ(8, 512), 161 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 162 | &write_cache_variants, 163 | &update_cache_variants), 164 | SPINAND_HAS_QE_BIT, 165 | SPINAND_ECCINFO(xt26g01c_ecc_get_status)), 166 | SPINAND_INFO("XT26G11C", 167 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x15), 168 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 169 | NAND_ECCREQ(8, 512), 170 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 171 | &write_cache_variants, 172 | &update_cache_variants), 173 | SPINAND_HAS_QE_BIT, 174 | SPINAND_ECCINFO(xt26g01c_ecc_get_status)), 175 | }; 176 | 177 | static const struct spinand_manufacturer_ops xtx_spinand_manuf_ops = { 178 | }; 179 | 180 | const struct spinand_manufacturer xtx_spinand_manufacturer = { 181 | .id = SPINAND_MFR_XTX, 182 | .name = "XTX", 183 | .chips = xtx_spinand_table, 184 | .nchips = ARRAY_SIZE(xtx_spinand_table), 185 | .ops = &xtx_spinand_manuf_ops, 186 | }; 187 | -------------------------------------------------------------------------------- 
/spi-nand/winbond.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2017 exceet electronics GmbH 4 | * 5 | * Authors: 6 | * Frieder Schrempf 7 | * Boris Brezillon 8 | */ 9 | 10 | #include 11 | 12 | #define SPINAND_MFR_WINBOND 0xEF 13 | 14 | #define WINBOND_CFG_BUF_READ BIT(3) 15 | 16 | static SPINAND_OP_VARIANTS(read_cache_variants, 17 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 20 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 21 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 22 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 23 | 24 | static SPINAND_OP_VARIANTS(write_cache_variants, 25 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 26 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 27 | 28 | static SPINAND_OP_VARIANTS(update_cache_variants, 29 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 30 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 31 | 32 | #define W25N02_N04KV_STATUS_ECC_MASK (3 << 4) 33 | #define W25N02_N04KV_STATUS_ECC_NO_BITFLIPS (0 << 4) 34 | #define W25N02_N04KV_STATUS_ECC_1_4_BITFLIPS (1 << 4) 35 | #define W25N02_N04KV_STATUS_ECC_5_8_BITFLIPS (3 << 4) 36 | #define W25N02_N04KV_STATUS_ECC_UNCOR_ERROR (2 << 4) 37 | 38 | #define W25N01_M02GV_STATUS_ECC_MASK (3 << 4) 39 | #define W25N01_M02GV_STATUS_ECC_NO_BITFLIPS (0 << 4) 40 | #define W25N01_M02GV_STATUS_ECC_1_BITFLIPS (1 << 4) 41 | #define W25N01_M02GV_STATUS_ECC_UNCOR_ERROR (2 << 4) 42 | 43 | #define W25N01KV_STATUS_ECC_MASK (3 << 4) 44 | #define W25N01KV_STATUS_ECC_NO_BITFLIPS (0 << 4) 45 | #define W25N01KV_STATUS_ECC_1_3_BITFLIPS (1 << 4) 46 | #define W25N01KV_STATUS_ECC_4_BITFLIPS (3 << 4) 47 | #define W25N01KV_STATUS_ECC_UNCOR_ERROR (2 << 4) 48 | 49 | static int w25m02gv_select_target(struct spinand_device *spinand, 50 | unsigned int target) 51 | { 52 | struct 
spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0xc2, 1), 53 | SPI_MEM_OP_NO_ADDR, 54 | SPI_MEM_OP_NO_DUMMY, 55 | SPI_MEM_OP_DATA_OUT(1, 56 | spinand->scratchbuf, 57 | 1)); 58 | 59 | *spinand->scratchbuf = target; 60 | return spi_mem_exec_op(spinand->spimem, &op); 61 | } 62 | 63 | 64 | static int w25n01kv_ecc_get_status(struct spinand_device *spinand, 65 | u8 status) 66 | { 67 | switch (status & W25N01KV_STATUS_ECC_MASK) { 68 | case W25N01KV_STATUS_ECC_NO_BITFLIPS: 69 | return 0; 70 | 71 | case W25N01KV_STATUS_ECC_1_3_BITFLIPS: 72 | return 3; 73 | 74 | case W25N01KV_STATUS_ECC_4_BITFLIPS: 75 | return 4; 76 | 77 | case W25N01KV_STATUS_ECC_UNCOR_ERROR: 78 | return -EBADMSG; 79 | 80 | default: 81 | break; 82 | } 83 | 84 | return -EINVAL; 85 | } 86 | 87 | static int w25n02kv_n04kv_ecc_get_status(struct spinand_device *spinand, 88 | u8 status) 89 | { 90 | switch (status & W25N02_N04KV_STATUS_ECC_MASK) { 91 | case W25N02_N04KV_STATUS_ECC_NO_BITFLIPS: 92 | return 0; 93 | 94 | case W25N02_N04KV_STATUS_ECC_1_4_BITFLIPS: 95 | return 3; 96 | 97 | case W25N02_N04KV_STATUS_ECC_5_8_BITFLIPS: 98 | return 4; 99 | 100 | /* W25N02_N04KV_use internal 8bit ECC algorithm. 101 | * But the ECC strength is 4 bit requried. 102 | * Return 3 if the bit bit flip count less than 5. 103 | * Return 4 if the bit bit flip count more than 5 to 8. 
104 | */ 105 | 106 | case W25N02_N04KV_STATUS_ECC_UNCOR_ERROR: 107 | return -EBADMSG; 108 | 109 | default: 110 | break; 111 | } 112 | 113 | return -EINVAL; 114 | } 115 | 116 | static const struct spinand_info winbond_spinand_table[] = { 117 | SPINAND_INFO("W25M02GV", 118 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21), 119 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2), 120 | NAND_ECCREQ(1, 512), 121 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 122 | &write_cache_variants, 123 | &update_cache_variants), 124 | 0, 125 | SPINAND_ECCINFO(NULL), 126 | SPINAND_SELECT_TARGET(w25m02gv_select_target)), 127 | SPINAND_INFO("W25N01GV", 128 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x21), 129 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 130 | NAND_ECCREQ(1, 512), 131 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 132 | &write_cache_variants, 133 | &update_cache_variants), 134 | 0, 135 | SPINAND_ECCINFO(NULL)), 136 | SPINAND_INFO("W25N02KV", 137 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22), 138 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), 139 | NAND_ECCREQ(8, 512), 140 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 141 | &write_cache_variants, 142 | &update_cache_variants), 143 | 0, 144 | SPINAND_ECCINFO(w25n02kv_n04kv_ecc_get_status)), 145 | SPINAND_INFO("W25N512GV", 146 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xAA, 0x20), 147 | NAND_MEMORG(1, 2048, 64, 64, 512, 10, 1, 1, 1), 148 | NAND_ECCREQ(1, 512), 149 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 150 | &write_cache_variants, 151 | &update_cache_variants), 152 | 0, 153 | SPINAND_ECCINFO(NULL), 154 | SPINAND_SELECT_TARGET(w25m02gv_select_target)), 155 | SPINAND_INFO("W25N04KV", 156 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xAA, 0x23), 157 | NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 2, 1, 1), 158 | NAND_ECCREQ(8, 512), 159 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 160 | &write_cache_variants, 161 | &update_cache_variants), 162 | 0, 163 
| SPINAND_ECCINFO(w25n02kv_n04kv_ecc_get_status)), 164 | SPINAND_INFO("W25N01GW", 165 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBA, 0x21), 166 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 167 | NAND_ECCREQ(1, 512), 168 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 169 | &write_cache_variants, 170 | &update_cache_variants), 171 | 0, 172 | SPINAND_ECCINFO(NULL), 173 | SPINAND_SELECT_TARGET(w25m02gv_select_target)), 174 | SPINAND_INFO("W25N02KW", 175 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBA, 0x22), 176 | NAND_MEMORG(1, 2048, 128, 64, 2048, 20, 1, 1, 1), 177 | NAND_ECCREQ(8, 512), 178 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 179 | &write_cache_variants, 180 | &update_cache_variants), 181 | 0, 182 | SPINAND_ECCINFO(w25n02kv_n04kv_ecc_get_status)), 183 | SPINAND_INFO("W25N01KV", 184 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xAE, 0x21), 185 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 186 | NAND_ECCREQ(4, 512), 187 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 188 | &write_cache_variants, 189 | &update_cache_variants), 190 | 0, 191 | SPINAND_ECCINFO(w25n01kv_ecc_get_status)), 192 | }; 193 | 194 | static int winbond_spinand_init(struct spinand_device *spinand) 195 | { 196 | struct nand_device *nand = spinand_to_nand(spinand); 197 | unsigned int i; 198 | 199 | /* 200 | * Make sure all dies are in buffer read mode and not continuous read 201 | * mode. 
202 | */ 203 | for (i = 0; i < nand->memorg.ntargets; i++) { 204 | spinand_select_target(spinand, i); 205 | spinand_upd_cfg(spinand, WINBOND_CFG_BUF_READ, 206 | WINBOND_CFG_BUF_READ); 207 | } 208 | 209 | return 0; 210 | } 211 | 212 | static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = { 213 | .init = winbond_spinand_init, 214 | }; 215 | 216 | const struct spinand_manufacturer winbond_spinand_manufacturer = { 217 | .id = SPINAND_MFR_WINBOND, 218 | .name = "Winbond", 219 | .chips = winbond_spinand_table, 220 | .nchips = ARRAY_SIZE(winbond_spinand_table), 221 | .ops = &winbond_spinand_manuf_ops, 222 | }; 223 | -------------------------------------------------------------------------------- /spi-nand/micron.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2016-2017 Micron Technology, Inc. 4 | * 5 | * Authors: 6 | * Peter Pan 7 | */ 8 | 9 | #include 10 | 11 | #define SPINAND_MFR_MICRON 0x2c 12 | 13 | #define MICRON_STATUS_ECC_MASK GENMASK(7, 4) 14 | #define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4) 15 | #define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4) 16 | #define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4) 17 | #define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4) 18 | 19 | #define MICRON_CFG_CR BIT(0) 20 | 21 | /* 22 | * As per datasheet, die selection is done by the 6th bit of Die 23 | * Select Register (Address 0xD0). 
24 | */ 25 | #define MICRON_DIE_SELECT_REG 0xD0 26 | 27 | #define MICRON_SELECT_DIE(x) ((x) << 6) 28 | 29 | static SPINAND_OP_VARIANTS(read_cache_variants, 30 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 31 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 32 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 33 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 34 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 35 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 36 | 37 | static SPINAND_OP_VARIANTS(write_cache_variants, 38 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 39 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 40 | 41 | static SPINAND_OP_VARIANTS(update_cache_variants, 42 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 43 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 44 | 45 | /* Micron MT29F2G01AAAED Device */ 46 | static SPINAND_OP_VARIANTS(x4_read_cache_variants, 47 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 48 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 49 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 50 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 51 | 52 | static SPINAND_OP_VARIANTS(x1_write_cache_variants, 53 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 54 | 55 | static SPINAND_OP_VARIANTS(x1_update_cache_variants, 56 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 57 | 58 | static int micron_select_target(struct spinand_device *spinand, 59 | unsigned int target) 60 | { 61 | struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG, 62 | spinand->scratchbuf); 63 | 64 | if (target > 1) 65 | return -EINVAL; 66 | 67 | *spinand->scratchbuf = MICRON_SELECT_DIE(target); 68 | 69 | return spi_mem_exec_op(spinand->spimem, &op); 70 | } 71 | 72 | static int micron_8_ecc_get_status(struct spinand_device *spinand, 73 | u8 status) 74 | { 75 | switch (status & MICRON_STATUS_ECC_MASK) { 76 | case STATUS_ECC_NO_BITFLIPS: 77 | return 0; 78 | 79 | case STATUS_ECC_UNCOR_ERROR: 80 | return -EBADMSG; 81 | 82 | 
case MICRON_STATUS_ECC_1TO3_BITFLIPS: 83 | return 3; 84 | 85 | case MICRON_STATUS_ECC_4TO6_BITFLIPS: 86 | return 6; 87 | 88 | case MICRON_STATUS_ECC_7TO8_BITFLIPS: 89 | return 8; 90 | 91 | default: 92 | break; 93 | } 94 | 95 | return -EINVAL; 96 | } 97 | 98 | static const struct spinand_info micron_spinand_table[] = { 99 | /* M79A 2Gb 3.3V */ 100 | SPINAND_INFO("MT29F2G01ABAGD", 101 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24), 102 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), 103 | NAND_ECCREQ(8, 512), 104 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 105 | &write_cache_variants, 106 | &update_cache_variants), 107 | 0, 108 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 109 | /* M79A 2Gb 1.8V */ 110 | SPINAND_INFO("MT29F2G01ABBGD", 111 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25), 112 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), 113 | NAND_ECCREQ(8, 512), 114 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 115 | &write_cache_variants, 116 | &update_cache_variants), 117 | 0, 118 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 119 | /* M78A 1Gb 3.3V */ 120 | SPINAND_INFO("MT29F1G01ABAFD", 121 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14), 122 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 123 | NAND_ECCREQ(8, 512), 124 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 125 | &write_cache_variants, 126 | &update_cache_variants), 127 | 0, 128 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 129 | /* M78A 1Gb 1.8V */ 130 | SPINAND_INFO("MT29F1G01ABAFD", 131 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15), 132 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 133 | NAND_ECCREQ(8, 512), 134 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 135 | &write_cache_variants, 136 | &update_cache_variants), 137 | 0, 138 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 139 | /* M79A 4Gb 3.3V */ 140 | SPINAND_INFO("MT29F4G01ADAGD", 141 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36), 142 | NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 
1, 2), 143 | NAND_ECCREQ(8, 512), 144 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 145 | &write_cache_variants, 146 | &update_cache_variants), 147 | 0, 148 | SPINAND_ECCINFO(micron_8_ecc_get_status), 149 | SPINAND_SELECT_TARGET(micron_select_target)), 150 | /* M70A 4Gb 3.3V */ 151 | SPINAND_INFO("MT29F4G01ABAFD", 152 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34), 153 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 154 | NAND_ECCREQ(8, 512), 155 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 156 | &write_cache_variants, 157 | &update_cache_variants), 158 | SPINAND_HAS_CR_FEAT_BIT, 159 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 160 | /* M70A 4Gb 1.8V */ 161 | SPINAND_INFO("MT29F4G01ABBFD", 162 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35), 163 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 164 | NAND_ECCREQ(8, 512), 165 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 166 | &write_cache_variants, 167 | &update_cache_variants), 168 | SPINAND_HAS_CR_FEAT_BIT, 169 | SPINAND_ECCINFO(micron_8_ecc_get_status)), 170 | /* M70A 8Gb 3.3V */ 171 | SPINAND_INFO("MT29F8G01ADAFD", 172 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46), 173 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2), 174 | NAND_ECCREQ(8, 512), 175 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 176 | &write_cache_variants, 177 | &update_cache_variants), 178 | SPINAND_HAS_CR_FEAT_BIT, 179 | SPINAND_ECCINFO(micron_8_ecc_get_status), 180 | SPINAND_SELECT_TARGET(micron_select_target)), 181 | /* M70A 8Gb 1.8V */ 182 | SPINAND_INFO("MT29F8G01ADBFD", 183 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47), 184 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2), 185 | NAND_ECCREQ(8, 512), 186 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 187 | &write_cache_variants, 188 | &update_cache_variants), 189 | SPINAND_HAS_CR_FEAT_BIT, 190 | SPINAND_ECCINFO(micron_8_ecc_get_status), 191 | SPINAND_SELECT_TARGET(micron_select_target)), 192 | /* M69A 2Gb 3.3V */ 193 | 
SPINAND_INFO("MT29F2G01AAAED", 194 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9F), 195 | NAND_MEMORG(1, 2048, 64, 64, 2048, 80, 2, 1, 1), 196 | NAND_ECCREQ(4, 512), 197 | SPINAND_INFO_OP_VARIANTS(&x4_read_cache_variants, 198 | &x1_write_cache_variants, 199 | &x1_update_cache_variants), 200 | 0, 201 | SPINAND_ECCINFO(NULL)), 202 | }; 203 | 204 | static int micron_spinand_init(struct spinand_device *spinand) 205 | { 206 | /* 207 | * M70A device series enable Continuous Read feature at Power-up, 208 | * which is not supported. Disable this bit to avoid any possible 209 | * failure. 210 | */ 211 | if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT) 212 | return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0); 213 | 214 | return 0; 215 | } 216 | 217 | static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = { 218 | .init = micron_spinand_init, 219 | }; 220 | 221 | const struct spinand_manufacturer micron_spinand_manufacturer = { 222 | .id = SPINAND_MFR_MICRON, 223 | .name = "Micron", 224 | .chips = micron_spinand_table, 225 | .nchips = ARRAY_SIZE(micron_spinand_table), 226 | .ops = µn_spinand_manuf_ops, 227 | }; 228 | -------------------------------------------------------------------------------- /flashops.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | int snand_read(struct spinand_device *snand, size_t offs, size_t len, 9 | bool ecc_enabled, bool read_oob, FILE *fp) 10 | { 11 | struct nand_device *nand = spinand_to_nand(snand); 12 | size_t page_size = nanddev_page_size(nand); 13 | size_t oob_size = nanddev_per_page_oobsize(nand); 14 | size_t fwrite_size; 15 | struct nand_page_io_req io_req; 16 | size_t rdlen = 0; 17 | uint8_t *buf; 18 | int ret; 19 | 20 | if (offs % page_size) { 21 | fprintf(stderr, "Reading should start at page boundary.\n"); 22 | return -EINVAL; 23 | } 24 | 25 | if (!len) 26 | len = nanddev_size(nand) - 
offs; 27 | 28 | buf = malloc(page_size + oob_size); 29 | if (!buf) 30 | return -ENOMEM; 31 | 32 | memset(&io_req, 0, sizeof(io_req)); 33 | io_req.databuf.in = buf; 34 | io_req.datalen = page_size; 35 | io_req.dataoffs = 0; 36 | fwrite_size = page_size; 37 | if (read_oob) { 38 | io_req.oobbuf.in = buf + page_size; 39 | io_req.ooblen = oob_size; 40 | io_req.ooboffs = 0; 41 | fwrite_size += oob_size; 42 | } 43 | nanddev_offs_to_pos(nand, offs, &io_req.pos); 44 | 45 | while (rdlen < len) { 46 | printf("reading offset (%lX block %u page %u)\r", offs + rdlen, 47 | io_req.pos.eraseblock, io_req.pos.page); 48 | ret = spinand_read_page(snand, &io_req, ecc_enabled); 49 | if (ret > 0) { 50 | printf("\necc corrected %d bitflips.\n", ret); 51 | } else if (ret < 0) { 52 | printf("\nreading failed. errno %d\n", ret); 53 | memset(buf, 0, fwrite_size); 54 | } 55 | fwrite(buf, 1, fwrite_size, fp); 56 | rdlen += page_size; 57 | nanddev_pos_next_page(nand, &io_req.pos); 58 | } 59 | printf("\n\ndone.\n"); 60 | free(buf); 61 | return 0; 62 | } 63 | 64 | bool snand_isbad(struct spinand_device *snand, const struct nand_pos *pos, 65 | size_t bbm_offs, size_t bbm_len) 66 | { 67 | struct nand_device *nand = spinand_to_nand(snand); 68 | size_t page_size = nanddev_page_size(nand); 69 | struct nand_page_io_req req; 70 | size_t i; 71 | 72 | u8 marker[8] = {}; 73 | if (bbm_len > 8) { 74 | fprintf(stderr, "bbm too long.\n"); 75 | return true; 76 | } 77 | 78 | if (!bbm_len) { 79 | bbm_offs = page_size; 80 | bbm_len = 2; 81 | } 82 | 83 | memset(&req, 0, sizeof(req)); 84 | req.pos = *pos; 85 | req.pos.page = 0; 86 | if (bbm_offs < page_size) { 87 | req.databuf.in = marker; 88 | req.datalen = bbm_len; 89 | req.dataoffs = bbm_offs; 90 | } else { 91 | req.oobbuf.in = marker; 92 | req.ooblen = bbm_len; 93 | req.ooboffs = bbm_offs - page_size; 94 | } 95 | spinand_read_page(snand, &req, false); 96 | 97 | for (i = 0; i < bbm_len; i++) 98 | if (marker[i] != 0xff) 99 | return true; 100 | return false; 101 | } 
102 | 103 | int snand_markbad(struct spinand_device *snand, const struct nand_pos *pos, 104 | size_t bbm_offs, size_t bbm_len) 105 | { 106 | struct nand_device *nand = spinand_to_nand(snand); 107 | size_t page_size = nanddev_page_size(nand); 108 | struct nand_page_io_req req; 109 | u8 marker[8]; 110 | if (bbm_len > 8) { 111 | fprintf(stderr, "bbm too long.\n"); 112 | return -EINVAL; 113 | } 114 | 115 | if (!bbm_len) { 116 | bbm_offs = page_size; 117 | bbm_len = 2; 118 | } 119 | 120 | memset(&req, 0, sizeof(req)); 121 | memset(marker, 0, sizeof(marker)); 122 | req.pos = *pos; 123 | req.pos.page = 0; 124 | if (bbm_offs < page_size) { 125 | req.databuf.out = marker; 126 | req.datalen = bbm_len; 127 | req.dataoffs = bbm_offs; 128 | } else { 129 | req.oobbuf.out = marker; 130 | req.ooblen = bbm_len; 131 | req.ooboffs = bbm_offs - page_size; 132 | } 133 | 134 | return spinand_write_page(snand, &req, false); 135 | } 136 | 137 | int snand_erase_remark(struct spinand_device *snand, const struct nand_pos *pos, 138 | size_t old_bbm_offs, size_t old_bbm_len, size_t bbm_offs, 139 | size_t bbm_len) 140 | { 141 | int ret; 142 | if (snand_isbad(snand, pos, old_bbm_offs, old_bbm_len)) { 143 | printf("bad block: target %u block %u.\n", pos->target, 144 | pos->eraseblock); 145 | goto BAD_BLOCK; 146 | } 147 | 148 | ret = spinand_erase(snand, pos); 149 | if (ret) { 150 | printf("erase failed: target %u block %u. 
ret: %d\n", 151 | pos->target, pos->eraseblock, ret); 152 | goto BAD_BLOCK; 153 | } 154 | 155 | return 0; 156 | BAD_BLOCK: 157 | snand_markbad(snand, pos, bbm_offs, bbm_len); 158 | return -EIO; 159 | } 160 | 161 | int snand_write(struct spinand_device *snand, size_t offs, bool ecc_enabled, 162 | bool write_oob, bool erase_rest, FILE *fp, size_t old_bbm_offs, 163 | size_t old_bbm_len, size_t bbm_offs, size_t bbm_len) 164 | { 165 | struct nand_device *nand = spinand_to_nand(snand); 166 | size_t page_size = nanddev_page_size(nand); 167 | size_t oob_size = nanddev_per_page_oobsize(nand); 168 | size_t eb_size = nanddev_eraseblock_size(nand); 169 | size_t flash_size = nanddev_size(nand); 170 | size_t fread_len, actual_read_len = 0; 171 | struct nand_page_io_req wr_req, rd_req; 172 | size_t cur_offs = offs, eb_rd_offs = 0; 173 | uint8_t *buf, *rdbuf; 174 | int ret; 175 | 176 | if (offs % eb_size) { 177 | fprintf(stderr, "Writing should start at eb boundary.\n"); 178 | return -EINVAL; 179 | } 180 | 181 | buf = malloc((page_size + oob_size) * 2); 182 | if (!buf) 183 | return -ENOMEM; 184 | 185 | rdbuf = buf + page_size + oob_size; 186 | 187 | memset(&wr_req, 0, sizeof(wr_req)); 188 | wr_req.databuf.out = buf; 189 | wr_req.datalen = page_size; 190 | wr_req.dataoffs = 0; 191 | fread_len = page_size; 192 | if (write_oob) { 193 | wr_req.oobbuf.out = buf + page_size; 194 | wr_req.ooblen = oob_size; 195 | wr_req.ooboffs = 0; 196 | fread_len += oob_size; 197 | } 198 | 199 | if (fp) 200 | actual_read_len = fread_len; // for the EOF check in loop. 
201 | 202 | nanddev_offs_to_pos(nand, offs, &wr_req.pos); 203 | 204 | while (cur_offs < flash_size) { 205 | if (!wr_req.pos.page) { 206 | eb_rd_offs = 0; 207 | printf("erasing %lX (block %u)\r", cur_offs, 208 | wr_req.pos.eraseblock); 209 | ret = snand_erase_remark(snand, &wr_req.pos, 210 | old_bbm_offs, old_bbm_len, 211 | bbm_offs, bbm_len); 212 | if (ret) { 213 | printf("\nskipping current block: %d\n", ret); 214 | cur_offs += eb_size; 215 | nanddev_pos_next_eraseblock(nand, &wr_req.pos); 216 | continue; 217 | } 218 | } 219 | 220 | if (actual_read_len == fread_len) { 221 | actual_read_len = fread(buf, 1, fread_len, fp); 222 | printf("writing %lu bytes to %lX (block %u page %u)\r", 223 | actual_read_len, cur_offs, wr_req.pos.eraseblock, 224 | wr_req.pos.page); 225 | if (actual_read_len < fread_len) 226 | memset(buf + actual_read_len, 0xff, 227 | fread_len - actual_read_len); 228 | 229 | eb_rd_offs += actual_read_len; 230 | 231 | ret = spinand_write_page(snand, &wr_req, ecc_enabled); 232 | if (ret) { 233 | printf("\npage writing failed.\n"); 234 | goto BAD_BLOCK; 235 | } 236 | 237 | if (ecc_enabled && !write_oob) { 238 | rd_req = wr_req; 239 | rd_req.databuf.out = rdbuf; 240 | rd_req.oobbuf.out = rdbuf + page_size; 241 | ret = spinand_read_page(snand, &rd_req, 242 | ecc_enabled); 243 | if (ret > 0) { 244 | printf("\necc corrected %d bitflips.\n", 245 | ret); 246 | } else if (ret < 0) { 247 | printf("\nreading failed. 
errno %d\n", 248 | ret); 249 | goto BAD_BLOCK; 250 | } 251 | if (memcmp(buf, rdbuf, fread_len)) { 252 | printf("\ndata verification failed.\n"); 253 | goto BAD_BLOCK; 254 | } 255 | } 256 | cur_offs += page_size; 257 | nanddev_pos_next_page(nand, &wr_req.pos); 258 | } else if (erase_rest) { 259 | nanddev_pos_next_eraseblock(nand, &wr_req.pos); 260 | cur_offs = nanddev_pos_to_offs(nand, &wr_req.pos); 261 | } else { 262 | break; 263 | } 264 | 265 | continue; 266 | BAD_BLOCK: 267 | snand_markbad(snand, &wr_req.pos, bbm_offs, bbm_len); 268 | fseek(fp, -eb_rd_offs, SEEK_CUR); 269 | nanddev_pos_next_eraseblock(nand, &wr_req.pos); 270 | cur_offs = nanddev_pos_to_offs(nand, &wr_req.pos); 271 | } 272 | printf("\ndone.\n"); 273 | return 0; 274 | } 275 | 276 | void snand_scan_bbm(struct spinand_device *snand) 277 | { 278 | struct nand_device *nand = spinand_to_nand(snand); 279 | size_t eb_size = nanddev_eraseblock_size(nand); 280 | size_t flash_size = nanddev_size(nand); 281 | size_t offs = 0; 282 | struct nand_pos pos; 283 | nanddev_offs_to_pos(nand, 0, &pos); 284 | while (offs < flash_size) { 285 | printf("scaning block %u\r", pos.eraseblock); 286 | if (snand_isbad(snand, &pos, 0, 0)) 287 | printf("\ntarget %u block %u is bad.\n", pos.target, 288 | pos.eraseblock); 289 | nanddev_pos_next_eraseblock(nand, &pos); 290 | offs += eb_size; 291 | } 292 | printf("\ndone.\n"); 293 | } 294 | -------------------------------------------------------------------------------- /spi-nand/toshiba.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2018 exceet electronics GmbH 4 | * Copyright (c) 2018 Kontron Electronics GmbH 5 | * 6 | * Author: Frieder Schrempf 7 | */ 8 | 9 | #include 10 | 11 | /* Kioxia is new name of Toshiba memory. 
*/ 12 | #define SPINAND_MFR_TOSHIBA 0x98 13 | #define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4) 14 | 15 | static SPINAND_OP_VARIANTS(read_cache_variants, 16 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 18 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 19 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 20 | 21 | static SPINAND_OP_VARIANTS(write_cache_x4_variants, 22 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 23 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 24 | 25 | static SPINAND_OP_VARIANTS(update_cache_x4_variants, 26 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 27 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 28 | 29 | /** 30 | * Backward compatibility for 1st generation Serial NAND devices 31 | * which don't support Quad Program Load operation. 32 | */ 33 | static SPINAND_OP_VARIANTS(write_cache_variants, 34 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 35 | 36 | static SPINAND_OP_VARIANTS(update_cache_variants, 37 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 38 | 39 | static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, 40 | u8 status) 41 | { 42 | struct nand_device *nand = spinand_to_nand(spinand); 43 | u8 mbf = 0; 44 | struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf); 45 | 46 | switch (status & STATUS_ECC_MASK) { 47 | case STATUS_ECC_NO_BITFLIPS: 48 | return 0; 49 | 50 | case STATUS_ECC_UNCOR_ERROR: 51 | return -EBADMSG; 52 | 53 | case STATUS_ECC_HAS_BITFLIPS: 54 | case TOSH_STATUS_ECC_HAS_BITFLIPS_T: 55 | /* 56 | * Let's try to retrieve the real maximum number of bitflips 57 | * in order to avoid forcing the wear-leveling layer to move 58 | * data around if it's not necessary. 
59 | */ 60 | if (spi_mem_exec_op(spinand->spimem, &op)) 61 | return nand->eccreq.strength; 62 | 63 | mbf >>= 4; 64 | 65 | if (mbf > nand->eccreq.strength || !mbf) 66 | return nand->eccreq.strength; 67 | 68 | return mbf; 69 | 70 | default: 71 | break; 72 | } 73 | 74 | return -EINVAL; 75 | } 76 | 77 | static const struct spinand_info toshiba_spinand_table[] = { 78 | /* 3.3V 1Gb (1st generation) */ 79 | SPINAND_INFO("TC58CVG0S3HRAIG", 80 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC2), 81 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 82 | NAND_ECCREQ(8, 512), 83 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 84 | &write_cache_variants, 85 | &update_cache_variants), 86 | 0, 87 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 88 | /* 3.3V 2Gb (1st generation) */ 89 | SPINAND_INFO("TC58CVG1S3HRAIG", 90 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCB), 91 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 92 | NAND_ECCREQ(8, 512), 93 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 94 | &write_cache_variants, 95 | &update_cache_variants), 96 | 0, 97 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 98 | /* 3.3V 4Gb (1st generation) */ 99 | SPINAND_INFO("TC58CVG2S0HRAIG", 100 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCD), 101 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 102 | NAND_ECCREQ(8, 512), 103 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 104 | &write_cache_variants, 105 | &update_cache_variants), 106 | 0, 107 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 108 | /* 1.8V 1Gb (1st generation) */ 109 | SPINAND_INFO("TC58CYG0S3HRAIG", 110 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB2), 111 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 112 | NAND_ECCREQ(8, 512), 113 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 114 | &write_cache_variants, 115 | &update_cache_variants), 116 | 0, 117 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 118 | /* 1.8V 2Gb (1st generation) */ 119 | SPINAND_INFO("TC58CYG1S3HRAIG", 
120 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBB), 121 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 122 | NAND_ECCREQ(8, 512), 123 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 124 | &write_cache_variants, 125 | &update_cache_variants), 126 | 0, 127 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 128 | /* 1.8V 4Gb (1st generation) */ 129 | SPINAND_INFO("TC58CYG2S0HRAIG", 130 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBD), 131 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 132 | NAND_ECCREQ(8, 512), 133 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 134 | &write_cache_variants, 135 | &update_cache_variants), 136 | 0, 137 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 138 | 139 | /* 140 | * 2nd generation serial nand has HOLD_D which is equivalent to 141 | * QE_BIT. 142 | */ 143 | /* 3.3V 1Gb (2nd generation) */ 144 | SPINAND_INFO("TC58CVG0S3HRAIJ", 145 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE2), 146 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 147 | NAND_ECCREQ(8, 512), 148 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 149 | &write_cache_x4_variants, 150 | &update_cache_x4_variants), 151 | SPINAND_HAS_QE_BIT, 152 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 153 | /* 3.3V 2Gb (2nd generation) */ 154 | SPINAND_INFO("TC58CVG1S3HRAIJ", 155 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEB), 156 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 157 | NAND_ECCREQ(8, 512), 158 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 159 | &write_cache_x4_variants, 160 | &update_cache_x4_variants), 161 | SPINAND_HAS_QE_BIT, 162 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 163 | /* 3.3V 4Gb (2nd generation) */ 164 | SPINAND_INFO("TC58CVG2S0HRAIJ", 165 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xED), 166 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 167 | NAND_ECCREQ(8, 512), 168 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 169 | &write_cache_x4_variants, 170 | 
&update_cache_x4_variants), 171 | SPINAND_HAS_QE_BIT, 172 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 173 | /* 3.3V 8Gb (2nd generation) */ 174 | SPINAND_INFO("TH58CVG3S0HRAIJ", 175 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4), 176 | NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1), 177 | NAND_ECCREQ(8, 512), 178 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 179 | &write_cache_x4_variants, 180 | &update_cache_x4_variants), 181 | SPINAND_HAS_QE_BIT, 182 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 183 | /* 1.8V 1Gb (2nd generation) */ 184 | SPINAND_INFO("TC58CYG0S3HRAIJ", 185 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD2), 186 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 187 | NAND_ECCREQ(8, 512), 188 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 189 | &write_cache_x4_variants, 190 | &update_cache_x4_variants), 191 | SPINAND_HAS_QE_BIT, 192 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 193 | /* 1.8V 2Gb (2nd generation) */ 194 | SPINAND_INFO("TC58CYG1S3HRAIJ", 195 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDB), 196 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 197 | NAND_ECCREQ(8, 512), 198 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 199 | &write_cache_x4_variants, 200 | &update_cache_x4_variants), 201 | SPINAND_HAS_QE_BIT, 202 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 203 | /* 1.8V 4Gb (2nd generation) */ 204 | SPINAND_INFO("TC58CYG2S0HRAIJ", 205 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDD), 206 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 207 | NAND_ECCREQ(8, 512), 208 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 209 | &write_cache_x4_variants, 210 | &update_cache_x4_variants), 211 | SPINAND_HAS_QE_BIT, 212 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 213 | /* 1.8V 8Gb (2nd generation) */ 214 | SPINAND_INFO("TH58CYG3S0HRAIJ", 215 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD4), 216 | NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1), 217 | NAND_ECCREQ(8, 
512), 218 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 219 | &write_cache_x4_variants, 220 | &update_cache_x4_variants), 221 | SPINAND_HAS_QE_BIT, 222 | SPINAND_ECCINFO(tx58cxgxsxraix_ecc_get_status)), 223 | }; 224 | 225 | static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = { 226 | }; 227 | 228 | const struct spinand_manufacturer toshiba_spinand_manufacturer = { 229 | .id = SPINAND_MFR_TOSHIBA, 230 | .name = "Toshiba", 231 | .chips = toshiba_spinand_table, 232 | .nchips = ARRAY_SIZE(toshiba_spinand_table), 233 | .ops = &toshiba_spinand_manuf_ops, 234 | }; 235 | -------------------------------------------------------------------------------- /spi-mem/ch347/ch347.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-1-Clause 2 | /* 3 | * Copyright (C) 2022 Chuanhong Guo 4 | * 5 | * CH347 SPI library using libusb. Protocol reverse-engineered from WCH linux library. 6 | * FIXME: Every numbers used in the USB protocol should be little-endian. 7 | */ 8 | 9 | #include "ch347.h" 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) 16 | #error You need to convert every USB communications to little endian before this library would work. 
17 | #endif 18 | 19 | int ch347_spi_write_packet(struct ch347_priv *priv, uint8_t cmd, const void *tx, int len) { 20 | uint8_t *ptr; 21 | int err, transferred; 22 | if (len > sizeof(priv->tmpbuf) - 3) 23 | return -EINVAL; 24 | 25 | priv->tmpbuf[0] = cmd; 26 | priv->tmpbuf[1] = len & 0xff; 27 | priv->tmpbuf[2] = len >> 8; 28 | memcpy(priv->tmpbuf + 3, tx, len); 29 | err = libusb_bulk_transfer(priv->handle, CH347_EPOUT, priv->tmpbuf, len + 3, &transferred, 1000); 30 | if (err) { 31 | fprintf(stderr, "ch347: libusb: failed to send packet: %d\n", err); 32 | return err; 33 | } 34 | return 0; 35 | } 36 | 37 | int ch347_spi_read_packet(struct ch347_priv *priv, uint8_t cmd, void *rx, int len, int *actual_len) { 38 | int cur_len, rxlen, rx_received; 39 | int err, transferred; 40 | 41 | err = libusb_bulk_transfer(priv->handle, CH347_EPIN, priv->tmpbuf, sizeof(priv->tmpbuf), &transferred, 1000); 42 | if (err) { 43 | fprintf(stderr, "ch347: libusb: failed to receive packet: %d\n", err); 44 | return err; 45 | } 46 | 47 | if (priv->tmpbuf[0] != cmd) { 48 | fprintf(stderr, "ch347: unexpected packet cmd: expecting 0x%02x but we got 0x%02x.\n", cmd, priv->tmpbuf[0]); 49 | return -EINVAL; 50 | } 51 | 52 | rxlen = priv->tmpbuf[1] | priv->tmpbuf[2] << 8; 53 | if (rxlen > len) { 54 | fprintf(stderr, "ch347: packet too big.\n"); 55 | return -EINVAL; 56 | } 57 | 58 | cur_len = transferred - 3; 59 | if (rxlen < cur_len) 60 | cur_len = rxlen; 61 | memcpy(rx, priv->tmpbuf + 3, cur_len); 62 | rx_received = cur_len; 63 | while (rx_received < rxlen) { 64 | /* The leftover data length is known so we don't need to deal with packet overflow using tmpbuf. 
*/ 65 | err = libusb_bulk_transfer(priv->handle, CH347_EPIN, rx + rx_received, rxlen - rx_received, &transferred, 1000); 66 | if (err) { 67 | fprintf(stderr, "ch347: libusb: failed to receive packet: %d\n", err); 68 | return err; 69 | } 70 | rx_received += transferred; 71 | } 72 | 73 | *actual_len = rx_received; 74 | return 0; 75 | } 76 | 77 | int ch347_get_hw_config(struct ch347_priv *priv) { 78 | int err, transferred; 79 | uint8_t unknown_data = 0x01; 80 | 81 | err = ch347_spi_write_packet(priv, CH347_CMD_INFO_RD, &unknown_data, 1); 82 | if (err) 83 | return err; 84 | 85 | err = ch347_spi_read_packet(priv, CH347_CMD_INFO_RD, &priv->cfg, sizeof(priv->cfg), &transferred); 86 | if (err) 87 | return err; 88 | 89 | if (transferred != sizeof(priv->cfg)) { 90 | fprintf(stderr, "ch347: config returned isn't long enough.\n"); 91 | return -EINVAL; 92 | } 93 | 94 | return 0; 95 | } 96 | 97 | int ch347_commit_settings(struct ch347_priv *priv) { 98 | int err, transferred; 99 | uint8_t unknown_data; 100 | err = ch347_spi_write_packet(priv, CH347_CMD_SPI_INIT, &priv->cfg, sizeof(priv->cfg)); 101 | if (err) 102 | return err; 103 | 104 | return ch347_spi_read_packet(priv, CH347_CMD_SPI_INIT, &unknown_data, 1, &transferred); 105 | } 106 | 107 | int ch347_set_cs(struct ch347_priv *priv, int cs, int val, uint16_t autodeactive_us) { 108 | uint8_t buf[10] = {}; 109 | uint8_t *entry = cs ? buf + 5 : buf; 110 | 111 | entry[0] = val ? 
0xc0 : 0x80; 112 | if(autodeactive_us) { 113 | entry[0] |= 0x20; 114 | entry[3] = autodeactive_us & 0xff; 115 | entry[4] = autodeactive_us >> 8; 116 | } 117 | 118 | return ch347_spi_write_packet(priv, CH347_CMD_SPI_CONTROL, buf, 10); 119 | } 120 | 121 | int ch347_set_spi_freq(struct ch347_priv *priv, int *clk_khz) { 122 | int freq = CH347_SPI_MAX_FREQ; 123 | int prescaler; 124 | for (prescaler = 0; prescaler < CH347_SPI_MAX_PRESCALER; prescaler++) { 125 | if (freq <= *clk_khz) 126 | break; 127 | freq /= 2; 128 | } 129 | if (freq > *clk_khz) 130 | return -EINVAL; 131 | priv->cfg.SPI_BaudRatePrescaler = prescaler * 8; 132 | *clk_khz = freq; 133 | return ch347_commit_settings(priv); 134 | } 135 | 136 | int ch347_setup_spi(struct ch347_priv *priv, int spi_mode, bool lsb_first, bool cs0_active_high, bool cs1_active_high) { 137 | priv->cfg.SPI_Direction = SPI_Direction_2Lines_FullDuplex; 138 | priv->cfg.SPI_Mode = SPI_Mode_Master; 139 | priv->cfg.SPI_DataSize = SPI_DataSize_8b; 140 | priv->cfg.SPI_CPOL = (spi_mode & 2) ? SPI_CPOL_High : SPI_CPOL_Low; 141 | priv->cfg.SPI_CPHA = (spi_mode & 1) ? SPI_CPHA_2Edge : SPI_CPHA_1Edge; 142 | priv->cfg.SPI_NSS = SPI_NSS_Software; 143 | priv->cfg.SPI_FirstBit = lsb_first ? 
SPI_FirstBit_LSB : SPI_FirstBit_MSB; 144 | priv->cfg.SPI_WriteReadInterval = 0; 145 | priv->cfg.SPI_OutDefaultData = 0; 146 | 147 | if (cs0_active_high) 148 | priv->cfg.OtherCfg |= 0x80; 149 | else 150 | priv->cfg.OtherCfg &= 0x7f; 151 | if (cs1_active_high) 152 | priv->cfg.OtherCfg |= 0x40; 153 | else 154 | priv->cfg.OtherCfg &= 0xbf; 155 | 156 | return ch347_commit_settings(priv); 157 | } 158 | 159 | static int ch347_spi_trx_full_duplex_one(struct ch347_priv *priv, void *buf, uint32_t len) { 160 | int err, transferred; 161 | 162 | err = ch347_spi_write_packet(priv, CH347_CMD_SPI_RD_WR, buf, len); 163 | if (err) 164 | return err; 165 | 166 | err = ch347_spi_read_packet(priv, CH347_CMD_SPI_RD_WR, buf, len, &transferred); 167 | if (err) 168 | return err; 169 | 170 | if (transferred != len) { 171 | fprintf(stderr, "ch347: not enough data received."); 172 | return -EINVAL; 173 | } 174 | return 0; 175 | } 176 | 177 | int ch347_spi_trx_full_duplex(struct ch347_priv *priv, void *buf, uint32_t len) { 178 | int err; 179 | while (len > CH347_SPI_MAX_TRX) { 180 | err = ch347_spi_trx_full_duplex_one(priv, buf, CH347_SPI_MAX_TRX); 181 | if (err) 182 | return err; 183 | len -= CH347_SPI_MAX_TRX; 184 | } 185 | return ch347_spi_trx_full_duplex_one(priv, buf, len); 186 | } 187 | 188 | int ch347_spi_tx(struct ch347_priv *priv, const void *tx, uint32_t len) { 189 | int err, transferred; 190 | uint8_t unknown_data; 191 | const void *ptr = tx; 192 | while (len) { 193 | int cur_len = len > sizeof(priv->tmpbuf) - 3 ? 
sizeof(priv->tmpbuf) - 3 : len; 194 | err = ch347_spi_write_packet(priv, CH347_CMD_SPI_BLCK_WR, ptr, cur_len); 195 | if (err) 196 | return err; 197 | err = ch347_spi_read_packet(priv, CH347_CMD_SPI_BLCK_WR, &unknown_data, 1, &transferred); 198 | if (err) 199 | return err; 200 | ptr += cur_len; 201 | len -= cur_len; 202 | } 203 | return 0; 204 | } 205 | 206 | int ch347_spi_rx(struct ch347_priv *priv, void *rx, uint32_t len) { 207 | int err, transferred; 208 | void *ptr = rx; 209 | uint32_t rxlen = 0; 210 | /* FIXME: len should be little endian! */ 211 | err = ch347_spi_write_packet(priv, CH347_CMD_SPI_BLCK_RD, &len, sizeof(len)); 212 | if (err) 213 | return err; 214 | while(rxlen < len) { 215 | uint32_t cur_rx = len - rxlen; 216 | if(cur_rx > CH347_SPI_MAX_TRX) 217 | cur_rx = CH347_SPI_MAX_TRX; 218 | err = ch347_spi_read_packet(priv, CH347_CMD_SPI_BLCK_RD, ptr, (int)cur_rx, &transferred); 219 | if (err) 220 | return err; 221 | rxlen += transferred; 222 | ptr += transferred; 223 | } 224 | return 0; 225 | } 226 | 227 | struct ch347_priv *ch347_open() { 228 | struct ch347_priv *priv = calloc(1, sizeof(struct ch347_priv)); 229 | int ret; 230 | 231 | if (!priv) { 232 | fprintf(stderr, "ch347: faied to allocate memory.\n"); 233 | return NULL; 234 | } 235 | ret = libusb_init(&priv->ctx); 236 | if (ret < 0) { 237 | perror("ch347: libusb: init"); 238 | goto ERR_0; 239 | } 240 | 241 | libusb_set_option(priv->ctx, LIBUSB_OPTION_LOG_LEVEL, LIBUSB_LOG_LEVEL_INFO); 242 | priv->handle = libusb_open_device_with_vid_pid(priv->ctx, CH347_SPI_VID, CH347_SPI_PID); 243 | if (!priv->handle) { 244 | perror("ch347: libusb: open"); 245 | goto ERR_1; 246 | } 247 | 248 | libusb_set_auto_detach_kernel_driver(priv->handle, 1); 249 | 250 | ret = libusb_claim_interface(priv->handle, CH347_SPI_IF); 251 | if (ret < 0) { 252 | perror("ch347: libusb: claim_if"); 253 | goto ERR_2; 254 | } 255 | 256 | if (ch347_get_hw_config(priv)) 257 | goto ERR_3; 258 | 259 | return priv; 260 | 261 | ERR_3: 262 | 
libusb_release_interface(priv->handle, CH347_SPI_IF); 263 | ERR_2: 264 | libusb_close(priv->handle); 265 | ERR_1: 266 | libusb_exit(priv->ctx); 267 | ERR_0: 268 | free(priv); 269 | return NULL; 270 | } 271 | 272 | void ch347_close(struct ch347_priv *priv) { 273 | libusb_release_interface(priv->handle, CH347_SPI_IF); 274 | libusb_close(priv->handle); 275 | libusb_exit(priv->ctx); 276 | free(priv); 277 | } 278 | -------------------------------------------------------------------------------- /spi-mem/spi-mem.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0+ 2 | /* 3 | * This is based on include/linux/spi/spi-mem.h in Linux 4 | * Original file header: 5 | * 6 | * Copyright (C) 2018 Exceet Electronics GmbH 7 | * Copyright (C) 2018 Bootlin 8 | * 9 | * Author: Boris Brezillon 10 | */ 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx) 18 | { 19 | u32 mode = mem->spi_mode; 20 | 21 | switch (buswidth) { 22 | case 1: 23 | return 0; 24 | 25 | case 2: 26 | if ((tx && 27 | (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) || 28 | (!tx && 29 | (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))) 30 | return 0; 31 | 32 | break; 33 | 34 | case 4: 35 | if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) || 36 | (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))) 37 | return 0; 38 | 39 | break; 40 | 41 | case 8: 42 | if ((tx && (mode & SPI_TX_OCTAL)) || 43 | (!tx && (mode & SPI_RX_OCTAL))) 44 | return 0; 45 | 46 | break; 47 | 48 | default: 49 | break; 50 | } 51 | 52 | return -EOPNOTSUPP; 53 | } 54 | 55 | bool spi_mem_default_supports_op(struct spi_mem *mem, 56 | const struct spi_mem_op *op) 57 | { 58 | if (spi_check_buswidth_req(mem, op->cmd.buswidth, true)) 59 | return false; 60 | 61 | if (op->addr.nbytes && 62 | spi_check_buswidth_req(mem, op->addr.buswidth, true)) 63 | return false; 64 | 65 | if 
(op->dummy.nbytes && 66 | spi_check_buswidth_req(mem, op->dummy.buswidth, true)) 67 | return false; 68 | 69 | if (op->data.dir != SPI_MEM_NO_DATA && 70 | spi_check_buswidth_req(mem, op->data.buswidth, 71 | op->data.dir == SPI_MEM_DATA_OUT)) 72 | return false; 73 | 74 | return true; 75 | } 76 | 77 | static bool spi_mem_buswidth_is_valid(u8 buswidth) 78 | { 79 | if ((buswidth != 0) && (buswidth != 1) && (buswidth != 2) && 80 | (buswidth != 4) && (buswidth != 8)) 81 | return false; 82 | 83 | return true; 84 | } 85 | 86 | static int spi_mem_check_op(const struct spi_mem_op *op) 87 | { 88 | if (!op->cmd.buswidth) 89 | return -EINVAL; 90 | 91 | if ((op->addr.nbytes && !op->addr.buswidth) || 92 | (op->dummy.nbytes && !op->dummy.buswidth) || 93 | (op->data.nbytes && !op->data.buswidth)) 94 | return -EINVAL; 95 | 96 | if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) || 97 | !spi_mem_buswidth_is_valid(op->addr.buswidth) || 98 | !spi_mem_buswidth_is_valid(op->dummy.buswidth) || 99 | !spi_mem_buswidth_is_valid(op->data.buswidth)) 100 | return -EINVAL; 101 | 102 | return 0; 103 | } 104 | 105 | static bool spi_mem_internal_supports_op(struct spi_mem *mem, 106 | const struct spi_mem_op *op) 107 | { 108 | if (mem->ops->supports_op) 109 | return mem->ops->supports_op(mem, op); 110 | 111 | return spi_mem_default_supports_op(mem, op); 112 | } 113 | 114 | /** 115 | * spi_mem_supports_op() - Check if a memory device and the controller it is 116 | * connected to support a specific memory operation 117 | * @mem: the SPI memory 118 | * @op: the memory operation to check 119 | * 120 | * Some controllers are only supporting Single or Dual IOs, others might only 121 | * support specific opcodes, or it can even be that the controller and device 122 | * both support Quad IOs but the hardware prevents you from using it because 123 | * only 2 IO lines are connected. 124 | * 125 | * This function checks whether a specific operation is supported. 
126 | * 127 | * Return: true if @op is supported, false otherwise. 128 | */ 129 | bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) 130 | { 131 | if (spi_mem_check_op(op)) 132 | return false; 133 | 134 | return spi_mem_internal_supports_op(mem, op); 135 | } 136 | 137 | /** 138 | * spi_mem_exec_op() - Execute a memory operation 139 | * @mem: the SPI memory 140 | * @op: the memory operation to execute 141 | * 142 | * Executes a memory operation. 143 | * 144 | * This function first checks that @op is supported and then tries to execute 145 | * it. 146 | * 147 | * Return: 0 in case of success, a negative error code otherwise. 148 | */ 149 | int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 150 | { 151 | int ret; 152 | 153 | ret = spi_mem_check_op(op); 154 | if (ret) 155 | return ret; 156 | 157 | if (!spi_mem_internal_supports_op(mem, op)) 158 | return -EOPNOTSUPP; 159 | 160 | return mem->ops->exec_op(mem, op); 161 | } 162 | 163 | /** 164 | * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to 165 | * match controller limitations 166 | * @mem: the SPI memory 167 | * @op: the operation to adjust 168 | * 169 | * Some controllers have FIFO limitations and must split a data transfer 170 | * operation into multiple ones, others require a specific alignment for 171 | * optimized accesses. This function allows SPI mem drivers to split a single 172 | * operation into multiple sub-operations when required. 173 | * 174 | * Return: a negative error code if the controller can't properly adjust @op, 175 | * 0 otherwise. Note that @op->data.nbytes will be updated if @op 176 | * can't be handled in a single step. 
177 | */ 178 | int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) 179 | { 180 | if (mem->ops->adjust_op_size) 181 | return mem->ops->adjust_op_size(mem, op); 182 | 183 | return 0; 184 | } 185 | 186 | static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc, 187 | u64 offs, size_t len, void *buf) 188 | { 189 | struct spi_mem_op op = desc->info.op_tmpl; 190 | int ret; 191 | 192 | op.addr.val = desc->info.offset + offs; 193 | op.data.buf.in = buf; 194 | op.data.nbytes = len; 195 | ret = spi_mem_adjust_op_size(desc->mem, &op); 196 | if (ret) 197 | return ret; 198 | 199 | ret = spi_mem_exec_op(desc->mem, &op); 200 | if (ret) 201 | return ret; 202 | 203 | return op.data.nbytes; 204 | } 205 | 206 | static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc, 207 | u64 offs, size_t len, const void *buf) 208 | { 209 | struct spi_mem_op op = desc->info.op_tmpl; 210 | int ret; 211 | 212 | op.addr.val = desc->info.offset + offs; 213 | op.data.buf.out = buf; 214 | op.data.nbytes = len; 215 | ret = spi_mem_adjust_op_size(desc->mem, &op); 216 | if (ret) 217 | return ret; 218 | 219 | ret = spi_mem_exec_op(desc->mem, &op); 220 | if (ret) 221 | return ret; 222 | 223 | return op.data.nbytes; 224 | } 225 | 226 | /** 227 | * spi_mem_dirmap_create() - Create a direct mapping descriptor 228 | * @mem: SPI mem device this direct mapping should be created for 229 | * @info: direct mapping information 230 | * 231 | * This function is creating a direct mapping descriptor which can then be used 232 | * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write(). 233 | * If the SPI controller driver does not support direct mapping, this function 234 | * falls back to an implementation using spi_mem_exec_op(), so that the caller 235 | * doesn't have to bother implementing a fallback on his own. 236 | * 237 | * Return: a valid pointer in case of success, and ERR_PTR() otherwise. 
238 | */ 239 | struct spi_mem_dirmap_desc * 240 | spi_mem_dirmap_create(struct spi_mem *mem, 241 | const struct spi_mem_dirmap_info *info) 242 | { 243 | struct spi_mem_dirmap_desc *desc; 244 | int ret = -EOPNOTSUPP; 245 | 246 | /* Make sure the number of address cycles is between 1 and 8 bytes. */ 247 | if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8) 248 | return ERR_PTR(-EINVAL); 249 | 250 | /* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */ 251 | if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA) 252 | return ERR_PTR(-EINVAL); 253 | 254 | desc = calloc(1, sizeof(*desc)); 255 | if (!desc) 256 | return ERR_PTR(-ENOMEM); 257 | 258 | desc->mem = mem; 259 | desc->info = *info; 260 | if (mem->ops->dirmap_create) 261 | ret = mem->ops->dirmap_create(desc); 262 | 263 | if (ret) { 264 | desc->nodirmap = true; 265 | if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl)) 266 | ret = -EOPNOTSUPP; 267 | else 268 | ret = 0; 269 | } 270 | 271 | if (ret) { 272 | free(desc); 273 | return ERR_PTR(ret); 274 | } 275 | 276 | return desc; 277 | } 278 | 279 | /** 280 | * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor 281 | * @desc: the direct mapping descriptor to destroy 282 | * 283 | * This function destroys a direct mapping descriptor previously created by 284 | * spi_mem_dirmap_create(). 285 | */ 286 | void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc) 287 | { 288 | if (!desc->nodirmap && desc->mem->ops->dirmap_destroy) 289 | desc->mem->ops->dirmap_destroy(desc); 290 | 291 | free(desc); 292 | } 293 | 294 | /** 295 | * spi_mem_dirmap_read() - Read data through a direct mapping 296 | * @desc: direct mapping descriptor 297 | * @offs: offset to start reading from. Note that this is not an absolute 298 | * offset, but the offset within the direct mapping which already has 299 | * its own offset 300 | * @len: length in bytes 301 | * @buf: destination buffer. 
This buffer must be DMA-able 302 | * 303 | * This function reads data from a memory device using a direct mapping 304 | * previously instantiated with spi_mem_dirmap_create(). 305 | * 306 | * Return: the amount of data read from the memory device or a negative error 307 | * code. Note that the returned size might be smaller than @len, and the caller 308 | * is responsible for calling spi_mem_dirmap_read() again when that happens. 309 | */ 310 | ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, 311 | u64 offs, size_t len, void *buf) 312 | { 313 | ssize_t ret; 314 | 315 | if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) 316 | return -EINVAL; 317 | 318 | if (!len) 319 | return 0; 320 | 321 | if (desc->nodirmap) { 322 | ret = spi_mem_no_dirmap_read(desc, offs, len, buf); 323 | } else if (desc->mem->ops->dirmap_read) { 324 | ret = desc->mem->ops->dirmap_read(desc, offs, len, buf); 325 | } else { 326 | ret = -EOPNOTSUPP; 327 | } 328 | 329 | return ret; 330 | } 331 | 332 | /** 333 | * spi_mem_dirmap_write() - Write data through a direct mapping 334 | * @desc: direct mapping descriptor 335 | * @offs: offset to start writing from. Note that this is not an absolute 336 | * offset, but the offset within the direct mapping which already has 337 | * its own offset 338 | * @len: length in bytes 339 | * @buf: source buffer. This buffer must be DMA-able 340 | * 341 | * This function writes data to a memory device using a direct mapping 342 | * previously instantiated with spi_mem_dirmap_create(). 343 | * 344 | * Return: the amount of data written to the memory device or a negative error 345 | * code. Note that the returned size might be smaller than @len, and the caller 346 | * is responsible for calling spi_mem_dirmap_write() again when that happens. 
347 | */ 348 | ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, 349 | u64 offs, size_t len, const void *buf) 350 | { 351 | ssize_t ret; 352 | 353 | if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT) 354 | return -EINVAL; 355 | 356 | if (!len) 357 | return 0; 358 | 359 | if (desc->nodirmap) { 360 | ret = spi_mem_no_dirmap_write(desc, offs, len, buf); 361 | } else if (desc->mem->ops->dirmap_write) { 362 | ret = desc->mem->ops->dirmap_write(desc, offs, len, buf); 363 | } else { 364 | ret = -EOPNOTSUPP; 365 | } 366 | 367 | return ret; 368 | } 369 | -------------------------------------------------------------------------------- /spi-nand/macronix.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Copyright (c) 2018 Macronix 4 | * 5 | * Author: Boris Brezillon 6 | */ 7 | 8 | #include 9 | 10 | #define SPINAND_MFR_MACRONIX 0xC2 11 | #define MACRONIX_ECCSR_MASK 0x0F 12 | 13 | static SPINAND_OP_VARIANTS(read_cache_variants, 14 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 15 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 16 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 17 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 18 | 19 | static SPINAND_OP_VARIANTS(write_cache_variants, 20 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 21 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 22 | 23 | static SPINAND_OP_VARIANTS(update_cache_variants, 24 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 25 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 26 | 27 | static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr) 28 | { 29 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1), 30 | SPI_MEM_OP_NO_ADDR, 31 | SPI_MEM_OP_DUMMY(1, 1), 32 | SPI_MEM_OP_DATA_IN(1, eccsr, 1)); 33 | 34 | int ret = spi_mem_exec_op(spinand->spimem, &op); 35 | if (ret) 36 | return ret; 37 | 38 | *eccsr &= MACRONIX_ECCSR_MASK; 39 | return 0; 40 | } 41 | 42 | static int 
mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, 43 | u8 status) 44 | { 45 | struct nand_device *nand = spinand_to_nand(spinand); 46 | u8 eccsr; 47 | 48 | switch (status & STATUS_ECC_MASK) { 49 | case STATUS_ECC_NO_BITFLIPS: 50 | return 0; 51 | 52 | case STATUS_ECC_UNCOR_ERROR: 53 | return -EBADMSG; 54 | 55 | case STATUS_ECC_HAS_BITFLIPS: 56 | /* 57 | * Let's try to retrieve the real maximum number of bitflips 58 | * in order to avoid forcing the wear-leveling layer to move 59 | * data around if it's not necessary. 60 | */ 61 | if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr)) 62 | return nand->eccreq.strength; 63 | 64 | if (eccsr > nand->eccreq.strength || !eccsr) 65 | return nand->eccreq.strength; 66 | 67 | return eccsr; 68 | 69 | default: 70 | break; 71 | } 72 | 73 | return -EINVAL; 74 | } 75 | 76 | static const struct spinand_info macronix_spinand_table[] = { 77 | SPINAND_INFO("MX35LF1GE4AB", 78 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12), 79 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 80 | NAND_ECCREQ(4, 512), 81 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 82 | &write_cache_variants, 83 | &update_cache_variants), 84 | SPINAND_HAS_QE_BIT, 85 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 86 | SPINAND_INFO("MX35LF2GE4AB", 87 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22), 88 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), 89 | NAND_ECCREQ(4, 512), 90 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 91 | &write_cache_variants, 92 | &update_cache_variants), 93 | SPINAND_HAS_QE_BIT, 94 | SPINAND_ECCINFO(NULL)), 95 | SPINAND_INFO("MX35LF2GE4AD", 96 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26), 97 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 98 | NAND_ECCREQ(8, 512), 99 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 100 | &write_cache_variants, 101 | &update_cache_variants), 102 | SPINAND_HAS_QE_BIT, 103 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 104 | SPINAND_INFO("MX35LF4GE4AD", 105 | 
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37), 106 | NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1), 107 | NAND_ECCREQ(8, 512), 108 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 109 | &write_cache_variants, 110 | &update_cache_variants), 111 | SPINAND_HAS_QE_BIT, 112 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 113 | SPINAND_INFO("MX35LF1G24AD", 114 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14), 115 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 116 | NAND_ECCREQ(8, 512), 117 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 118 | &write_cache_variants, 119 | &update_cache_variants), 120 | SPINAND_HAS_QE_BIT, 121 | SPINAND_ECCINFO(NULL)), 122 | SPINAND_INFO("MX35LF2G24AD", 123 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24), 124 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), 125 | NAND_ECCREQ(8, 512), 126 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 127 | &write_cache_variants, 128 | &update_cache_variants), 129 | SPINAND_HAS_QE_BIT, 130 | SPINAND_ECCINFO(NULL)), 131 | SPINAND_INFO("MX35LF4G24AD", 132 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35), 133 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1), 134 | NAND_ECCREQ(8, 512), 135 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 136 | &write_cache_variants, 137 | &update_cache_variants), 138 | SPINAND_HAS_QE_BIT, 139 | SPINAND_ECCINFO(NULL)), 140 | SPINAND_INFO("MX31LF1GE4BC", 141 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e), 142 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 143 | NAND_ECCREQ(8, 512), 144 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 145 | &write_cache_variants, 146 | &update_cache_variants), 147 | SPINAND_HAS_QE_BIT, 148 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 149 | SPINAND_INFO("MX31UF1GE4BC", 150 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9e), 151 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 152 | NAND_ECCREQ(8, 512), 153 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 154 | &write_cache_variants, 
155 | &update_cache_variants), 156 | SPINAND_HAS_QE_BIT, 157 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 158 | SPINAND_INFO("MX35LF2G14AC", 159 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x20), 160 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), 161 | NAND_ECCREQ(4, 512), 162 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 163 | &write_cache_variants, 164 | &update_cache_variants), 165 | SPINAND_HAS_QE_BIT, 166 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 167 | SPINAND_INFO("MX35UF4G24AD", 168 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5), 169 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1), 170 | NAND_ECCREQ(8, 512), 171 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 172 | &write_cache_variants, 173 | &update_cache_variants), 174 | SPINAND_HAS_QE_BIT, 175 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 176 | SPINAND_INFO("MX35UF4GE4AD", 177 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7), 178 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 179 | NAND_ECCREQ(8, 512), 180 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 181 | &write_cache_variants, 182 | &update_cache_variants), 183 | SPINAND_HAS_QE_BIT, 184 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 185 | SPINAND_INFO("MX35UF2G14AC", 186 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0), 187 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), 188 | NAND_ECCREQ(4, 512), 189 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 190 | &write_cache_variants, 191 | &update_cache_variants), 192 | SPINAND_HAS_QE_BIT, 193 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 194 | SPINAND_INFO("MX35UF2G24AD", 195 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4), 196 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), 197 | NAND_ECCREQ(8, 512), 198 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 199 | &write_cache_variants, 200 | &update_cache_variants), 201 | SPINAND_HAS_QE_BIT, 202 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 203 | SPINAND_INFO("MX35UF2GE4AD", 204 | 
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6), 205 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 206 | NAND_ECCREQ(8, 512), 207 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 208 | &write_cache_variants, 209 | &update_cache_variants), 210 | SPINAND_HAS_QE_BIT, 211 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 212 | SPINAND_INFO("MX35UF2GE4AC", 213 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2), 214 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 215 | NAND_ECCREQ(4, 512), 216 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 217 | &write_cache_variants, 218 | &update_cache_variants), 219 | SPINAND_HAS_QE_BIT, 220 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 221 | SPINAND_INFO("MX35UF1G14AC", 222 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x90), 223 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 224 | NAND_ECCREQ(4, 512), 225 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 226 | &write_cache_variants, 227 | &update_cache_variants), 228 | SPINAND_HAS_QE_BIT, 229 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 230 | SPINAND_INFO("MX35UF1G24AD", 231 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94), 232 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 233 | NAND_ECCREQ(8, 512), 234 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 235 | &write_cache_variants, 236 | &update_cache_variants), 237 | SPINAND_HAS_QE_BIT, 238 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 239 | SPINAND_INFO("MX35UF1GE4AD", 240 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96), 241 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 242 | NAND_ECCREQ(8, 512), 243 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 244 | &write_cache_variants, 245 | &update_cache_variants), 246 | SPINAND_HAS_QE_BIT, 247 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 248 | SPINAND_INFO("MX35UF1GE4AC", 249 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92), 250 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 251 | NAND_ECCREQ(4, 512), 252 | 
SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 253 | &write_cache_variants, 254 | &update_cache_variants), 255 | SPINAND_HAS_QE_BIT, 256 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 257 | SPINAND_INFO("MX31LF2GE4BC", 258 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2e), 259 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 260 | NAND_ECCREQ(8, 512), 261 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 262 | &write_cache_variants, 263 | &update_cache_variants), 264 | SPINAND_HAS_QE_BIT, 265 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 266 | SPINAND_INFO("MX3UF2GE4BC", 267 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae), 268 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 269 | NAND_ECCREQ(8, 512), 270 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 271 | &write_cache_variants, 272 | &update_cache_variants), 273 | SPINAND_HAS_QE_BIT, 274 | SPINAND_ECCINFO(mx35lf1ge4ab_ecc_get_status)), 275 | }; 276 | 277 | static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = { 278 | }; 279 | 280 | const struct spinand_manufacturer macronix_spinand_manufacturer = { 281 | .id = SPINAND_MFR_MACRONIX, 282 | .name = "Macronix", 283 | .chips = macronix_spinand_table, 284 | .nchips = ARRAY_SIZE(macronix_spinand_table), 285 | .ops = ¯onix_spinand_manuf_ops, 286 | }; 287 | -------------------------------------------------------------------------------- /include/spi-mem.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0+ */ 2 | /* 3 | * This is based on include/linux/spi/spi-mem.h in Linux 4 | * Original file header: 5 | * 6 | * Copyright (C) 2018 Exceet Electronics GmbH 7 | * Copyright (C) 2018 Bootlin 8 | * 9 | * Author: 10 | * Peter Pan 11 | * Boris Brezillon 12 | */ 13 | 14 | #ifndef __LINUX_SPI_MEM_H 15 | #define __LINUX_SPI_MEM_H 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | #ifdef _WIN32 22 | typedef intptr_t ssize_t; 23 | #else 24 | typedef long 
ssize_t; 25 | #endif 26 | struct spi_controller_mem_ops; 27 | 28 | #define SPI_MEM_OP_CMD(__opcode, __buswidth) \ 29 | { \ 30 | .buswidth = __buswidth, \ 31 | .opcode = __opcode, \ 32 | } 33 | 34 | #define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \ 35 | { \ 36 | .nbytes = __nbytes, \ 37 | .val = __val, \ 38 | .buswidth = __buswidth, \ 39 | } 40 | 41 | #define SPI_MEM_OP_NO_ADDR { } 42 | 43 | #define SPI_MEM_OP_DUMMY(__nbytes, __buswidth) \ 44 | { \ 45 | .nbytes = __nbytes, \ 46 | .buswidth = __buswidth, \ 47 | } 48 | 49 | #define SPI_MEM_OP_NO_DUMMY { } 50 | 51 | #define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth) \ 52 | { \ 53 | .dir = SPI_MEM_DATA_IN, \ 54 | .nbytes = __nbytes, \ 55 | .buf.in = __buf, \ 56 | .buswidth = __buswidth, \ 57 | } 58 | 59 | #define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth) \ 60 | { \ 61 | .dir = SPI_MEM_DATA_OUT, \ 62 | .nbytes = __nbytes, \ 63 | .buf.out = __buf, \ 64 | .buswidth = __buswidth, \ 65 | } 66 | 67 | #define SPI_MEM_OP_NO_DATA { } 68 | 69 | /** 70 | * enum spi_mem_data_dir - describes the direction of a SPI memory data 71 | * transfer from the controller perspective 72 | * @SPI_MEM_NO_DATA: no data transferred 73 | * @SPI_MEM_DATA_IN: data coming from the SPI memory 74 | * @SPI_MEM_DATA_OUT: data sent to the SPI memory 75 | */ 76 | enum spi_mem_data_dir { 77 | SPI_MEM_NO_DATA, 78 | SPI_MEM_DATA_IN, 79 | SPI_MEM_DATA_OUT, 80 | }; 81 | 82 | /** 83 | * struct spi_mem_op - describes a SPI memory operation 84 | * @cmd.buswidth: number of IO lines used to transmit the command 85 | * @cmd.opcode: operation opcode 86 | * @addr.nbytes: number of address bytes to send. Can be zero if the operation 87 | * does not need to send an address 88 | * @addr.buswidth: number of IO lines used to transmit the address cycles 89 | * @addr.val: address value. This value is always sent MSB first on the bus. 
90 | * Note that only @addr.nbytes are taken into account in this 91 | * address value, so users should make sure the value fits in the 92 | * assigned number of bytes. 93 | * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can 94 | * be zero if the operation does not require dummy bytes 95 | * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes 96 | * @data.buswidth: number of IO lanes used to send/receive the data 97 | * @data.dir: direction of the transfer 98 | * @data.nbytes: number of data bytes to send/receive. Can be zero if the 99 | * operation does not involve transferring data 100 | * @data.buf.in: input buffer (must be DMA-able) 101 | * @data.buf.out: output buffer (must be DMA-able) 102 | */ 103 | struct spi_mem_op { 104 | struct { 105 | u8 buswidth; 106 | u8 opcode; 107 | } cmd; 108 | 109 | struct { 110 | u8 nbytes; 111 | u8 buswidth; 112 | u64 val; 113 | } addr; 114 | 115 | struct { 116 | u8 nbytes; 117 | u8 buswidth; 118 | } dummy; 119 | 120 | struct { 121 | u8 buswidth; 122 | enum spi_mem_data_dir dir; 123 | unsigned int nbytes; 124 | union { 125 | void *in; 126 | const void *out; 127 | } buf; 128 | } data; 129 | }; 130 | 131 | #define SPI_MEM_OP(__cmd, __addr, __dummy, __data) \ 132 | { \ 133 | .cmd = __cmd, \ 134 | .addr = __addr, \ 135 | .dummy = __dummy, \ 136 | .data = __data, \ 137 | } 138 | 139 | /** 140 | * struct spi_mem_dirmap_info - Direct mapping information 141 | * @op_tmpl: operation template that should be used by the direct mapping when 142 | * the memory device is accessed 143 | * @offset: absolute offset this direct mapping is pointing to 144 | * @length: length in byte of this direct mapping 145 | * 146 | * These information are used by the controller specific implementation to know 147 | * the portion of memory that is directly mapped and the spi_mem_op that should 148 | * be used to access the device. 
149 | * A direct mapping is only valid for one direction (read or write) and this 150 | * direction is directly encoded in the ->op_tmpl.data.dir field. 151 | */ 152 | struct spi_mem_dirmap_info { 153 | struct spi_mem_op op_tmpl; 154 | u64 offset; 155 | u64 length; 156 | }; 157 | 158 | /** 159 | * struct spi_mem_dirmap_desc - Direct mapping descriptor 160 | * @mem: the SPI memory device this direct mapping is attached to 161 | * @info: information passed at direct mapping creation time 162 | * @nodirmap: set to 1 if the SPI controller does not implement 163 | * ->mem_ops->dirmap_create() or when this function returned an 164 | * error. If @nodirmap is true, all spi_mem_dirmap_{read,write}() 165 | * calls will use spi_mem_exec_op() to access the memory. This is a 166 | * degraded mode that allows spi_mem drivers to use the same code 167 | * no matter whether the controller supports direct mapping or not 168 | * @priv: field pointing to controller specific data 169 | * 170 | * Common part of a direct mapping descriptor. This object is created by 171 | * spi_mem_dirmap_create() and controller implementation of ->create_dirmap() 172 | * can create/attach direct mapping resources to the descriptor in the ->priv 173 | * field. 174 | */ 175 | struct spi_mem_dirmap_desc { 176 | struct spi_mem *mem; 177 | struct spi_mem_dirmap_info info; 178 | unsigned int nodirmap; 179 | void *priv; 180 | }; 181 | 182 | /** 183 | * struct spi_mem - describes a SPI memory device 184 | * @spi: the underlying SPI device 185 | * @drvpriv: spi_mem_driver private data 186 | * @name: name of the SPI memory device 187 | * 188 | * Extra information that describe the SPI memory device and may be needed by 189 | * the controller to properly handle this device should be placed here. 190 | * 191 | * One example would be the device size since some controller expose their SPI 192 | * mem devices through a io-mapped region. 
193 | */ 194 | struct spi_mem { 195 | const struct spi_controller_mem_ops *ops; 196 | u32 spi_mode; 197 | void *drvpriv; 198 | const char *name; 199 | }; 200 | 201 | /** 202 | * struct spi_mem_set_drvdata() - attach driver private data to a SPI mem 203 | * device 204 | * @mem: memory device 205 | * @data: data to attach to the memory device 206 | */ 207 | static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data) 208 | { 209 | mem->drvpriv = data; 210 | } 211 | 212 | /** 213 | * struct spi_mem_get_drvdata() - get driver private data attached to a SPI mem 214 | * device 215 | * @mem: memory device 216 | * 217 | * Return: the data attached to the mem device. 218 | */ 219 | static inline void *spi_mem_get_drvdata(struct spi_mem *mem) 220 | { 221 | return mem->drvpriv; 222 | } 223 | 224 | /** 225 | * spi_mem_get_name() - Return the SPI mem device name to be used by the 226 | * upper layer if necessary 227 | * @mem: the SPI memory 228 | * 229 | * This function allows SPI mem users to retrieve the SPI mem device name. 230 | * It is useful if the upper layer needs to expose a custom name for 231 | * compatibility reasons. 232 | * 233 | * Return: a string containing the name of the memory device to be used 234 | * by the SPI mem user 235 | */ 236 | static inline const char *spi_mem_get_name(struct spi_mem *mem) 237 | { 238 | return mem->name ?: "spi-mem"; 239 | } 240 | 241 | /** 242 | * struct spi_controller_mem_ops - SPI memory operations 243 | * @adjust_op_size: shrink the data xfer of an operation to match controller's 244 | * limitations (can be alignment of max RX/TX size 245 | * limitations) 246 | * @supports_op: check if an operation is supported by the controller 247 | * @exec_op: execute a SPI memory operation 248 | * @get_name: get a custom name for the SPI mem device from the controller. 
249 | * This might be needed if the controller driver has been ported 250 | * to use the SPI mem layer and a custom name is used to keep 251 | * mtdparts compatible. 252 | * Note that if the implementation of this function allocates memory 253 | * dynamically, then it should do so with devm_xxx(), as we don't 254 | * have a ->free_name() function. 255 | * @dirmap_create: create a direct mapping descriptor that can later be used to 256 | * access the memory device. This method is optional 257 | * @dirmap_destroy: destroy a memory descriptor previous created by 258 | * ->dirmap_create() 259 | * @dirmap_read: read data from the memory device using the direct mapping 260 | * created by ->dirmap_create(). The function can return less 261 | * data than requested (for example when the request is crossing 262 | * the currently mapped area), and the caller of 263 | * spi_mem_dirmap_read() is responsible for calling it again in 264 | * this case. 265 | * @dirmap_write: write data to the memory device using the direct mapping 266 | * created by ->dirmap_create(). The function can return less 267 | * data than requested (for example when the request is crossing 268 | * the currently mapped area), and the caller of 269 | * spi_mem_dirmap_write() is responsible for calling it again in 270 | * this case. 271 | * 272 | * This interface should be implemented by SPI controllers providing an 273 | * high-level interface to execute SPI memory operation, which is usually the 274 | * case for QSPI controllers. 275 | * 276 | * Note on ->dirmap_{read,write}(): drivers should avoid accessing the direct 277 | * mapping from the CPU because doing that can stall the CPU waiting for the 278 | * SPI mem transaction to finish, and this will make real-time maintainers 279 | * unhappy and might make your system less reactive. Instead, drivers should 280 | * use DMA to access this direct mapping. 
281 | */ 282 | struct spi_controller_mem_ops { 283 | int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op); 284 | bool (*supports_op)(struct spi_mem *mem, 285 | const struct spi_mem_op *op); 286 | int (*exec_op)(struct spi_mem *mem, 287 | const struct spi_mem_op *op); 288 | const char *(*get_name)(struct spi_mem *mem); 289 | int (*dirmap_create)(struct spi_mem_dirmap_desc *desc); 290 | void (*dirmap_destroy)(struct spi_mem_dirmap_desc *desc); 291 | ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *desc, 292 | u64 offs, size_t len, void *buf); 293 | ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc, 294 | u64 offs, size_t len, const void *buf); 295 | }; 296 | 297 | bool spi_mem_default_supports_op(struct spi_mem *mem, 298 | const struct spi_mem_op *op); 299 | 300 | int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op); 301 | 302 | bool spi_mem_supports_op(struct spi_mem *mem, 303 | const struct spi_mem_op *op); 304 | 305 | int spi_mem_exec_op(struct spi_mem *mem, 306 | const struct spi_mem_op *op); 307 | 308 | 309 | 310 | struct spi_mem_dirmap_desc * 311 | spi_mem_dirmap_create(struct spi_mem *mem, 312 | const struct spi_mem_dirmap_info *info); 313 | void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc); 314 | ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, 315 | u64 offs, size_t len, void *buf); 316 | ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, 317 | u64 offs, size_t len, const void *buf); 318 | #endif 319 | -------------------------------------------------------------------------------- /spi-mem/spi-mem-serprog.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #ifdef _WIN32 10 | #include 11 | 12 | static HANDLE hComPort; 13 | static DCB dcbOriginal; 14 | 15 | static int serial_config(HANDLE hDevice, int speed) 16 | { 17 | DCB dcb; 18 | 19 | memset(&dcb, 0, 
sizeof(dcb)); 20 | 21 | dcb.DCBlength = sizeof(dcb); 22 | if (!GetCommState(hDevice, &dcb)) { 23 | fprintf(stderr, "serial: failed to get port config, error = %d\n", 24 | GetLastError()); 25 | return -1; 26 | } 27 | 28 | dcb.BaudRate = speed; 29 | dcb.ByteSize = 8; 30 | dcb.fParity = 0; 31 | dcb.StopBits = ONESTOPBIT; 32 | dcb.fInX = 0; 33 | dcb.fOutX = 0; 34 | 35 | if (!SetCommState(hDevice, &dcb)) { 36 | fprintf(stderr, "serial: failed to set port config, error = %d\n", 37 | GetLastError()); 38 | return -1; 39 | } 40 | 41 | return 0; 42 | } 43 | 44 | static int serial_init(const char *devpath) 45 | { 46 | DWORD dwErrors; 47 | char dev[16]; 48 | uint32_t com; 49 | char *end; 50 | int ret; 51 | 52 | if (!strncmp(devpath, "\\\\.\\", 4)) 53 | devpath += 4; 54 | 55 | if (strncasecmp(devpath, "COM", 3)) { 56 | fprintf(stderr, "serial: not a serial device path\n"); 57 | return -EINVAL; 58 | } 59 | 60 | com = strtoul(devpath + 3, &end, 10); 61 | if (!com || com > 255 || *end) { 62 | fprintf(stderr, "serial: not a valid serial device\n"); 63 | return -EINVAL; 64 | } 65 | 66 | /* Make sure to support COM port >= 10 */ 67 | snprintf(dev, sizeof(dev), "\\\\.\\COM%u", com); 68 | 69 | hComPort = CreateFile(dev, GENERIC_READ | GENERIC_WRITE, 0, NULL, 70 | OPEN_EXISTING, 0, NULL); 71 | if (hComPort == INVALID_HANDLE_VALUE) { 72 | fprintf(stderr, "serial: failed to open port, error = %u\n", 73 | GetLastError()); 74 | return -ENODEV; 75 | } 76 | 77 | memset(&dcbOriginal, 0, sizeof(dcbOriginal)); 78 | 79 | dcbOriginal.DCBlength = sizeof(dcbOriginal); 80 | if (!GetCommState(hComPort, &dcbOriginal)) { 81 | fprintf(stderr, "serial: failed to get port config, error = %u\n", 82 | GetLastError()); 83 | ret = -EIO; 84 | goto cleanup; 85 | } 86 | 87 | if (!SetupComm(hComPort, 1024, 1024)) { 88 | fprintf(stderr, "serial: failed to get port FIFO size, error = %u\n", 89 | GetLastError()); 90 | ret = -EIO; 91 | goto cleanup; 92 | } 93 | 94 | if (serial_config(hComPort, 4000000) != 0) { 95 | 
ret = -EIO; 96 | goto cleanup; 97 | } 98 | 99 | if (!ClearCommError(hComPort, &dwErrors, NULL)) { 100 | fprintf(stderr, "serial: failed to clear port error, error = %u\n", 101 | GetLastError()); 102 | ret = -EIO; 103 | goto cleanup; 104 | } 105 | 106 | if (!PurgeComm(hComPort, PURGE_RXABORT | PURGE_RXCLEAR | PURGE_TXABORT | 107 | PURGE_TXCLEAR)) { 108 | fprintf(stderr, "serial: failed to flush port, error = %u\n", 109 | GetLastError()); 110 | ret = -EIO; 111 | goto cleanup; 112 | } 113 | 114 | return 0; 115 | 116 | cleanup: 117 | CloseHandle(hComPort); 118 | 119 | return ret; 120 | } 121 | 122 | static int serial_cleanup(void) 123 | { 124 | if (!SetCommState(hComPort, &dcbOriginal)) { 125 | fprintf(stderr, "serial: failed to restore port config, error = %u\n", 126 | GetLastError()); 127 | } 128 | 129 | CloseHandle(hComPort); 130 | 131 | return 0; 132 | } 133 | 134 | static int serial_read(void *buf, size_t len) 135 | { 136 | DWORD dwBytesRead; 137 | 138 | if (!ReadFile(hComPort, buf, len, &dwBytesRead, NULL)) { 139 | fprintf(stderr, "serial: read failed, error = %u\n", 140 | GetLastError()); 141 | return -EIO; 142 | } 143 | 144 | return dwBytesRead; 145 | } 146 | 147 | static int serial_write(const void *buf, size_t len) 148 | { 149 | DWORD dwBytesWritten; 150 | 151 | if (!WriteFile(hComPort, buf, len, &dwBytesWritten, NULL)) { 152 | fprintf(stderr, "serial: write failed, error = %u\n", 153 | GetLastError()); 154 | return -EIO; 155 | } 156 | 157 | FlushFileBuffers(hComPort); 158 | 159 | return dwBytesWritten; 160 | } 161 | #else 162 | #include 163 | #include 164 | #include 165 | 166 | static int serial_fd; 167 | 168 | static int serial_config(int fd, int speed) 169 | { 170 | struct termios tty; 171 | if (tcgetattr(fd, &tty) != 0) { 172 | perror("serial: tcgetattr"); 173 | return -1; 174 | } 175 | 176 | cfsetospeed(&tty, speed); 177 | cfsetispeed(&tty, speed); 178 | 179 | tty.c_cflag &= ~(PARENB | CSTOPB | CSIZE); 180 | tty.c_cflag |= (CS8 | CLOCAL | CREAD); 181 | 
tty.c_lflag &= ~(ICANON | ECHO | ECHOE | ISIG | IEXTEN); 182 | tty.c_iflag &= ~(IXON | IXOFF | IXANY | ICRNL | IGNCR | INLCR); 183 | tty.c_oflag &= ~OPOST; 184 | 185 | if (tcsetattr(fd, TCSANOW, &tty) != 0) { 186 | perror("serial: tcsetattr"); 187 | return -1; 188 | } 189 | return 0; 190 | } 191 | 192 | static int serial_init(const char *devpath) 193 | { 194 | int ret; 195 | 196 | // Use O_NDELAY to ignore DCD state 197 | serial_fd = open(devpath, O_RDWR | O_NOCTTY | O_NDELAY); 198 | if (serial_fd < 0) { 199 | perror("serial: open"); 200 | return -EINVAL; 201 | } 202 | 203 | /* Ensure that we use blocking I/O */ 204 | ret = fcntl(serial_fd, F_GETFL); 205 | if (ret == -1) { 206 | perror("serial: fcntl_getfl"); 207 | goto ERR; 208 | } 209 | 210 | ret = fcntl(serial_fd, F_SETFL, ret & ~O_NONBLOCK); 211 | if (ret != 0) { 212 | perror("serial: fcntl_setfl"); 213 | goto ERR; 214 | } 215 | 216 | if (serial_config(serial_fd, B4000000) != 0) { 217 | ret = -EINVAL; 218 | goto ERR; 219 | } 220 | ret = tcflush(serial_fd, TCIOFLUSH); 221 | if (ret != 0) { 222 | perror("serial: flush"); 223 | goto ERR; 224 | } 225 | return 0; 226 | ERR: 227 | close(serial_fd); 228 | return ret; 229 | } 230 | 231 | static int serial_cleanup(void) 232 | { 233 | return close(serial_fd); 234 | } 235 | 236 | static int serial_read(void *buf, size_t len) 237 | { 238 | return read(serial_fd, buf, len); 239 | } 240 | 241 | static int serial_write(const void *buf, size_t len) 242 | { 243 | return write(serial_fd, buf, len); 244 | } 245 | #endif 246 | 247 | static int serprog_sync() 248 | { 249 | char c; 250 | int ret; 251 | c = S_CMD_SYNCNOP; 252 | serial_write(&c, 1); 253 | ret = serial_read(&c, 1); 254 | if (ret != 1) { 255 | perror("serprog: sync r1"); 256 | return -EINVAL; 257 | } 258 | if (c != S_NAK) { 259 | fprintf(stderr, "serprog: sync NAK failed.\n"); 260 | return -EINVAL; 261 | } 262 | ret = serial_read(&c, 1); 263 | if (ret != 1) { 264 | perror("serprog: sync r2"); 265 | return -EINVAL; 266 | 
} 267 | if (c != S_ACK) { 268 | fprintf(stderr, "serprog: sync ACK failed.\n"); 269 | return -EINVAL; 270 | } 271 | return 0; 272 | } 273 | 274 | static int serprog_check_ack() 275 | { 276 | unsigned char c; 277 | if (serial_read(&c, 1) <= 0) { 278 | perror("serprog: exec_op: read status"); 279 | return errno; 280 | } 281 | if (c == S_NAK) { 282 | fprintf(stderr, "serprog: exec_op: NAK\n"); 283 | return -EINVAL; 284 | } 285 | if (c != S_ACK) { 286 | fprintf(stderr, 287 | "serprog: exec_op: invalid response 0x%02X from device.\n", 288 | c); 289 | return -EINVAL; 290 | } 291 | return 0; 292 | } 293 | 294 | static int serprog_exec_op(u8 command, u32 parmlen, u8 *params, 295 | u32 retlen, void *retparms) 296 | { 297 | if (serial_write(&command, 1) < 0) { 298 | perror("serprog: exec_op: write cmd"); 299 | return errno; 300 | } 301 | if (serial_write(params, parmlen) < 0) { 302 | perror("serprog: exec_op: write param"); 303 | return errno; 304 | } 305 | if (serprog_check_ack() < 0) 306 | return -EINVAL; 307 | if (retlen) { 308 | if (serial_read(retparms, retlen) != retlen) { 309 | perror("serprog: exec_op: read return buffer"); 310 | return 1; 311 | } 312 | } 313 | return 0; 314 | } 315 | 316 | static int serprog_get_cmdmap(u32 *cmdmap) 317 | { 318 | u8 buf[32]; 319 | 320 | if (serprog_exec_op(S_CMD_Q_CMDMAP, 0, NULL, 32, buf) < 0) 321 | return -EINVAL; 322 | 323 | *cmdmap = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); 324 | return 0; 325 | } 326 | 327 | static int serprog_set_spi_speed(u32 speed) 328 | { 329 | u8 buf[4]; 330 | u32 cmdmap = 0; 331 | int ret; 332 | 333 | ret = serprog_get_cmdmap(&cmdmap); 334 | if (ret < 0) 335 | return ret; 336 | 337 | if (!(cmdmap & (1 << S_CMD_S_SPI_FREQ))) { 338 | printf("serprog: programmer do not support set SPI clock freq.\n"); 339 | return 0; 340 | } 341 | 342 | buf[0] = speed & 0xff; 343 | buf[1] = (speed >> (1 * 8)) & 0xff; 344 | buf[2] = (speed >> (2 * 8)) & 0xff; 345 | buf[3] = (speed >> (3 * 8)) & 0xff; 346 | 347 
| if (serprog_exec_op(S_CMD_S_SPI_FREQ, 4, buf, 4, buf) < 0) 348 | return -EINVAL; 349 | 350 | speed = buf[0]; 351 | speed |= buf[1] << (1 * 8); 352 | speed |= buf[2] << (2 * 8); 353 | speed |= buf[3] << (3 * 8); 354 | printf("serprog: SPI clock frequency is set to %u Hz.\n", speed); 355 | return 0; 356 | } 357 | 358 | static int serprog_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) 359 | { 360 | size_t left_data = 0xffffff - 1 - op->addr.nbytes - op->dummy.nbytes; 361 | if (op->data.nbytes > left_data) 362 | op->data.nbytes = left_data; 363 | return 0; 364 | } 365 | 366 | static int serprog_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 367 | { 368 | size_t i; 369 | u32 wrlen, rdlen, tmp; 370 | u8 buf[10]; 371 | ssize_t rwdone, rwpending, rwsize; 372 | 373 | wrlen = 1 + op->addr.nbytes + op->dummy.nbytes; 374 | 375 | if (op->data.dir == SPI_MEM_DATA_OUT) 376 | wrlen += op->data.nbytes; 377 | if (op->data.dir == SPI_MEM_DATA_IN) 378 | rdlen = op->data.nbytes; 379 | else 380 | rdlen = 0; 381 | 382 | if (wrlen & 0xff000000) { 383 | fprintf(stderr, "serprog: too much data to send.\n"); 384 | return -E2BIG; 385 | } 386 | 387 | if (rdlen & 0xff000000) { 388 | fprintf(stderr, "serprog: too much data to receive.\n"); 389 | return -E2BIG; 390 | } 391 | 392 | buf[0] = S_CMD_O_SPIOP; 393 | buf[1] = wrlen & 0xff; 394 | buf[2] = (wrlen >> 8) & 0xff; 395 | buf[3] = (wrlen >> 16) & 0xff; 396 | buf[4] = rdlen & 0xff; 397 | buf[5] = (rdlen >> 8) & 0xff; 398 | buf[6] = (rdlen >> 16) & 0xff; 399 | 400 | if (serial_write(buf, 7) != 7) { 401 | perror("serprog: spimem_exec_op: write serprog cmd"); 402 | return errno; 403 | } 404 | 405 | buf[0] = op->cmd.opcode; 406 | if (serial_write(buf, 1) != 1) { 407 | perror("serprog: spimem_exec_op: write opcode"); 408 | return errno; 409 | } 410 | 411 | if (op->addr.nbytes > 4) 412 | return -EINVAL; 413 | if (op->addr.nbytes) { 414 | tmp = op->addr.val; 415 | for (i = op->addr.nbytes; i; i--) { 416 | buf[i - 1] = tmp & 
0xff; 417 | tmp >>= 8; 418 | } 419 | if (serial_write(buf, op->addr.nbytes) != op->addr.nbytes) { 420 | perror("serprog: spimem_exec_op: write addr"); 421 | return errno; 422 | } 423 | } 424 | 425 | if (op->dummy.nbytes) { 426 | buf[0] = 0; 427 | for (i = 0; i < op->dummy.nbytes; i++) { 428 | if (serial_write(buf, 1) != 1) { 429 | perror("serprog: spimem_exec_op: write dummy"); 430 | return errno; 431 | } 432 | } 433 | } 434 | 435 | if (op->data.dir == SPI_MEM_DATA_OUT && op->data.nbytes) { 436 | rwpending = op->data.nbytes; 437 | rwdone = 0; 438 | while (rwpending) { 439 | rwsize = serial_write(op->data.buf.out + rwdone, rwpending); 440 | if (rwsize < 0) { 441 | perror("serprog: spimem_exec_op: write data"); 442 | return errno; 443 | } 444 | rwpending -= rwsize; 445 | rwdone += rwsize; 446 | } 447 | } 448 | 449 | if (serprog_check_ack() < 0) 450 | return -EINVAL; 451 | if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) { 452 | rwpending = op->data.nbytes; 453 | rwdone = 0; 454 | while (rwpending) { 455 | rwsize = serial_read(op->data.buf.in + rwdone, rwpending); 456 | if (rwsize < 0) { 457 | perror("serprog: spimem_exec_op: read data"); 458 | return errno; 459 | } 460 | rwpending -= rwsize; 461 | rwdone += rwsize; 462 | } 463 | } 464 | return 0; 465 | } 466 | 467 | static const struct spi_controller_mem_ops _serprog_mem_ops = { 468 | .adjust_op_size = serprog_adjust_op_size, 469 | .exec_op = serprog_mem_exec_op, 470 | }; 471 | 472 | static struct spi_mem _serprog_mem = { 473 | .ops = &_serprog_mem_ops, 474 | .spi_mode = 0, 475 | .name = "serprog", 476 | .drvpriv = NULL, 477 | }; 478 | 479 | static int serprog_init(const char *devpath, u32 speed) 480 | { 481 | int ret; 482 | if (!devpath) 483 | devpath = "/dev/ttyACM0"; 484 | 485 | ret = serial_init(devpath); 486 | if (ret < 0) 487 | return ret; 488 | ret = serprog_sync(); 489 | if (ret < 0) 490 | goto ERR; 491 | ret = serprog_set_spi_speed(speed); 492 | if (ret < 0) 493 | goto ERR; 494 | return 0; 495 | ERR: 
496 | serial_cleanup(); 497 | return ret; 498 | } 499 | 500 | struct spi_mem *serprog_probe(const char *devpath) 501 | { 502 | return serprog_init(devpath, 24000000) ? NULL : &_serprog_mem; 503 | } 504 | 505 | void serprog_remove(struct spi_mem *mem) 506 | { 507 | serial_cleanup(); 508 | } 509 | -------------------------------------------------------------------------------- /spi-nand/gigadevice.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* 3 | * Author: 4 | * Chuanhong Guo 5 | */ 6 | 7 | #include 8 | 9 | #define SPINAND_MFR_GIGADEVICE 0xC8 10 | 11 | #define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4) 12 | #define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4) 13 | 14 | #define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS (1 << 4) 15 | #define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS (3 << 4) 16 | 17 | #define GD5FXGQXXEXXG_REG_STATUS2 0xf0 18 | 19 | #define GD5FXGQ4UXFXXG_STATUS_ECC_MASK (7 << 4) 20 | #define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS (0 << 4) 21 | #define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4) 22 | #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4) 23 | 24 | static SPINAND_OP_VARIANTS(read_cache_variants, 25 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), 26 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 27 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 28 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 29 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 30 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 31 | 32 | static SPINAND_OP_VARIANTS(read_cache_variants_f, 33 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), 34 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0), 35 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 36 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0), 37 | SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0), 38 | SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0)); 39 | 40 | 
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5, 41 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 42 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 43 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), 44 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 45 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 46 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 47 | 48 | static SPINAND_OP_VARIANTS(read_cache_variants_2gq5, 49 | SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0), 50 | SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 51 | SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0), 52 | SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), 53 | SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 54 | SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 55 | 56 | static SPINAND_OP_VARIANTS(write_cache_variants, 57 | SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 58 | SPINAND_PROG_LOAD(true, 0, NULL, 0)); 59 | 60 | static SPINAND_OP_VARIANTS(update_cache_variants, 61 | SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), 62 | SPINAND_PROG_LOAD(false, 0, NULL, 0)); 63 | 64 | static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand, 65 | u8 status) 66 | { 67 | switch (status & STATUS_ECC_MASK) { 68 | case STATUS_ECC_NO_BITFLIPS: 69 | return 0; 70 | 71 | case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS: 72 | /* 1-7 bits are flipped. return the maximum. 
*/ 73 | return 7; 74 | 75 | case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS: 76 | return 8; 77 | 78 | case STATUS_ECC_UNCOR_ERROR: 79 | return -EBADMSG; 80 | 81 | default: 82 | break; 83 | } 84 | 85 | return -EINVAL; 86 | } 87 | 88 | static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, 89 | u8 status) 90 | { 91 | u8 status2; 92 | struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, 93 | &status2); 94 | int ret; 95 | 96 | switch (status & STATUS_ECC_MASK) { 97 | case STATUS_ECC_NO_BITFLIPS: 98 | return 0; 99 | 100 | case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS: 101 | /* 102 | * Read status2 register to determine a more fine grained 103 | * bit error status 104 | */ 105 | ret = spi_mem_exec_op(spinand->spimem, &op); 106 | if (ret) 107 | return ret; 108 | 109 | /* 110 | * 4 ... 7 bits are flipped (1..4 can't be detected, so 111 | * report the maximum of 4 in this case 112 | */ 113 | /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */ 114 | return ((status & STATUS_ECC_MASK) >> 2) | 115 | ((status2 & STATUS_ECC_MASK) >> 4); 116 | 117 | case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS: 118 | return 8; 119 | 120 | case STATUS_ECC_UNCOR_ERROR: 121 | return -EBADMSG; 122 | 123 | default: 124 | break; 125 | } 126 | 127 | return -EINVAL; 128 | } 129 | 130 | static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand, 131 | u8 status) 132 | { 133 | u8 status2; 134 | struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, 135 | &status2); 136 | int ret; 137 | 138 | switch (status & STATUS_ECC_MASK) { 139 | case STATUS_ECC_NO_BITFLIPS: 140 | return 0; 141 | 142 | case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS: 143 | /* 144 | * Read status2 register to determine a more fine grained 145 | * bit error status 146 | */ 147 | ret = spi_mem_exec_op(spinand->spimem, &op); 148 | if (ret) 149 | return ret; 150 | 151 | /* 152 | * 1 ... 
4 bits are flipped (and corrected) 153 | */ 154 | /* bits sorted this way (1...0): ECCSE1, ECCSE0 */ 155 | return ((status2 & STATUS_ECC_MASK) >> 4) + 1; 156 | 157 | case STATUS_ECC_UNCOR_ERROR: 158 | return -EBADMSG; 159 | 160 | default: 161 | break; 162 | } 163 | 164 | return -EINVAL; 165 | } 166 | 167 | static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand, 168 | u8 status) 169 | { 170 | switch (status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) { 171 | case GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS: 172 | return 0; 173 | 174 | case GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS: 175 | return 3; 176 | 177 | case GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR: 178 | return -EBADMSG; 179 | 180 | default: /* (2 << 4) through (6 << 4) are 4-8 corrected errors */ 181 | return ((status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4) + 2; 182 | } 183 | 184 | return -EINVAL; 185 | } 186 | 187 | static const struct spinand_info gigadevice_spinand_table[] = { 188 | SPINAND_INFO("GD5F1GQ4xA", 189 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1), 190 | NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), 191 | NAND_ECCREQ(8, 512), 192 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 193 | &write_cache_variants, 194 | &update_cache_variants), 195 | SPINAND_HAS_QE_BIT, 196 | SPINAND_ECCINFO(gd5fxgq4xa_ecc_get_status)), 197 | SPINAND_INFO("GD5F2GQ4xA", 198 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2), 199 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 200 | NAND_ECCREQ(8, 512), 201 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 202 | &write_cache_variants, 203 | &update_cache_variants), 204 | SPINAND_HAS_QE_BIT, 205 | SPINAND_ECCINFO(gd5fxgq4xa_ecc_get_status)), 206 | SPINAND_INFO("GD5F4GQ4xA", 207 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4), 208 | NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1), 209 | NAND_ECCREQ(8, 512), 210 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 211 | &write_cache_variants, 212 | &update_cache_variants), 213 | SPINAND_HAS_QE_BIT, 214 | 
SPINAND_ECCINFO(gd5fxgq4xa_ecc_get_status)), 215 | SPINAND_INFO("GD5F4GQ4RC", 216 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xa4, 0x68), 217 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 218 | NAND_ECCREQ(8, 512), 219 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f, 220 | &write_cache_variants, 221 | &update_cache_variants), 222 | SPINAND_HAS_QE_BIT, 223 | SPINAND_ECCINFO(gd5fxgq4ufxxg_ecc_get_status)), 224 | SPINAND_INFO("GD5F4GQ4UC", 225 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb4, 0x68), 226 | NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), 227 | NAND_ECCREQ(8, 512), 228 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f, 229 | &write_cache_variants, 230 | &update_cache_variants), 231 | SPINAND_HAS_QE_BIT, 232 | SPINAND_ECCINFO(gd5fxgq4ufxxg_ecc_get_status)), 233 | SPINAND_INFO("GD5F1GQ4UExxG", 234 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1), 235 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 236 | NAND_ECCREQ(8, 512), 237 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 238 | &write_cache_variants, 239 | &update_cache_variants), 240 | SPINAND_HAS_QE_BIT, 241 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 242 | SPINAND_INFO("GD5F1GQ4RExxG", 243 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xc1), 244 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 245 | NAND_ECCREQ(8, 512), 246 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 247 | &write_cache_variants, 248 | &update_cache_variants), 249 | SPINAND_HAS_QE_BIT, 250 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 251 | SPINAND_INFO("GD5F2GQ4UExxG", 252 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd2), 253 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 254 | NAND_ECCREQ(8, 512), 255 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 256 | &write_cache_variants, 257 | &update_cache_variants), 258 | SPINAND_HAS_QE_BIT, 259 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 260 | SPINAND_INFO("GD5F2GQ4RExxG", 261 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xc2), 262 
| NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 263 | NAND_ECCREQ(8, 512), 264 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 265 | &write_cache_variants, 266 | &update_cache_variants), 267 | SPINAND_HAS_QE_BIT, 268 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 269 | SPINAND_INFO("GD5F1GQ4UFxxG", 270 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48), 271 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 272 | NAND_ECCREQ(8, 512), 273 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f, 274 | &write_cache_variants, 275 | &update_cache_variants), 276 | SPINAND_HAS_QE_BIT, 277 | SPINAND_ECCINFO(gd5fxgq4ufxxg_ecc_get_status)), 278 | SPINAND_INFO("GD5F1GQ5UExxG", 279 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x51), 280 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 281 | NAND_ECCREQ(4, 512), 282 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 283 | &write_cache_variants, 284 | &update_cache_variants), 285 | SPINAND_HAS_QE_BIT, 286 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 287 | SPINAND_INFO("GD5F1GQ5RExxG", 288 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x41), 289 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 290 | NAND_ECCREQ(4, 512), 291 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 292 | &write_cache_variants, 293 | &update_cache_variants), 294 | SPINAND_HAS_QE_BIT, 295 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 296 | SPINAND_INFO("GD5F2GQ5UExxG", 297 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x52), 298 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 299 | NAND_ECCREQ(4, 512), 300 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, 301 | &write_cache_variants, 302 | &update_cache_variants), 303 | SPINAND_HAS_QE_BIT, 304 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 305 | SPINAND_INFO("GD5F2GQ5RExxG", 306 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x42), 307 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 308 | NAND_ECCREQ(4, 512), 309 | 
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, 310 | &write_cache_variants, 311 | &update_cache_variants), 312 | SPINAND_HAS_QE_BIT, 313 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 314 | SPINAND_INFO("GD5F4GQ6UExxG", 315 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x55), 316 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 2, 1), 317 | NAND_ECCREQ(4, 512), 318 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, 319 | &write_cache_variants, 320 | &update_cache_variants), 321 | SPINAND_HAS_QE_BIT, 322 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 323 | SPINAND_INFO("GD5F4GQ6RExxG", 324 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x45), 325 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 2, 1), 326 | NAND_ECCREQ(4, 512), 327 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, 328 | &write_cache_variants, 329 | &update_cache_variants), 330 | SPINAND_HAS_QE_BIT, 331 | SPINAND_ECCINFO(gd5fxgq5xexxg_ecc_get_status)), 332 | SPINAND_INFO("GD5F1GM7UExxG", 333 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x91), 334 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 335 | NAND_ECCREQ(8, 512), 336 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 337 | &write_cache_variants, 338 | &update_cache_variants), 339 | SPINAND_HAS_QE_BIT, 340 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 341 | SPINAND_INFO("GD5F1GM7RExxG", 342 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81), 343 | NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), 344 | NAND_ECCREQ(8, 512), 345 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 346 | &write_cache_variants, 347 | &update_cache_variants), 348 | SPINAND_HAS_QE_BIT, 349 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 350 | SPINAND_INFO("GD5F2GM7UExxG", 351 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92), 352 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 353 | NAND_ECCREQ(8, 512), 354 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 355 | &write_cache_variants, 356 | &update_cache_variants), 
357 | SPINAND_HAS_QE_BIT, 358 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 359 | SPINAND_INFO("GD5F2GM7RExxG", 360 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x82), 361 | NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), 362 | NAND_ECCREQ(8, 512), 363 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 364 | &write_cache_variants, 365 | &update_cache_variants), 366 | SPINAND_HAS_QE_BIT, 367 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 368 | SPINAND_INFO("GD5F4GM8UExxG", 369 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x95), 370 | NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1), 371 | NAND_ECCREQ(8, 512), 372 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 373 | &write_cache_variants, 374 | &update_cache_variants), 375 | SPINAND_HAS_QE_BIT, 376 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 377 | SPINAND_INFO("GD5F4GM8RExxG", 378 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x85), 379 | NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1), 380 | NAND_ECCREQ(8, 512), 381 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, 382 | &write_cache_variants, 383 | &update_cache_variants), 384 | SPINAND_HAS_QE_BIT, 385 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 386 | SPINAND_INFO("GD5F2GQ5xExxH", 387 | SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22), 388 | NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), 389 | NAND_ECCREQ(4, 512), 390 | SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, 391 | &write_cache_variants, 392 | &update_cache_variants), 393 | SPINAND_HAS_QE_BIT, 394 | SPINAND_ECCINFO(gd5fxgq4uexxg_ecc_get_status)), 395 | }; 396 | 397 | static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = { 398 | }; 399 | 400 | const struct spinand_manufacturer gigadevice_spinand_manufacturer = { 401 | .id = SPINAND_MFR_GIGADEVICE, 402 | .name = "GigaDevice", 403 | .chips = gigadevice_spinand_table, 404 | .nchips = ARRAY_SIZE(gigadevice_spinand_table), 405 | .ops = &gigadevice_spinand_manuf_ops, 406 | }; 407 | 
-------------------------------------------------------------------------------- /include/spinand.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | /* 3 | * Copyright (c) 2016-2017 Micron Technology, Inc. 4 | * 5 | * Authors: 6 | * Peter Pan 7 | */ 8 | #ifndef __LINUX_MTD_SPINAND_H 9 | #define __LINUX_MTD_SPINAND_H 10 | 11 | #include 12 | #include 13 | #include 14 | 15 | /** 16 | * Standard SPI NAND flash operations 17 | */ 18 | 19 | #define SPINAND_RESET_OP \ 20 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \ 21 | SPI_MEM_OP_NO_ADDR, \ 22 | SPI_MEM_OP_NO_DUMMY, \ 23 | SPI_MEM_OP_NO_DATA) 24 | 25 | #define SPINAND_WR_EN_DIS_OP(enable) \ 26 | SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \ 27 | SPI_MEM_OP_NO_ADDR, \ 28 | SPI_MEM_OP_NO_DUMMY, \ 29 | SPI_MEM_OP_NO_DATA) 30 | 31 | #define SPINAND_READID_OP(naddr, ndummy, buf, len) \ 32 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \ 33 | SPI_MEM_OP_ADDR(naddr, 0, 1), \ 34 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 35 | SPI_MEM_OP_DATA_IN(len, buf, 1)) 36 | 37 | #define SPINAND_SET_FEATURE_OP(reg, valptr) \ 38 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \ 39 | SPI_MEM_OP_ADDR(1, reg, 1), \ 40 | SPI_MEM_OP_NO_DUMMY, \ 41 | SPI_MEM_OP_DATA_OUT(1, valptr, 1)) 42 | 43 | #define SPINAND_GET_FEATURE_OP(reg, valptr) \ 44 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \ 45 | SPI_MEM_OP_ADDR(1, reg, 1), \ 46 | SPI_MEM_OP_NO_DUMMY, \ 47 | SPI_MEM_OP_DATA_IN(1, valptr, 1)) 48 | 49 | #define SPINAND_BLK_ERASE_OP(addr) \ 50 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \ 51 | SPI_MEM_OP_ADDR(3, addr, 1), \ 52 | SPI_MEM_OP_NO_DUMMY, \ 53 | SPI_MEM_OP_NO_DATA) 54 | 55 | #define SPINAND_PAGE_READ_OP(addr) \ 56 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \ 57 | SPI_MEM_OP_ADDR(3, addr, 1), \ 58 | SPI_MEM_OP_NO_DUMMY, \ 59 | SPI_MEM_OP_NO_DATA) 60 | 61 | #define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \ 62 | SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 
0x0b : 0x03, 1), \ 63 | SPI_MEM_OP_ADDR(2, addr, 1), \ 64 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 65 | SPI_MEM_OP_DATA_IN(len, buf, 1)) 66 | 67 | #define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \ 68 | SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \ 69 | SPI_MEM_OP_ADDR(3, addr, 1), \ 70 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 71 | SPI_MEM_OP_DATA_IN(len, buf, 1)) 72 | 73 | #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \ 74 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ 75 | SPI_MEM_OP_ADDR(2, addr, 1), \ 76 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 77 | SPI_MEM_OP_DATA_IN(len, buf, 2)) 78 | 79 | #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \ 80 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ 81 | SPI_MEM_OP_ADDR(3, addr, 1), \ 82 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 83 | SPI_MEM_OP_DATA_IN(len, buf, 2)) 84 | 85 | #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \ 86 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ 87 | SPI_MEM_OP_ADDR(2, addr, 1), \ 88 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 89 | SPI_MEM_OP_DATA_IN(len, buf, 4)) 90 | 91 | #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \ 92 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ 93 | SPI_MEM_OP_ADDR(3, addr, 1), \ 94 | SPI_MEM_OP_DUMMY(ndummy, 1), \ 95 | SPI_MEM_OP_DATA_IN(len, buf, 4)) 96 | 97 | #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \ 98 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ 99 | SPI_MEM_OP_ADDR(2, addr, 2), \ 100 | SPI_MEM_OP_DUMMY(ndummy, 2), \ 101 | SPI_MEM_OP_DATA_IN(len, buf, 2)) 102 | 103 | #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \ 104 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ 105 | SPI_MEM_OP_ADDR(3, addr, 2), \ 106 | SPI_MEM_OP_DUMMY(ndummy, 2), \ 107 | SPI_MEM_OP_DATA_IN(len, buf, 2)) 108 | 109 | #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \ 110 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ 111 | SPI_MEM_OP_ADDR(2, addr, 4), \ 112 | SPI_MEM_OP_DUMMY(ndummy, 4), 
\ 113 | SPI_MEM_OP_DATA_IN(len, buf, 4)) 114 | 115 | #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \ 116 | SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ 117 | SPI_MEM_OP_ADDR(3, addr, 4), \ 118 | SPI_MEM_OP_DUMMY(ndummy, 4), \ 119 | SPI_MEM_OP_DATA_IN(len, buf, 4)) 120 | 121 | #define SPINAND_PROG_EXEC_OP(addr) \ 122 | SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \ 123 | SPI_MEM_OP_ADDR(3, addr, 1), \ 124 | SPI_MEM_OP_NO_DUMMY, \ 125 | SPI_MEM_OP_NO_DATA) 126 | 127 | #define SPINAND_PROG_LOAD(reset, addr, buf, len) \ 128 | SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \ 129 | SPI_MEM_OP_ADDR(2, addr, 1), \ 130 | SPI_MEM_OP_NO_DUMMY, \ 131 | SPI_MEM_OP_DATA_OUT(len, buf, 1)) 132 | 133 | #define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \ 134 | SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \ 135 | SPI_MEM_OP_ADDR(2, addr, 1), \ 136 | SPI_MEM_OP_NO_DUMMY, \ 137 | SPI_MEM_OP_DATA_OUT(len, buf, 4)) 138 | 139 | /** 140 | * Standard SPI NAND flash commands 141 | */ 142 | #define SPINAND_CMD_PROG_LOAD_X4 0x32 143 | #define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34 144 | 145 | /* feature register */ 146 | #define REG_BLOCK_LOCK 0xa0 147 | #define BL_ALL_UNLOCKED 0x00 148 | 149 | /* configuration register */ 150 | #define REG_CFG 0xb0 151 | #define CFG_OTP_ENABLE BIT(6) 152 | #define CFG_ECC_ENABLE BIT(4) 153 | #define CFG_QUAD_ENABLE BIT(0) 154 | 155 | /* status register */ 156 | #define REG_STATUS 0xc0 157 | #define STATUS_BUSY BIT(0) 158 | #define STATUS_ERASE_FAILED BIT(2) 159 | #define STATUS_PROG_FAILED BIT(3) 160 | #define STATUS_ECC_MASK GENMASK(5, 4) 161 | #define STATUS_ECC_NO_BITFLIPS (0 << 4) 162 | #define STATUS_ECC_HAS_BITFLIPS (1 << 4) 163 | #define STATUS_ECC_UNCOR_ERROR (2 << 4) 164 | 165 | struct spinand_op; 166 | struct spinand_device; 167 | 168 | #define SPINAND_MAX_ID_LEN 4 169 | 170 | /** 171 | * struct spinand_id - SPI NAND id structure 172 | * @data: buffer containing the id bytes. 
Currently 4 bytes large, but can 173 | * be extended if required 174 | * @len: ID length 175 | */ 176 | struct spinand_id { 177 | u8 data[SPINAND_MAX_ID_LEN]; 178 | int len; 179 | }; 180 | 181 | enum spinand_readid_method { 182 | SPINAND_READID_METHOD_OPCODE, 183 | SPINAND_READID_METHOD_OPCODE_ADDR, 184 | SPINAND_READID_METHOD_OPCODE_DUMMY, 185 | }; 186 | 187 | /** 188 | * struct spinand_devid - SPI NAND device id structure 189 | * @id: device id of current chip 190 | * @len: number of bytes in device id 191 | * @method: method to read chip id 192 | * There are 3 possible variants: 193 | * SPINAND_READID_METHOD_OPCODE: chip id is returned immediately 194 | * after read_id opcode. 195 | * SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after 196 | * read_id opcode + 1-byte address. 197 | * SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after 198 | * read_id opcode + 1 dummy byte. 199 | */ 200 | struct spinand_devid { 201 | const u8 *id; 202 | const u8 len; 203 | const enum spinand_readid_method method; 204 | }; 205 | 206 | /** 207 | * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations 208 | * @init: initialize a SPI NAND device 209 | * @cleanup: cleanup a SPI NAND device 210 | * 211 | * Each SPI NAND manufacturer driver should implement this interface so that 212 | * NAND chips coming from this vendor can be initialized properly. 
213 | */ 214 | struct spinand_manufacturer_ops { 215 | int (*init)(struct spinand_device *spinand); 216 | void (*cleanup)(struct spinand_device *spinand); 217 | }; 218 | 219 | /** 220 | * struct spinand_manufacturer - SPI NAND manufacturer instance 221 | * @id: manufacturer ID 222 | * @name: manufacturer name 223 | * @devid_len: number of bytes in device ID 224 | * @chips: supported SPI NANDs under current manufacturer 225 | * @nchips: number of SPI NANDs available in chips array 226 | * @ops: manufacturer operations 227 | */ 228 | struct spinand_manufacturer { 229 | u8 id; 230 | char *name; 231 | const struct spinand_info *chips; 232 | const size_t nchips; 233 | const struct spinand_manufacturer_ops *ops; 234 | }; 235 | 236 | /* SPI NAND manufacturers */ 237 | extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; 238 | extern const struct spinand_manufacturer macronix_spinand_manufacturer; 239 | extern const struct spinand_manufacturer micron_spinand_manufacturer; 240 | extern const struct spinand_manufacturer paragon_spinand_manufacturer; 241 | extern const struct spinand_manufacturer toshiba_spinand_manufacturer; 242 | extern const struct spinand_manufacturer winbond_spinand_manufacturer; 243 | extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer; 244 | extern const struct spinand_manufacturer xtx_spinand_manufacturer; 245 | extern const struct spinand_manufacturer biwin_spinand_manufacturer; 246 | extern const struct spinand_manufacturer dosilicon_spinand_manufacturer; 247 | extern const struct spinand_manufacturer etron_spinand_manufacturer; 248 | extern const struct spinand_manufacturer fmsh_spinand_manufacturer; 249 | extern const struct spinand_manufacturer foresee_spinand_manufacturer; 250 | extern const struct spinand_manufacturer gsto_spinand_manufacturer; 251 | extern const struct spinand_manufacturer hyf_spinand_manufacturer; 252 | extern const struct spinand_manufacturer jsc_spinand_manufacturer; 253 | extern const 
struct spinand_manufacturer silicongo_spinand_manufacturer; 254 | extern const struct spinand_manufacturer skyhigh_spinand_manufacturer; 255 | extern const struct spinand_manufacturer unim_spinand_manufacturer; 256 | 257 | /** 258 | * struct spinand_op_variants - SPI NAND operation variants 259 | * @ops: the list of variants for a given operation 260 | * @nops: the number of variants 261 | * 262 | * Some operations like read-from-cache/write-to-cache have several variants 263 | * depending on the number of IO lines you use to transfer data or address 264 | * cycles. This structure is a way to describe the different variants supported 265 | * by a chip and let the core pick the best one based on the SPI mem controller 266 | * capabilities. 267 | */ 268 | struct spinand_op_variants { 269 | const struct spi_mem_op *ops; 270 | unsigned int nops; 271 | }; 272 | 273 | #define SPINAND_OP_VARIANTS(name, ...) \ 274 | const struct spinand_op_variants name = { \ 275 | .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \ 276 | .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \ 277 | sizeof(struct spi_mem_op), \ 278 | } 279 | 280 | /** 281 | * spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND 282 | * chip 283 | * @get_status: get the ECC status. Should return a positive number encoding 284 | * the number of corrected bitflips if correction was possible or 285 | * -EBADMSG if there are uncorrectable errors. 
It can also return 286 | * other negative error codes if the error is not caused by 287 | * uncorrectable bitflips 288 | */ 289 | struct spinand_ecc_info { 290 | int (*get_status)(struct spinand_device *spinand, u8 status); 291 | }; 292 | 293 | #define SPINAND_HAS_QE_BIT BIT(0) 294 | #define SPINAND_HAS_CR_FEAT_BIT BIT(1) 295 | 296 | /** 297 | * struct spinand_info - Structure used to describe SPI NAND chips 298 | * @model: model name 299 | * @devid: device ID 300 | * @flags: OR-ing of the SPINAND_XXX flags 301 | * @memorg: memory organization 302 | * @eccreq: ECC requirements 303 | * @eccinfo: on-die ECC info 304 | * @op_variants: operations variants 305 | * @op_variants.read_cache: variants of the read-cache operation 306 | * @op_variants.write_cache: variants of the write-cache operation 307 | * @op_variants.update_cache: variants of the update-cache operation 308 | * @select_target: function used to select a target/die. Required only for 309 | * multi-die chips 310 | * 311 | * Each SPI NAND manufacturer driver should have a spinand_info table 312 | * describing all the chips supported by the driver. 313 | */ 314 | struct spinand_info { 315 | const char *model; 316 | struct spinand_devid devid; 317 | u32 flags; 318 | struct nand_memory_organization memorg; 319 | struct nand_ecc_props eccreq; 320 | struct spinand_ecc_info eccinfo; 321 | struct { 322 | const struct spinand_op_variants *read_cache; 323 | const struct spinand_op_variants *write_cache; 324 | const struct spinand_op_variants *update_cache; 325 | } op_variants; 326 | int (*select_target)(struct spinand_device *spinand, 327 | unsigned int target); 328 | }; 329 | 330 | #define SPINAND_ID(__method, ...) 
\ 331 | { \ 332 | .id = (const u8[]){ __VA_ARGS__ }, \ 333 | .len = sizeof((u8[]){ __VA_ARGS__ }), \ 334 | .method = __method, \ 335 | } 336 | 337 | #define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \ 338 | { \ 339 | .read_cache = __read, \ 340 | .write_cache = __write, \ 341 | .update_cache = __update, \ 342 | } 343 | 344 | #define SPINAND_ECCINFO(__get_status) \ 345 | .eccinfo = { \ 346 | .get_status = __get_status, \ 347 | } 348 | 349 | #define SPINAND_SELECT_TARGET(__func) \ 350 | .select_target = __func, 351 | 352 | #define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \ 353 | __flags, ...) \ 354 | { \ 355 | .model = __model, \ 356 | .devid = __id, \ 357 | .memorg = __memorg, \ 358 | .eccreq = __eccreq, \ 359 | .op_variants = __op_variants, \ 360 | .flags = __flags, \ 361 | __VA_ARGS__ \ 362 | } 363 | 364 | struct spinand_dirmap { 365 | struct spi_mem_dirmap_desc *wdesc; 366 | struct spi_mem_dirmap_desc *rdesc; 367 | }; 368 | 369 | /** 370 | * struct spinand_device - SPI NAND device instance 371 | * @base: NAND device instance 372 | * @spimem: pointer to the SPI mem object 373 | * @id: NAND ID as returned by READ_ID 374 | * @flags: NAND flags 375 | * @op_templates: various SPI mem op templates 376 | * @op_templates.read_cache: read cache op template 377 | * @op_templates.write_cache: write cache op template 378 | * @op_templates.update_cache: update cache op template 379 | * @select_target: select a specific target/die. Usually called before sending 380 | * a command addressing a page or an eraseblock embedded in 381 | * this die. Only required if your chip exposes several dies 382 | * @cur_target: currently selected target/die 383 | * @eccinfo: on-die ECC information 384 | * @cfg_cache: config register cache. One entry per die 385 | * @databuf: bounce buffer for data 386 | * @oobbuf: bounce buffer for OOB data 387 | * @scratchbuf: buffer used for everything but page accesses. 
This is needed 388 | * because the spi-mem interface explicitly requests that buffers 389 | * passed in spi_mem_op be DMA-able, so we can't based the bufs on 390 | * the stack 391 | * @manufacturer: SPI NAND manufacturer information 392 | * @priv: manufacturer private data 393 | */ 394 | struct spinand_device { 395 | struct nand_device base; 396 | struct spi_mem *spimem; 397 | struct spinand_id id; 398 | u32 flags; 399 | 400 | struct { 401 | const struct spi_mem_op *read_cache; 402 | const struct spi_mem_op *write_cache; 403 | const struct spi_mem_op *update_cache; 404 | } op_templates; 405 | 406 | struct spinand_dirmap *dirmaps; 407 | 408 | int (*select_target)(struct spinand_device *spinand, 409 | unsigned int target); 410 | unsigned int cur_target; 411 | 412 | struct spinand_ecc_info eccinfo; 413 | 414 | u8 *cfg_cache; 415 | u8 *databuf; 416 | u8 *oobbuf; 417 | u8 *scratchbuf; 418 | const struct spinand_manufacturer *manufacturer; 419 | void *priv; 420 | }; 421 | 422 | /** 423 | * nand_to_spinand() - Get the SPI NAND device embedding an NAND object 424 | * @nand: NAND object 425 | * 426 | * Return: the SPI NAND device embedding @nand. 427 | */ 428 | static inline struct spinand_device *nand_to_spinand(struct nand_device *nand) 429 | { 430 | return container_of(nand, struct spinand_device, base); 431 | } 432 | 433 | /** 434 | * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object 435 | * @spinand: SPI NAND device 436 | * 437 | * Return: the NAND device embedded in @spinand. 
438 | */ 439 | static inline struct nand_device * 440 | spinand_to_nand(struct spinand_device *spinand) 441 | { 442 | return &spinand->base; 443 | } 444 | 445 | int spinand_match_and_init(struct spinand_device *spinand, 446 | const struct spinand_info *table, 447 | unsigned int table_size, 448 | enum spinand_readid_method rdid_method); 449 | 450 | int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); 451 | int spinand_ecc_enable(struct spinand_device *spinand, bool enable); 452 | int spinand_select_target(struct spinand_device *spinand, unsigned int target); 453 | 454 | int spinand_read_page(struct spinand_device *spinand, 455 | const struct nand_page_io_req *req, 456 | bool ecc_enabled); 457 | int spinand_write_page(struct spinand_device *spinand, 458 | const struct nand_page_io_req *req, bool ecc_enabled); 459 | int spinand_erase(struct spinand_device *spinand, const struct nand_pos *pos); 460 | 461 | struct spinand_device *spinand_probe(struct spi_mem *mem); 462 | void spinand_remove(struct spinand_device *spinand); 463 | #endif /* __LINUX_MTD_SPINAND_H */ 464 | -------------------------------------------------------------------------------- /include/nand.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | /* 3 | * Copyright 2017 - Free Electrons 4 | * 5 | * Authors: 6 | * Boris Brezillon 7 | * Peter Pan 8 | */ 9 | 10 | #ifndef __LINUX_MTD_NAND_H 11 | #define __LINUX_MTD_NAND_H 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | #ifdef _WIN32 21 | #define __always_inline inline __attribute__((__always_inline__)) 22 | #endif 23 | 24 | #define do_div(n,base) ({ \ 25 | uint32_t __base = (base); \ 26 | uint32_t __rem; \ 27 | __rem = ((uint64_t)(n)) % __base; \ 28 | (n) = ((uint64_t)(n)) / __base; \ 29 | __rem; \ 30 | }) 31 | 32 | struct nand_device; 33 | 34 | /** 35 | * struct nand_memory_organization - Memory organization 
structure 36 | * @bits_per_cell: number of bits per NAND cell 37 | * @pagesize: page size 38 | * @oobsize: OOB area size 39 | * @pages_per_eraseblock: number of pages per eraseblock 40 | * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number) 41 | * @max_bad_eraseblocks_per_lun: maximum number of eraseblocks per LUN 42 | * @planes_per_lun: number of planes per LUN 43 | * @luns_per_target: number of LUN per target (target is a synonym for die) 44 | * @ntargets: total number of targets exposed by the NAND device 45 | */ 46 | struct nand_memory_organization { 47 | unsigned int bits_per_cell; 48 | unsigned int pagesize; 49 | unsigned int oobsize; 50 | unsigned int pages_per_eraseblock; 51 | unsigned int eraseblocks_per_lun; 52 | unsigned int max_bad_eraseblocks_per_lun; 53 | unsigned int planes_per_lun; 54 | unsigned int luns_per_target; 55 | unsigned int ntargets; 56 | }; 57 | 58 | #define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt) \ 59 | { \ 60 | .bits_per_cell = (bpc), \ 61 | .pagesize = (ps), \ 62 | .oobsize = (os), \ 63 | .pages_per_eraseblock = (ppe), \ 64 | .eraseblocks_per_lun = (epl), \ 65 | .max_bad_eraseblocks_per_lun = (mbb), \ 66 | .planes_per_lun = (ppl), \ 67 | .luns_per_target = (lpt), \ 68 | .ntargets = (nt), \ 69 | } 70 | 71 | /** 72 | * struct nand_row_converter - Information needed to convert an absolute offset 73 | * into a row address 74 | * @lun_addr_shift: position of the LUN identifier in the row address 75 | * @eraseblock_addr_shift: position of the eraseblock identifier in the row 76 | * address 77 | */ 78 | struct nand_row_converter { 79 | unsigned int lun_addr_shift; 80 | unsigned int eraseblock_addr_shift; 81 | }; 82 | 83 | /** 84 | * struct nand_pos - NAND position object 85 | * @target: the NAND target/die 86 | * @lun: the LUN identifier 87 | * @plane: the plane within the LUN 88 | * @eraseblock: the eraseblock within the LUN 89 | * @page: the page within the LUN 90 | * 91 | * These information are usually 
/**
 * struct nand_pos - NAND position object
 * @target: the NAND target/die
 * @lun: the LUN identifier
 * @plane: the plane within the LUN
 * @eraseblock: the eraseblock within the LUN
 * @page: the page within the LUN
 *
 * These information are usually used by specific sub-layers to select the
 * appropriate target/die and generate a row address to pass to the device.
 */
struct nand_pos {
	unsigned int target;
	unsigned int lun;
	unsigned int plane;
	unsigned int eraseblock;
	unsigned int page;
};

/**
 * struct nand_page_io_req - NAND I/O request object
 * @pos: the position this I/O request is targeting
 * @dataoffs: the offset within the page
 * @datalen: number of data bytes to read from/write to this page
 * @databuf: buffer to store data in or get data from
 * @ooboffs: the OOB offset within the page
 * @ooblen: the number of OOB bytes to read from/write to this page
 * @oobbuf: buffer to store OOB data in or get OOB data from
 *
 * This object is used to pass per-page I/O requests to NAND sub-layers. This
 * way all useful information are already formatted in a useful way and
 * specific NAND layers can focus on translating these information into
 * specific commands/operations.
 */
struct nand_page_io_req {
	struct nand_pos pos;
	unsigned int dataoffs;
	unsigned int datalen;
	/* .out is used for writes, .in for reads; the const qualifier
	 * documents the direction of the transfer. */
	union {
		const void *out;
		void *in;
	} databuf;
	unsigned int ooboffs;
	unsigned int ooblen;
	union {
		const void *out;
		void *in;
	} oobbuf;
};

/**
 * struct nand_ecc_props - NAND ECC properties
 * @strength: ECC strength
 * @step_size: Number of bytes per step
 */
struct nand_ecc_props {
	unsigned int strength;
	unsigned int step_size;
};

#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
/**
 * struct nand_device - NAND device
 * @memorg: memory layout
 * @eccreq: ECC requirements
 * @rowconv: position to row address converter
 *
 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
 * should declare their own NAND object embedding a nand_device struct (that's
 * how inheritance is done).
 * nand_device->memorg and nand_device->eccreq should be filled at device
 * detection time to reflect the NAND device.
 */
struct nand_device {
	struct nand_memory_organization memorg;
	struct nand_ecc_props eccreq;
	struct nand_row_converter rowconv;
};

/**
 * struct nand_io_iter - NAND I/O iterator
 * @req: current I/O request
 * @oobbytes_per_page: maximum number of OOB bytes per page
 * @dataleft: remaining number of data bytes to read/write
 * @oobleft: remaining number of OOB bytes to read/write
 *
 * Can be used by specialized NAND layers to iterate over all pages covered
 * by an MTD I/O request, which should greatly simplify the boiler-plate
 * code needed to read/write data from/to a NAND device.
 */
struct nand_io_iter {
	struct nand_page_io_req req;
	unsigned int oobbytes_per_page;
	unsigned int dataleft;
	unsigned int oobleft;
};

/**
 * nanddev_bits_per_cell() - Get the number of bits per cell
 * @nand: NAND device
 *
 * Return: the number of bits per cell.
 */
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
	return nand->memorg.bits_per_cell;
}

/**
 * nanddev_page_size() - Get NAND page size
 * @nand: NAND device
 *
 * Return: the page size.
 */
static inline size_t nanddev_page_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize;
}
/**
 * nanddev_per_page_oobsize() - Get NAND OOB size
 * @nand: NAND device
 *
 * Return: the OOB size.
 */
static inline unsigned int
nanddev_per_page_oobsize(const struct nand_device *nand)
{
	return nand->memorg.oobsize;
}

/**
 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
 * @nand: NAND device
 *
 * Return: the number of pages per eraseblock.
 */
static inline unsigned int
nanddev_pages_per_eraseblock(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock;
}

/**
 * nanddev_pages_per_target() - Get the number of pages per target
 * @nand: NAND device
 *
 * Return: the number of pages per target.
 */
static inline unsigned int
nanddev_pages_per_target(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.luns_per_target;
}

/**
 * nanddev_eraseblock_size() - Get NAND erase block size
 * @nand: NAND device
 *
 * Return: the eraseblock size.
 */
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}

/**
 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per LUN.
 */
static inline unsigned int
nanddev_eraseblocks_per_lun(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun;
}
/**
 * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per target.
 */
static inline unsigned int
nanddev_eraseblocks_per_target(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
}

/**
 * nanddev_target_size() - Get the total size provided by a single target/die
 * @nand: NAND device
 *
 * Return: the total size exposed by a single target/die in bytes.
 */
static inline u64 nanddev_target_size(const struct nand_device *nand)
{
	return (u64)nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.pages_per_eraseblock *
	       nand->memorg.pagesize;
}

/**
 * nanddev_ntargets() - Get the total number of targets
 * @nand: NAND device
 *
 * Return: the number of targets/dies exposed by @nand.
 */
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
	return nand->memorg.ntargets;
}

/**
 * nanddev_neraseblocks() - Get the total number of eraseblocks
 * @nand: NAND device
 *
 * Return: the total number of eraseblocks exposed by @nand.
 */
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
	return nand->memorg.ntargets * nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun;
}

/**
 * nanddev_size() - Get NAND size
 * @nand: NAND device
 *
 * Return: the total size (in bytes) exposed by @nand.
 */
static inline u64 nanddev_size(const struct nand_device *nand)
{
	return nanddev_target_size(nand) * nanddev_ntargets(nand);
}
/**
 * nanddev_get_memorg() - Extract memory organization info from a NAND device
 * @nand: NAND device
 *
 * This can be used by the upper layer to fill the memorg info before calling
 * nanddev_init().
 *
 * Return: the memorg object embedded in the NAND device.
 */
static inline struct nand_memory_organization *
nanddev_get_memorg(struct nand_device *nand)
{
	return &nand->memorg;
}

/**
 * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
 * @nand: NAND device
 * @offs: absolute NAND offset (usually passed by the MTD layer)
 * @pos: a NAND position object to fill in
 *
 * Converts @offs into a nand_pos representation.
 *
 * Return: the offset within the NAND page pointed by @pos.
 */
static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
					       loff_t offs,
					       struct nand_pos *pos)
{
	unsigned int pageoffs;
	u64 tmp = offs;

	/* do_div() divides tmp in place and yields the remainder (Linux
	 * semantics — see the project's linux compat headers), so each
	 * call peels one level off the page/eraseblock/LUN hierarchy;
	 * whatever is left in tmp at the end is the target index. */
	pageoffs = do_div(tmp, nand->memorg.pagesize);
	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
	pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
	/* Plane index is derived from the eraseblock index modulo the
	 * number of planes per LUN. */
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
	pos->lun = do_div(tmp, nand->memorg.luns_per_target);
	pos->target = tmp;

	return pageoffs;
}

/**
 * nanddev_pos_cmp() - Compare two NAND positions
 * @a: First NAND position
 * @b: Second NAND position
 *
 * Compares two NAND positions, most significant coordinate (target) first.
 *
 * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
 */
static inline int nanddev_pos_cmp(const struct nand_pos *a,
				  const struct nand_pos *b)
{
	if (a->target != b->target)
		return a->target < b->target ? -1 : 1;

	if (a->lun != b->lun)
		return a->lun < b->lun ? -1 : 1;

	if (a->eraseblock != b->eraseblock)
		return a->eraseblock < b->eraseblock ? -1 : 1;

	if (a->page != b->page)
		return a->page < b->page ? -1 : 1;

	return 0;
}

/**
 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
 * @nand: NAND device
 * @pos: the NAND position to convert
 *
 * Converts @pos NAND position into an absolute offset.
 *
 * Return: the absolute offset. Note that @pos points to the beginning of a
 *	   page, if one wants to point to a specific offset within this page
 *	   the returned offset has to be adjusted manually.
 */
static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
					 const struct nand_pos *pos)
{
	unsigned int npages;

	/* Flatten (target, lun, eraseblock, page) back into a linear page
	 * count — the inverse of nanddev_offs_to_pos(). */
	npages = pos->page +
		 ((pos->eraseblock +
		   (pos->lun +
		    (pos->target * nand->memorg.luns_per_target)) *
		   nand->memorg.eraseblocks_per_lun) *
		  nand->memorg.pages_per_eraseblock);

	return (loff_t)npages * nand->memorg.pagesize;
}

/**
 * nanddev_pos_to_row() - Extract a row address from a NAND position
 * @nand: NAND device
 * @pos: the position to convert
 *
 * Converts a NAND position into a row address that can then be passed to the
 * device. The field widths come from nand->rowconv, which is precomputed by
 * nanddev_init().
 *
 * Return: the row address extracted from @pos.
 */
static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
					      const struct nand_pos *pos)
{
	return (pos->lun << nand->rowconv.lun_addr_shift) |
	       (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
	       pos->page;
}
445 | */ 446 | static inline void nanddev_pos_next_target(struct nand_device *nand, 447 | struct nand_pos *pos) 448 | { 449 | pos->page = 0; 450 | pos->plane = 0; 451 | pos->eraseblock = 0; 452 | pos->lun = 0; 453 | pos->target++; 454 | } 455 | 456 | /** 457 | * nanddev_pos_next_lun() - Move a position to the next LUN 458 | * @nand: NAND device 459 | * @pos: the position to update 460 | * 461 | * Updates @pos to point to the start of the next LUN. Useful when you want to 462 | * iterate over all LUNs of a NAND device. 463 | */ 464 | static inline void nanddev_pos_next_lun(struct nand_device *nand, 465 | struct nand_pos *pos) 466 | { 467 | if (pos->lun >= nand->memorg.luns_per_target - 1) 468 | return nanddev_pos_next_target(nand, pos); 469 | 470 | pos->lun++; 471 | pos->page = 0; 472 | pos->plane = 0; 473 | pos->eraseblock = 0; 474 | } 475 | 476 | /** 477 | * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock 478 | * @nand: NAND device 479 | * @pos: the position to update 480 | * 481 | * Updates @pos to point to the start of the next eraseblock. Useful when you 482 | * want to iterate over all eraseblocks of a NAND device. 483 | */ 484 | static inline void nanddev_pos_next_eraseblock(struct nand_device *nand, 485 | struct nand_pos *pos) 486 | { 487 | if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1) 488 | return nanddev_pos_next_lun(nand, pos); 489 | 490 | pos->eraseblock++; 491 | pos->page = 0; 492 | pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; 493 | } 494 | 495 | /** 496 | * nanddev_pos_next_page() - Move a position to the next page 497 | * @nand: NAND device 498 | * @pos: the position to update 499 | * 500 | * Updates @pos to point to the start of the next page. Useful when you want to 501 | * iterate over all pages of a NAND device. 
502 | */ 503 | static inline void nanddev_pos_next_page(struct nand_device *nand, 504 | struct nand_pos *pos) 505 | { 506 | if (pos->page >= nand->memorg.pages_per_eraseblock - 1) 507 | return nanddev_pos_next_eraseblock(nand, pos); 508 | 509 | pos->page++; 510 | } 511 | 512 | static __always_inline int fls(int x) 513 | { 514 | int r = 32; 515 | 516 | if (!x) 517 | return 0; 518 | if (!(x & 0xffff0000u)) { 519 | x <<= 16; 520 | r -= 16; 521 | } 522 | if (!(x & 0xff000000u)) { 523 | x <<= 8; 524 | r -= 8; 525 | } 526 | if (!(x & 0xf0000000u)) { 527 | x <<= 4; 528 | r -= 4; 529 | } 530 | if (!(x & 0xc0000000u)) { 531 | x <<= 2; 532 | r -= 2; 533 | } 534 | if (!(x & 0x80000000u)) { 535 | x <<= 1; 536 | r -= 1; 537 | } 538 | return r; 539 | } 540 | 541 | static inline int nanddev_init(struct nand_device *nand) 542 | { 543 | struct nand_memory_organization *memorg = nanddev_get_memorg(nand); 544 | 545 | if (!nand) 546 | return -EINVAL; 547 | 548 | if (!memorg->bits_per_cell || !memorg->pagesize || 549 | !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun || 550 | !memorg->planes_per_lun || !memorg->luns_per_target || 551 | !memorg->ntargets) 552 | return -EINVAL; 553 | 554 | nand->rowconv.eraseblock_addr_shift = 555 | fls(memorg->pages_per_eraseblock - 1); 556 | nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) + 557 | nand->rowconv.eraseblock_addr_shift; 558 | return 0; 559 | } 560 | #endif /* __LINUX_MTD_NAND_H */ 561 | --------------------------------------------------------------------------------